/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");
static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}
/*
 * Available space in Tx Vring
 */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;
	int used = (vring->size + swhead - swtail) % vring->size;

	return vring->size - used - 1;
}
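/*
 * Worked example for the accounting above (editorial illustration):
 * with size = 8, swhead = 2, swtail = 6,
 * used = (8 + 2 - 6) % 8 = 4 and avail = 8 - 4 - 1 = 3.
 * One slot is deliberately kept unused so that a full ring
 * (wil_vring_next_tail() == swhead) stays distinguishable from an
 * empty one (swhead == swtail).
 */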
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx)
		return -ENOMEM;
	/*
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *d = &(vring->va[i].tx);
		d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
		     vring->va, (unsigned long long)vring->pa, vring->ctx);

	return 0;
}
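/*
 * Editorial note on DMA addresses, based on the descriptor layout used
 * below: the hardware descriptor stores a DMA address split as a 32-bit
 * addr_low plus a 16-bit addr_high, so the unmap paths reassemble it as
 *   pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
 * giving a 48-bit address space.
 */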
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	while (!wil_vring_is_empty(vring)) {
		u16 dmalen;

		if (tx) {
			volatile struct vring_tx_desc *d =
					&vring->va[vring->swtail].tx;
			dma_addr_t pa = d->dma.addr_low |
					((u64)d->dma.addr_high << 32);
			struct sk_buff *skb = vring->ctx[vring->swtail];

			dmalen = le16_to_cpu(d->dma.length);
			if (skb) {
				dma_unmap_single(dev, pa, dmalen,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(skb);
				vring->ctx[vring->swtail] = NULL;
			} else {
				dma_unmap_page(dev, pa, dmalen,
					       DMA_TO_DEVICE);
			}
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			volatile struct vring_rx_desc *d =
					&vring->va[vring->swhead].rx;
			dma_addr_t pa = d->dma.addr_low |
					((u64)d->dma.addr_high << 32);
			struct sk_buff *skb = vring->ctx[vring->swhead];

			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
/*
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = RX_BUF_LEN;
	volatile struct vring_rx_desc *d = &(vring->va[i].rx);
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	d->dma.addr_low = lower_32_bits(pa);
	d->dma.addr_high = (u16)upper_32_bits(pa);
	/* ip_length don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	vring->ctx[i] = skb;

	return 0;
}
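/*
 * Editorial note: skb_put(skb, sz) above stretches the skb to the full
 * buffer length before dma_map_single(), so skb->len covers everything
 * the hardware may write; the actual frame is trimmed down to the
 * DMA-reported length later, in wil_vring_reap_rx().
 */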
/*
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - PHY info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/*
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}
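/*
 * Resulting capture layout (editorial sketch, derived from the structs
 * above; the 32-byte Rx descriptor size is the one checked by
 * BUILD_BUG_ON in wil_vring_alloc):
 *
 *   +-----------------------------+
 *   | ieee80211_radiotap_header   |
 *   | flags / chnl_freq+flags /   |
 *   | mcs_present/flags/index     |
 *   +-----------------------------+ <- end of wil6210_rtap
 *   | vendor_oui 04:ce:14, ns = 1 | \
 *   | vendor_skip                 |  } only if rtap_include_phy_info
 *   | Rx descriptor (32 bytes)    |  }
 *   | PHY info (phy_length bytes) | /
 *   +-----------------------------+
 *   | original frame              |
 *   +-----------------------------+
 */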
/*
 * Fast swap in place between 2 registers
 */
static void wil_swap_u16(u16 *a, u16 *b)
{
	*a ^= *b;
	*b ^= *a;
	*a ^= *b;
}

static void wil_swap_ethaddr(void *data)
{
	struct ethhdr *eth = data;
	u16 *s = (u16 *)eth->h_source;
	u16 *d = (u16 *)eth->h_dest;

	wil_swap_u16(s++, d++);
	wil_swap_u16(s++, d++);
	wil_swap_u16(s, d);
}
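/*
 * Editorial note: the XOR swap above would zero both operands if a and b
 * aliased the same storage. That cannot happen here: h_source and h_dest
 * are distinct fields of struct ethhdr, and each 6-byte MAC address is
 * swapped as three u16 words, hence the three calls.
 */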
/*
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *d;
	struct vring_rx_desc *d1;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int sz = RX_BUF_LEN;
	u16 dmalen;
	u8 ftype;
	u8 ds_bits;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

	if (wil_vring_is_empty(vring))
		return NULL;

	d = &(vring->va[vring->swhead].rx);
	if (!(d->dma.status & RX_DMA_STATUS_DU)) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
	skb = vring->ctx[vring->swhead];
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);

	d1 = wil_skb_rxdesc(skb);
	*d1 = *d; /* keep a copy of the descriptor in skb->cb */

	dmalen = le16_to_cpu(d1->dma.length);
	skb_trim(skb, dmalen);

	wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1);

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	wil_vring_advance_head(vring, 1);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;

	/*
	 * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d1) << 2;
	if (ftype != IEEE80211_FTYPE_DATA) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (skb->len < ETH_HLEN) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	ds_bits = wil_rxdesc_ds_bits(d1);
	if (ds_bits == 1) {
		/*
		 * HW bug - in ToDS mode, i.e. Rx on AP side,
		 * addresses get swapped
		 */
		wil_swap_ethaddr(skb->data);
	}

	return skb;
}
/*
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (rc) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

	return rc;
}
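/*
 * Editorial note on the iowrite32() above: the software tail index is
 * published to the hardware tail register once, after the loop, so the
 * device learns about all newly posted buffers in a single register
 * write rather than one write per buffer.
 */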
/*
 * Pass Rx packet to the netif. Update statistics.
 */
static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;
	unsigned int len = skb->len; /* save: netif_rx consumes the skb */

	rc = netif_rx_ni(skb);

	if (likely(rc == NET_RX_SUCCESS)) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	} else {
		ndev->stats.rx_dropped++;
	}
}
/*
 * Proceed all completed skb's from Rx VRING
 *
 * Safe to call from IRQ
 */
void wil_rx_handle(struct wil6210_priv *wil)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (!v->va) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
		} else {
			skb->protocol = eth_type_trans(skb, ndev);
		}

		wil_netif_rx_any(skb, ndev);
	}
	wil_rx_refill(wil, v->size);
}
int wil_rx_init(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	vring->size = WIL6210_RX_RING_SIZE;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;
	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);
	return rc;
}
void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:
	return rc;
}
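/*
 * Example of the cidxtid packing used above (editorial illustration):
 * for cid = 3, tid = 5 the field becomes
 * (3 & 0xf) | ((5 & 0xf) << 4) = 0x53, i.e. connection id in the low
 * nibble and traffic id in the high nibble.
 */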
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];

	if (!vring->va)
		return;

	wil_vring_free(wil, vring, 1);
}
static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct vring *v = &wil->vring_tx[0];

	if (v->va)
		return v;

	return NULL;
}
static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
			   dma_addr_t pa, u32 len)
{
	d->dma.addr_low = lower_32_bits(pa);
	d->dma.addr_high = (u16)upper_32_bits(pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7: ip_version 0-IP6 1-IP4 */
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = 0;
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* use dst index 0 */
	d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
		       (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
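/*
 * Editorial note: the value 1 written to the L2 translation type field
 * above selects 802.3 translation (per the in-line comment), which is
 * consistent with WMI_VRING_ENC_TYPE_802_3 used when the ring is
 * configured in wil_vring_init_tx().
 */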
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	volatile struct vring_tx_desc *d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f;
	int vring_index = vring - wil->vring_tx;
	uint i = swhead;
	dma_addr_t pa;

	wil_dbg_txrx(wil, "%s()\n", __func__);

	if (avail < vring->size/8)
		netif_tx_stop_all_queues(wil_to_ndev(wil));
	if (avail < 1 + nr_frags) {
		wil_err(wil, "Tx ring full. No space for %d fragments\n",
			1 + nr_frags);
		return -ENOMEM;
	}
	d = &(vring->va[i].tx);

	/* FIXME FW can accept only unicast frames for the peer */
	memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);

	pa = dma_map_single(dev, skb->data,
			    skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
		     skb->data, (unsigned long long)pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, skb_headlen(skb));
	d->mac.d[2] |= ((nr_frags + 1) <<
			MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
	/* middle segments */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		i = (swhead + f + 1) % vring->size;
		d = &(vring->va[i].tx);
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		wil_tx_desc_map(d, pa, len);
		vring->ctx[i] = NULL;
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);

	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i] = skb_get(skb);

	return 0;
 dma_error:
	/* unmap what we have mapped */
	/* Note: increment @f to operate with positive index */
	for (f++; f > 0; f--) {
		u16 dmalen;

		i = (swhead + f) % vring->size;
		d = &(vring->va[i].tx);
		d->dma.status = TX_DMA_STATUS_DU;
		pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
		dmalen = le16_to_cpu(d->dma.length);
		if (vring->ctx[i])
			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
	}

	return -EINVAL;
}
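/*
 * Editorial note on the accounting above: a frame occupies 1 + nr_frags
 * descriptors, and the skb reference is stored in the ctx[] slot of the
 * last descriptor written (taken with skb_get() so the skb cannot be
 * freed before an immediate "tx done" is accounted); the fragment slots
 * are explicitly NULLed. wil_tx_complete() later keys off ctx[] being
 * non-NULL to free the skb and update statistics.
 */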
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct vring *vring;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (!test_bit(wil_status_fwready, &wil->status)) {
		wil_err(wil, "FW not ready\n");
		goto drop;
	}
	if (!test_bit(wil_status_fwconnected, &wil->status)) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
		rc = wmi_tx_eapol(wil, skb);
	} else {
		/* find vring */
		vring = wil_find_tx_vring(wil, skb);
		if (!vring) {
			wil_err(wil, "No Tx VRING available\n");
			goto drop;
		}
		/* set up vring entry */
		rc = wil_tx_vring(wil, vring, skb);
	}
	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	netif_tx_stop_all_queues(ndev);
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
/*
 * Clean up transmitted skb's from the Tx VRING
 *
 * Safe to call from IRQ
 */
void wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];

	if (!vring->va) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	while (!wil_vring_is_empty(vring)) {
		volatile struct vring_tx_desc *d1 =
				&vring->va[vring->swtail].tx;
		struct vring_tx_desc dd, *d = &dd;
		dma_addr_t pa;
		struct sk_buff *skb;
		u16 dmalen;

		dd = *d1; /* work on a local copy of the volatile descriptor */

		if (!(d->dma.status & TX_DMA_STATUS_DU))
			break;

		dmalen = le16_to_cpu(d->dma.length);
		wil_dbg_txrx(wil,
			     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
			     vring->swtail, dmalen, d->dma.status,
			     d->dma.error);
		wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);

		pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
		skb = vring->ctx[vring->swtail];
		if (skb) {
			if (d->dma.error == 0) {
				ndev->stats.tx_packets++;
				ndev->stats.tx_bytes += skb->len;
			} else {
				ndev->stats.tx_errors++;
			}

			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			vring->ctx[vring->swtail] = NULL;
		} else {
			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		}
		/* clear the real (volatile) descriptor for reuse */
		d1->dma.addr_low = 0;
		d1->dma.addr_high = 0;
		d1->dma.length = 0;
		d1->dma.status = TX_DMA_STATUS_DU;
		vring->swtail = wil_vring_next_tail(vring);
	}
	if (wil_vring_avail_tx(vring) > vring->size/4)
		netif_tx_wake_all_queues(wil_to_ndev(wil));
}