/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

#include <asm/addrspace.h>

#ifdef BRCM_FULLMAC
#error "hnddma.c shouldn't be needed for FULLMAC"
#endif
/* debug/trace */
#ifdef BCMDBG
#define	DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printk args; \
	} while (0)
#define	DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printk args; \
	} while (0)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* BCMDBG */

#define	DMA_NONE(args)
#define	d64txregs	dregs.d64_u.txregs_64
#define	d64rxregs	dregs.d64_u.rxregs_64
#define	txd64		dregs.d64_u.txd_64
#define	rxd64		dregs.d64_u.rxd_64

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)

#define R_SM(r)		(*(r))
#define W_SM(r, v)	(*(r) = (v))
/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	struct osl_info *osh;	/* os handle */
	void *pbus;		/* bus handle */

	bool dma64;	/* this dma engine is operating in 64-bit mode */
	bool addrext;	/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;
	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */
	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper
				 * stack, e.g. some rx pkt buffers will be
				 * bridged to tx side without byte copying.
				 * The extra headroom needs to be large enough
				 * to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/*   high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/*   high 32 bits */
	bool aligndesc_4k;	/* descriptor base needs to be aligned or not */
} dma_info_t;
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t) TXD((t) - (h))
#define	NRXDACTIVE(h, t) RXD((t) - (h))
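/*
 * Worked example (editor's illustration, not in the original source): with
 * di->ntxd == 64, indexes wrap modulo 64, so for txin == 62 and txout == 3
 * the ring holds NTXDACTIVE(62, 3) = TXD(3 - 62) = (-59 & 63) = 5 active
 * descriptors. The mask trick is only valid because dma_attach() ASSERTs
 * that ntxd and nrxd are powers of 2.
 */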
/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))
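/*
 * Example (editor's illustration): a dma64dd_t descriptor is 16 bytes (four
 * u32 fields), so I2B(3, dma64dd_t) == 48 and B2I(48, dma64dd_t) == 3. The
 * chip's ptr/status registers traffic in byte offsets; the driver converts
 * them to ring indexes with these macros.
 */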
#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
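/*
 * Example (editor's illustration): for a 32-bit physical address
 * pa == 0xc1234567, the top two bits are moved into the AE field,
 * ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT == 0x3, and the
 * descriptor/register is programmed with pa & ~PCI32ADDR_HIGH == 0x01234567;
 * see dma64_dd_upd() and _dma_ddtable_init() below.
 */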
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
			      u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
			      void *dmaregstx, void *dmaregsrx, uint ntxd,
			      uint nrxd, uint rxbufsize, int rxextheadroom,
			      uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printk(KERN_ERR "dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;
	/* old chips w/o sb are no longer supported */
	ASSERT(sih != NULL);

	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	ASSERT(ntxd <= D64MAXDD);
	ASSERT(nrxd <= D64MAXDD);
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	/* Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): For backwards compatibility
	 * both Rx Overflow Continue and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64", osh,
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';
	di->osh = osh;
	di->pbus = ((struct si_info *)sih)->pbus;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;
	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);
	/* do the descriptors need to be aligned, and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));
	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n",
				   di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n",
				   di->name));
			goto fail;
		}
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned
	 */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned
	 */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
				   "supported\n", di->name,
				   (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
				   "supported\n", di->name,
				   (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));

	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (struct hnddma_pub *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
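/*
 * Example (editor's illustration): parity32() xor-folds a word down to one
 * bit: 0 for an even number of set bits, 1 for odd (e.g. parity32(0x3) == 0,
 * parity32(0x7) == 1). DMA64_DD_PARITY() first xors the four descriptor
 * words together, so setting D64_CTRL2_PARITY when the result is 1 gives the
 * descriptor even parity overall.
 */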
static inline void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
static bool _dma_alloc(dma_info_t *di, uint direction)
{
	return dma64_alloc(di, direction);
}

void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
			   uint *alloced, unsigned long *pap)
{
	if (align_bits) {
		u16 align = (1 << align_bits);
		if (!IS_ALIGNED(PAGE_SIZE, align))
			size += align;
		*alloced = size;
	}
	return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
}
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (di->txd64)
		pci_free_consistent(di->pbus, di->txdalloc,
				    ((s8 *)di->txd64 - di->txdalign),
				    (di->txdpaorig));
	if (di->rxd64)
		pci_free_consistent(di->pbus, di->rxdalloc,
				    ((s8 *)di->rxd64 - di->rxdalign),
				    (di->rxdpaorig));

	/* free packet pointer vectors */
	if (di->txp)
		kfree((void *)di->txp);
	if (di->rxp)
		kfree((void *)di->rxp);

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		kfree(di->txp_dmah);

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		kfree(di->rxp_dmah);

	/* free our private info structure */
	kfree((void *)di);
}
static bool _dma_descriptor_align(dma_info_t *di)
{
	u32 addrl;

	/* Check to see if the descriptors need to be aligned on 4K/8K or not */
	if (di->d64txregs != NULL) {
		W_REG(&di->d64txregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64txregs->addrlow);
		if (addrl != 0)
			return false;
	} else if (di->d64rxregs != NULL) {
		W_REG(&di->d64rxregs->addrlow, 0xff0);
		addrl = R_REG(&di->d64rxregs->addrlow);
		if (addrl != 0)
			return false;
	}
	return true;
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
	/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

	/* not all tx or rx channels are available */
	if (di->d64txregs != NULL) {
		if (!_dma64_addrext(di->d64txregs)) {
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	} else if (di->d64rxregs != NULL) {
		if (!_dma64_addrext(di->d64rxregs)) {
			DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
				   "AE set\n", di->name));
			ASSERT(0);
		}
		return true;
	}
	return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (!di->aligndesc_4k) {
		if (direction == DMA_TX)
			di->xmtptrbase = PHYSADDRLO(pa);
		else
			di->rcvptrbase = PHYSADDRLO(pa);
	}

	if ((di->ddoffsetlow == 0)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64txregs->addrhigh,
			      (PHYSADDRHI(pa) + di->ddoffsethigh));
		} else {
			W_REG(&di->d64rxregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64rxregs->addrhigh,
			      (PHYSADDRHI(pa) + di->ddoffsethigh));
		}
	} else {
		/* DMA64 32-bit address extension */
		u32 ae;
		ASSERT(di->addrext);
		ASSERT(PHYSADDRHI(pa) == 0);

		/* shift the high bit(s) from pa to ae */
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
		    PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		if (direction == DMA_TX) {
			W_REG(&di->d64txregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64txregs->addrhigh,
			      di->ddoffsethigh);
			SET_REG(&di->d64txregs->control,
				D64_XC_AE, (ae << D64_XC_AE_SHIFT));
		} else {
			W_REG(&di->d64rxregs->addrlow,
			      (PHYSADDRLO(pa) + di->ddoffsetlow));
			W_REG(&di->d64rxregs->addrhigh,
			      di->ddoffsethigh);
			SET_REG(&di->d64rxregs->control,
				D64_RC_AE, (ae << D64_RC_AE_SHIFT));
		}
	}
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	OR_REG(&di->d64txregs->control, D64_XC_LE);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	memset((void *)di->rxd64, '\0',
	       (di->nrxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be
	 * inited before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);

	_dma_rxenable(di);

	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;
	u32 control;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	control =
	    (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
	    D64_RC_RE;

	if ((dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_RC_PD;

	if (dmactrlflags & DMA_CTRL_ROC)
		control |= D64_RC_OC;

	W_REG(&di->d64rxregs->control,
	      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (u16) di->rxoffset;
	*rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 * supported with pkts chain
 * otherwise, it's treated as a giant pkt and will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer data. After it reaches the max buffer size, the data continues in
 * the next DMA descriptor buffer WITHOUT a DMA header
 */
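/*
 * Worked example (editor's illustration): with rxbufsize == 2048 and
 * rxoffset == 30, a 4000-byte frame needs two descriptors: the first buffer
 * carries the 30-byte DMA header plus 2018 bytes of data, the second the
 * remaining 1982 bytes with no header (resid == 4000 - 2018 == 1982). With
 * DMA_CTRL_RXMULTI clear, such a frame is counted in rxgiants and tossed.
 */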
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = le16_to_cpu(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
#define OSL_UNCACHED(va)	((void *)KSEG1ADDR((va)))
	if (!len) {
		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
			udelay(1);

		*(u16 *) (head->data) = cpu_to_le16((u16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur =
			    B2I(((R_REG(&di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			pkt_buf_free_skb(di->osh, head, false);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
/* post receive buffers
 * return false if refill failed completely and ring is empty
 * this will stall the rx dma and user might want to call rxfill again asap
 * This unlikely happens on a memory-rich NIC, but often on a
 * memory-constrained dongle
 */
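/*
 * Example (editor's illustration): with nrxpost == 256, rxin == 10 and
 * rxout == 250, NRXDACTIVE(10, 250) == 240 buffers are already posted, so
 * this refill tries to allocate and post n == 256 - 240 == 16 more, bumping
 * rxout once per descriptor successfully posted.
 */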
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* di->rxbufsize doesn't include the extra headroom;
		 * add it to the size to be allocated
		 */
		p = pkt_buf_get_skb(di->osh, di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0 && dma64_rxidle(di)) {
				DMA_ERROR(("%s: rxfill64: ring is empty !\n",
					   di->name));
				ring_empty = true;
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			memset(&di->rxp_dmah[rxout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = pci_map_single(di->pbus, p->data,
				    di->rxbufsize, PCI_DMA_FROMDEVICE);

		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (rxout == (di->nrxd - 1))
			flags = D64_CTRL1_EOT;

		dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
			     di->rxbufsize);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->d64rxregs->ptr,
	      di->rcvptrbase + I2B(rxout, dma64dd_t));

	return ring_empty;
}
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return NULL;

	end =
	    B2I(((R_REG(&di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}

/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return NULL;

	end =
	    B2I(((R_REG(&di->d64rxregs->status0) &
		  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
		dma64dd_t);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return di->rxp[i];

	return NULL;
}
static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		pkt_buf_free_skb(di->osh, p, false);
}

static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	return dma64_getnextrxp(di, forceall);
}
static void _dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}

static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	curr =
	    B2I(((R_REG(&di->d64txregs->status0) &
		  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
		dma64dd_t);

	return NTXDACTIVE(curr, di->txout);
}

static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);

	return NTXDACTIVE(di->txin, ptr);
}

static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}
static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	/* check the handle before dereferencing it (di->name is unusable
	 * here if di is NULL)
	 */
	if (di == NULL) {
		DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
		return 0;
	}

	dmactrlflags = di->hnddma.dmactrlflags;

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		control = R_REG(&di->d64txregs->control);
		W_REG(&di->d64txregs->control,
		      control | D64_XC_PD);
		if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
			/* We *can* disable it so it is supported,
			 * restore control register
			 */
			W_REG(&di->d64txregs->control,
			      control);
		} else {
			/* Not supported, don't allow it to be enabled */
			dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->hnddma.txavail);
	else {
		ASSERT(0);
	}
	return 0;
}
u8 dma_align_sizetobits(uint size)
{
	u8 bitpos = 0;
	ASSERT(size);
	ASSERT(!(size & (size - 1)));
	while (size >>= 1) {
		bitpos++;
	}
	return bitpos;
}
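/*
 * Example (editor's illustration): dma_align_sizetobits(4096) == 12 and
 * dma_align_sizetobits(0x2000) == 13. The ASSERTs reject sizes that are not
 * powers of 2, since the result is later used as (1 << align_bits).
 */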
/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation does cross a page
 * boundary, it is freed and the allocation is redone at a location aligned
 * to the descriptor ring size, which guarantees the ring cannot cross a
 * page boundary.
 */
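/*
 * Worked example (editor's illustration): with boundary == 0x2000 and a
 * 0x200-byte ring starting at 0x1f00, (0x1f00 + 0x1ff) & 0x2000 differs
 * from 0x1f00 & 0x2000, i.e. the ring would straddle the 8 KB boundary.
 * The buffer is then freed and re-allocated with
 * alignbits = dma_align_sizetobits(0x200) == 9; a size-aligned ring can
 * never straddle a larger power-of-2 boundary.
 */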
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa)
{
	void *va;
	u32 desc_strtaddr;
	u32 alignbytes = 1 << *alignbits;

	va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
	if (NULL == va)
		return NULL;

	desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
	if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
							& boundary)) {
		*alignbits = dma_align_sizetobits(size);
		pci_free_consistent(di->pbus, size, va, *descpa);
		va = dma_alloc_consistent(di->pbus, size, *alignbits,
					  alloced, descpa);
	}
	return va;
}
/* 64-bit DMA functions */

static void dma64_txinit(dma_info_t *di)
{
	u32 control = D64_XC_XE;

	DMA_TRACE(("%s: dma_txinit\n", di->name));

	if (di->ntxd == 0)
		return;

	di->txin = di->txout = 0;
	di->hnddma.txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));

	/* DMA engine without alignment requirement requires table to be
	 * inited before enabling the engine
	 */
	if (!di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);

	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
		control |= D64_XC_PD;
	OR_REG(&di->d64txregs->control, control);

	/* DMA engine with alignment requirement requires table to be inited
	 * after the engine is enabled
	 */
	if (di->aligndesc_4k)
		_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
	u32 xc;

	/* If the chip is dead, it is not enabled :-) */
	xc = R_REG(&di->d64txregs->control);
	return (xc != 0xffffffff) && (xc & D64_XC_XE);
}

static void dma64_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));

	if (di->ntxd == 0)
		return;

	OR_REG(&di->d64txregs->control, D64_XC_SE);
}

static void dma64_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));

	if (di->ntxd == 0)
		return;

	AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}

static bool dma64_txsuspended(dma_info_t *di)
{
	return (di->ntxd == 0) ||
	    ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
	     D64_XC_SE);
}

static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
	void *p;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->txin == di->txout)
		return;

	while ((p = dma64_getnexttxp(di, range))) {
		/* For unframed data, we don't have any packets to free */
		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
			pkt_buf_free_skb(di->osh, p, true);
	}
}

static bool dma64_txstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
		D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
		D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
	u16 size;
	uint ddlen;
	void *va;
	uint alloced = 0;
	u16 align;
	u16 align_bits;

	ddlen = sizeof(dma64dd_t);

	size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
	align_bits = di->dmadesc_align;
	align = (1 << align_bits);

	if (direction == DMA_TX) {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->txdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
		PHYSADDRLOSET(di->txdpa,
			      PHYSADDRLO(di->txdpaorig) + di->txdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

		PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
		di->txdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
	} else {
		va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
				   &alloced, &di->rxdpaorig);
		if (va == NULL) {
			DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
			return false;
		}
		align = (1 << align_bits);
		di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
		di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
		PHYSADDRLOSET(di->rxdpa,
			      PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
		/* Make sure that alignment didn't overflow */
		ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

		PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
		di->rxdalloc = alloced;
		ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
	}

	return true;
}
static bool dma64_txreset(dma_info_t *di)
{
	u32 status;

	if (di->ntxd == 0)
		return true;

	/* suspend tx DMA first */
	W_REG(&di->d64txregs->control, D64_XC_SE);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
		 && (status != D64_XS0_XS_STOPPED), 10000);

	W_REG(&di->d64txregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
		  != D64_XS0_XS_DISABLED), 10000);

	/* wait for the last transaction to complete */
	udelay(300);

	return status == D64_XS0_XS_DISABLED;
}

static bool dma64_rxidle(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxidle\n", di->name));

	if (di->nrxd == 0)
		return true;

	return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
		(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}

static bool dma64_rxreset(dma_info_t *di)
{
	u32 status;

	if (di->nrxd == 0)
		return true;

	W_REG(&di->d64rxregs->control, 0);
	SPINWAIT(((status =
		   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
		  != D64_RS0_RS_DISABLED), 10000);

	return status == D64_RS0_RS_DISABLED;
}

static bool dma64_rxenabled(dma_info_t *di)
{
	u32 rc;

	rc = R_REG(&di->d64rxregs->control);
	return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
static bool dma64_txsuspendedidle(dma_info_t *di)
{
	if (di->ntxd == 0)
		return true;

	if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
		return 0;

	if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return 1;

	return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report
 * from the DMA. We return a pointer to the beginning of the data buffer of
 * the current descriptor. If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
	void *va;
	bool idle;
	u32 cd_offset;

	if (direction == DMA_TX) {
		cd_offset =
		    R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
		idle = !NTXDACTIVE(di->txin, di->txout);
		va = di->txp[B2I(cd_offset, dma64dd_t)];
	} else {
		cd_offset =
		    R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
		idle = !NRXDACTIVE(di->rxin, di->rxout);
		va = di->rxp[B2I(cd_offset, dma64dd_t)];
	}

	/* If DMA is IDLE, return NULL */
	if (idle) {
		DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
		va = NULL;
	}

	return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len"
 * bytes of data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;		/* phys addr */

	txout = di->txout;

	/* return nonzero if out of tx descriptors */
	if (NEXTTXD(txout) == di->txin)
		goto outoftxd;

	if (len == 0)
		return 0;

	pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);

	flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

	if (txout == (di->ntxd - 1))
		flags |= D64_CTRL1_EOT;

	dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
	ASSERT(di->txp[txout] == NULL);

	/* save the buffer pointer - used by dma_getpos */
	di->txp[txout] = buf;

	txout = NEXTTXD(txout);
	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit) {
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));
	}

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
/* !! tx entry routine
 * WARNING: call must check the return value for error.
 *   the error (toss frames) could be fatal and cause many subsequent
 *   hard-to-debug problems
 */
static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
				    bool commit)
{
	struct sk_buff *p, *next;
	unsigned char *data;
	uint len;
	u16 txout;
	u32 flags = 0;
	dmaaddr_t pa;

	DMA_TRACE(("%s: dma_txfast\n", di->name));

	txout = di->txout;

	/*
	 * Walk the chain of packet buffers
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		uint nsegs, j;
		hnddma_seg_map_t *map;

		data = p->data;
		len = p->len;
#ifdef BCM_DMAPAD
		len += PKTDMAPAD(di->osh, p);
#endif				/* BCM_DMAPAD */
		next = p->next;

		/* return nonzero if out of tx descriptors */
		if (NEXTTXD(txout) == di->txin)
			goto outoftxd;

		if (len == 0)
			continue;

		/* get physical address of buffer start */
		if (DMASGLIST_ENAB)
			memset(&di->txp_dmah[txout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[txout];

			/* See if all the segments can be accounted for */
			if (map->nsegs >
			    (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
				    1))
				goto outoftxd;

			nsegs = map->nsegs;
		} else
			nsegs = 1;

		for (j = 1; j <= nsegs; j++) {
			flags = 0;
			if (p == p0 && j == 1)
				flags |= D64_CTRL1_SOF;

			/* With a DMA segment list, Descriptor table is filled
			 * using the segment list instead of looping over
			 * buffers in multi-chain DMA. Therefore, EOF for
			 * SGLIST is when end of segment list is reached.
			 */
			if ((!DMASGLIST_ENAB && next == NULL) ||
			    (DMASGLIST_ENAB && j == nsegs))
				flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
			if (txout == (di->ntxd - 1))
				flags |= D64_CTRL1_EOT;

			if (DMASGLIST_ENAB) {
				len = map->segs[j - 1].length;
				pa = map->segs[j - 1].addr;
			}
			dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}

		/* See above. No need to loop over individual buffers */
		if (DMASGLIST_ENAB)
			break;
	}

	/* if last txd eof not set, fix it */
	if (!(flags & D64_CTRL1_EOF))
		W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
		     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

	/* save the packet */
	di->txp[PREVTXD(txout)] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	if (commit)
		W_REG(&di->d64txregs->ptr,
		      di->xmtptrbase + I2B(txout, dma64dd_t));

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return 0;

 outoftxd:
	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
	pkt_buf_free_skb(di->osh, p0, true);
	di->hnddma.txavail = 0;
	di->hnddma.txnobuf++;
	return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
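/*
 * Example (editor's illustration): HNDDMA_RANGE_TRANSMITTED bounds the
 * reclaim at the "CurrDescr" offset read from status0,
 * HNDDMA_RANGE_TRANSFERED derives its end point from the "ActiveDescr"
 * offset in status1, and HNDDMA_RANGE_ALL simply uses di->txout, so a reset
 * path typically reclaims with HNDDMA_RANGE_ALL to drop everything still
 * posted.
 */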
static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
	u16 start, end, i;
	u16 active_desc;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
		   (range == HNDDMA_RANGE_ALL) ? "all" :
		   ((range ==
		     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
		    "transfered")));

	if (di->ntxd == 0)
		return NULL;

	txp = NULL;

	start = di->txin;
	if (range == HNDDMA_RANGE_ALL)
		end = di->txout;
	else {
		dma64regs_t *dregs = di->d64txregs;

		end =
		    (u16) (B2I
			   (((R_REG(&dregs->status0) &
			      D64_XS0_CD_MASK) -
			     di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

		if (range == HNDDMA_RANGE_TRANSFERED) {
			active_desc =
			    (u16) (R_REG(&dregs->status1) &
				   D64_XS1_AD_MASK);
			active_desc =
			    (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
			active_desc = B2I(active_desc, dma64dd_t);
			if (end != active_desc)
				end = PREVTXD(active_desc);
		}
	}

	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		dmaaddr_t pa;
		hnddma_seg_map_t *map = NULL;
		uint size, j, nsegs;

		PHYSADDRLOSET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
			       di->dataoffsetlow));
		PHYSADDRHISET(pa,
			      (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
			       di->dataoffsethigh));

		if (DMASGLIST_ENAB) {
			map = &di->txp_dmah[i];
			size = map->origsize;
			nsegs = map->nsegs;
		} else {
			size =
			    (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
			     D64_CTRL2_BC_MASK);
			nsegs = 1;
		}

		for (j = nsegs; j > 0; j--) {
			W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
			W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

			txp = di->txp[i];
			di->txp[i] = NULL;
			if (j > 1)
				i = NEXTTXD(i);
		}

		pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
	}

	di->txin = i;

	/* tx flow control */
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return txp;

 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
		  "force %d\n", start, end, di->txout, forceall));
	return NULL;
}
static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
{
	uint i, curr;
	void *rxp;
	dmaaddr_t pa;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma64_rxenabled(di));

	i = di->rxin;

	/* return if no packets posted */
	if (i == di->rxout)
		return NULL;

	curr =
	    B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
		 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

	/* ignore curr if forceall */
	if (!forceall && (i == curr))
		return NULL;

	/* get the packet pointer that corresponds to the rx descriptor */
	rxp = di->rxp[i];
	ASSERT(rxp);
	di->rxp[i] = NULL;

	PHYSADDRLOSET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
		       di->dataoffsetlow));
	PHYSADDRHISET(pa,
		      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
		       di->dataoffsethigh));

	/* clear this packet from the descriptor ring */
	pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

	W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
	W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

	di->rxin = NEXTRXD(i);

	return rxp;
}
static bool _dma64_addrext(dma64regs_t *dma64regs)
{
	u32 w;
	OR_REG(&dma64regs->control, D64_XC_AE);
	w = R_REG(&dma64regs->control);
	AND_REG(&dma64regs->control, ~D64_XC_AE);
	return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
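/*
 * Worked example (editor's illustration): with ntxd == 64, txin == 60,
 * txout == 2 and the hardware ActiveDescr at index 62, rot = TXD(62 - 60)
 * == 2, so the 6 active entries are copied two slots forward (wrapping at
 * the ring end) and txin/txout become 62 and 4.
 */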
static void dma64_txrotate(dma_info_t *di)
{
	u16 ad;
	uint nactive;
	uint rot;
	u16 old, new;
	u32 w;
	u16 first, last;

	ASSERT(dma64_txsuspendedidle(di));

	nactive = _dma_txactive(di);
	ad = (u16) (B2I
		    ((((R_REG(&di->d64txregs->status1) &
			D64_XS1_AD_MASK)
		       - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
	rot = TXD(ad - di->txin);

	ASSERT(rot < di->ntxd);

	/* full-ring case is a lot harder - don't worry about this */
	if (rot >= (di->ntxd - nactive)) {
		DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
		return;
	}

	first = di->txin;
	last = PREVTXD(di->txout);

	/* move entries starting at last and moving backwards to first */
	for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
		new = TXD(old + rot);

		/*
		 * Move the tx dma descriptor.
		 * EOT is set only in the last entry in the ring.
		 */
		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
		if (new == (di->ntxd - 1))
			w |= D64_CTRL1_EOT;
		W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

		w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
		W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

		W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
		W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

		/* zap the old tx dma descriptor address field */
		W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
		W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

		/* move the corresponding txp[] entry */
		ASSERT(di->txp[new] == NULL);
		di->txp[new] = di->txp[old];

		/* Move the map */
		if (DMASGLIST_ENAB) {
			memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
			       sizeof(hnddma_seg_map_t));
			memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
		}

		di->txp[old] = NULL;
	}

	/* update txin and txout */
	di->txin = ad;
	di->txout = TXD(di->txout + rot);
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	/* kick the chip */
	W_REG(&di->d64txregs->ptr,
	      di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit)
	 * capability
	 */
	/* DMA engine is 64-bit capable */
	if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
		if (si_backplane64(sih))
			/* If bus is System Backplane or PCIE then
			 * we can access 64 bits
			 */
			if ((sih->bustype == SI_BUS) ||
			    ((sih->bustype == PCI_BUS) &&
			     (sih->buscoretype == PCIE_CORE_ID)))
				return DMADDRWIDTH_64;
	}
	ASSERT(0);		/* DMA hardware not supported by this driver */
	return DMADDRWIDTH_64;
}