/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <bcmdefs.h>
#include <bcmutils.h>
#include <siutils.h>
#include <sbhnddma.h>
#include <hnddma.h>
#include <asm/addrspace.h>

#ifdef BRCM_FULLMAC
#error "hnddma.c shouldn't be needed for FULLMAC"
#endif
#ifdef BCMDBG
#define DMA_ERROR(args) \
        do { if (*di->msg_level & 1) printk args; } while (0)
#define DMA_TRACE(args) \
        do { if (*di->msg_level & 2) printk args; } while (0)
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#endif                          /* BCMDBG */

#define DMA_NONE(args)
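
/*
 * Illustrative sketch (not compiled into the driver): the debug macros take
 * one parenthesized printf-style argument list, hence the double parentheses
 * at every call site; under BCMDBG the inner list goes straight to printk.
 */
#if 0
DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
DMA_TRACE(("%s: dma_rxinit\n", di->name));
#endif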
#define d64txregs       dregs.d64_u.txregs_64
#define d64rxregs       dregs.d64_u.rxregs_64
#define txd64           dregs.d64_u.txd_64
#define rxd64           dregs.d64_u.rxd_64

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define MAXNAMEL        8       /* 8 char names */

#define DI_INFO(dmah)   ((dma_info_t *)dmah)

#define R_SM(r)         (*(r))
#define W_SM(r, v)      (*(r) = (v))
/* dma engine software state */
typedef struct dma_info {
        struct hnddma_pub hnddma;       /* exported structure */
        uint *msg_level;        /* message level pointer */
        char name[MAXNAMEL];    /* callers name for diag msgs */

        struct osl_info *osh;   /* os handle */
        void *pbus;             /* bus handle */

        bool dma64;     /* this dma engine is operating in 64-bit mode */
        bool addrext;   /* this dma engine supports DmaExtendedAddrChanges */

        union {
                struct {
                        dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
                        dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
                        dma64dd_t *txd_64;      /* pointer to dma64 tx descriptor ring */
                        dma64dd_t *rxd_64;      /* pointer to dma64 rx descriptor ring */
                } d64_u;
        } dregs;

        u16 dmadesc_align;      /* alignment requirement for dma descriptors */

        u16 ntxd;               /* # tx descriptors tunable */
        u16 txin;               /* index of next descriptor to reclaim */
        u16 txout;              /* index of next descriptor to post */
        void **txp;             /* pointer to parallel array of pointers to packets */
        osldma_t *tx_dmah;      /* DMA TX descriptor ring handle */
        hnddma_seg_map_t *txp_dmah;     /* DMA MAP meta-data handle */
        dmaaddr_t txdpa;        /* Aligned physical address of descriptor ring */
        dmaaddr_t txdpaorig;    /* Original physical address of descriptor ring */
        u16 txdalign;           /* #bytes added to alloc'd mem to align txd */
        u32 txdalloc;           /* #bytes allocated for the ring */
        u32 xmtptrbase;         /* When using unaligned descriptors, the ptr register
                                 * is not just an index, it needs all 13 bits to be
                                 * an offset from the addr register.
                                 */

        u16 nrxd;               /* # rx descriptors tunable */
        u16 rxin;               /* index of next descriptor to reclaim */
        u16 rxout;              /* index of next descriptor to post */
        void **rxp;             /* pointer to parallel array of pointers to packets */
        osldma_t *rx_dmah;      /* DMA RX descriptor ring handle */
        hnddma_seg_map_t *rxp_dmah;     /* DMA MAP meta-data handle */
        dmaaddr_t rxdpa;        /* Aligned physical address of descriptor ring */
        dmaaddr_t rxdpaorig;    /* Original physical address of descriptor ring */
        u16 rxdalign;           /* #bytes added to alloc'd mem to align rxd */
        u32 rxdalloc;           /* #bytes allocated for the ring */
        u32 rcvptrbase;         /* Base for ptr reg when using unaligned descriptors */

        /* tunables */
        unsigned int rxbufsize; /* rx buffer size in bytes,
                                 * not including the extra headroom
                                 */
        uint rxextrahdrroom;    /* extra rx headroom, reserved to assist upper stack
                                 * e.g. some rx pkt buffers will be bridged to tx side
                                 * without byte copying. The extra headroom needs to be
                                 * large enough to fit txheader needs.
                                 * Some dongle driver may not need it.
                                 */
        uint nrxpost;           /* # rx buffers to keep posted */
        unsigned int rxoffset;  /* rxcontrol offset */
        uint ddoffsetlow;       /* add to get dma address of descriptor ring, low 32 bits */
        uint ddoffsethigh;      /*   high 32 bits */
        uint dataoffsetlow;     /* add to get dma address of data buffer, low 32 bits */
        uint dataoffsethigh;    /*   high 32 bits */
        bool aligndesc_4k;      /* descriptor base need to be aligned or not */
} dma_info_t;
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif                          /* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define XXD(x, n)       ((x) & ((n) - 1))       /* faster than %, but n must be power of 2 */
#define TXD(x)          XXD((x), di->ntxd)
#define RXD(x)          XXD((x), di->nrxd)
#define NEXTTXD(i)      TXD((i) + 1)
#define PREVTXD(i)      TXD((i) - 1)
#define NEXTRXD(i)      RXD((i) + 1)
#define PREVRXD(i)      RXD((i) - 1)

#define NTXDACTIVE(h, t) TXD((t) - (h))
#define NRXDACTIVE(h, t) RXD((t) - (h))
/* macros to convert between byte offsets and indexes */
#define B2I(bytes, type)        ((bytes) / sizeof(type))
#define I2B(index, type)        ((index) * sizeof(type))
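
/*
 * Illustrative sketch (not compiled into the driver): with a power-of-two
 * ring size the XXD() bitmask wraps exactly like a modulo, and the
 * NTXDACTIVE() count stays correct even after txout wraps below txin,
 * because the (t - h) difference is reduced by the same mask.
 */
#if 0
/* assume di->ntxd == 64 */
NEXTTXD(63);            /* (63 + 1) & 63 == 0: wraps to ring start */
NTXDACTIVE(60, 3);      /* ((3 - 60) & 63) == 7 descriptors in flight */
B2I(I2B(7, dma64dd_t), dma64dd_t);      /* index -> bytes -> index == 7 */
#endif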
#define PCI32ADDR_HIGH          0xc0000000      /* address[31:30] */
#define PCI32ADDR_HIGH_SHIFT    30              /* address[31:30] */

#define PCI64ADDR_HIGH          0x80000000      /* address[63] */
#define PCI64ADDR_HIGH_SHIFT    31              /* address[63] */
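
/*
 * Illustrative sketch (not compiled into the driver): a 32-bit physical
 * address above 1 GB cannot go into addrlow unmodified on PCI; its top
 * two bits are carried in the descriptor's AE (address extension) field.
 */
#if 0
u32 palo = 0xd0001000;  /* bits 31:30 set */
u32 ae = (palo & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;       /* ae == 3 */
palo &= ~PCI32ADDR_HIGH;        /* addrlow field becomes 0x10001000 */
/* ae is then folded into ctrl2 via D64_CTRL2_AE; see dma64_dd_upd() */
#endif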
/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
                              u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
                           u16 *alignbits, uint *alloced,
                           dmaaddr_t *descpa, osldma_t **dmah);
/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);
const di_fcn_t dma64proc = {
        (di_detach_t) _dma_detach,
        (di_txinit_t) dma64_txinit,
        (di_txreset_t) dma64_txreset,
        (di_txenabled_t) dma64_txenabled,
        (di_txsuspend_t) dma64_txsuspend,
        (di_txresume_t) dma64_txresume,
        (di_txsuspended_t) dma64_txsuspended,
        (di_txsuspendedidle_t) dma64_txsuspendedidle,
        (di_txfast_t) dma64_txfast,
        (di_txunframed_t) dma64_txunframed,
        (di_getpos_t) dma64_getpos,
        (di_txstopped_t) dma64_txstopped,
        (di_txreclaim_t) dma64_txreclaim,
        (di_getnexttxp_t) dma64_getnexttxp,
        (di_peeknexttxp_t) _dma_peeknexttxp,
        (di_txblock_t) _dma_txblock,
        (di_txunblock_t) _dma_txunblock,
        (di_txactive_t) _dma_txactive,
        (di_txrotate_t) dma64_txrotate,

        (di_rxinit_t) _dma_rxinit,
        (di_rxreset_t) dma64_rxreset,
        (di_rxidle_t) dma64_rxidle,
        (di_rxstopped_t) dma64_rxstopped,
        (di_rxenable_t) _dma_rxenable,
        (di_rxenabled_t) dma64_rxenabled,

        (di_rxfill_t) _dma_rxfill,
        (di_rxreclaim_t) _dma_rxreclaim,
        (di_getnextrxp_t) _dma_getnextrxp,
        (di_peeknextrxp_t) _dma_peeknextrxp,
        (di_rxparam_get_t) _dma_rx_param_get,

        (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
        (di_getvar_t) _dma_getvar,
        (di_counterreset_t) _dma_counterreset,
        (di_ctrlflags_t) _dma_ctrlflags,
        NULL,
        NULL,
        NULL,
        (di_rxactive_t) _dma_rxactive,
        (di_txpending_t) _dma_txpending,
        (di_txcommitted_t) _dma_txcommitted,
        39
};
struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
                              void *dmaregstx, void *dmaregsrx, uint ntxd,
                              uint nrxd, uint rxbufsize, int rxextheadroom,
                              uint nrxpost, uint rxoffset, uint *msg_level)
{
        dma_info_t *di;
        uint size;

        /* allocate private info structure */
        di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
        if (di == NULL) {
                printk(KERN_ERR "dma_attach: out of memory\n");
                return NULL;
        }

        di->msg_level = msg_level ? msg_level : &dma_msg_level;

        /* old chips w/o sb are no longer supported */
        ASSERT(sih != NULL);

        di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

        /* check arguments */
        ASSERT(ISPOWEROF2(ntxd));
        ASSERT(ISPOWEROF2(nrxd));

        if (nrxd == 0)
                ASSERT(dmaregsrx == NULL);
        if (ntxd == 0)
                ASSERT(dmaregstx == NULL);

        /* init dma reg pointer */
        ASSERT(ntxd <= D64MAXDD);
        ASSERT(nrxd <= D64MAXDD);
        di->d64txregs = (dma64regs_t *) dmaregstx;
        di->d64rxregs = (dma64regs_t *) dmaregsrx;
        di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;

        /* Default flags (which can be changed by the driver calling dma_ctrlflags
         * before enable): For backwards compatibility both Rx Overflow Continue
         * and Parity are DISABLED.
         */
        di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
                                    0);

        DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
                   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
                   "dmaregstx %p dmaregsrx %p\n", name, "DMA64", osh,
                   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
                   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

        /* make a private copy of our callers name */
        strncpy(di->name, name, MAXNAMEL);
        di->name[MAXNAMEL - 1] = '\0';

        di->osh = osh;
        di->pbus = ((struct si_info *)sih)->pbus;

        /* save tunables */
        di->ntxd = (u16) ntxd;
        di->nrxd = (u16) nrxd;

        /* the actual dma size doesn't include the extra headroom */
        di->rxextrahdrroom =
            (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
        if (rxbufsize > BCMEXTRAHDROOM)
                di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
        else
                di->rxbufsize = (u16) rxbufsize;

        di->nrxpost = (u16) nrxpost;
        di->rxoffset = (u8) rxoffset;

        /*
         * figure out the DMA physical address offset for dd and data
         *     PCI/PCIE: they map silicon backplace address to zero based memory, need offset
         *     Other bus: use zero
         *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
         */
        di->ddoffsetlow = 0;
        di->ddoffsethigh = 0;
        di->dataoffsetlow = 0;
        di->dataoffsethigh = 0;
        /* for pci bus, add offset */
        if (sih->bustype == PCI_BUS) {
                /* pcie with DMA64 */
                di->ddoffsetlow = 0;
                di->ddoffsethigh = SI_PCIE_DMA_H32;
                di->dataoffsetlow = di->ddoffsetlow;
                di->dataoffsethigh = di->ddoffsethigh;
        }
#if defined(__mips__) && defined(IL_BIGENDIAN)
        di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif                          /* defined(__mips__) && defined(IL_BIGENDIAN) */
        /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
        if ((si_coreid(sih) == SDIOD_CORE_ID)
            && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
                di->addrext = 0;
        else if ((si_coreid(sih) == I2S_CORE_ID) &&
                 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
                di->addrext = 0;
        else
                di->addrext = _dma_isaddrext(di);

        /* do the descriptors need to be aligned and, if yes, on 4K/8K or not */
        di->aligndesc_4k = _dma_descriptor_align(di);
        if (di->aligndesc_4k) {
                di->dmadesc_align = D64RINGALIGN_BITS;
                if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
                        /* for smaller dd table, HW relax alignment reqmnt */
                        di->dmadesc_align = D64RINGALIGN_BITS - 1;
                }
        } else
                di->dmadesc_align = 4;  /* 16 byte alignment */

        DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
                  di->aligndesc_4k, di->dmadesc_align));

        /* allocate tx packet pointer vector */
        if (ntxd) {
                size = ntxd * sizeof(void *);
                di->txp = kzalloc(size, GFP_ATOMIC);
                if (di->txp == NULL) {
                        DMA_ERROR(("%s: dma_attach: out of tx memory\n",
                                   di->name));
                        goto fail;
                }
        }

        /* allocate rx packet pointer vector */
        if (nrxd) {
                size = nrxd * sizeof(void *);
                di->rxp = kzalloc(size, GFP_ATOMIC);
                if (di->rxp == NULL) {
                        DMA_ERROR(("%s: dma_attach: out of rx memory\n",
                                   di->name));
                        goto fail;
                }
        }

        /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
        if (ntxd) {
                if (!_dma_alloc(di, DMA_TX))
                        goto fail;
        }

        /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
        if (nrxd) {
                if (!_dma_alloc(di, DMA_RX))
                        goto fail;
        }

        if ((di->ddoffsetlow != 0) && !di->addrext) {
                if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
                        DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
                                   "supported\n", di->name,
                                   (u32) PHYSADDRLO(di->txdpa)));
                        goto fail;
                }
                if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
                        DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
                                   "supported\n", di->name,
                                   (u32) PHYSADDRLO(di->rxdpa)));
                        goto fail;
                }
        }

        DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
                   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
                   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
                   di->addrext));

        /* allocate DMA mapping vectors */
        if (DMASGLIST_ENAB) {
                if (ntxd) {
                        size = ntxd * sizeof(hnddma_seg_map_t);
                        di->txp_dmah = kzalloc(size, GFP_ATOMIC);
                        if (di->txp_dmah == NULL)
                                goto fail;
                }

                if (nrxd) {
                        size = nrxd * sizeof(hnddma_seg_map_t);
                        di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
                        if (di->rxp_dmah == NULL)
                                goto fail;
                }
        }

        return (struct hnddma_pub *) di;

 fail:
        _dma_detach(di);
        return NULL;
}
/* Check for odd number of 1's */
static inline u32 parity32(u32 data)
{
        data ^= data >> 16;
        data ^= data >> 8;
        data ^= data >> 4;
        data ^= data >> 2;
        data ^= data >> 1;

        return data & 1;
}

#define DMA64_DD_PARITY(dd) \
        parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
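
/*
 * Illustrative sketch (not compiled into the driver): parity32() XOR-folds
 * the word onto itself, so bit 0 ends up as the XOR of all 32 bits. When
 * DMA_CTRL_PEN is set, dma64_dd_upd() uses DMA64_DD_PARITY() to decide
 * whether to set D64_CTRL2_PARITY, forcing even parity over the descriptor.
 */
#if 0
parity32(0x00000001);   /* one bit set  -> 1 (odd parity) */
parity32(0x00000003);   /* two bits set -> 0 (even parity) */
#endif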
static inline void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
             u32 *flags, u32 bufcount)
{
        u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

        /* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
        if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
            || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
        if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif                          /* defined(__mips__) && defined(IL_BIGENDIAN) */
                ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

                W_SM(&ddring[outidx].addrlow,
                     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
                W_SM(&ddring[outidx].addrhigh,
                     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
                W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
                W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
        } else {
                /* address extension for 32-bit PCI */
                u32 ae;
                ASSERT(di->addrext);

                ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
                PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
                ASSERT(PHYSADDRHI(pa) == 0);

                ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
                W_SM(&ddring[outidx].addrlow,
                     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
                W_SM(&ddring[outidx].addrhigh,
                     BUS_SWAP32(0 + di->dataoffsethigh));
                W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
                W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
        }
        if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
                if (DMA64_DD_PARITY(&ddring[outidx])) {
                        W_SM(&ddring[outidx].ctrl2,
                             BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
                }
        }
}
static bool _dma_alloc(dma_info_t *di, uint direction)
{
        return dma64_alloc(di, direction);
}

void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
                           uint *alloced, unsigned long *pap)
{
        if (align_bits) {
                u16 align = (1 << align_bits);
                if (!IS_ALIGNED(PAGE_SIZE, align))
                        size += align;
                *alloced = size;
        }
        return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
}
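
/*
 * Illustrative sketch (not compiled into the driver, hypothetical values):
 * when the requested alignment does not divide PAGE_SIZE, an extra "align"
 * bytes are allocated so the caller can round the returned address up to
 * an aligned boundary inside the block, as dma64_alloc() does.
 */
#if 0
struct pci_dev *pdev;   /* hypothetical device handle */
uint alloced;
unsigned long pa;
void *va = dma_alloc_consistent(pdev, 0x2000, 13, &alloced, &pa);
dma64dd_t *ring = (dma64dd_t *) roundup((unsigned long)va, 1 << 13);
#endif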
/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{
        DMA_TRACE(("%s: dma_detach\n", di->name));

        /* shouldn't be here if descriptors are unreclaimed */
        ASSERT(di->txin == di->txout);
        ASSERT(di->rxin == di->rxout);

        /* free dma descriptor rings */
        if (di->txd64)
                pci_free_consistent(di->pbus, di->txdalloc,
                                    ((s8 *)di->txd64 - di->txdalign),
                                    (di->txdpaorig));
        if (di->rxd64)
                pci_free_consistent(di->pbus, di->rxdalloc,
                                    ((s8 *)di->rxd64 - di->rxdalign),
                                    (di->rxdpaorig));

        /* free packet pointer vectors; kfree() tolerates NULL */
        kfree(di->txp);
        kfree(di->rxp);

        /* free tx packet DMA handles */
        kfree(di->txp_dmah);

        /* free rx packet DMA handles */
        kfree(di->rxp_dmah);

        /* free our private info structure */
        kfree(di);
}
static bool _dma_descriptor_align(dma_info_t *di)
{
        u32 addrl;

        /* Check to see if the descriptors need to be aligned on 4K/8K or not */
        if (di->d64txregs != NULL) {
                W_REG(&di->d64txregs->addrlow, 0xff0);
                addrl = R_REG(&di->d64txregs->addrlow);
                if (addrl != 0)
                        return false;
        } else if (di->d64rxregs != NULL) {
                W_REG(&di->d64rxregs->addrlow, 0xff0);
                addrl = R_REG(&di->d64rxregs->addrlow);
                if (addrl != 0)
                        return false;
        }
        return true;
}

/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
        /* DMA64 supports full 32- or 64-bit operation. AE is always valid */

        /* not all tx or rx channels are available */
        if (di->d64txregs != NULL) {
                if (!_dma64_addrext(di->d64txregs)) {
                        DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
                                   "AE set\n", di->name));
                        ASSERT(0);
                }
                return true;
        } else if (di->d64rxregs != NULL) {
                if (!_dma64_addrext(di->d64rxregs)) {
                        DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
                                   "AE set\n", di->name));
                        ASSERT(0);
                }
                return true;
        }
        return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
        if (!di->aligndesc_4k) {
                if (direction == DMA_TX)
                        di->xmtptrbase = PHYSADDRLO(pa);
                else
                        di->rcvptrbase = PHYSADDRLO(pa);
        }

        if ((di->ddoffsetlow == 0)
            || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
                if (direction == DMA_TX) {
                        W_REG(&di->d64txregs->addrlow,
                              (PHYSADDRLO(pa) + di->ddoffsetlow));
                        W_REG(&di->d64txregs->addrhigh,
                              (PHYSADDRHI(pa) + di->ddoffsethigh));
                } else {
                        W_REG(&di->d64rxregs->addrlow,
                              (PHYSADDRLO(pa) + di->ddoffsetlow));
                        W_REG(&di->d64rxregs->addrhigh,
                              (PHYSADDRHI(pa) + di->ddoffsethigh));
                }
        } else {
                /* DMA64 32bits address extension */
                u32 ae;
                ASSERT(di->addrext);
                ASSERT(PHYSADDRHI(pa) == 0);

                /* shift the high bit(s) from pa to ae */
                ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
                    PCI32ADDR_HIGH_SHIFT;
                PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

                if (direction == DMA_TX) {
                        W_REG(&di->d64txregs->addrlow,
                              (PHYSADDRLO(pa) + di->ddoffsetlow));
                        W_REG(&di->d64txregs->addrhigh,
                              di->ddoffsethigh);
                        SET_REG(&di->d64txregs->control,
                                D64_XC_AE, (ae << D64_XC_AE_SHIFT));
                } else {
                        W_REG(&di->d64rxregs->addrlow,
                              (PHYSADDRLO(pa) + di->ddoffsetlow));
                        W_REG(&di->d64rxregs->addrhigh,
                              di->ddoffsethigh);
                        SET_REG(&di->d64rxregs->control,
                                D64_RC_AE, (ae << D64_RC_AE_SHIFT));
                }
        }
}
static void _dma_fifoloopbackenable(dma_info_t *di)
{
        DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

        OR_REG(&di->d64txregs->control, D64_XC_LE);
}

static void _dma_rxinit(dma_info_t *di)
{
        DMA_TRACE(("%s: dma_rxinit\n", di->name));

        if (di->nrxd == 0)
                return;

        di->rxin = di->rxout = 0;

        /* clear rx descriptor ring */
        memset((void *)di->rxd64, '\0',
               (di->nrxd * sizeof(dma64dd_t)));

        /* DMA engine without alignment requirement requires table to be inited
         * before enabling the engine
         */
        if (!di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_RX, di->rxdpa);

        _dma_rxenable(di);

        if (di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_RX, di->rxdpa);
}

static void _dma_rxenable(dma_info_t *di)
{
        uint dmactrlflags = di->hnddma.dmactrlflags;
        u32 control;

        DMA_TRACE(("%s: dma_rxenable\n", di->name));

        control =
            (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
            D64_RC_RE;

        if ((dmactrlflags & DMA_CTRL_PEN) == 0)
                control |= D64_RC_PD;

        if (dmactrlflags & DMA_CTRL_ROC)
                control |= D64_RC_OC;

        W_REG(&di->d64rxregs->control,
              ((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
        /* the normal values fit into 16 bits */
        *rxoffset = (u16) di->rxoffset;
        *rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more
 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 *   supported with pkt chains; otherwise, it's treated as a giant pkt and
 *   will be tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer data. After it reaches the max size of a buffer, the data continues
 * in the next DMA descriptor buffer WITHOUT a DMA header.
 */
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
        struct sk_buff *p, *head, *tail;
        uint len;
        uint pkt_len;
        int resid = 0;

 next_frame:
        head = _dma_getnextrxp(di, false);
        if (head == NULL)
                return NULL;

        len = le16_to_cpu(*(u16 *) (head->data));
        DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va)))
        if (!len) {
                while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
                        udelay(1);

                *(u16 *) (head->data) = cpu_to_le16((u16) len);
        }
#endif                          /* defined(__mips__) */

        /* set actual length */
        pkt_len = min((di->rxoffset + len), di->rxbufsize);
        __skb_trim(head, pkt_len);
        resid = len - (di->rxbufsize - di->rxoffset);

        /* check for single or multi-buffer rx */
        if (resid > 0) {
                tail = head;
                while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
                        tail->next = p;
                        pkt_len = min(resid, (int)di->rxbufsize);
                        __skb_trim(p, pkt_len);

                        tail = p;
                        resid -= di->rxbufsize;
                }

#ifdef BCMDBG
                if (resid > 0) {
                        uint cur;
                        ASSERT(p == NULL);
                        cur =
                            B2I(((R_REG(&di->d64rxregs->status0) &
                                  D64_RS0_CD_MASK) -
                                 di->rcvptrbase) & D64_RS0_CD_MASK,
                                dma64dd_t);
                        DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
                                   di->rxin, di->rxout, cur));
                }
#endif                          /* BCMDBG */

                if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
                        DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
                                   di->name, len));
                        pkt_buf_free_skb(di->osh, head, false);
                        di->hnddma.rxgiants++;
                        goto next_frame;
                }
        }

        return head;
}
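
/*
 * Illustrative sketch (not compiled into the driver): a typical rx service
 * loop drains completed frames and then reposts empty buffers; the consumer
 * shown here is hypothetical.
 */
#if 0
static void example_rx_service(dma_info_t *di)
{
        struct sk_buff *p;

        while ((p = _dma_rx(di)) != NULL)
                netif_rx(p);    /* hand the frame up the stack */

        _dma_rxfill(di);        /* repost empty buffers to the ring */
}
#endif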
/* post receive buffers
 * returns false if refill failed completely and ring is empty
 * this will stall the rx dma and user might want to call rxfill again asap
 * This is unlikely on a memory-rich NIC, but often happens on a
 * memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
        struct sk_buff *p;
        u16 rxin, rxout;
        u32 flags = 0;
        uint n;
        uint i;
        dmaaddr_t pa;
        uint extra_offset = 0;
        bool ring_empty;

        ring_empty = false;

        /*
         * Determine how many receive buffers we're lacking
         * from the full complement, allocate, initialize,
         * and post them, then update the chip rx lastdscr.
         */

        rxin = di->rxin;
        rxout = di->rxout;

        n = di->nrxpost - NRXDACTIVE(rxin, rxout);

        DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

        if (di->rxbufsize > BCMEXTRAHDROOM)
                extra_offset = di->rxextrahdrroom;

        for (i = 0; i < n; i++) {
                /* the di->rxbufsize doesn't include the extra headroom,
                 * we need to add it to the size to be allocated
                 */
                p = pkt_buf_get_skb(di->osh, di->rxbufsize + extra_offset);

                if (p == NULL) {
                        DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
                                   di->name));
                        if (i == 0 && dma64_rxidle(di)) {
                                DMA_ERROR(("%s: rxfill64: ring is empty !\n",
                                           di->name));
                                ring_empty = true;
                        }
                        di->hnddma.rxnobuf++;
                        break;
                }
                /* reserve an extra headroom, if applicable */
                if (extra_offset)
                        skb_pull(p, extra_offset);

                /* Do a cached write instead of uncached write since DMA_MAP
                 * will flush the cache.
                 */
                *(u32 *) (p->data) = 0;

                if (DMASGLIST_ENAB)
                        memset(&di->rxp_dmah[rxout], 0,
                               sizeof(hnddma_seg_map_t));

                pa = pci_map_single(di->pbus, p->data,
                                    di->rxbufsize, PCI_DMA_FROMDEVICE);

                ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

                /* save the free packet pointer */
                ASSERT(di->rxp[rxout] == NULL);
                di->rxp[rxout] = p;

                /* reset flags for each descriptor */
                flags = 0;
                if (rxout == (di->nrxd - 1))
                        flags = D64_CTRL1_EOT;

                dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
                             di->rxbufsize);
                rxout = NEXTRXD(rxout);
        }

        di->rxout = rxout;

        /* update the chip lastdscr pointer */
        W_REG(&di->d64rxregs->ptr,
              di->rcvptrbase + I2B(rxout, dma64dd_t));

        return ring_empty;
}
/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
        uint end, i;

        if (di->ntxd == 0)
                return NULL;

        end =
            B2I(((R_REG(&di->d64txregs->status0) &
                  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
                dma64dd_t);

        for (i = di->txin; i != end; i = NEXTTXD(i))
                if (di->txp[i])
                        return di->txp[i];

        return NULL;
}

/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
        uint end, i;

        if (di->nrxd == 0)
                return NULL;

        end =
            B2I(((R_REG(&di->d64rxregs->status0) &
                  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
                dma64dd_t);

        for (i = di->rxin; i != end; i = NEXTRXD(i))
                if (di->rxp[i])
                        return di->rxp[i];

        return NULL;
}

static void _dma_rxreclaim(dma_info_t *di)
{
        void *p;

        DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

        while ((p = _dma_getnextrxp(di, true)))
                pkt_buf_free_skb(di->osh, p, false);
}
static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
        if (di->nrxd == 0)
                return NULL;

        return dma64_getnextrxp(di, forceall);
}

static void _dma_txblock(dma_info_t *di)
{
        di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
        di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
        return NTXDACTIVE(di->txin, di->txout);
}

static uint _dma_txpending(dma_info_t *di)
{
        uint curr;

        curr =
            B2I(((R_REG(&di->d64txregs->status0) &
                  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
                dma64dd_t);

        return NTXDACTIVE(curr, di->txout);
}

static uint _dma_txcommitted(dma_info_t *di)
{
        uint ptr;
        uint txin = di->txin;

        if (txin == di->txout)
                return 0;

        ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);

        return NTXDACTIVE(di->txin, ptr);
}

static uint _dma_rxactive(dma_info_t *di)
{
        return NRXDACTIVE(di->rxin, di->rxout);
}

static void _dma_counterreset(dma_info_t *di)
{
        /* reset all software counters */
        di->hnddma.rxgiants = 0;
        di->hnddma.rxnobuf = 0;
        di->hnddma.txnobuf = 0;
}
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
        uint dmactrlflags;

        if (di == NULL)
                return 0;       /* can't log: DMA_ERROR() needs a valid handle */

        dmactrlflags = di->hnddma.dmactrlflags;

        ASSERT((flags & ~mask) == 0);

        dmactrlflags &= ~mask;
        dmactrlflags |= flags;

        /* If trying to enable parity, check if parity is actually supported */
        if (dmactrlflags & DMA_CTRL_PEN) {
                u32 control;

                control = R_REG(&di->d64txregs->control);
                W_REG(&di->d64txregs->control,
                      control | D64_XC_PD);
                if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
                        /* We *can* disable it so it is supported,
                         * restore control register
                         */
                        W_REG(&di->d64txregs->control,
                              control);
                } else {
                        /* Not supported, don't allow it to be enabled */
                        dmactrlflags &= ~DMA_CTRL_PEN;
                }
        }

        di->hnddma.dmactrlflags = dmactrlflags;

        return dmactrlflags;
}
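
/*
 * Illustrative sketch (not compiled into the driver): callers pass a mask
 * of the flag bits to touch and the new values for those bits, so one flag
 * can be set or cleared without disturbing the others.
 */
#if 0
_dma_ctrlflags(di, DMA_CTRL_ROC, DMA_CTRL_ROC); /* set Rx Overflow Continue */
_dma_ctrlflags(di, DMA_CTRL_PEN, 0);            /* clear parity enable */
#endif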
/* get the address of the var in order to change later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
        if (!strcmp(name, "&txavail"))
                return (unsigned long)&(di->hnddma.txavail);
        else {
                ASSERT(0);
        }
        return 0;
}

u8 dma_align_sizetobits(uint size)
{
        u8 bitpos = 0;
        ASSERT(!(size & (size - 1)));
        while (size >>= 1) {
                bitpos++;
        }
        return bitpos;
}
/* This function ensures that the DMA descriptor ring will not get allocated
 * across a page boundary. If the allocation is done across the page boundary
 * the first time, then it is freed and the allocation is redone at a
 * descriptor-ring-size aligned location. This ensures that the ring will
 * not cross a page boundary.
 */
static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
                           u16 *alignbits, uint *alloced,
                           dmaaddr_t *descpa, osldma_t **dmah)
{
        void *va;
        u32 desc_strtaddr;
        u32 alignbytes = 1 << *alignbits;

        va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
        if (NULL == va)
                return NULL;

        desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
        if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
                                                        & boundary)) {
                *alignbits = dma_align_sizetobits(size);
                pci_free_consistent(di->pbus, size, va, *descpa);
                va = dma_alloc_consistent(di->pbus, size, *alignbits,
                                          alloced, descpa);
        }
        return va;
}
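
/*
 * Worked example (illustrative numbers): the boundary test compares the
 * boundary bit of the first and last ring byte. A 2 KB ring at offset
 * 0x1e00 crosses an 8 KB (D64RINGALIGN == 0x2000) boundary:
 *
 *       0x1e00               & 0x2000  ==  0x0000
 *      (0x1e00 + 0x800 - 1)  & 0x2000  ==  0x2000    -> mismatch
 *
 * so the block is freed and re-allocated with alignbits =
 * dma_align_sizetobits(0x800) == 11, i.e. aligned to the ring size,
 * which can never straddle the boundary.
 */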
/* 64-bit DMA functions */

static void dma64_txinit(dma_info_t *di)
{
        u32 control = D64_XC_XE;

        DMA_TRACE(("%s: dma_txinit\n", di->name));

        if (di->ntxd == 0)
                return;

        di->txin = di->txout = 0;
        di->hnddma.txavail = di->ntxd - 1;

        /* clear tx descriptor ring */
        memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));

        /* DMA engine without alignment requirement requires table to be inited
         * before enabling the engine
         */
        if (!di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_TX, di->txdpa);

        if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
                control |= D64_XC_PD;
        OR_REG(&di->d64txregs->control, control);

        /* DMA engine with alignment requirement requires table to be inited
         * before enabling the engine
         */
        if (di->aligndesc_4k)
                _dma_ddtable_init(di, DMA_TX, di->txdpa);
}
static bool dma64_txenabled(dma_info_t *di)
{
        u32 xc;

        /* If the chip is dead, it is not enabled :-) */
        xc = R_REG(&di->d64txregs->control);
        return (xc != 0xffffffff) && (xc & D64_XC_XE);
}

static void dma64_txsuspend(dma_info_t *di)
{
        DMA_TRACE(("%s: dma_txsuspend\n", di->name));

        if (di->ntxd == 0)
                return;

        OR_REG(&di->d64txregs->control, D64_XC_SE);
}

static void dma64_txresume(dma_info_t *di)
{
        DMA_TRACE(("%s: dma_txresume\n", di->name));

        if (di->ntxd == 0)
                return;

        AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}

static bool dma64_txsuspended(dma_info_t *di)
{
        return (di->ntxd == 0) ||
            ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
             D64_XC_SE);
}
static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
{
        void *p;

        DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
                   (range == HNDDMA_RANGE_ALL) ? "all" :
                   ((range ==
                     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
                    "transfered")));

        if (di->txin == di->txout)
                return;

        while ((p = dma64_getnexttxp(di, range))) {
                /* For unframed data, we don't have any packets to free */
                if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
                        pkt_buf_free_skb(di->osh, p, true);
        }
}

static bool dma64_txstopped(dma_info_t *di)
{
        return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
                D64_XS0_XS_STOPPED);
}

static bool dma64_rxstopped(dma_info_t *di)
{
        return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
                D64_RS0_RS_STOPPED);
}
static bool dma64_alloc(dma_info_t *di, uint direction)
{
        u16 size;
        uint ddlen;
        void *va;
        uint alloced = 0;
        u16 align;
        u16 align_bits;

        ddlen = sizeof(dma64dd_t);

        size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
        align_bits = di->dmadesc_align;
        align = (1 << align_bits);

        if (direction == DMA_TX) {
                va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
                                   &alloced, &di->txdpaorig, &di->tx_dmah);
                if (va == NULL) {
                        DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
                                   " failed\n", di->name));
                        return false;
                }
                align = (1 << align_bits);
                di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
                di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
                PHYSADDRLOSET(di->txdpa,
                              PHYSADDRLO(di->txdpaorig) + di->txdalign);
                /* Make sure that alignment didn't overflow */
                ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));

                PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
                di->txdalloc = alloced;
                ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
        } else {
                va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
                                   &alloced, &di->rxdpaorig, &di->rx_dmah);
                if (va == NULL) {
                        DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
                                   " failed\n", di->name));
                        return false;
                }
                align = (1 << align_bits);
                di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
                di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
                PHYSADDRLOSET(di->rxdpa,
                              PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
                /* Make sure that alignment didn't overflow */
                ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));

                PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
                di->rxdalloc = alloced;
                ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
        }

        return true;
}
static bool dma64_txreset(dma_info_t *di)
{
        u32 status;

        if (di->ntxd == 0)
                return true;

        /* suspend tx DMA first */
        W_REG(&di->d64txregs->control, D64_XC_SE);
        SPINWAIT(((status =
                   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
                  != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
                 && (status != D64_XS0_XS_STOPPED), 10000);

        W_REG(&di->d64txregs->control, 0);
        SPINWAIT(((status =
                   (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
                  != D64_XS0_XS_DISABLED), 10000);

        /* wait for the last transaction to complete */
        udelay(300);

        return status == D64_XS0_XS_DISABLED;
}

static bool dma64_rxidle(dma_info_t *di)
{
        DMA_TRACE(("%s: dma_rxidle\n", di->name));

        if (di->nrxd == 0)
                return true;

        return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
                (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}

static bool dma64_rxreset(dma_info_t *di)
{
        u32 status;

        if (di->nrxd == 0)
                return true;

        W_REG(&di->d64rxregs->control, 0);
        SPINWAIT(((status =
                   (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
                  != D64_RS0_RS_DISABLED), 10000);

        return status == D64_RS0_RS_DISABLED;
}

static bool dma64_rxenabled(dma_info_t *di)
{
        u32 rc;

        rc = R_REG(&di->d64rxregs->control);
        return (rc != 0xffffffff) && (rc & D64_RC_RE);
}

static bool dma64_txsuspendedidle(dma_info_t *di)
{
        if (di->ntxd == 0)
                return true;

        if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
                return 0;

        if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
            D64_XS0_XS_IDLE)
                return 1;

        return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report
 * from the DMA. We return a pointer to the beginning of the DATA buffer of
 * the current descriptor. If DMA is idle, we return NULL.
 */
static void *dma64_getpos(dma_info_t *di, bool direction)
{
        void *va;
        bool idle;
        u32 cd_offset;

        if (direction == DMA_TX) {
                cd_offset =
                    R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
                idle = !NTXDACTIVE(di->txin, di->txout);
                va = di->txp[B2I(cd_offset, dma64dd_t)];
        } else {
                cd_offset =
                    R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
                idle = !NRXDACTIVE(di->rxin, di->rxout);
                va = di->rxp[B2I(cd_offset, dma64dd_t)];
        }

        /* If DMA is IDLE, return NULL */
        if (idle) {
                DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
                va = NULL;
        }

        return va;
}
/* TX of unframed data
 *
 * Adds a DMA ring descriptor for the data pointed to by "buf".
 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call to this results in a single descriptor being added for "len"
 * bytes of data starting at "buf"; it doesn't handle chained buffers.
 */
static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
{
        u16 txout;
        u32 flags = 0;
        dmaaddr_t pa;           /* phys addr */

        txout = di->txout;

        /* return nonzero if out of tx descriptors */
        if (NEXTTXD(txout) == di->txin)
                goto outoftxd;

        if (len == 0)
                return 0;

        pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);

        flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);

        if (txout == (di->ntxd - 1))
                flags |= D64_CTRL1_EOT;

        dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
        ASSERT(di->txp[txout] == NULL);

        /* save the buffer pointer - used by dma_getpos */
        di->txp[txout] = buf;

        txout = NEXTTXD(txout);
        /* bump the tx descriptor index */
        di->txout = txout;

        /* kick the chip */
        if (commit) {
                W_REG(&di->d64txregs->ptr,
                      di->xmtptrbase + I2B(txout, dma64dd_t));
        }

        /* tx flow control */
        di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

        return 0;

 outoftxd:
        DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
        di->hnddma.txavail = 0;
        di->hnddma.txnobuf++;
        return -1;
}
/* !! tx entry routine
 * WARNING: caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent
 *   hard-to-debug problems
 */
static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
                                    bool commit)
{
        struct sk_buff *p, *next;
        unsigned char *data;
        uint len;
        u16 txout;
        u32 flags = 0;
        dmaaddr_t pa;

        DMA_TRACE(("%s: dma_txfast\n", di->name));

        txout = di->txout;

        /*
         * Walk the chain of packet buffers
         * allocating and initializing transmit descriptor entries.
         */
        for (p = p0; p; p = next) {
                uint nsegs, j;
                hnddma_seg_map_t *map;

                data = p->data;
                len = p->len;
#ifdef BCM_DMAPAD
                len += PKTDMAPAD(di->osh, p);
#endif                          /* BCM_DMAPAD */
                next = p->next;

                /* return nonzero if out of tx descriptors */
                if (NEXTTXD(txout) == di->txin)
                        goto outoftxd;

                if (len == 0)
                        continue;

                /* get physical address of buffer start */
                if (DMASGLIST_ENAB)
                        memset(&di->txp_dmah[txout], 0,
                               sizeof(hnddma_seg_map_t));

                pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

                if (DMASGLIST_ENAB) {
                        map = &di->txp_dmah[txout];

                        /* See if all the segments can be accounted for */
                        if (map->nsegs >
                            (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
                                    1))
                                goto outoftxd;

                        nsegs = map->nsegs;
                } else
                        nsegs = 1;

                for (j = 1; j <= nsegs; j++) {
                        flags = 0;
                        if (p == p0 && j == 1)
                                flags |= D64_CTRL1_SOF;

                        /* With a DMA segment list, Descriptor table is filled
                         * using the segment list instead of looping over
                         * buffers in multi-chain DMA. Therefore, EOF for SGLIST
                         * is when end of segment list is reached.
                         */
                        if ((!DMASGLIST_ENAB && next == NULL) ||
                            (DMASGLIST_ENAB && j == nsegs))
                                flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
                        if (txout == (di->ntxd - 1))
                                flags |= D64_CTRL1_EOT;

                        if (DMASGLIST_ENAB) {
                                len = map->segs[j - 1].length;
                                pa = map->segs[j - 1].addr;
                        }
                        dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
                        ASSERT(di->txp[txout] == NULL);

                        txout = NEXTTXD(txout);
                }

                /* See above. No need to loop over individual buffers */
                if (DMASGLIST_ENAB)
                        break;
        }

        /* if last txd eof not set, fix it */
        if (!(flags & D64_CTRL1_EOF))
                W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
                     BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));

        /* save the packet */
        di->txp[PREVTXD(txout)] = p0;

        /* bump the tx descriptor index */
        di->txout = txout;

        /* kick the chip */
        if (commit)
                W_REG(&di->d64txregs->ptr,
                      di->xmtptrbase + I2B(txout, dma64dd_t));

        /* tx flow control */
        di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

        return 0;

 outoftxd:
        DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
        pkt_buf_free_skb(di->osh, p0, true);
        di->hnddma.txavail = 0;
        di->hnddma.txnobuf++;
        return -1;
}
/*
 * Reclaim next completed txd (txds if using chained buffers) in the range
 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transfered by the DMA as noted by the hardware "ActiveDescr" pointer.
 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
 * return associated packet regardless of the value of hardware pointers.
 */
static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
{
        u16 start, end, i;
        u16 active_desc;
        void *txp;

        DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
                   (range == HNDDMA_RANGE_ALL) ? "all" :
                   ((range ==
                     HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
                    "transfered")));

        if (di->ntxd == 0)
                return NULL;

        txp = NULL;

        start = di->txin;
        if (range == HNDDMA_RANGE_ALL)
                end = di->txout;
        else {
                dma64regs_t *dregs = di->d64txregs;

                end =
                    (u16) (B2I
                           (((R_REG(&dregs->status0) &
                              D64_XS0_CD_MASK) -
                             di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));

                if (range == HNDDMA_RANGE_TRANSFERED) {
                        active_desc =
                            (u16) (R_REG(&dregs->status1) &
                                   D64_XS1_AD_MASK);
                        active_desc =
                            (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
                        active_desc = B2I(active_desc, dma64dd_t);
                        if (end != active_desc)
                                end = PREVTXD(active_desc);
                }
        }

        if ((start == 0) && (end > di->txout))
                goto bogus;

        for (i = start; i != end && !txp; i = NEXTTXD(i)) {
                dmaaddr_t pa;
                hnddma_seg_map_t *map = NULL;
                uint size, j, nsegs;

                PHYSADDRLOSET(pa,
                              (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
                               di->dataoffsetlow));
                PHYSADDRHISET(pa,
                              (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
                               di->dataoffsethigh));

                if (DMASGLIST_ENAB) {
                        map = &di->txp_dmah[i];
                        size = map->origsize;
                        nsegs = map->nsegs;
                } else {
                        size =
                            (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
                             D64_CTRL2_BC_MASK);
                        nsegs = 1;
                }

                for (j = nsegs; j > 0; j--) {
                        W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
                        W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);

                        txp = di->txp[i];
                        di->txp[i] = NULL;
                        if (j > 1)
                                i = NEXTTXD(i);
                }

                pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
        }

        di->txin = i;

        /* tx flow control */
        di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

        return txp;

 bogus:
        DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
                  "force %d\n", start, end, di->txout, forceall));
        return NULL;
}
static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
{
        uint i, curr;
        void *rxp;
        dmaaddr_t pa;

        /* if forcing, dma engine must be disabled */
        ASSERT(!forceall || !dma64_rxenabled(di));

        i = di->rxin;

        /* return if no packets posted */
        if (i == di->rxout)
                return NULL;

        curr =
            B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
                 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);

        /* ignore curr if forceall */
        if (!forceall && (i == curr))
                return NULL;

        /* get the packet pointer that corresponds to the rx descriptor */
        rxp = di->rxp[i];
        ASSERT(rxp);
        di->rxp[i] = NULL;

        PHYSADDRLOSET(pa,
                      (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
                       di->dataoffsetlow));
        PHYSADDRHISET(pa,
                      (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
                       di->dataoffsethigh));

        /* clear this packet from the descriptor ring */
        pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);

        W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
        W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);

        di->rxin = NEXTRXD(i);

        return rxp;
}
static bool _dma64_addrext(dma64regs_t *dma64regs)
{
        u32 w;

        OR_REG(&dma64regs->control, D64_XC_AE);
        w = R_REG(&dma64regs->control);
        AND_REG(&dma64regs->control, ~D64_XC_AE);
        return (w & D64_XC_AE) == D64_XC_AE;
}
/*
 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
 */
static void dma64_txrotate(dma_info_t *di)
{
        u16 ad;
        uint nactive;
        uint rot;
        u16 old, new;
        u32 w;
        u16 first, last;

        ASSERT(dma64_txsuspendedidle(di));

        nactive = _dma_txactive(di);
        ad = (u16) (B2I
                    ((((R_REG(&di->d64txregs->status1) &
                        D64_XS1_AD_MASK)
                       - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
        rot = TXD(ad - di->txin);

        ASSERT(rot < di->ntxd);

        /* full-ring case is a lot harder - don't worry about this */
        if (rot >= (di->ntxd - nactive)) {
                DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
                return;
        }

        first = di->txin;
        last = PREVTXD(di->txout);

        /* move entries starting at last and moving backwards to first */
        for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
                new = TXD(old + rot);

                /*
                 * Move the tx dma descriptor.
                 * EOT is set only in the last entry in the ring.
                 */
                w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
                if (new == (di->ntxd - 1))
                        w |= D64_CTRL1_EOT;
                W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));

                w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
                W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));

                W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
                W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));

                /* zap the old tx dma descriptor address field */
                W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
                W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));

                /* move the corresponding txp[] entry */
                ASSERT(di->txp[new] == NULL);
                di->txp[new] = di->txp[old];

                /* Move the map */
                if (DMASGLIST_ENAB) {
                        memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
                               sizeof(hnddma_seg_map_t));
                        memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
                }

                di->txp[old] = NULL;
        }

        /* update txin and txout */
        di->txin = ad;
        di->txout = TXD(di->txout + rot);
        di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

        /* kick the chip */
        W_REG(&di->d64txregs->ptr,
              di->xmtptrbase + I2B(di->txout, dma64dd_t));
}
uint dma_addrwidth(si_t *sih, void *dmaregs)
{
        /* Perform 64-bit checks only if we want to advertise 64-bit (> 32 bit) capability */
        /* DMA engine is 64-bit capable */
        if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
                /* backplane is 64-bit capable */
                if (si_backplane64(sih))
                        /* If bus is System Backplane or PCIE then we can access 64-bits */
                        if ((sih->bustype == SI_BUS) ||
                            ((sih->bustype == PCI_BUS) &&
                             (sih->buscoretype == PCIE_CORE_ID)))
                                return DMADDRWIDTH_64;
        }
        ASSERT(0);              /* DMA hardware not supported by this driver */
        return DMADDRWIDTH_64;
}
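
/*
 * Illustrative sketch (not compiled into the driver): bringing up an engine
 * and driving it through the exported function table. Ring sizes, buffer
 * size, and the osh/sih/register handles are hypothetical, and the di_fn
 * member names are assumed to mirror the di_*_t typedefs (txinit, rxinit,
 * rxfill).
 */
#if 0
struct osl_info *osh;           /* hypothetical handles */
si_t *sih;
void *dmaregstx, *dmaregsrx;
struct hnddma_pub *dmah;

dmah = dma_attach(osh, "wl0", sih, dmaregstx, dmaregsrx,
                  64, 64, 2048, -1, 32, 0, &dma_msg_level);
if (dmah != NULL) {
        dmah->di_fn->txinit(dmah);
        dmah->di_fn->rxinit(dmah);
        dmah->di_fn->rxfill(dmah);
}
#endif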