/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
   munged into HPPA boxen.

   This driver is based upon 82596.c, original credits are below...
   but there were too many hoops which HP wants jumped through to
   keep this code in there in a sane manner.

   3 primary sources of the mess --
   1) hppa needs *lots* of cacheline flushing to keep this kind of
   MMIO running.

   2) The 82596 needs to see all of its pointers as their physical
   address.  Thus virt_to_bus/bus_to_virt are *everywhere*.

   3) The implementation HP is using seems to be significantly pickier
   about when and how the command and RX units are started.  Some
   command ordering was changed.

   Examination of the mach driver leads one to believe that there
   might be a saner way to pull this off...  anyone who feels like a
   full rewrite can be my guest.

   Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)

   02/01/2000  Initial modifications for parisc by Helge Deller (deller@gmx.de)
   03/02/2000  changes for better/correct(?) cache-flushing (deller)
*/

/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gfp.h>

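/*
 * NOTE: this file is a library, not a standalone driver.  A board
 * wrapper (e.g. lasi_82596.c or sni_82596.c) #includes it and must
 * first define the glue it relies on: SWAP16()/SWAP32() for the
 * chip's byte order, SYSBUS, DMA_ALLOC()/DMA_FREE(), the cache
 * maintenance hooks DMA_WBACK()/DMA_INV()/DMA_WBACK_INV(), and the
 * ca()/mpu_port() accessors declared below.  As a rough illustration
 * only (the wrapper's actual definitions are authoritative), a
 * big-endian PA-RISC build might use:
 *
 *	#define SWAP32(x)	cpu_to_be32((u32)(x))
 *	#define SWAP16(x)	cpu_to_be16((u16)(x))
 *
 * while a board whose chip runs little-endian can make both a no-op.
 */
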
/* DEBUG flags */

#define DEB_INIT        0x0001
#define DEB_PROBE       0x0002
#define DEB_SERIOUS     0x0004
#define DEB_ERRORS      0x0008
#define DEB_MULTI       0x0010
#define DEB_TDR         0x0020
#define DEB_OPEN        0x0040
#define DEB_RESET       0x0080
#define DEB_ADDCMD      0x0100
#define DEB_STATUS      0x0200
#define DEB_STARTTX     0x0400
#define DEB_RXADDR      0x0800
#define DEB_TXADDR      0x1000
#define DEB_RXFRAME     0x2000
#define DEB_INTS        0x4000
#define DEB_STRUCT      0x8000
#define DEB_ANY         0xffff


#define DEB(x, y)       if (i596_debug & (x)) { y; }

/*
 * The MPU_PORT command allows direct access to the 82596.  With PORT access
 * the following commands are available (p5-18).  The 32-bit port command
 * must be word-swapped, with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET              0x00    /* reset 82596 */
#define PORT_SELFTEST           0x01    /* selftest */
#define PORT_ALTSCP             0x02    /* alternate SCP address */
#define PORT_ALTDUMP            0x03    /* alternate DUMP address */
static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ      1536
#define MAX_MC_CNT      64

#define ISCP_BUSY       0x0001

#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL         0x8000  /* The last command of the list, stop. */
#define CMD_SUSP        0x4000  /* Suspend after doing cmd. */
#define CMD_INTR        0x2000  /* Interrupt after doing cmd. */

#define CMD_FLEX        0x0008  /* Enable flexible memory model */

enum commands {
        CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C          0x8000  /* Set when command is complete */
#define STAT_B          0x4000  /* Command being executed */
#define STAT_OK         0x2000  /* Command executed ok */
#define STAT_A          0x1000  /* Command aborted */

#define CUC_START       0x0100
#define CUC_RESUME      0x0200
#define CUC_SUSPEND     0x0300
#define CUC_ABORT       0x0400
#define RX_START        0x0010
#define RX_RESUME       0x0020
#define RX_SUSPEND      0x0030
#define RX_ABORT        0x0040

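/* Watchdog timeout for dev->watchdog_timeo: HZ/20 jiffies, i.e. about 50 ms. */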
#define TX_TIMEOUT      (HZ/20)


struct i596_reg {
        unsigned short porthi;
        unsigned short portlo;
        u32            ca;
};

#define EOF             0x8000
#define SIZE_MASK       0x3fff

struct i596_tbd {
        unsigned short size;
        unsigned short pad;
        u32            next;
        u32            data;
        u32 cache_pad[5];               /* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
        struct i596_cmd *v_next;        /* Address from CPU's viewpoint */
        unsigned short status;
        unsigned short command;
        u32            b_next;  /* Address from i596 viewpoint */
};
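
/*
 * For illustration, chaining command b after command a (this is what
 * i596_add_cmd() below actually does):
 *
 *	a->v_next = b;                                     CPU's view
 *	a->b_next = SWAP32(virt_to_dma(lp, &b->status));   chip's view
 */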

struct tx_cmd {
        struct i596_cmd cmd;
        u32            tbd;
        unsigned short size;
        unsigned short pad;
        struct sk_buff *skb;            /* So we can free it after tx */
        dma_addr_t dma_addr;
#ifdef __LP64__
        u32 cache_pad[6];               /* Total 64 bytes... */
#else
        u32 cache_pad[1];               /* Total 32 bytes... */
#endif
};

struct tdr_cmd {
        struct i596_cmd cmd;
        unsigned short status;
        unsigned short pad;
};

struct mc_cmd {
        struct i596_cmd cmd;
        short mc_cnt;
        char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
        struct i596_cmd cmd;
        char eth_addr[8];
};

struct cf_cmd {
        struct i596_cmd cmd;
        char i596_config[16];
};

struct i596_rfd {
        unsigned short stat;
        unsigned short cmd;
        u32            b_next;  /* Address from i596 viewpoint */
        u32            rbd;
        unsigned short count;
        unsigned short size;
        struct i596_rfd *v_next;        /* Address from CPU's viewpoint */
        struct i596_rfd *v_prev;
#ifndef __LP64__
        u32 cache_pad[2];               /* Total 32 bytes... */
#endif
};

struct i596_rbd {
        /* hardware data */
        unsigned short count;
        unsigned short zero1;
        u32            b_next;
        u32            b_data;          /* Address from i596 viewpoint */
        unsigned short size;
        unsigned short zero2;
        /* driver data */
        struct sk_buff *skb;
        struct i596_rbd *v_next;
        u32            b_addr;          /* This rbd addr from i596 view */
        unsigned char *v_data;          /* Address from CPU's viewpoint */
                                        /* Total 32 bytes... */
#ifdef __LP64__
        u32 cache_pad[4];
#endif
};

/* These values are chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

struct i596_scb {
        unsigned short status;
        unsigned short command;
        u32           cmd;
        u32           rfd;
        u32           crc_err;
        u32           align_err;
        u32           resource_err;
        u32           over_err;
        u32           rcvdt_err;
        u32           short_err;
        unsigned short t_on;
        unsigned short t_off;
};

struct i596_iscp {
        u32 stat;
        u32 scb;
};

struct i596_scp {
        u32 sysbus;
        u32 pad;
        u32 iscp;
};

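/*
 * Everything the 82596 touches lives in this single block, allocated
 * via the wrapper's DMA_ALLOC() in i82596_probe().  On machines where
 * that memory is not cache-coherent, the DMA_WBACK()/DMA_INV() calls
 * throughout this file do the flushing; the aligned(32) attributes
 * keep each sub-structure on its own cache line so a flush of one
 * piece never clobbers a neighbouring field.
 */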
struct i596_dma {
        struct i596_scp scp                     __attribute__((aligned(32)));
        volatile struct i596_iscp iscp          __attribute__((aligned(32)));
        volatile struct i596_scb scb            __attribute__((aligned(32)));
        struct sa_cmd sa_cmd                    __attribute__((aligned(32)));
        struct cf_cmd cf_cmd                    __attribute__((aligned(32)));
        struct tdr_cmd tdr_cmd                  __attribute__((aligned(32)));
        struct mc_cmd mc_cmd                    __attribute__((aligned(32)));
        struct i596_rfd rfds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct i596_rbd rbds[RX_RING_SIZE]      __attribute__((aligned(32)));
        struct tx_cmd tx_cmds[TX_RING_SIZE]     __attribute__((aligned(32)));
        struct i596_tbd tbds[TX_RING_SIZE]      __attribute__((aligned(32)));
};

struct i596_private {
        struct i596_dma *dma;
        u32    stat;
        int last_restart;
        struct i596_rfd *rfd_head;
        struct i596_rbd *rbd_head;
        struct i596_cmd *cmd_tail;
        struct i596_cmd *cmd_head;
        int cmd_backlog;
        u32    last_cmd;
        int next_tx_cmd;
        int options;
        spinlock_t lock;        /* serialize access to chip */
        dma_addr_t dma_addr;
        void __iomem *mpu_port;
        void __iomem *ca;
};

static const char init_setup[] =
{
        0x8E,           /* length, prefetch on */
        0xC8,           /* fifo to 8, monitor off */
        0x80,           /* don't save bad frames */
        0x2E,           /* No source address insertion, 8 byte preamble */
        0x00,           /* priority and backoff defaults */
        0x60,           /* interframe spacing */
        0x00,           /* slot time LSB */
        0xf2,           /* slot time and retries */
        0x00,           /* promiscuous mode */
        0x00,           /* collision detect */
        0x40,           /* minimum frame length */
        0xff,
        0x00,
        0x7f            /* multi IA */
};

static int i596_open(struct net_device *dev);
static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout(struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        while (--delcnt && dma->iscp.stat) {
                udelay(10);
                DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
                       dev->name, str, SWAP16(dma->iscp.stat));
                return -1;
        } else
                return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
        DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        while (--delcnt && dma->scb.command) {
                udelay(10);
                DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
                       dev->name, str,
                       SWAP16(dma->scb.status),
                       SWAP16(dma->scb.command));
                return -1;
        } else
                return 0;
}


static void i596_display_data(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        struct i596_cmd *cmd;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
               &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
        printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
               &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
        printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
               " .cmd = %08x, .rfd = %08x\n",
               &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
               SWAP32(dma->scb.cmd), SWAP32(dma->scb.rfd));
        printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
               " over %x, rcvdt %x, short %x\n",
               SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
               SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
               SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
        cmd = lp->cmd_head;
        while (cmd != NULL) {
                printk(KERN_DEBUG
                       "cmd at %p, .status = %04x, .command = %04x,"
                       " .b_next = %08x\n",
                       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
                       SWAP32(cmd->b_next));
                cmd = cmd->v_next;
        }
        rfd = lp->rfd_head;
        printk(KERN_DEBUG "rfd_head = %p\n", rfd);
        do {
                printk(KERN_DEBUG
                       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
                       " count %04x\n",
                       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
                       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
                       SWAP16(rfd->count));
                rfd = rfd->v_next;
        } while (rfd != lp->rfd_head);
        rbd = lp->rbd_head;
        printk(KERN_DEBUG "rbd_head = %p\n", rbd);
        do {
                printk(KERN_DEBUG
                       "   %p .count %04x, b_next %08x, b_data %08x,"
                       " size %04x\n",
                       rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
                       SWAP32(rbd->b_data), SWAP16(rbd->size));
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
        DMA_INV(dev, dma, sizeof(struct i596_dma));
}


#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v) - (unsigned long)((lp)->dma)))
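
/*
 * Example: the bus address the chip should use for the SCB is
 *
 *	dma->iscp.scb = SWAP32(virt_to_dma(lp, &dma->scb));
 *
 * as done in init_i596_mem() below.
 */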

static inline int init_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;

        /* First build the Receive Buffer Descriptor List */

        for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
                dma_addr_t dma_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
                if (skb == NULL)
                        return -1;
                dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                          PKT_BUF_SZ, DMA_FROM_DEVICE);
                rbd->v_next = rbd+1;
                rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
                rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
                rbd->skb = skb;
                rbd->v_data = skb->data;
                rbd->b_data = SWAP32(dma_addr);
                rbd->size = SWAP16(PKT_BUF_SZ);
        }
        lp->rbd_head = dma->rbds;
        rbd = dma->rbds + rx_ring_size - 1;
        rbd->v_next = dma->rbds;
        rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

        /* Now build the Receive Frame Descriptor List */

        for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
                rfd->rbd = I596_NULL;
                rfd->v_next = rfd+1;
                rfd->v_prev = rfd-1;
                rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
                rfd->cmd = SWAP16(CMD_FLEX);
        }
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd = dma->rfds;
        rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
        rfd->v_prev = dma->rfds + rx_ring_size - 1;
        rfd = dma->rfds + rx_ring_size - 1;
        rfd->v_next = dma->rfds;
        rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
        rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
        return 0;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rbd *rbd;
        int i;

        for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
                if (rbd->skb == NULL)
                        break;
                dma_unmap_single(dev->dev.parent,
                                 (dma_addr_t)SWAP32(rbd->b_data),
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb(rbd->skb);
        }
}


static void rebuild_rx_bufs(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int i;

        /* Ensure rx frame/buffer descriptors are tidy */

        for (i = 0; i < rx_ring_size; i++) {
                dma->rfds[i].rbd = I596_NULL;
                dma->rfds[i].cmd = SWAP16(CMD_FLEX);
        }
        dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
        lp->rfd_head = dma->rfds;
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        lp->rbd_head = dma->rbds;
        dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


static int init_i596_mem(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        mpu_port(dev, PORT_RESET, 0);
        udelay(100);                    /* Wait 100us - seems to help */

        /* change the scp address */

        lp->last_cmd = jiffies;

        dma->scp.sysbus = SYSBUS;
        dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
        dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
        dma->iscp.stat = SWAP32(ISCP_BUSY);
        lp->cmd_backlog = 0;

        lp->cmd_head = NULL;
        dma->scb.cmd = I596_NULL;

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

        DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
        DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
        ca(dev);
        if (wait_istat(dev, dma, 1000, "initialization timed out"))
                goto failed;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: i82596 initialization successful\n",
                             dev->name));

        if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
                printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
                goto failed;
        }

        /* Ensure rx frame/buffer descriptors are tidy */
        rebuild_rx_bufs(dev);

        dma->scb.command = 0;
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: queuing CmdConfigure\n", dev->name));
        memcpy(dma->cf_cmd.i596_config, init_setup, 14);
        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
        DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &dma->cf_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
        memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);

        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
        dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
        DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &dma->tdr_cmd.cmd);

        spin_lock_irqsave(&lp->lock, flags);

        if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
                spin_unlock_irqrestore(&lp->lock, flags);
                goto failed_free_irq;
        }
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
        dma->scb.command = SWAP16(RX_START);
        dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
        DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

        ca(dev);

        spin_unlock_irqrestore(&lp->lock, flags);
        if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
                goto failed_free_irq;
        DEB(DEB_INIT, printk(KERN_DEBUG
                             "%s: Receive unit started OK\n", dev->name));
        return 0;

failed_free_irq:
        free_irq(dev->irq, dev);
failed:
        printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
        mpu_port(dev, PORT_RESET, 0);
        return -1;
}


static inline int i596_rx(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_rfd *rfd;
        struct i596_rbd *rbd;
        int frames = 0;

        DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                "i596_rx(), rfd_head %p, rbd_head %p\n",
                                lp->rfd_head, lp->rbd_head));


        rfd = lp->rfd_head;             /* Ref next frame to check */

        DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        while (rfd->stat & SWAP16(STAT_C)) {    /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
                        DMA_INV(dev, rbd, sizeof(struct i596_rbd));
                } else {
                        printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
                        /* XXX Now what? */
                        rbd = NULL;
                }
                DEB(DEB_RXFRAME, printk(KERN_DEBUG
                                        "  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
                                        rfd, rfd->rbd, rfd->stat));

                if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
                        /* a good frame */
                        int pkt_len = SWAP16(rbd->count) & 0x3fff;
                        struct sk_buff *skb = rbd->skb;
                        int rx_in_place = 0;

                        DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
                        frames++;

                        /* Check if the packet is long enough to just accept
                         * without copying to a properly sized skbuff.
                         */

                        if (pkt_len > rx_copybreak) {
                                struct sk_buff *newskb;
                                dma_addr_t dma_addr;

                                dma_unmap_single(dev->dev.parent,
                                                 (dma_addr_t)SWAP32(rbd->b_data),
                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                                /* Get fresh skbuff to replace filled one. */
                                newskb = netdev_alloc_skb_ip_align(dev,
                                                                   PKT_BUF_SZ);
                                if (newskb == NULL) {
                                        skb = NULL;     /* drop pkt */
                                        goto memory_squeeze;
                                }

                                /* Pass up the skb already on the Rx ring. */
                                skb_put(skb, pkt_len);
                                rx_in_place = 1;
                                rbd->skb = newskb;
                                dma_addr = dma_map_single(dev->dev.parent,
                                                          newskb->data,
                                                          PKT_BUF_SZ,
                                                          DMA_FROM_DEVICE);
                                rbd->v_data = newskb->data;
                                rbd->b_data = SWAP32(dma_addr);
                                DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                        } else {
                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
                        }
memory_squeeze:
                        if (skb == NULL) {
                                /* XXX tulip.c can defer packets here!! */
                                dev->stats.rx_dropped++;
                        } else {
                                if (!rx_in_place) {
                                        /* copy the frame into the fresh,
                                         * copybreak-sized skb */
                                        dma_sync_single_for_cpu(dev->dev.parent,
                                                                (dma_addr_t)SWAP32(rbd->b_data),
                                                                PKT_BUF_SZ, DMA_FROM_DEVICE);
                                        memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
                                        dma_sync_single_for_device(dev->dev.parent,
                                                                   (dma_addr_t)SWAP32(rbd->b_data),
                                                                   PKT_BUF_SZ, DMA_FROM_DEVICE);
                                }
                                skb->len = pkt_len;
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += pkt_len;
                        }
                } else {
                        DEB(DEB_ERRORS, printk(KERN_DEBUG
                                               "%s: Error, rfd.stat = 0x%04x\n",
                                               dev->name, rfd->stat));
                        dev->stats.rx_errors++;
                        if (rfd->stat & SWAP16(0x0100))
                                dev->stats.collisions++;
                        if (rfd->stat & SWAP16(0x8000))
                                dev->stats.rx_length_errors++;
                        if (rfd->stat & SWAP16(0x0001))
                                dev->stats.rx_over_errors++;
                        if (rfd->stat & SWAP16(0x0002))
                                dev->stats.rx_fifo_errors++;
                        if (rfd->stat & SWAP16(0x0004))
                                dev->stats.rx_frame_errors++;
                        if (rfd->stat & SWAP16(0x0008))
                                dev->stats.rx_crc_errors++;
                        if (rfd->stat & SWAP16(0x0010))
                                dev->stats.rx_length_errors++;
                }

                /* Clear the buffer descriptor count and EOF + F flags */

                if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
                        DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
                }

                /* Tidy the frame descriptor, marking it as end of list */

                rfd->rbd = I596_NULL;
                rfd->stat = 0;
                rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
                rfd->count = 0;

                /* Update record of next frame descriptor to process */

                lp->dma->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
                DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

                /* Remove end-of-list from old end descriptor */

                rfd->v_prev->cmd = SWAP16(CMD_FLEX);
                DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
                DMA_INV(dev, rfd, sizeof(struct i596_rfd));
        }

        DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

        return 0;
}


static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
        struct i596_cmd *ptr;

        while (lp->cmd_head != NULL) {
                ptr = lp->cmd_head;
                lp->cmd_head = ptr->v_next;
                lp->cmd_backlog--;

                switch (SWAP16(ptr->command) & 0x7) {
                case CmdTx:
                        {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);

                                dev_kfree_skb(skb);

                                dev->stats.tx_errors++;
                                dev->stats.tx_aborted_errors++;

                                ptr->v_next = NULL;
                                ptr->b_next = I596_NULL;
                                tx_cmd->cmd.command = 0;  /* Mark as free */
                                break;
                        }
                default:
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
                DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
        }

        wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
        lp->dma->scb.cmd = I596_NULL;
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
        unsigned long flags;

        DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

        spin_lock_irqsave(&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

        netif_stop_queue(dev);

        /* FIXME: this command might cause an lpmc */
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
        ca(dev);

        /* wait for shutdown */
        wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
        spin_unlock_irqrestore(&lp->lock, flags);

        i596_cleanup_cmd(dev, lp);
        i596_rx(dev);

        netif_start_queue(dev);
        init_i596_mem(dev);
}


static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        unsigned long flags;

        DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
                               lp->cmd_head));

        cmd->status = 0;
        cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
        DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

        spin_lock_irqsave(&lp->lock, flags);

        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
                DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
                dma->scb.command = SWAP16(CUC_START);
                DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
                ca(dev);
        }
        lp->cmd_tail = cmd;
        lp->cmd_backlog++;

        spin_unlock_irqrestore(&lp->lock, flags);

        if (lp->cmd_backlog > max_cmd_backlog) {
                unsigned long tickssofar = jiffies - lp->last_cmd;

                if (tickssofar < ticks_limit)
                        return;

                printk(KERN_ERR
                       "%s: command unit timed out, status resetting.\n",
                       dev->name);
#if 1
                i596_reset(dev, lp);
#endif
        }
}

static int i596_open(struct net_device *dev)
{
        DEB(DEB_OPEN, printk(KERN_DEBUG
                             "%s: i596_open() irq %d.\n", dev->name, dev->irq));

        if (init_rx_bufs(dev)) {
                printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
                return -EAGAIN;
        }
        if (init_i596_mem(dev)) {
                printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
                goto out_remove_rx_bufs;
        }
        netif_start_queue(dev);

        return 0;

out_remove_rx_bufs:
        remove_rx_bufs(dev);
        return -EAGAIN;
}

static void i596_tx_timeout(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);

        /* Transmitter timeout, serious problems. */
        DEB(DEB_ERRORS, printk(KERN_DEBUG
                               "%s: transmit timed out, status resetting.\n",
                               dev->name));

        dev->stats.tx_errors++;

        /* Try to restart the adaptor */
        if (lp->last_restart == dev->stats.tx_packets) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
                /* Shutdown and restart */
                i596_reset(dev, lp);
        } else {
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
                lp->dma->scb.command = SWAP16(CUC_START | RX_START);
                DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
                ca(dev);
                lp->last_restart = dev->stats.tx_packets;
        }

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}


static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct tx_cmd *tx_cmd;
        struct i596_tbd *tbd;
        short length = skb->len;

        DEB(DEB_STARTTX, printk(KERN_DEBUG
                                "%s: i596_start_xmit(%x,%p) called\n",
                                dev->name, skb->len, skb->data));

        if (length < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
                        return NETDEV_TX_OK;
                length = ETH_ZLEN;
        }

        netif_stop_queue(dev);

        tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
        tbd = lp->dma->tbds + lp->next_tx_cmd;

        if (tx_cmd->cmd.command) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: xmit ring full, dropping packet.\n",
                                       dev->name));
                dev->stats.tx_dropped++;

                dev_kfree_skb(skb);
        } else {
                if (++lp->next_tx_cmd == TX_RING_SIZE)
                        lp->next_tx_cmd = 0;
                tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
                tbd->next = I596_NULL;

                tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
                tx_cmd->skb = skb;

                tx_cmd->pad = 0;
                tx_cmd->size = 0;
                tbd->pad = 0;
                tbd->size = SWAP16(EOF | length);

                tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
                                                  skb->len, DMA_TO_DEVICE);
                tbd->data = SWAP32(tx_cmd->dma_addr);

                DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
                DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
                DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);

                dev->stats.tx_packets++;
                dev->stats.tx_bytes += length;
        }

        netif_start_queue(dev);

        return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
        printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
               add, add + 6, add, add[12], add[13], str);
}

static const struct net_device_ops i596_netdev_ops = {
        .ndo_open               = i596_open,
        .ndo_stop               = i596_close,
        .ndo_start_xmit         = i596_start_xmit,
        .ndo_set_rx_mode        = set_multicast_list,
        .ndo_tx_timeout         = i596_tx_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = i596_poll_controller,
#endif
};

static int i82596_probe(struct net_device *dev)
{
        int i;
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma;

        /* This ensures things have been cache line aligned. */
        BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
        BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
        BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
        BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
        BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

        if (!dev->base_addr || !dev->irq)
                return -ENODEV;

        dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
                sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
        }

        dev->netdev_ops = &i596_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        memset(dma, 0, sizeof(struct i596_dma));
        lp->dma = dma;

        dma->scb.command = 0;
        dma->scb.cmd = I596_NULL;
        dma->scb.rfd = I596_NULL;
        spin_lock_init(&lp->lock);

        DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

        i = register_netdev(dev);
        if (i) {
                DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
                         (void *)dma, lp->dma_addr);
                return i;
        }

        DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
                              dev->name, dev->base_addr, dev->dev_addr,
                              dev->irq));
        DEB(DEB_INIT, printk(KERN_INFO
                             "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
                             dev->name, dma, (int)sizeof(struct i596_dma),
                             &dma->scb));

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        i596_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct i596_private *lp;
        struct i596_dma *dma;
        unsigned short status, ack_cmd = 0;

        lp = netdev_priv(dev);
        dma = lp->dma;

        spin_lock(&lp->lock);

        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        status = SWAP16(dma->scb.status);

        DEB(DEB_INTS, printk(KERN_DEBUG
                             "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
                             dev->name, dev->irq, status));

        ack_cmd = status & 0xf000;

        if (!ack_cmd) {
                DEB(DEB_ERRORS, printk(KERN_DEBUG
                                       "%s: interrupt with no events\n",
                                       dev->name));
                spin_unlock(&lp->lock);
                return IRQ_NONE;
        }

        if ((status & 0x8000) || (status & 0x2000)) {
                struct i596_cmd *ptr;

                if ((status & 0x8000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt completed command.\n",
                                   dev->name));
                if ((status & 0x2000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt command unit inactive %x.\n",
                                   dev->name, status & 0x0700));

                while (lp->cmd_head != NULL) {
                        DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & SWAP16(STAT_C)))
                                break;

                        ptr = lp->cmd_head;

                        DEB(DEB_STATUS,
                            printk(KERN_DEBUG
                                   "cmd_head->status = %04x, ->command = %04x\n",
                                   SWAP16(lp->cmd_head->status),
                                   SWAP16(lp->cmd_head->command)));
                        lp->cmd_head = ptr->v_next;
                        lp->cmd_backlog--;

                        switch (SWAP16(ptr->command) & 0x7) {
                        case CmdTx:
                            {
                                struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
                                struct sk_buff *skb = tx_cmd->skb;

                                if (ptr->status & SWAP16(STAT_OK)) {
                                        DEB(DEB_TXADDR,
                                            print_eth(skb->data, "tx-done"));
                                } else {
                                        dev->stats.tx_errors++;
                                        if (ptr->status & SWAP16(0x0020))
                                                dev->stats.collisions++;
                                        if (!(ptr->status & SWAP16(0x0040)))
                                                dev->stats.tx_heartbeat_errors++;
                                        if (ptr->status & SWAP16(0x0400))
                                                dev->stats.tx_carrier_errors++;
                                        if (ptr->status & SWAP16(0x0800))
                                                dev->stats.collisions++;
                                        if (ptr->status & SWAP16(0x1000))
                                                dev->stats.tx_aborted_errors++;
                                }
                                dma_unmap_single(dev->dev.parent,
                                                 tx_cmd->dma_addr,
                                                 skb->len, DMA_TO_DEVICE);
                                dev_kfree_skb_irq(skb);

                                tx_cmd->cmd.command = 0; /* Mark free */
                                break;
                            }
                        case CmdTDR:
                            {
                                unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

                                if (status & 0x8000) {
                                        DEB(DEB_ANY,
                                            printk(KERN_DEBUG "%s: link ok.\n",
                                                   dev->name));
                                } else {
                                        if (status & 0x4000)
                                                printk(KERN_ERR
                                                       "%s: Transceiver problem.\n",
                                                       dev->name);
                                        if (status & 0x2000)
                                                printk(KERN_ERR
                                                       "%s: Termination problem.\n",
                                                       dev->name);
                                        if (status & 0x1000)
                                                printk(KERN_ERR
                                                       "%s: Short circuit.\n",
                                                       dev->name);

                                        DEB(DEB_TDR,
                                            printk(KERN_DEBUG "%s: Time %d.\n",
                                                   dev->name, status & 0x07ff));
                                }
                                break;
                            }
                        case CmdConfigure:
                                /*
                                 * Zap command so set_multicast_list() knows
                                 * it is free
                                 */
                                ptr->command = 0;
                                break;
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                        DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }

                /* This mess is arranging that only the last of any outstanding
                 * commands has the interrupt bit set.  Should probably really
                 * only add to the cmd queue when the CU is stopped.
                 */
                ptr = lp->cmd_head;
                while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
                        struct i596_cmd *prev = ptr;

                        ptr->command &= SWAP16(0x1fff);
                        ptr = ptr->v_next;
                        DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
                }

                if (lp->cmd_head != NULL)
                        ack_cmd |= CUC_START;
                dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
                DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
                        DEB(DEB_INTS,
                            printk(KERN_DEBUG
                                   "%s: i596 interrupt received a frame.\n",
                                   dev->name));
                i596_rx(dev);
                /* Only RX_START if stopped - RGH 07-07-96 */
                if (status & 0x1000) {
                        if (netif_running(dev)) {
                                DEB(DEB_ERRORS,
                                    printk(KERN_DEBUG
                                           "%s: i596 interrupt receive unit inactive, status 0x%x\n",
                                           dev->name, status));
                                ack_cmd |= RX_START;
                                dev->stats.rx_errors++;
                                dev->stats.rx_fifo_errors++;
                                rebuild_rx_bufs(dev);
                        }
                }
        }
        wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
        dma->scb.command = SWAP16(ack_cmd);
        DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

        /* DANGER: I suspect that some kind of interrupt
         * acknowledgement aside from acking the 82596 might be needed
         * here...  but it's running acceptably without.
         */

        ca(dev);

        wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
        DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

        spin_unlock(&lp->lock);
        return IRQ_HANDLED;
}

static int i596_close(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);

        DEB(DEB_INIT,
            printk(KERN_DEBUG
                   "%s: Shutting down ethercard, status was %4.4x.\n",
                   dev->name, SWAP16(lp->dma->scb.status)));

        spin_lock_irqsave(&lp->lock, flags);

        wait_cmd(dev, lp->dma, 100, "close1 timed out");
        lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
        DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

        ca(dev);

        wait_cmd(dev, lp->dma, 100, "close2 timed out");
        spin_unlock_irqrestore(&lp->lock, flags);
        DEB(DEB_STRUCT, i596_display_data(dev));
        i596_cleanup_cmd(dev, lp);

        free_irq(dev->irq, dev);
        remove_rx_bufs(dev);

        return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
        struct i596_private *lp = netdev_priv(dev);
        struct i596_dma *dma = lp->dma;
        int config = 0, cnt;

        DEB(DEB_MULTI,
            printk(KERN_DEBUG
                   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
                   dev->name, netdev_mc_count(dev),
                   dev->flags & IFF_PROMISC ? "ON" : "OFF",
                   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

        if ((dev->flags & IFF_PROMISC) &&
            !(dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] |= 0x01;
                config = 1;
        }
        if (!(dev->flags & IFF_PROMISC) &&
            (dma->cf_cmd.i596_config[8] & 0x01)) {
                dma->cf_cmd.i596_config[8] &= ~0x01;
                config = 1;
        }
        if ((dev->flags & IFF_ALLMULTI) &&
            (dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] &= ~0x20;
                config = 1;
        }
        if (!(dev->flags & IFF_ALLMULTI) &&
            !(dma->cf_cmd.i596_config[11] & 0x20)) {
                dma->cf_cmd.i596_config[11] |= 0x20;
                config = 1;
        }
        if (config) {
                if (dma->cf_cmd.cmd.command) {
                        printk(KERN_INFO
                               "%s: config change request already queued\n",
                               dev->name);
                } else {
                        dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
                        DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
                        i596_add_cmd(dev, &dma->cf_cmd.cmd);
                }
        }

        cnt = netdev_mc_count(dev);
        if (cnt > MAX_MC_CNT) {
                cnt = MAX_MC_CNT;
                printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
                       dev->name, cnt);
        }

        if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
                unsigned char *cp;
                struct mc_cmd *cmd;

                cmd = &dma->mc_cmd;
                cmd->cmd.command = SWAP16(CmdMulticastList);
                /* use the capped count, so the chip never reads past the
                 * MAX_MC_CNT entries actually copied below */
                cmd->mc_cnt = SWAP16(cnt * ETH_ALEN);
                cp = cmd->mc_addrs;
                netdev_for_each_mc_addr(ha, dev) {
                        if (!cnt--)
                                break;
                        memcpy(cp, ha->addr, ETH_ALEN);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %pM\n",
                                           dev->name, cp));
                        cp += ETH_ALEN;
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
}