/*
 * Pileus Git - ~andy/linux/blob - drivers/ide/arm/icside.c
 * icside: use hwif->dev
 * [~andy/linux] / drivers / ide / arm / icside.c
 */
1 /*
2  * linux/drivers/ide/arm/icside.c
3  *
4  * Copyright (c) 1996-2004 Russell King.
5  *
6  * Please note that this platform does not support 32-bit IDE IO.
7  */
8
9 #include <linux/string.h>
10 #include <linux/module.h>
11 #include <linux/ioport.h>
12 #include <linux/slab.h>
13 #include <linux/blkdev.h>
14 #include <linux/errno.h>
15 #include <linux/hdreg.h>
16 #include <linux/ide.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/device.h>
19 #include <linux/init.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22
23 #include <asm/dma.h>
24 #include <asm/ecard.h>
25
/* Offset of the card identity bits within the podule address space. */
#define ICS_IDENT_OFFSET                0x2280

/* Version 5 card: single IDE interface. */
#define ICS_ARCIN_V5_INTRSTAT           0x0000
#define ICS_ARCIN_V5_INTROFFSET         0x0004
#define ICS_ARCIN_V5_IDEOFFSET          0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET       0x2b80
#define ICS_ARCIN_V5_IDESTEPPING        6

/* Version 6 card: two IDE interfaces, one register set per channel. */
#define ICS_ARCIN_V6_IDEOFFSET_1        0x2000
#define ICS_ARCIN_V6_INTROFFSET_1       0x2200
#define ICS_ARCIN_V6_INTRSTAT_1         0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1     0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2        0x3000
#define ICS_ARCIN_V6_INTROFFSET_2       0x3200
#define ICS_ARCIN_V6_INTRSTAT_2         0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2     0x3380
#define ICS_ARCIN_V6_IDESTEPPING        6
43
/*
 * Address layout of one IDE interface within the card's memory window:
 * where the taskfile registers start, where the control register lives,
 * and the register spacing (registers are 1 << stepping bytes apart).
 */
struct cardinfo {
        unsigned int dataoffset;        /* base of taskfile register bank */
        unsigned int ctrloffset;        /* alternate status/control register */
        unsigned int stepping;          /* log2 byte stride between registers */
};
49
/* Register layout for the single interface of a v5 card. */
static struct cardinfo icside_cardinfo_v5 = {
        .dataoffset     = ICS_ARCIN_V5_IDEOFFSET,
        .ctrloffset     = ICS_ARCIN_V5_IDEALTOFFSET,
        .stepping       = ICS_ARCIN_V5_IDESTEPPING,
};

/* Register layout for the first interface of a v6 card. */
static struct cardinfo icside_cardinfo_v6_1 = {
        .dataoffset     = ICS_ARCIN_V6_IDEOFFSET_1,
        .ctrloffset     = ICS_ARCIN_V6_IDEALTOFFSET_1,
        .stepping       = ICS_ARCIN_V6_IDESTEPPING,
};

/* Register layout for the second interface of a v6 card. */
static struct cardinfo icside_cardinfo_v6_2 = {
        .dataoffset     = ICS_ARCIN_V6_IDEOFFSET_2,
        .ctrloffset     = ICS_ARCIN_V6_IDEALTOFFSET_2,
        .stepping       = ICS_ARCIN_V6_IDESTEPPING,
};
67
/* Per-card driver state, shared by both channels of a v6 card. */
struct icside_state {
        unsigned int channel;           /* channel last routed by maskproc */
        unsigned int enabled;           /* card interrupts currently enabled */
        void __iomem *irq_port;         /* base for interrupt mask/status regs */
        void __iomem *ioc_base;         /* IOCFAST region (ROM/EASI latch) */
        unsigned int type;              /* one of the ICS_TYPE_* values below */
        ide_hwif_t *hwif[2];            /* registered interfaces (v5 uses [0]) */
};

/* Card type as read from the identity bits at ICS_IDENT_OFFSET. */
#define ICS_TYPE_A3IN   0
#define ICS_TYPE_A3USER 1
#define ICS_TYPE_V6     3
#define ICS_TYPE_V5     15
#define ICS_TYPE_NOTYPE ((unsigned int)-1)
82
83 /* ---------------- Version 5 PCB Support Functions --------------------- */
84 /* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
85  * Purpose  : enable interrupts from card
86  */
87 static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
88 {
89         struct icside_state *state = ec->irq_data;
90
91         writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
92 }
93
94 /* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
95  * Purpose  : disable interrupts from card
96  */
97 static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
98 {
99         struct icside_state *state = ec->irq_data;
100
101         readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
102 }
103
/* Expansion-card interrupt operations for v5 cards (no pending hook). */
static const expansioncard_ops_t icside_ops_arcin_v5 = {
        .irqenable      = icside_irqenable_arcin_v5,
        .irqdisable     = icside_irqdisable_arcin_v5,
};
108
109
110 /* ---------------- Version 6 PCB Support Functions --------------------- */
111 /* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
112  * Purpose  : enable interrupts from card
113  */
114 static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
115 {
116         struct icside_state *state = ec->irq_data;
117         void __iomem *base = state->irq_port;
118
119         state->enabled = 1;
120
121         switch (state->channel) {
122         case 0:
123                 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
124                 readb(base + ICS_ARCIN_V6_INTROFFSET_2);
125                 break;
126         case 1:
127                 writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
128                 readb(base + ICS_ARCIN_V6_INTROFFSET_1);
129                 break;
130         }
131 }
132
133 /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
134  * Purpose  : disable interrupts from card
135  */
136 static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
137 {
138         struct icside_state *state = ec->irq_data;
139
140         state->enabled = 0;
141
142         readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
143         readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
144 }
145
146 /* Prototype: icside_irqprobe(struct expansion_card *ec)
147  * Purpose  : detect an active interrupt from card
148  */
149 static int icside_irqpending_arcin_v6(struct expansion_card *ec)
150 {
151         struct icside_state *state = ec->irq_data;
152
153         return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
154                readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
155 }
156
/* Expansion-card interrupt operations for v6 cards. */
static const expansioncard_ops_t icside_ops_arcin_v6 = {
        .irqenable      = icside_irqenable_arcin_v6,
        .irqdisable     = icside_irqdisable_arcin_v6,
        .irqpending     = icside_irqpending_arcin_v6,
};
162
163 /*
164  * Handle routing of interrupts.  This is called before
165  * we write the command to the drive.
166  */
167 static void icside_maskproc(ide_drive_t *drive, int mask)
168 {
169         ide_hwif_t *hwif = HWIF(drive);
170         struct icside_state *state = hwif->hwif_data;
171         unsigned long flags;
172
173         local_irq_save(flags);
174
175         state->channel = hwif->channel;
176
177         if (state->enabled && !mask) {
178                 switch (hwif->channel) {
179                 case 0:
180                         writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
181                         readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
182                         break;
183                 case 1:
184                         writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
185                         readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
186                         break;
187                 }
188         } else {
189                 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
190                 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
191         }
192
193         local_irq_restore(flags);
194 }
195
196 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
197 /*
198  * SG-DMA support.
199  *
200  * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
201  * There is only one DMA controller per card, which means that only
202  * one drive can be accessed at one time.  NOTE! We do not enforce that
203  * here, but we rely on the main IDE driver spotting that both
204  * interfaces use the same IRQ, which should guarantee this.
205  */
206
207 static void icside_build_sglist(ide_drive_t *drive, struct request *rq)
208 {
209         ide_hwif_t *hwif = drive->hwif;
210         struct scatterlist *sg = hwif->sg_table;
211
212         ide_map_sg(drive, rq);
213
214         if (rq_data_dir(rq) == READ)
215                 hwif->sg_dma_direction = DMA_FROM_DEVICE;
216         else
217                 hwif->sg_dma_direction = DMA_TO_DEVICE;
218
219         hwif->sg_nents = dma_map_sg(hwif->dev, sg, hwif->sg_nents,
220                                     hwif->sg_dma_direction);
221 }
222
223 /*
224  * Configure the IOMD to give the appropriate timings for the transfer
225  * mode being requested.  We take the advice of the ATA standards, and
226  * calculate the cycle time based on the transfer mode, and the EIDE
227  * MW DMA specs that the drive provides in the IDENTIFY command.
228  *
229  * We have the following IOMD DMA modes to choose from:
230  *
231  *      Type    Active          Recovery        Cycle
232  *      A       250 (250)       312 (550)       562 (800)
233  *      B       187             250             437
234  *      C       125 (125)       125 (375)       250 (500)
235  *      D       62              125             187
236  *
237  * (figures in brackets are actual measured timings)
238  *
239  * However, we also need to take care of the read/write active and
240  * recovery timings:
241  *
242  *                      Read    Write
243  *      Mode    Active  -- Recovery --  Cycle   IOMD type
244  *      MW0     215     50      215     480     A
245  *      MW1     80      50      50      150     C
246  *      MW2     70      25      25      120     C
247  */
248 static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
249 {
250         int cycle_time, use_dma_info = 0;
251
252         switch (xfer_mode) {
253         case XFER_MW_DMA_2:
254                 cycle_time = 250;
255                 use_dma_info = 1;
256                 break;
257
258         case XFER_MW_DMA_1:
259                 cycle_time = 250;
260                 use_dma_info = 1;
261                 break;
262
263         case XFER_MW_DMA_0:
264                 cycle_time = 480;
265                 break;
266
267         case XFER_SW_DMA_2:
268         case XFER_SW_DMA_1:
269         case XFER_SW_DMA_0:
270                 cycle_time = 480;
271                 break;
272         }
273
274         /*
275          * If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
276          * take care to note the values in the ID...
277          */
278         if (use_dma_info && drive->id->eide_dma_time > cycle_time)
279                 cycle_time = drive->id->eide_dma_time;
280
281         drive->drive_data = cycle_time;
282
283         printk("%s: %s selected (peak %dMB/s)\n", drive->name,
284                 ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
285 }
286
/*
 * Required hook for the IDE core; intentionally a no-op in this driver
 * (there is nothing here to program when DMA is switched on or off).
 */
static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}
290
291 static int icside_dma_end(ide_drive_t *drive)
292 {
293         ide_hwif_t *hwif = HWIF(drive);
294         struct expansion_card *ec = ECARD_DEV(hwif->dev);
295
296         drive->waiting_for_dma = 0;
297
298         disable_dma(ec->dma);
299
300         /* Teardown mappings after DMA has completed. */
301         dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
302                      hwif->sg_dma_direction);
303
304         return get_dma_residue(ec->dma) != 0;
305 }
306
307 static void icside_dma_start(ide_drive_t *drive)
308 {
309         ide_hwif_t *hwif = HWIF(drive);
310         struct expansion_card *ec = ECARD_DEV(hwif->dev);
311
312         /* We can not enable DMA on both channels simultaneously. */
313         BUG_ON(dma_channel_active(ec->dma));
314         enable_dma(ec->dma);
315 }
316
/*
 * Prepare the IOMD DMA channel for the current request: build the SG
 * list, route the interrupt and DMA signals to this interface, program
 * the timing chosen by icside_set_dma_mode(), and hand the SG table to
 * the DMA engine.  Returns 0 on success (icside_dma_start() follows).
 */
static int icside_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct request *rq = hwif->hwgroup->rq;
	unsigned int dma_mode;

	/* Non-zero data direction means a write. */
	if (rq_data_dir(rq))
		dma_mode = DMA_MODE_WRITE;
	else
		dma_mode = DMA_MODE_READ;

	/*
	 * We can not enable DMA on both channels.
	 */
	BUG_ON(dma_channel_active(ec->dma));

	icside_build_sglist(drive, rq);

	/*
	 * Ensure that we have the right interrupt routed.
	 */
	icside_maskproc(drive, 0);

	/*
	 * Route the DMA signals to the correct interface.
	 * (config_data holds the ioc_base latch address, select_data the
	 * per-channel select value set up in icside_register_v6().)
	 */
	writeb(hwif->select_data, hwif->config_data);

	/*
	 * Select the correct timing for this drive.
	 * (drive_data was filled in by icside_set_dma_mode().)
	 */
	set_dma_speed(ec->dma, drive->drive_data);

	/*
	 * Tell the DMA engine about the SG table and
	 * data direction.
	 */
	set_dma_sg(ec->dma, hwif->sg_table, hwif->sg_nents);
	set_dma_mode(ec->dma, dma_mode);

	drive->waiting_for_dma = 1;

	return 0;
}
362
/*
 * Issue the taskfile command that starts the DMA transfer; completion
 * is handled by ide_dma_intr with a doubled command timeout.
 */
static void icside_dma_exec_cmd(ide_drive_t *drive, u8 cmd)
{
	/* issue cmd to drive */
	ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD, NULL);
}
368
369 static int icside_dma_test_irq(ide_drive_t *drive)
370 {
371         ide_hwif_t *hwif = HWIF(drive);
372         struct icside_state *state = hwif->hwif_data;
373
374         return readb(state->irq_port +
375                      (hwif->channel ?
376                         ICS_ARCIN_V6_INTRSTAT_2 :
377                         ICS_ARCIN_V6_INTRSTAT_1)) & 1;
378 }
379
380 static void icside_dma_timeout(ide_drive_t *drive)
381 {
382         printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
383
384         if (icside_dma_test_irq(drive))
385                 return;
386
387         ide_dump_status(drive, "DMA timeout", HWIF(drive)->INB(IDE_STATUS_REG));
388
389         icside_dma_end(drive);
390 }
391
/* An expected DMA interrupt never arrived; just report it. */
static void icside_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
}
396
/*
 * Install the icside DMA hooks on an interface and advertise the
 * supported DMA modes.  No BM-DMA descriptor table is used - the IOMD
 * engine works directly from the scatterlist.
 */
static void icside_dma_init(ide_hwif_t *hwif)
{
	hwif->mwdma_mask        = 7; /* MW0..2 */
	hwif->swdma_mask        = 7; /* SW0..2 */

	/* No PRD table: the IOMD DMA engine consumes the SG list itself. */
	hwif->dmatable_cpu      = NULL;
	hwif->dmatable_dma      = 0;
	hwif->set_dma_mode      = icside_set_dma_mode;

	hwif->dma_host_set      = icside_dma_host_set;
	hwif->dma_setup         = icside_dma_setup;
	hwif->dma_exec_cmd      = icside_dma_exec_cmd;
	hwif->dma_start         = icside_dma_start;
	hwif->ide_dma_end       = icside_dma_end;
	hwif->ide_dma_test_irq  = icside_dma_test_irq;
	hwif->dma_timeout       = icside_dma_timeout;
	hwif->dma_lost_irq      = icside_dma_lost_irq;
}
415 #else
416 #define icside_dma_init(hwif)   (0)
417 #endif
418
419 static ide_hwif_t *
420 icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
421 {
422         unsigned long port = (unsigned long)base + info->dataoffset;
423         ide_hwif_t *hwif;
424
425         hwif = ide_find_port(port);
426         if (hwif) {
427                 int i;
428
429                 /*
430                  * Ensure we're using MMIO
431                  */
432                 default_hwif_mmiops(hwif);
433                 hwif->mmio = 1;
434
435                 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
436                         hwif->io_ports[i] = port;
437                         port += 1 << info->stepping;
438                 }
439                 hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
440                 hwif->irq     = ec->irq;
441                 hwif->noprobe = 0;
442                 hwif->chipset = ide_acorn;
443                 hwif->gendev.parent = &ec->dev;
444                 hwif->dev = &ec->dev;
445         }
446
447         return hwif;
448 }
449
450 static int __init
451 icside_register_v5(struct icside_state *state, struct expansion_card *ec)
452 {
453         ide_hwif_t *hwif;
454         void __iomem *base;
455         u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
456
457         base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
458         if (!base)
459                 return -ENOMEM;
460
461         state->irq_port = base;
462
463         ec->irqaddr  = base + ICS_ARCIN_V5_INTRSTAT;
464         ec->irqmask  = 1;
465
466         ecard_setirq(ec, &icside_ops_arcin_v5, state);
467
468         /*
469          * Be on the safe side - disable interrupts
470          */
471         icside_irqdisable_arcin_v5(ec, 0);
472
473         hwif = icside_setup(base, &icside_cardinfo_v5, ec);
474         if (!hwif)
475                 return -ENODEV;
476
477         state->hwif[0] = hwif;
478
479         idx[0] = hwif->index;
480
481         ide_device_add(idx);
482
483         return 0;
484 }
485
/*
 * Register both interfaces of a v6 card.
 *
 * The card is reached through the IOCFAST region; if the card also
 * decodes an EASI region, taskfile access goes through that instead
 * and the latch at ioc_base is programmed (bit 5) to enable it.
 * Both channels share one DMA channel and one interrupt, so the two
 * hwifs are serialized.  Returns 0 on success or a negative errno.
 * All iomaps are device-managed, so error paths need no unmapping.
 */
static int __init
icside_register_v6(struct icside_state *state, struct expansion_card *ec)
{
	ide_hwif_t *hwif, *mate;
	void __iomem *ioc_base, *easi_base;
	unsigned int sel = 0;
	int ret;
	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base) {
		ret = -ENOMEM;
		goto out;
	}

	/* Default: register access via the IOCFAST region. */
	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	ecard_setirq(ec, &icside_ops_arcin_v6, state);

	state->irq_port   = easi_base;
	state->ioc_base   = ioc_base;

	/*
	 * Be on the safe side - disable interrupts
	 */
	icside_irqdisable_arcin_v6(ec, 0);

	/*
	 * Find and register the interfaces.
	 */
	hwif = icside_setup(easi_base, &icside_cardinfo_v6_1, ec);
	mate = icside_setup(easi_base, &icside_cardinfo_v6_2, ec);

	if (!hwif || !mate) {
		ret = -ENODEV;
		goto out;
	}

	state->hwif[0]    = hwif;
	state->hwif[1]    = mate;

	/* Channel 0: interrupt steering and DMA routing via ioc_base. */
	hwif->maskproc    = icside_maskproc;
	hwif->channel     = 0;
	hwif->hwif_data   = state;
	hwif->mate        = mate;
	hwif->serialized  = 1;
	hwif->config_data = (unsigned long)ioc_base;
	hwif->select_data = sel;

	/* Channel 1: as above, with bit 0 of the select value set. */
	mate->maskproc    = icside_maskproc;
	mate->channel     = 1;
	mate->hwif_data   = state;
	mate->mate        = hwif;
	mate->serialized  = 1;
	mate->config_data = (unsigned long)ioc_base;
	mate->select_data = sel | 1;

	/* DMA is optional: fall back to PIO if the channel is unavailable. */
	if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
		icside_dma_init(hwif);
		icside_dma_init(mate);
	}

	idx[0] = hwif->index;
	idx[1] = mate->index;

	ide_device_add(idx);

	return 0;

 out:
	return ret;
}
573
574 static int __devinit
575 icside_probe(struct expansion_card *ec, const struct ecard_id *id)
576 {
577         struct icside_state *state;
578         void __iomem *idmem;
579         int ret;
580
581         ret = ecard_request_resources(ec);
582         if (ret)
583                 goto out;
584
585         state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
586         if (!state) {
587                 ret = -ENOMEM;
588                 goto release;
589         }
590
591         state->type     = ICS_TYPE_NOTYPE;
592
593         idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
594         if (idmem) {
595                 unsigned int type;
596
597                 type = readb(idmem + ICS_IDENT_OFFSET) & 1;
598                 type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
599                 type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
600                 type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
601                 ecardm_iounmap(ec, idmem);
602
603                 state->type = type;
604         }
605
606         switch (state->type) {
607         case ICS_TYPE_A3IN:
608                 dev_warn(&ec->dev, "A3IN unsupported\n");
609                 ret = -ENODEV;
610                 break;
611
612         case ICS_TYPE_A3USER:
613                 dev_warn(&ec->dev, "A3USER unsupported\n");
614                 ret = -ENODEV;
615                 break;
616
617         case ICS_TYPE_V5:
618                 ret = icside_register_v5(state, ec);
619                 break;
620
621         case ICS_TYPE_V6:
622                 ret = icside_register_v6(state, ec);
623                 break;
624
625         default:
626                 dev_warn(&ec->dev, "unknown interface type\n");
627                 ret = -ENODEV;
628                 break;
629         }
630
631         if (ret == 0) {
632                 ecard_set_drvdata(ec, state);
633                 goto out;
634         }
635
636         kfree(state);
637  release:
638         ecard_release_resources(ec);
639  out:
640         return ret;
641 }
642
643 static void __devexit icside_remove(struct expansion_card *ec)
644 {
645         struct icside_state *state = ecard_get_drvdata(ec);
646
647         switch (state->type) {
648         case ICS_TYPE_V5:
649                 /* FIXME: tell IDE to stop using the interface */
650
651                 /* Disable interrupts */
652                 icside_irqdisable_arcin_v5(ec, 0);
653                 break;
654
655         case ICS_TYPE_V6:
656                 /* FIXME: tell IDE to stop using the interface */
657                 if (ec->dma != NO_DMA)
658                         free_dma(ec->dma);
659
660                 /* Disable interrupts */
661                 icside_irqdisable_arcin_v6(ec, 0);
662
663                 /* Reset the ROM pointer/EASI selection */
664                 writeb(0, state->ioc_base);
665                 break;
666         }
667
668         ecard_set_drvdata(ec, NULL);
669
670         kfree(state);
671         ecard_release_resources(ec);
672 }
673
674 static void icside_shutdown(struct expansion_card *ec)
675 {
676         struct icside_state *state = ecard_get_drvdata(ec);
677         unsigned long flags;
678
679         /*
680          * Disable interrupts from this card.  We need to do
681          * this before disabling EASI since we may be accessing
682          * this register via that region.
683          */
684         local_irq_save(flags);
685         ec->ops->irqdisable(ec, 0);
686         local_irq_restore(flags);
687
688         /*
689          * Reset the ROM pointer so that we can read the ROM
690          * after a soft reboot.  This also disables access to
691          * the IDE taskfile via the EASI region.
692          */
693         if (state->ioc_base)
694                 writeb(0, state->ioc_base);
695 }
696
/* Expansion cards this driver binds to; terminated by 0xffff/0xffff. */
static const struct ecard_id icside_ids[] = {
        { MANU_ICS,  PROD_ICS_IDE  },
        { MANU_ICS2, PROD_ICS2_IDE },
        { 0xffff, 0xffff }
};
702
/* Expansion-card driver glue binding probe/remove/shutdown to the bus. */
static struct ecard_driver icside_driver = {
        .probe          = icside_probe,
        .remove         = __devexit_p(icside_remove),
        .shutdown       = icside_shutdown,
        .id_table       = icside_ids,
        .drv = {
                .name   = "icside",
        },
};
712
/* Module entry point: register with the expansion-card bus. */
static int __init icside_init(void)
{
	return ecard_register_driver(&icside_driver);
}
717
/* Standard module metadata and initialisation hookup. */
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");

module_init(icside_init);