/*
 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
 *           Due to massive hardware bugs, UltraDMA is only supported
 *           on the 646U2 and not on the 646U.
 *
 * Copyright (C) 1998		Eddie C. Dost  (ecd@skynet.be)
 * Copyright (C) 1998		David S. Miller (davem@redhat.com)
 *
 * Copyright (C) 1999-2002	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2007-2010	Bartlomiej Zolnierkiewicz
 * Copyright (C) 2007,2009	MontaVista Software, Inc. <source@mvista.com>
 */
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/pci.h>
17 #include <linux/ide.h>
18 #include <linux/init.h>
#define DRV_NAME "cmd64x"

/*
 * CMD64x specific registers definition.
 *
 * NOTE(review): only a subset of these symbols was legible in the damaged
 * source; the offsets below follow the PCI064x register map and should be
 * confirmed against the chip datasheet.
 */
#define CFR		0x50
#define   CFR_INTR_CH0		0x04

#define	CMDTIM		0x52
#define	ARTTIM0		0x53
#define	DRWTIM0		0x54
#define ARTTIM1 	0x55
#define DRWTIM1		0x56
#define ARTTIM23	0x57
#define   ARTTIM23_DIS_RA2	0x04
#define   ARTTIM23_DIS_RA3	0x08
#define   ARTTIM23_INTR_CH1	0x10
#define DRWTIM2		0x58
#define BRST		0x59
#define DRWTIM3		0x5b

#define BMIDECR0	0x70
#define MRDMODE		0x71
#define   MRDMODE_INTR_CH0	0x04
#define   MRDMODE_INTR_CH1	0x08
#define UDIDETCR0	0x73
#define DTPR0		0x74
#define BMIDECR1	0x78
#define BMIDECSR	0x79
#define UDIDETCR1	0x7B
#define DTPR1		0x7C
54 static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
56 ide_hwif_t *hwif = drive->hwif;
57 struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
58 int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
59 const unsigned long T = 1000000 / bus_speed;
60 static const u8 recovery_values[] =
61 {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
62 static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
63 static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
64 static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
68 ide_timing_compute(drive, mode, &t, T, 0);
71 * In case we've got too long recovery phase, try to lengthen
75 t.active += t.recover - 16;
78 if (t.active > 16) /* shouldn't actually happen... */
82 * Convert values to internal chipset representation
84 t.recover = recovery_values[t.recover];
87 /* Program the active/recovery counts into the DRWTIM register */
88 pci_write_config_byte(dev, drwtim_regs[drive->dn],
89 (t.active << 4) | t.recover);
91 if (mode >= XFER_SW_DMA_0)
95 * The primary channel has individual address setup timing registers
96 * for each drive and the hardware selects the slowest timing itself.
97 * The secondary channel has one common register and we have to select
98 * the slowest address setup timing ourselves.
101 ide_drive_t *pair = ide_get_pair_dev(drive);
103 ide_set_drivedata(drive, (void *)(unsigned long)t.setup);
106 t.setup = max_t(u8, t.setup,
107 (unsigned long)ide_get_drivedata(pair));
110 if (t.setup > 5) /* shouldn't actually happen... */
114 * Program the address setup clocks into the ARTTIM registers.
115 * Avoid clearing the secondary channel's interrupt bit.
117 (void) pci_read_config_byte (dev, arttim_regs[drive->dn], &arttim);
119 arttim &= ~ARTTIM23_INTR_CH1;
121 arttim |= setup_values[t.setup];
122 (void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
126 * Attempts to set drive's PIO mode.
127 * Special cases are 8: prefetch off, 9: prefetch on (both never worked)
130 static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
133 * Filter out the prefetch control values
134 * to prevent PIO5 from being programmed
136 if (pio == 8 || pio == 9)
139 cmd64x_program_timings(drive, XFER_PIO_0 + pio);
142 static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
144 ide_hwif_t *hwif = drive->hwif;
145 struct pci_dev *dev = to_pci_dev(hwif->dev);
146 u8 unit = drive->dn & 0x01;
147 u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
149 if (speed >= XFER_SW_DMA_0) {
150 (void) pci_read_config_byte(dev, pciU, ®U);
151 regU &= ~(unit ? 0xCA : 0x35);
156 regU |= unit ? 0x0A : 0x05;
159 regU |= unit ? 0x4A : 0x15;
162 regU |= unit ? 0x8A : 0x25;
165 regU |= unit ? 0x42 : 0x11;
168 regU |= unit ? 0x82 : 0x21;
171 regU |= unit ? 0xC2 : 0x31;
176 cmd64x_program_timings(drive, speed);
180 if (speed >= XFER_SW_DMA_0)
181 (void) pci_write_config_byte(dev, pciU, regU);
184 static void cmd648_clear_irq(ide_drive_t *drive)
186 ide_hwif_t *hwif = drive->hwif;
187 struct pci_dev *dev = to_pci_dev(hwif->dev);
188 unsigned long base = pci_resource_start(dev, 4);
189 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
191 u8 mrdmode = inb(base + 1);
193 /* clear the interrupt bit */
194 outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
198 static void cmd64x_clear_irq(ide_drive_t *drive)
200 ide_hwif_t *hwif = drive->hwif;
201 struct pci_dev *dev = to_pci_dev(hwif->dev);
202 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
203 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
207 (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
208 /* clear the interrupt bit */
209 (void) pci_write_config_byte(dev, irq_reg, irq_stat | irq_mask);
212 static int cmd648_test_irq(ide_hwif_t *hwif)
214 struct pci_dev *dev = to_pci_dev(hwif->dev);
215 unsigned long base = pci_resource_start(dev, 4);
216 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
218 u8 mrdmode = inb(base + 1);
220 pr_debug("%s: mrdmode: 0x%02x irq_mask: 0x%02x\n",
221 hwif->name, mrdmode, irq_mask);
223 return (mrdmode & irq_mask) ? 1 : 0;
226 static int cmd64x_test_irq(ide_hwif_t *hwif)
228 struct pci_dev *dev = to_pci_dev(hwif->dev);
229 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
230 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
234 (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
236 pr_debug("%s: irq_stat: 0x%02x irq_mask: 0x%02x\n",
237 hwif->name, irq_stat, irq_mask);
239 return (irq_stat & irq_mask) ? 1 : 0;
243 * ASUS P55T2P4D with CMD646 chipset revision 0x01 requires the old
244 * event order for DMA transfers.
247 static int cmd646_1_dma_end(ide_drive_t *drive)
249 ide_hwif_t *hwif = drive->hwif;
250 u8 dma_stat = 0, dma_cmd = 0;
253 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
254 /* read DMA command state */
255 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
257 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
258 /* clear the INTR & ERROR bits */
259 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
260 /* verify good DMA status */
261 return (dma_stat & 7) != 4;
264 static int init_chipset_cmd64x(struct pci_dev *dev)
268 /* Set a good latency timer and cache line size value. */
269 (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
270 /* FIXME: pci_set_master() to ensure a good latency timer value */
273 * Enable interrupts, select MEMORY READ LINE for reads.
275 * NOTE: although not mentioned in the PCI0646U specs,
276 * bits 0-1 are write only and won't be read back as
277 * set or not -- PCI0646U2 specs clarify this point.
279 (void) pci_read_config_byte (dev, MRDMODE, &mrdmode);
281 (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
286 static u8 cmd64x_cable_detect(ide_hwif_t *hwif)
288 struct pci_dev *dev = to_pci_dev(hwif->dev);
289 u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
291 switch (dev->device) {
292 case PCI_DEVICE_ID_CMD_648:
293 case PCI_DEVICE_ID_CMD_649:
294 pci_read_config_byte(dev, BMIDECSR, &bmidecsr);
295 return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
297 return ATA_CBL_PATA40;
301 static const struct ide_port_ops cmd64x_port_ops = {
302 .set_pio_mode = cmd64x_set_pio_mode,
303 .set_dma_mode = cmd64x_set_dma_mode,
304 .clear_irq = cmd64x_clear_irq,
305 .test_irq = cmd64x_test_irq,
306 .cable_detect = cmd64x_cable_detect,
309 static const struct ide_port_ops cmd648_port_ops = {
310 .set_pio_mode = cmd64x_set_pio_mode,
311 .set_dma_mode = cmd64x_set_dma_mode,
312 .clear_irq = cmd648_clear_irq,
313 .test_irq = cmd648_test_irq,
314 .cable_detect = cmd64x_cable_detect,
317 static const struct ide_dma_ops cmd646_rev1_dma_ops = {
318 .dma_host_set = ide_dma_host_set,
319 .dma_setup = ide_dma_setup,
320 .dma_start = ide_dma_start,
321 .dma_end = cmd646_1_dma_end,
322 .dma_test_irq = ide_dma_test_irq,
323 .dma_lost_irq = ide_dma_lost_irq,
324 .dma_timer_expiry = ide_dma_sff_timer_expiry,
325 .dma_sff_read_status = ide_dma_sff_read_status,
328 static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
331 .init_chipset = init_chipset_cmd64x,
332 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
333 .port_ops = &cmd64x_port_ops,
334 .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
335 IDE_HFLAG_ABUSE_PREFETCH |
337 .pio_mask = ATA_PIO5,
338 .mwdma_mask = ATA_MWDMA2,
339 .udma_mask = 0x00, /* no udma */
343 .init_chipset = init_chipset_cmd64x,
344 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
345 .port_ops = &cmd648_port_ops,
346 .host_flags = IDE_HFLAG_ABUSE_PREFETCH |
348 .pio_mask = ATA_PIO5,
349 .mwdma_mask = ATA_MWDMA2,
350 .udma_mask = ATA_UDMA2,
354 .init_chipset = init_chipset_cmd64x,
355 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
356 .port_ops = &cmd648_port_ops,
357 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
358 .pio_mask = ATA_PIO5,
359 .mwdma_mask = ATA_MWDMA2,
360 .udma_mask = ATA_UDMA4,
364 .init_chipset = init_chipset_cmd64x,
365 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
366 .port_ops = &cmd648_port_ops,
367 .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
368 .pio_mask = ATA_PIO5,
369 .mwdma_mask = ATA_MWDMA2,
370 .udma_mask = ATA_UDMA5,
374 static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
376 struct ide_port_info d;
377 u8 idx = id->driver_data;
379 d = cmd64x_chipsets[idx];
383 * UltraDMA only supported on PCI646U and PCI646U2, which
384 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
385 * Actually, although the CMD tech support people won't
386 * tell me the details, the 0x03 revision cannot support
387 * UDMA correctly without hardware modifications, and even
388 * then it only works with Quantum disks due to some
389 * hold time assumptions in the 646U part which are fixed
392 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
394 if (dev->revision < 5) {
397 * The original PCI0646 didn't have the primary
398 * channel enable bit, it appeared starting with
399 * PCI0646U (i.e. revision ID 3).
401 if (dev->revision < 3) {
402 d.enablebits[0].reg = 0;
403 d.port_ops = &cmd64x_port_ops;
404 if (dev->revision == 1)
405 d.dma_ops = &cmd646_rev1_dma_ops;
410 return ide_pci_init_one(dev, &d, NULL);
413 static const struct pci_device_id cmd64x_pci_tbl[] = {
414 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
415 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
416 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 2 },
417 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 3 },
420 MODULE_DEVICE_TABLE(pci, cmd64x_pci_tbl);
422 static struct pci_driver cmd64x_pci_driver = {
423 .name = "CMD64x_IDE",
424 .id_table = cmd64x_pci_tbl,
425 .probe = cmd64x_init_one,
426 .remove = ide_pci_remove,
427 .suspend = ide_pci_suspend,
428 .resume = ide_pci_resume,
431 static int __init cmd64x_ide_init(void)
433 return ide_pci_register_driver(&cmd64x_pci_driver);
436 static void __exit cmd64x_ide_exit(void)
438 pci_unregister_driver(&cmd64x_pci_driver);
441 module_init(cmd64x_ide_init);
442 module_exit(cmd64x_ide_exit);
444 MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
445 MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
446 MODULE_LICENSE("GPL");