/*
 * SuperTrak EX Series Storage Controller driver for Linux
 *
 * Copyright (C) 2005-2009 Promise Technology Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Written By:
 *	Ed Lin <promise_linux@promise.com>
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "4.6.0000.3"
#define ST_VER_MAJOR 4
#define ST_VER_MINOR 6
#define ST_OEM 0
#define ST_BUILD_VER 3
enum {
	/* MU register offset */
	IMR0 = 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1 = 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0 = 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1 = 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL = 0x20,	/* MU_INBOUND_DOORBELL */
	IIS = 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM = 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL = 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS = 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM = 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */

	YIOA_STATUS = 0x00,
	YH2I_INT = 0x20,
	YINT_EN = 0x34,
	YI2H_INT = 0x9c,
	YI2H_INT_C = 0xa0,
	YH2I_REQ = 0xc0,
	YH2I_REQ_HI = 0xc4,

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE = 1,
	MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4,
	MU_INBOUND_DOORBELL_HMUSTOPPED = 8,
	MU_INBOUND_DOORBELL_RESET = 16,

	MU_OUTBOUND_DOORBELL_HANDSHAKE = 1,
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2,
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4,
	MU_OUTBOUND_DOORBELL_BUSCHANGE = 8,
	MU_OUTBOUND_DOORBELL_HASEVENT = 16,

	MU_STATE_STARTING = 1,
	MU_STATE_FMU_READY_FOR_HANDSHAKE = 2,
	MU_STATE_SEND_HANDSHAKE_FRAME = 3,
	MU_STATE_STARTED = 4,
	MU_STATE_RESETTING = 5,

	MU_MAX_DELAY = 120,
	MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
	MU_HARD_RESET_WAIT = 30000,
	HMU_PARTNER_TYPE = 2,

	/* firmware returned values */
	SRB_STATUS_SUCCESS = 0x01,
	SRB_STATUS_ERROR = 0x04,
	SRB_STATUS_BUSY = 0x05,
	SRB_STATUS_INVALID_REQUEST = 0x06,
	SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
	SRB_SEE_SENSE = 0x80,	/* combined with srb_status */

	TASK_ATTRIBUTE_SIMPLE = 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
	TASK_ATTRIBUTE_ORDERED = 0x2,
	TASK_ATTRIBUTE_ACA = 0x4,

	SS_STS_NORMAL = 0x80000000,
	SS_STS_DONE = 0x40000000,
	SS_STS_HANDSHAKE = 0x20000000,

	SS_HEAD_HANDSHAKE = 0x80,

	SS_H2I_INT_RESET = 0x100,

	SS_MU_OPERATIONAL = 0x80000000,

	STEX_CDB_LENGTH = 16,
	STATUS_VAR_LEN = 128,

	SG_CF_EOT = 0x80,	/* end of table */
	SG_CF_64B = 0x40,	/* 64 bit item */
	SG_CF_HOST = 0x20,	/* sg in host memory */
	MSG_DATA_DIR_ND = 0,
	MSG_DATA_DIR_IN = 1,
	MSG_DATA_DIR_OUT = 2,

	st_shasta = 0,
	st_vsc = 1,
	st_yosemite = 2,
	st_seq = 3,
	st_yel = 4,

	PASSTHRU_REQ_TYPE = 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
	ST_INTERNAL_TIMEOUT = 180,
	/* Promise vendor-specific commands */
	MGT_CMD = 0xd8,
	SINBAND_MGT_CMD = 0xd9,
	CONTROLLER_CMD = 0xe1,
	DEBUGGING_CMD = 0xe2,
	PASSTHRU_CMD = 0xf0,

	PASSTHRU_GET_ADAPTER = 0x05,
	PASSTHRU_GET_DRVVER = 0x10,

	CTLR_CONFIG_CMD = 0x03,
	CTLR_SHUTDOWN = 0x0d,

	CTLR_POWER_STATE_CHANGE = 0x0e,
	CTLR_POWER_SAVING = 0x01,

	PASSTHRU_SIGNATURE = 0x4e415041,
	MGT_CMD_SIGNATURE = 0xba,

	INQUIRY_EVPD = 0x01,

	ST_ADDITIONAL_MEM = 0x200000,
	ST_ADDITIONAL_MEM_MIN = 0x80000,
};
struct st_sgitem {
	u8 ctrl;	/* SG_CF_xxx */
	u8 reserved[3];
	__le32 count;
	__le64 addr;
};

struct st_ss_sgitem {
	__le32 addr;
	__le32 addr_hi;
	__le32 count;
};

struct st_sgtable {
	__le16 sg_count;
	__le16 max_sg_count;
	__le32 sz_in_byte;
};

struct st_msg_header {
	__le64 handle;
	u8 flag;
	u8 channel;
	__le16 timeout;
	u32 reserved;
};

struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	u8 reserved0[7];
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */
	__le32 scratch_size;
	u32 reserved1;
};

	u8 payload_sz;		/* payload size in 4-byte units, not used */
	u8 cdb[STEX_CDB_LENGTH];

	u8 payload_sz;		/* payload size in 4-byte units */
	u8 variable[STATUS_VAR_LEN];

	struct ver_info drv_ver;
	struct ver_info bios_ver;
	struct scsi_cmnd *cmd;

	unsigned int sense_bufflen;

	void __iomem *mmio_base;	/* iomapped PCI memory space */

	dma_addr_t dma_handle;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	struct status_msg *status_buffer;
	void *copy_buffer;	/* temp buffer for driver-handled commands */

	struct st_ccb *wait_ccb;

	unsigned int mu_status;
	unsigned int cardtype;
struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	unsigned int max_lun;
	unsigned int max_channel;
static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts (0=off, 1=on)");
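/*
 * Usage sketch (module name follows DRV_NAME; the parameter is read once
 * at load time):
 *   modprobe stex msi=1
 */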
static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};
MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);
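/* Pack the current wall-clock time (seconds since the Unix epoch) into a
 * little-endian 64-bit value; used to stamp the handshake frame. */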
static void stex_gettime(__le64 *time)
{
	struct timeval tv;

	do_gettimeofday(&tv);
	*time = cpu_to_le64(tv.tv_sec);
}
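/* Pop the status payload at the ring tail and advance the tail, wrapping
 * at sts_count+1; the head is published by the firmware. */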
static struct status_msg *stex_get_status(struct st_hba *hba)
{
	struct status_msg *status = hba->status_buffer + hba->status_tail;

	++hba->status_tail;
	hba->status_tail %= hba->sts_count+1;

	return status;
}
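/* Fail a command with CHECK CONDITION and "Invalid field in CDB" sense
 * data; used for requests the driver or firmware will not honor. */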
static void stex_invalid_field(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* "Invalid field in cdb" */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
				0x0);
	done(cmd);
}
static struct req_msg *stex_alloc_req(struct st_hba *hba)
{
	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	return req;
}
static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
{
	return (struct req_msg *)(hba->dma_mem +
		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
}
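/* DMA-map the command's scatterlist and describe it in the request's
 * variable area: an st_sgtable header followed by 64-bit S/G items, the
 * last of which is flagged SG_CF_EOT. Returns the segment count. */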
static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_sgitem *)(dst + 1);
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr = cpu_to_le64(sg_dma_address(sg));
			table[i].ctrl = SG_CF_64B | SG_CF_HOST;
		}
		table[--i].ctrl |= SG_CF_EOT;
	}

	return nseg;
}
static int stex_ss_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_ss_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_ss_sgitem *)(dst + 1);
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr =
				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
			table[i].addr_hi =
				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		}
	}

	return nseg;
}
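/* Service PASSTHRU_GET_ADAPTER in the driver: rewrite the st_frame in
 * the copy buffer with PCI identity, resource and version data, then
 * copy it back into the command's data buffer. */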
static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	struct st_frame *p;
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
	p->rom_addr = 0;

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
}
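/* Post a request on non-SS controllers: publish the new request head in
 * IMR0, then ring the inbound doorbell so the firmware fetches it. */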
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
}
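/* Post a request on SS (st_yel) controllers: fill the st_msg_header that
 * precedes the request and hand the request's bus address to the
 * firmware through the YH2I_REQ/YH2I_REQ_HI registers. */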
static void
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;
	dma_addr_t addr;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	cmd = hba->ccb[tag].cmd;
	msg_h = (struct st_msg_header *)req - 1;
	if (likely(cmd)) {
		msg_h->channel = (u8)cmd->device->channel;
		msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
	}
	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	addr += (hba->ccb[tag].sg_count+4)/11;
	msg_h->handle = cpu_to_le64(addr);

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
	writel(addr, hba->mmio_base + YH2I_REQ);
	readl(hba->mmio_base + YH2I_REQ); /* flush */
}
static int
stex_slave_alloc(struct scsi_device *sdev)
{
	/* Cheat: usually extracted from Inquiry data */
	sdev->tagged_supported = 1;

	scsi_activate_tcq(sdev, sdev->host->can_queue);

	return 0;
}
static int
stex_slave_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
	sdev->tagged_supported = 1;

	return 0;
}
static void
stex_slave_destroy(struct scsi_device *sdev)
{
	scsi_deactivate_tcq(sdev, 1);
}
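/*
 * Queue a SCSI command. A few CDBs are completed directly by the driver:
 * the MODE_SENSE(10) caching page, REPORT_LUNS and INQUIRY for the
 * virtual console device, and PASSTHRU_GET_DRVVER. Everything else is
 * translated into a req_msg and handed to the firmware.
 */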
static int
stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	unsigned int id, lun;
	struct req_msg *req;
	u16 tag;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];

	switch (cmd->cmnd[0]) {
	case MODE_SENSE_10:
	{
		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
		u8 page;

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
						 sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	}
	case REPORT_LUNS:
		/*
		 * The shasta firmware does not report actual LUNs in the
		 * target, so fail the command to force a sequential LUN scan.
		 * The console device does not support this command either.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);
			return 0;
		}
		break;
	case TEST_UNIT_READY:
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
		break;
	case INQUIRY:
		if (id != host->max_id - 1)
			break;
		if (!lun && !cmd->device->channel &&
			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
						 sizeof(console_inq_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	case PASSTHRU_CMD:
		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			struct st_drvver ver;
			size_t cp_len = sizeof(ver);

			memset(&ver, 0, sizeof(ver)); /* no uninitialized stack bytes reach userspace */
			ver.major = ST_VER_MAJOR;
			ver.minor = ST_VER_MINOR;
			ver.oem = ST_OEM;
			ver.build = ST_BUILD_VER;
			ver.signature[0] = PASSTHRU_SIGNATURE;
			ver.console_id = host->max_id - 1;
			ver.host_no = hba->host->host_no;
			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
			cmd->result = sizeof(ver) == cp_len ?
				DID_OK << 16 | COMMAND_COMPLETE << 8 :
				DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
	default:
		break;
	}
	cmd->scsi_done = done;

	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = hba->alloc_rq(hba);

	req->lun = lun;
	req->target = id;

	/* cdb */
	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);
	}

	hba->send(hba, req, tag);

	return 0;
}
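/* Translate the firmware's srb_status/scsi_status pair into a Linux
 * result code and complete the command to the midlayer. */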
static void stex_scsi_done(struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd = ccb->cmd;
	int result;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
		case SAM_STAT_GOOD:
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;
			break;
		case SAM_STAT_BUSY:
			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		default:
			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		}
	}
	else if (ccb->srb_status & SRB_SEE_SENSE)
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
		case SRB_STATUS_SELECTION_TIMEOUT:
			result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_BUSY:
			result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_INVALID_REQUEST:
		case SRB_STATUS_ERROR:
		default:
			result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
	}

	cmd->result = result;
	cmd->scsi_done(cmd);
}
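/* Copy the variable part of a status payload back to the requester:
 * sense data for a failed command, response data otherwise. */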
static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));
		return;
	}

	if (ccb->cmd == NULL)
		return;
	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
}
static void stex_check_cmd(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
{
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
			le32_to_cpu(*(__le32 *)&resp->variable[0]));
}
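/* Outbound-doorbell service routine: walk the status ring from the tail
 * to the head published in OMR1, complete each tagged request, then
 * acknowledge the consumed entries through IMR1. */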
static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;
	struct st_ccb *ccb;
	unsigned int size;
	u16 tag;

	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
		return;

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head > hba->sts_count)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));
		return;
	}

	/*
	 * A status payload is not valid if:
	 * 1. there are no pending requests (e.g. during the init stage), or
	 * 2. there are pending requests, but the controller is resetting and
	 *    its type is not st_yosemite.
	 * st_yosemite firmware returns pending requests to the driver while
	 * resetting, so those are allowed through.
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
		(hba->mu_status == MU_STATE_RESETTING &&
		hba->cardtype != st_yosemite))) {
		hba->status_tail = hba->status_head;
		goto update_status;
	}

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		size = resp->payload_sz * sizeof(u32); /* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		} else {
			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			if (size)
				stex_copy_data(ccb, resp, size);
		}

		ccb->req = NULL;
		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_check_cmd(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;
	}

update_status:
	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
}
static irqreturn_t stex_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;
	int handled = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		handled = 1;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_RETVAL(handled);
}
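/* Completion path for SS controllers: the firmware posts one 32-bit word
 * per completion in the scratch area; SS_STS_DONE marks a fully
 * successful request, anything else requires the full status payload. */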
static void stex_ss_mu_intr(struct st_hba *hba)
{
	struct status_msg *resp;
	struct st_ccb *ccb;
	__le32 *scratch;
	unsigned int size;
	int count = 0;
	u32 value;
	u16 tag;

	if (unlikely(hba->out_req_cnt <= 0 ||
		hba->mu_status == MU_STATE_RESETTING))
		return;

	while (count < hba->sts_count) {
		scratch = hba->scratch + hba->status_tail;
		value = le32_to_cpu(*scratch);
		if (unlikely(!(value & SS_STS_NORMAL)))
			break;

		resp = hba->status_buffer + hba->status_tail;
		*scratch = 0;
		++count;
		++hba->status_tail;
		hba->status_tail %= hba->sts_count+1;

		tag = (u16)value;
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		ccb->req = NULL;
		if (likely(value & SS_STS_DONE)) { /* normal case */
			ccb->srb_status = SRB_STATUS_SUCCESS;
			ccb->scsi_status = SAM_STAT_GOOD;
		} else {
			ccb->srb_status = resp->srb_status;
			ccb->scsi_status = resp->scsi_status;
			size = resp->payload_sz * sizeof(u32);
			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
				size > sizeof(*resp))) {
				printk(KERN_WARNING DRV_NAME
					"(%s): bad status size\n",
					pci_name(hba->pdev));
			} else {
				size -= sizeof(*resp) - STATUS_VAR_LEN;
				if (size)
					stex_copy_data(ccb, resp, size);
			}
			if (likely(ccb->cmd != NULL))
				stex_check_cmd(hba, ccb, resp);
		}

		if (likely(ccb->cmd != NULL)) {
			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;
	}
}
static irqreturn_t stex_ss_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;
	int handled = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + YI2H_INT);
	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
		handled = 1;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_RETVAL(handled);
}
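/*
 * Handshake with non-SS firmware: wait for the handshake signature in
 * OMR0, publish the handshake frame (ring addresses, sizes and counts)
 * through IMR0/IMR1 plus the inbound doorbell, then wait for the
 * signature to be echoed back before clearing the message registers.
 */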
static int stex_common_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	u32 data;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
		readl(base + IDBL);
		before = jiffies;
		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));
				return -1;
			}
			rmb();
			msleep(1);
		}
	}

	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		data &= 0x0000ffff;
		if (hba->host->can_queue > data) {
			hba->host->can_queue = data;
			hba->host->cmd_per_lun = data;
		}
	}

	h = (struct handshake_frame *)hba->status_buffer;
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	stex_gettime(&h->hosttime);
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->extra_offset) {
		h->extra_offset = cpu_to_le32(hba->extra_offset);
		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
	} else
		h->extra_offset = h->extra_size = 0;

	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
	writel(status_phys, base + IMR0);
	readl(base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);
	readl(base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	readl(base + OMR0);
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	before = jiffies;
	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			return -1;
		}
		rmb();
		msleep(1);
	}

	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */

	return 0;
}
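/*
 * Handshake with SS (st_yel) firmware: wait until YIOA_STATUS reports
 * the MU operational, pass the handshake frame by bus address through
 * YH2I_REQ, then poll the first scratch word for SS_STS_HANDSHAKE.
 */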
static int stex_ss_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct st_msg_header *msg_h;
	struct handshake_frame *h;
	__le32 *scratch;
	u32 data;
	unsigned long before;

	before = jiffies;
	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): firmware not operational\n",
				pci_name(hba->pdev));
			return -1;
		}
		msleep(1);
	}

	msg_h = (struct st_msg_header *)hba->dma_mem;
	msg_h->handle = cpu_to_le64(hba->dma_handle);
	msg_h->flag = SS_HEAD_HANDSHAKE;

	h = (struct handshake_frame *)(msg_h + 1);
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	stex_gettime(&h->hosttime);
	h->partner_type = HMU_PARTNER_TYPE;
	h->extra_offset = h->extra_size = 0;
	h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32));

	data = readl(base + YINT_EN);
	writel(data, base + YINT_EN);
	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
	writel(hba->dma_handle, base + YH2I_REQ);

	scratch = hba->scratch;
	before = jiffies;
	while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			return -1;
		}
		rmb();
		msleep(1);
	}

	*scratch = 0;

	return 0;
}
static int stex_handshake(struct st_hba *hba)
{
	int err;
	unsigned long flags;

	err = (hba->cardtype == st_yel) ?
		stex_ss_handshake(hba) : stex_common_handshake(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->req_head = 0;
	hba->req_tail = 0;
	hba->status_head = 0;
	hba->status_tail = 0;
	hba->out_req_cnt = 0;
	hba->mu_status = MU_STATE_STARTED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
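/* Error handler: poll once for a pending completion interrupt; if
 * servicing it completes the command, report SUCCESS, otherwise detach
 * the request so a late completion is ignored and report FAILED. */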
static int stex_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;
	void __iomem *base;
	u32 data;
	int result = SUCCESS;
	unsigned long flags;

	printk(KERN_INFO DRV_NAME
		"(%s): aborting command\n", pci_name(hba->pdev));
	scsi_print_command(cmd);

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];
	else {
		for (tag = 0; tag < host->can_queue; tag++)
			if (hba->ccb[tag].cmd == cmd) {
				hba->wait_ccb = &hba->ccb[tag];
				break;
			}
		if (tag >= host->can_queue)
			goto out;
	}

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
	} else {
		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */

		stex_mu_intr(hba, data);
	}
	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));
		goto out;
	}

fail_out:
	scsi_dma_unmap(cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;
	result = FAILED;
out:
	spin_unlock_irqrestore(host->host_lock, flags);
	return result;
}
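/* Reset a shasta controller by pulsing the secondary-bus-reset bit of
 * the parent bridge, saving and restoring PCI config space around the
 * reset. */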
static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;
	int i;
	u16 pci_cmd;
	u8 pci_bctl;

	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/*
	 * Reset the secondary bus. Our controller (MU/ATU) is the only
	 * device on the secondary bus. Consult the Intel 80331/3
	 * developer's manual for details.
	 */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers, but 16-port controllers
	 * need more time to finish the bus reset. Use 100 ms here for safety.
	 */
	msleep(100);
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
			break;
		msleep(1);
	}

	ssleep(5);
	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
}
static void stex_ss_reset(struct st_hba *hba)
{
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	readl(hba->mmio_base + YH2I_INT);
	ssleep(5);
}
static int stex_reset(struct scsi_cmnd *cmd)
{
	struct st_hba *hba;
	void __iomem *base;
	unsigned long flags, before;

	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	printk(KERN_INFO DRV_NAME
		"(%s): resetting host\n", pci_name(hba->pdev));
	scsi_print_command(cmd);

	hba->mu_status = MU_STATE_RESETTING;

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)
		stex_ss_reset(hba);

	if (hba->cardtype != st_yosemite) {
		if (stex_handshake(hba)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): resetting: handshake failed\n",
				pci_name(hba->pdev));
			return FAILED;
		}
		return SUCCESS;
	}

	/* st_yosemite */
	writel(MU_INBOUND_DOORBELL_RESET, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
	before = jiffies;
	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));
			return FAILED;
		}
		msleep(1);
	}

	base = hba->mmio_base;
	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->req_head = 0;
	hba->req_tail = 0;
	hba->status_head = 0;
	hba->status_tail = 0;
	hba->out_req_cnt = 0;
	hba->mu_status = MU_STATE_STARTED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return SUCCESS;
}
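/* Report a BIOS-compatible CHS geometry: 255 heads and 63 sectors per
 * track, reduced for volumes below 1 GB. */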
static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {
		heads = 64;
		sectors = 32;
	}

	sector_div(capacity, heads * sectors);

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = capacity;

	return 0;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.proc_name = DRV_NAME,
	.bios_param = stex_biosparam,
	.queuecommand = stex_queuecommand,
	.slave_alloc = stex_slave_alloc,
	.slave_configure = stex_slave_config,
	.slave_destroy = stex_slave_destroy,
	.eh_abort_handler = stex_abort,
	.eh_host_reset_handler = stex_reset,
	.this_id = -1,
};
1327 static struct pci_device_id stex_pci_tbl[] = {
1329 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1330 st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
1331 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1332 st_shasta }, /* SuperTrak EX12350 */
1333 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1334 st_shasta }, /* SuperTrak EX4350 */
1335 { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1336 st_shasta }, /* SuperTrak EX24350 */
1339 { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1342 { 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
1345 { 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
1348 { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
1349 { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
1350 { } /* terminate list */
static struct st_card_info stex_card_info[] = {
	/* st_shasta */
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,

	/* st_vsc */
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,

	/* st_yosemite */
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,

	/* st_seq */
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,

	/* st_yel */
		.alloc_rq	= stex_ss_alloc_req,
		.map_sg		= stex_ss_map_sg,
		.send		= stex_ss_send_cmd,
};
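/* Prefer a 64-bit DMA mask (streaming and coherent); fall back to
 * 32-bit masks if the platform rejects either one. */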
static int stex_set_dma_mask(struct pci_dev *pdev)
{
	int ret;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return ret;
}
static int stex_request_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;
	int status;

	if (msi) {
		status = pci_enable_msi(pdev);
		if (status != 0)
			printk(KERN_ERR DRV_NAME
				"(%s): error %d setting up MSI\n",
				pci_name(pdev), status);
		else
			hba->msi_enabled = 1;
	} else
		hba->msi_enabled = 0;

	status = request_irq(pdev->irq, hba->cardtype == st_yel ?
		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

	if (status != 0) {
		if (hba->msi_enabled)
			pci_disable_msi(pdev);
	}
	return status;
}
static void stex_free_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;

	free_irq(pdev->irq, hba);
	if (hba->msi_enabled)
		pci_disable_msi(pdev);
}
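/* PCI probe: map BAR 0, size and allocate the coherent request/status
 * rings (plus scratch and copy buffers), wire up the card-specific ops
 * from stex_card_info, handshake with the firmware and register the
 * SCSI host. */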
static int __devinit
stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	const struct st_card_info *ci = NULL;
	u32 sts_offset, cp_offset, scratch_offset;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_disable;
	}

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
			pci_name(pdev));
		goto out_scsi_host_put;
	}

	hba->mmio_base = pci_ioremap_bar(pdev, 0);
	if (!hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_release_regions;
	}

	err = stex_set_dma_mask(pdev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
			pci_name(pdev));
		goto out_iounmap;
	}

	hba->cardtype = (unsigned int) id->driver_data;
	ci = &stex_card_info[hba->cardtype];
	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
	if (hba->cardtype == st_yel)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
	if (hba->cardtype == st_seq ||
		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
		hba->extra_offset = hba->dma_size;
		hba->dma_size += ST_ADDITIONAL_MEM;
	}
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		/* Retry minimum coherent mapping for st_seq and st_vsc */
		if (hba->cardtype == st_seq ||
			(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
			printk(KERN_WARNING DRV_NAME
				"(%s): allocating min buffer for controller\n",
				pci_name(pdev));
			hba->dma_size = hba->extra_offset
				+ ST_ADDITIONAL_MEM_MIN;
			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
		}

		if (!hba->dma_mem) {
			err = -ENOMEM;
			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
				pci_name(pdev));
			goto out_iounmap;
		}
	}

	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
	if (!hba->ccb) {
		err = -ENOMEM;
		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
			pci_name(pdev));
		goto out_pci_free;
	}

	if (hba->cardtype == st_yel)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
	hba->rq_count = ci->rq_count;
	hba->rq_size = ci->rq_size;
	hba->sts_count = ci->sts_count;
	hba->alloc_rq = ci->alloc_rq;
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;

	if (hba->cardtype == st_yel)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
	host->can_queue = ci->rq_count;
	host->cmd_per_lun = ci->rq_count;
	host->max_id = ci->max_id;
	host->max_lun = ci->max_lun;
	host->max_channel = ci->max_channel;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	hba->host = host;
	hba->pdev = pdev;

	err = stex_request_irq(hba);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
			pci_name(pdev));
		goto out_ccb_free;
	}

	err = stex_handshake(hba);
	if (err)
		goto out_free_irq;

	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	scsi_scan_host(host);

	return 0;

out_free_irq:
	stex_free_irq(hba);
out_ccb_free:
	kfree(hba->ccb);
out_pci_free:
	dma_free_coherent(&pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);
out_iounmap:
	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);

	return err;
}
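/* Issue an internal shutdown (or power-saving) request on a reserved
 * tag and wait up to ST_INTERNAL_TIMEOUT seconds for the firmware to
 * complete it. */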
static void stex_hba_stop(struct st_hba *hba)
{
	struct req_msg *req;
	struct st_msg_header *msg_h;
	unsigned long flags;
	unsigned long before;
	u16 tag = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);
	req = hba->alloc_rq(hba);
	if (hba->cardtype == st_yel) {
		msg_h = (struct st_msg_header *)req - 1;
		memset(msg_h, 0, hba->rq_size);
	} else
		memset(req, 0, hba->rq_size);

	if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;
	} else {
		req->cdb[0] = CONTROLLER_CMD;
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;
	}

	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;

	hba->send(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	before = jiffies;
	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			hba->ccb[tag].req_type = 0;
			return;
		}
		msleep(1);
	}
}
static void stex_hba_free(struct st_hba *hba)
{
	stex_free_irq(hba);

	iounmap(hba->mmio_base);

	pci_release_regions(hba->pdev);

	kfree(hba->ccb);

	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
		hba->dma_mem, hba->dma_handle);
}
static void stex_remove(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	scsi_remove_host(hba->host);

	pci_set_drvdata(pdev, NULL);

	stex_hba_stop(hba);

	stex_hba_free(hba);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);
}
static void stex_shutdown(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	stex_hba_stop(hba);
}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);

static struct pci_driver stex_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= stex_pci_tbl,
	.probe		= stex_probe,
	.remove		= __devexit_p(stex_remove),
	.shutdown	= stex_shutdown,
};
static int __init stex_init(void)
{
	printk(KERN_INFO DRV_NAME
		": Promise SuperTrak EX Driver version: %s\n",
		ST_DRIVER_VERSION);

	return pci_register_driver(&stex_pci_driver);
}

static void __exit stex_exit(void)
{
	pci_unregister_driver(&stex_pci_driver);
}

module_init(stex_init);
module_exit(stex_exit);