2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
22 #include <linux/module.h>
23 #include <linux/interrupt.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/delay.h>
30 #include <linux/timer.h>
31 #include <linux/seq_file.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp_lock.h>
35 #include <linux/compat.h>
36 #include <linux/blktrace_api.h>
37 #include <linux/uaccess.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/completion.h>
41 #include <linux/moduleparam.h>
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_tcq.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
50 #include <asm/atomic.h>
51 #include <linux/kthread.h>
55 /* HPSA_DRIVER_VERSION must be three byte-sized values (0-255) separated by '.' */
56 #define HPSA_DRIVER_VERSION "2.0.2-1"
57 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
59 /* How long to wait (in milliseconds) for board to go into simple mode */
60 #define MAX_CONFIG_WAIT 30000
61 #define MAX_IOCTL_CONFIG_WAIT 1000
63 /* define how many times we will retry a command because of bus resets */
64 #define MAX_CMD_RETRIES 3
66 /* Embedded module documentation macros - see modules.h */
67 MODULE_AUTHOR("Hewlett-Packard Company");
68 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
70 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
71 MODULE_VERSION(HPSA_DRIVER_VERSION);
72 MODULE_LICENSE("GPL");
74 static int hpsa_allow_any;
75 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
76 MODULE_PARM_DESC(hpsa_allow_any,
77 "Allow hpsa driver to access unknown HP Smart Array hardware");
79 /* define the PCI info for the cards we can control */
80 static const struct pci_device_id hpsa_pci_device_id[] = {
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
94 #define PCI_DEVICE_ID_HP_CISSF 0x333f
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
96 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
97 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
98 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
99 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
103 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
105 /* board_id = Subsystem Device ID & Vendor ID
106 * product = Marketing Name for the board
107 * access = Address of the struct of function pointers
109 static struct board_type products[] = {
110 {0x3241103C, "Smart Array P212", &SA5_access},
111 {0x3243103C, "Smart Array P410", &SA5_access},
112 {0x3245103C, "Smart Array P410i", &SA5_access},
113 {0x3247103C, "Smart Array P411", &SA5_access},
114 {0x3249103C, "Smart Array P812", &SA5_access},
115 {0x324a103C, "Smart Array P712m", &SA5_access},
116 {0x324b103C, "Smart Array P711m", &SA5_access},
117 {0x3233103C, "StorageWorks P1210m", &SA5_access},
118 {0x333F103C, "StorageWorks P1210m", &SA5_access},
119 {0x3250103C, "Smart Array", &SA5_access},
120 {0x3250113C, "Smart Array", &SA5_access},
121 {0x3250123C, "Smart Array", &SA5_access},
122 {0x3250133C, "Smart Array", &SA5_access},
123 {0x3250143C, "Smart Array", &SA5_access},
124 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
127 static int number_of_controllers;
129 static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
130 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
131 static void start_io(struct ctlr_info *h);
134 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
137 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
138 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
139 static struct CommandList *cmd_alloc(struct ctlr_info *h);
140 static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
141 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
142 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
145 static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
146 void (*done)(struct scsi_cmnd *));
147 static void hpsa_scan_start(struct Scsi_Host *);
148 static int hpsa_scan_finished(struct Scsi_Host *sh,
149 unsigned long elapsed_time);
150 static int hpsa_change_queue_depth(struct scsi_device *sdev,
151 int qdepth, int reason);
153 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
154 static int hpsa_slave_alloc(struct scsi_device *sdev);
155 static void hpsa_slave_destroy(struct scsi_device *sdev);
157 static ssize_t raid_level_show(struct device *dev,
158 struct device_attribute *attr, char *buf);
159 static ssize_t lunid_show(struct device *dev,
160 struct device_attribute *attr, char *buf);
161 static ssize_t unique_id_show(struct device *dev,
162 struct device_attribute *attr, char *buf);
163 static ssize_t host_show_firmware_revision(struct device *dev,
164 struct device_attribute *attr, char *buf);
165 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
166 static ssize_t host_store_rescan(struct device *dev,
167 struct device_attribute *attr, const char *buf, size_t count);
168 static int check_for_unit_attention(struct ctlr_info *h,
169 struct CommandList *c);
170 static void check_ioctl_unit_attention(struct ctlr_info *h,
171 struct CommandList *c);
172 /* performant mode helper functions */
173 static void calc_bucket_map(int *bucket, int num_buckets,
174 int nsgs, int *bucket_map);
175 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
176 static inline u32 next_command(struct ctlr_info *h);
178 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
179 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
180 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
181 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
182 static DEVICE_ATTR(firmware_revision, S_IRUGO,
183 host_show_firmware_revision, NULL);
185 static struct device_attribute *hpsa_sdev_attrs[] = {
186 &dev_attr_raid_level,
192 static struct device_attribute *hpsa_shost_attrs[] = {
194 &dev_attr_firmware_revision,
198 static struct scsi_host_template hpsa_driver_template = {
199 .module = THIS_MODULE,
202 .queuecommand = hpsa_scsi_queue_command,
203 .scan_start = hpsa_scan_start,
204 .scan_finished = hpsa_scan_finished,
205 .change_queue_depth = hpsa_change_queue_depth,
207 .use_clustering = ENABLE_CLUSTERING,
208 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
210 .slave_alloc = hpsa_slave_alloc,
211 .slave_destroy = hpsa_slave_destroy,
213 .compat_ioctl = hpsa_compat_ioctl,
215 .sdev_attrs = hpsa_sdev_attrs,
216 .shost_attrs = hpsa_shost_attrs,
219 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
221 unsigned long *priv = shost_priv(sdev->host);
222 return (struct ctlr_info *) *priv;
225 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
227 unsigned long *priv = shost_priv(sh);
228 return (struct ctlr_info *) *priv;
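/* check_for_unit_attention: examine a completed command's sense data and, if
 * it reports a unit attention condition, log what happened (based on the
 * additional sense code in SenseInfo[12]) and return nonzero so that callers
 * may retry the command.
 */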
231 static int check_for_unit_attention(struct ctlr_info *h,
232 struct CommandList *c)
234 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
237 switch (c->err_info->SenseInfo[12]) {
239 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
240 "detected, command retried\n", h->ctlr);
243 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
244 "detected, action required\n", h->ctlr);
246 case REPORT_LUNS_CHANGED:
247 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
248 "changed, action required\n", h->ctlr);
250 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
254 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
255 "or device reset detected\n", h->ctlr);
257 case UNIT_ATTENTION_CLEARED:
258 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
259 "cleared by another initiator\n", h->ctlr);
262 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
263 "unit attention detected\n", h->ctlr);
269 static ssize_t host_store_rescan(struct device *dev,
270 struct device_attribute *attr,
271 const char *buf, size_t count)
274 struct Scsi_Host *shost = class_to_shost(dev);
275 h = shost_to_hba(shost);
276 hpsa_scan_start(h->scsi_host);
280 static ssize_t host_show_firmware_revision(struct device *dev,
281 struct device_attribute *attr, char *buf)
284 struct Scsi_Host *shost = class_to_shost(dev);
285 unsigned char *fwrev;
287 h = shost_to_hba(shost);
288 if (!h->hba_inquiry_data)
290 fwrev = &h->hba_inquiry_data[32];
291 return snprintf(buf, 20, "%c%c%c%c\n",
292 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
295 /* Enqueuing and dequeuing functions for cmdlists. */
296 static inline void addQ(struct hlist_head *list, struct CommandList *c)
298 hlist_add_head(&c->list, list);
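/* next_command: fetch the tag of the next completed command.  In performant
 * mode the controller posts completions into a ring buffer (h->reply_pool);
 * bit 0 of each entry is a toggle that flips on every pass around the ring,
 * so an entry is consumed only while its toggle bit matches
 * h->reply_pool_wraparound.
 */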
301 static inline u32 next_command(struct ctlr_info *h)
305 if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
306 return h->access.command_completed(h);
308 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
309 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
310 (h->reply_pool_head)++;
311 h->commands_outstanding--;
315 /* Check for wraparound */
316 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
317 h->reply_pool_head = h->reply_pool;
318 h->reply_pool_wraparound ^= 1;
323 /* set_performant_mode: Modify the tag for cciss performant mode.
324 * Set bit 0 for the pull model, bits 3-1 for the block fetch table entry.
327 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
329 if (likely(h->transMethod == CFGTBL_Trans_Performant))
330 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
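/* Tag the command for the current transport mode (performant vs. simple),
 * then queue it and start I/O on the controller under h->lock.
 */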
333 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
334 struct CommandList *c)
338 set_performant_mode(h, c);
339 spin_lock_irqsave(&h->lock, flags);
343 spin_unlock_irqrestore(&h->lock, flags);
346 static inline void removeQ(struct CommandList *c)
348 if (WARN_ON(hlist_unhashed(&c->list)))
350 hlist_del_init(&c->list);
353 static inline int is_hba_lunid(unsigned char scsi3addr[])
355 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
358 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
360 return (scsi3addr[3] & 0xC0) == 0x40;
363 static inline int is_scsi_rev_5(struct ctlr_info *h)
365 if (!h->hba_inquiry_data)
367 if ((h->hba_inquiry_data[2] & 0x07) == 5)
372 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
375 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
377 static ssize_t raid_level_show(struct device *dev,
378 struct device_attribute *attr, char *buf)
381 unsigned char rlevel;
383 struct scsi_device *sdev;
384 struct hpsa_scsi_dev_t *hdev;
387 sdev = to_scsi_device(dev);
388 h = sdev_to_hba(sdev);
389 spin_lock_irqsave(&h->lock, flags);
390 hdev = sdev->hostdata;
392 spin_unlock_irqrestore(&h->lock, flags);
396 /* Is this even a logical drive? */
397 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
398 spin_unlock_irqrestore(&h->lock, flags);
399 l = snprintf(buf, PAGE_SIZE, "N/A\n");
403 rlevel = hdev->raid_level;
404 spin_unlock_irqrestore(&h->lock, flags);
405 if (rlevel > RAID_UNKNOWN)
406 rlevel = RAID_UNKNOWN;
407 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
411 static ssize_t lunid_show(struct device *dev,
412 struct device_attribute *attr, char *buf)
415 struct scsi_device *sdev;
416 struct hpsa_scsi_dev_t *hdev;
418 unsigned char lunid[8];
420 sdev = to_scsi_device(dev);
421 h = sdev_to_hba(sdev);
422 spin_lock_irqsave(&h->lock, flags);
423 hdev = sdev->hostdata;
425 spin_unlock_irqrestore(&h->lock, flags);
428 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
429 spin_unlock_irqrestore(&h->lock, flags);
430 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
431 lunid[0], lunid[1], lunid[2], lunid[3],
432 lunid[4], lunid[5], lunid[6], lunid[7]);
435 static ssize_t unique_id_show(struct device *dev,
436 struct device_attribute *attr, char *buf)
439 struct scsi_device *sdev;
440 struct hpsa_scsi_dev_t *hdev;
442 unsigned char sn[16];
444 sdev = to_scsi_device(dev);
445 h = sdev_to_hba(sdev);
446 spin_lock_irqsave(&h->lock, flags);
447 hdev = sdev->hostdata;
449 spin_unlock_irqrestore(&h->lock, flags);
452 memcpy(sn, hdev->device_id, sizeof(sn));
453 spin_unlock_irqrestore(&h->lock, flags);
454 return snprintf(buf, 16 * 2 + 2,
455 "%02X%02X%02X%02X%02X%02X%02X%02X"
456 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
457 sn[0], sn[1], sn[2], sn[3],
458 sn[4], sn[5], sn[6], sn[7],
459 sn[8], sn[9], sn[10], sn[11],
460 sn[12], sn[13], sn[14], sn[15]);
463 static int hpsa_find_target_lun(struct ctlr_info *h,
464 unsigned char scsi3addr[], int bus, int *target, int *lun)
466 /* finds an unused bus, target, lun for a new physical device
467 * assumes h->devlock is held
470 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
472 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
474 for (i = 0; i < h->ndevices; i++) {
475 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
476 set_bit(h->dev[i]->target, lun_taken);
479 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
480 if (!test_bit(i, lun_taken)) {
491 /* Add an entry into h->dev[] array. */
492 static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
493 struct hpsa_scsi_dev_t *device,
494 struct hpsa_scsi_dev_t *added[], int *nadded)
496 /* assumes h->devlock is held */
499 unsigned char addr1[8], addr2[8];
500 struct hpsa_scsi_dev_t *sd;
502 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
503 dev_err(&h->pdev->dev, "too many devices, some will be "
508 /* physical devices do not have lun or target assigned until now. */
509 if (device->lun != -1)
510 /* Logical device, lun is already assigned. */
513 /* If this device is a non-zero lun of a multi-lun device,
514 * byte 4 of the 8-byte LUN addr will contain the logical
515 * unit number; it is zero otherwise.
517 if (device->scsi3addr[4] == 0) {
518 /* This is not a non-zero lun of a multi-lun device */
519 if (hpsa_find_target_lun(h, device->scsi3addr,
520 device->bus, &device->target, &device->lun) != 0)
525 /* This is a non-zero lun of a multi-lun device.
526 * Search through our list and find the device which
527 * has the same 8 byte LUN address, excepting byte 4.
528 * Assign the same bus and target for this new LUN.
529 * Use the logical unit number from the firmware.
531 memcpy(addr1, device->scsi3addr, 8);
533 for (i = 0; i < n; i++) {
535 memcpy(addr2, sd->scsi3addr, 8);
537 /* differ only in byte 4? */
538 if (memcmp(addr1, addr2, 8) == 0) {
539 device->bus = sd->bus;
540 device->target = sd->target;
541 device->lun = device->scsi3addr[4];
545 if (device->lun == -1) {
546 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
547 " suspect firmware bug or unsupported hardware "
556 added[*nadded] = device;
559 /* initially, (before registering with scsi layer) we don't
560 * know our hostno and we don't want to print anything first
561 * time anyway (the scsi layer's inquiries will show that info)
563 /* if (hostno != -1) */
564 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
565 scsi_device_type(device->devtype), hostno,
566 device->bus, device->target, device->lun);
570 /* Replace an entry from h->dev[] array. */
571 static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
572 int entry, struct hpsa_scsi_dev_t *new_entry,
573 struct hpsa_scsi_dev_t *added[], int *nadded,
574 struct hpsa_scsi_dev_t *removed[], int *nremoved)
576 /* assumes h->devlock is held */
577 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
578 removed[*nremoved] = h->dev[entry];
580 h->dev[entry] = new_entry;
581 added[*nadded] = new_entry;
583 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
584 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
585 new_entry->target, new_entry->lun);
588 /* Remove an entry from h->dev[] array. */
589 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
590 struct hpsa_scsi_dev_t *removed[], int *nremoved)
592 /* assumes h->devlock is held */
594 struct hpsa_scsi_dev_t *sd;
596 BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
599 removed[*nremoved] = h->dev[entry];
602 for (i = entry; i < h->ndevices-1; i++)
603 h->dev[i] = h->dev[i+1];
605 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
606 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
610 #define SCSI3ADDR_EQ(a, b) ( \
611 (a)[7] == (b)[7] && \
612 (a)[6] == (b)[6] && \
613 (a)[5] == (b)[5] && \
614 (a)[4] == (b)[4] && \
615 (a)[3] == (b)[3] && \
616 (a)[2] == (b)[2] && \
617 (a)[1] == (b)[1] && \
620 static void fixup_botched_add(struct ctlr_info *h,
621 struct hpsa_scsi_dev_t *added)
623 /* called when scsi_add_device fails in order to re-adjust
624 * h->dev[] to match the mid layer's view.
629 spin_lock_irqsave(&h->lock, flags);
630 for (i = 0; i < h->ndevices; i++) {
631 if (h->dev[i] == added) {
632 for (j = i; j < h->ndevices-1; j++)
633 h->dev[j] = h->dev[j+1];
638 spin_unlock_irqrestore(&h->lock, flags);
642 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
643 struct hpsa_scsi_dev_t *dev2)
645 if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
646 (dev1->lun != -1 && dev2->lun != -1)) &&
647 dev1->devtype != 0x0C)
648 return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
650 /* we compare everything except lun and target as these
651 * are not yet assigned. Compare parts likely to differ first.
654 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
655 sizeof(dev1->scsi3addr)) != 0)
657 if (memcmp(dev1->device_id, dev2->device_id,
658 sizeof(dev1->device_id)) != 0)
660 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
662 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
664 if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
666 if (dev1->devtype != dev2->devtype)
668 if (dev1->raid_level != dev2->raid_level)
670 if (dev1->bus != dev2->bus)
675 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
676 * and return needle location in *index. If scsi3addr matches, but not
677 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
678 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
680 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
681 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
685 #define DEVICE_NOT_FOUND 0
686 #define DEVICE_CHANGED 1
687 #define DEVICE_SAME 2
688 for (i = 0; i < haystack_size; i++) {
689 if (haystack[i] == NULL) /* previously removed. */
691 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
693 if (device_is_the_same(needle, haystack[i]))
696 return DEVICE_CHANGED;
700 return DEVICE_NOT_FOUND;
703 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
704 struct hpsa_scsi_dev_t *sd[], int nsds)
706 /* sd contains scsi3 addresses and devtypes, and inquiry
707 * data. This function takes what's in sd to be the current
708 * reality and updates h->dev[] to reflect that reality.
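 * The reconciliation is done in two passes: first walk h->dev[] and remove
 * (or replace) anything that is missing from, or changed in, sd[]; then walk
 * sd[] and add anything h->dev[] does not already have.  Finally, the SCSI
 * midlayer is notified of the removed and added devices.
 */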
710 int i, entry, device_change, changes = 0;
711 struct hpsa_scsi_dev_t *csd;
713 struct hpsa_scsi_dev_t **added, **removed;
714 int nadded, nremoved;
715 struct Scsi_Host *sh = NULL;
717 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
719 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
722 if (!added || !removed) {
723 dev_warn(&h->pdev->dev, "out of memory in "
724 "adjust_hpsa_scsi_table\n");
728 spin_lock_irqsave(&h->devlock, flags);
730 /* find any devices in h->dev[] that are not in
731 * sd[] and remove them from h->dev[], and for any
732 * devices which have changed, remove the old device
733 * info and add the new device info.
738 while (i < h->ndevices) {
740 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
741 if (device_change == DEVICE_NOT_FOUND) {
743 hpsa_scsi_remove_entry(h, hostno, i,
745 continue; /* remove ^^^, hence i not incremented */
746 } else if (device_change == DEVICE_CHANGED) {
748 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
749 added, &nadded, removed, &nremoved);
750 /* Set it to NULL to prevent it from being freed
751 * at the bottom of hpsa_update_scsi_devices()
758 /* Now, make sure every device listed in sd[] is also
759 * listed in h->dev[], adding them if they aren't found
762 for (i = 0; i < nsds; i++) {
763 if (!sd[i]) /* if already added above. */
765 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
766 h->ndevices, &entry);
767 if (device_change == DEVICE_NOT_FOUND) {
769 if (hpsa_scsi_add_entry(h, hostno, sd[i],
770 added, &nadded) != 0)
772 sd[i] = NULL; /* prevent from being freed later. */
773 } else if (device_change == DEVICE_CHANGED) {
774 /* should never happen... */
776 dev_warn(&h->pdev->dev,
777 "device unexpectedly changed.\n");
778 /* but if it does happen, we just ignore that device */
781 spin_unlock_irqrestore(&h->devlock, flags);
783 /* Don't notify the scsi mid layer of any changes the first time through
784 * (or if there are no changes); scsi_scan_host will do it later, the
785 * first time through.
787 if (hostno == -1 || !changes)
791 /* Notify scsi mid layer of any removed devices */
792 for (i = 0; i < nremoved; i++) {
793 struct scsi_device *sdev =
794 scsi_device_lookup(sh, removed[i]->bus,
795 removed[i]->target, removed[i]->lun);
797 scsi_remove_device(sdev);
798 scsi_device_put(sdev);
800 /* We don't expect to get here.
801 * future cmds to this device will get selection
802 * timeout as if the device was gone.
804 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
805 " for removal.", hostno, removed[i]->bus,
806 removed[i]->target, removed[i]->lun);
812 /* Notify scsi mid layer of any added devices */
813 for (i = 0; i < nadded; i++) {
814 if (scsi_add_device(sh, added[i]->bus,
815 added[i]->target, added[i]->lun) == 0)
817 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
818 "device not added.\n", hostno, added[i]->bus,
819 added[i]->target, added[i]->lun);
820 /* now we have to remove it from h->dev,
821 * since it didn't get added to scsi mid layer
823 fixup_botched_add(h, added[i]);
832 * Lookup bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
833 * Assumes h->devlock is held.
835 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
836 int bus, int target, int lun)
839 struct hpsa_scsi_dev_t *sd;
841 for (i = 0; i < h->ndevices; i++) {
843 if (sd->bus == bus && sd->target == target && sd->lun == lun)
849 /* link sdev->hostdata to our per-device structure. */
850 static int hpsa_slave_alloc(struct scsi_device *sdev)
852 struct hpsa_scsi_dev_t *sd;
856 h = sdev_to_hba(sdev);
857 spin_lock_irqsave(&h->devlock, flags);
858 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
859 sdev_id(sdev), sdev->lun);
862 spin_unlock_irqrestore(&h->devlock, flags);
866 static void hpsa_slave_destroy(struct scsi_device *sdev)
871 static void hpsa_scsi_setup(struct ctlr_info *h)
875 spin_lock_init(&h->devlock);
878 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
884 for (i = 0; i < h->nr_cmds; i++) {
885 kfree(h->cmd_sg_list[i]);
886 h->cmd_sg_list[i] = NULL;
888 kfree(h->cmd_sg_list);
889 h->cmd_sg_list = NULL;
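/* Allocate one SG chain block (h->chainsize descriptors) per command.  These
 * hold the overflow scatter-gather entries when a request needs more entries
 * than fit directly in the command (h->max_cmd_sg_entries).
 */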
892 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
896 if (h->chainsize <= 0)
899 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
903 for (i = 0; i < h->nr_cmds; i++) {
904 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
905 h->chainsize, GFP_KERNEL);
906 if (!h->cmd_sg_list[i])
912 hpsa_free_sg_chain_blocks(h);
916 static void hpsa_map_sg_chain_block(struct ctlr_info *h,
917 struct CommandList *c)
919 struct SGDescriptor *chain_sg, *chain_block;
922 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
923 chain_block = h->cmd_sg_list[c->cmdindex];
924 chain_sg->Ext = HPSA_SG_CHAIN;
925 chain_sg->Len = sizeof(*chain_sg) *
926 (c->Header.SGTotal - h->max_cmd_sg_entries);
927 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
929 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
930 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
933 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
934 struct CommandList *c)
936 struct SGDescriptor *chain_sg;
939 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
942 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
943 temp64.val32.lower = chain_sg->Addr.lower;
944 temp64.val32.upper = chain_sg->Addr.upper;
945 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
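/* complete_scsi_command: undo the DMA mappings, translate the controller's
 * CommandStatus and any sense data into cmd->result, and hand the command
 * back to the SCSI midlayer via cmd->scsi_done().
 */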
948 static void complete_scsi_command(struct CommandList *cp,
949 int timeout, u32 tag)
951 struct scsi_cmnd *cmd;
953 struct ErrorInfo *ei;
955 unsigned char sense_key;
956 unsigned char asc; /* additional sense code */
957 unsigned char ascq; /* additional sense code qualifier */
960 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
963 scsi_dma_unmap(cmd); /* undo the DMA mappings */
964 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
965 hpsa_unmap_sg_chain_block(h, cp);
967 cmd->result = (DID_OK << 16); /* host byte */
968 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
969 cmd->result |= ei->ScsiStatus;
971 /* copy the sense data whether we need to or not. */
972 memcpy(cmd->sense_buffer, ei->SenseInfo,
973 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
974 SCSI_SENSE_BUFFERSIZE :
976 scsi_set_resid(cmd, ei->ResidualCnt);
978 if (ei->CommandStatus == 0) {
984 /* an error has occurred */
985 switch (ei->CommandStatus) {
987 case CMD_TARGET_STATUS:
988 if (ei->ScsiStatus) {
990 sense_key = 0xf & ei->SenseInfo[2];
991 /* Get additional sense code */
992 asc = ei->SenseInfo[12];
993 /* Get additional sense code qualifier */
994 ascq = ei->SenseInfo[13];
997 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
998 if (check_for_unit_attention(h, cp)) {
999 cmd->result = DID_SOFT_ERROR << 16;
1002 if (sense_key == ILLEGAL_REQUEST) {
1004 * SCSI REPORT_LUNS is commonly unsupported on
1005 * Smart Array. Suppress noisy complaint.
1007 if (cp->Request.CDB[0] == REPORT_LUNS)
1010 /* If ASC/ASCQ indicate Logical Unit
1011 * Not Supported condition,
1013 if ((asc == 0x25) && (ascq == 0x0)) {
1014 dev_warn(&h->pdev->dev, "cp %p "
1015 "has check condition\n", cp);
1020 if (sense_key == NOT_READY) {
1021 /* If Sense is Not Ready, Logical Unit
1022 * Not ready, Manual Intervention required
1025 if ((asc == 0x04) && (ascq == 0x03)) {
1026 dev_warn(&h->pdev->dev, "cp %p "
1027 "has check condition: unit "
1028 "not ready, manual "
1029 "intervention required\n", cp);
1033 if (sense_key == ABORTED_COMMAND) {
1034 /* Aborted command is retryable */
1035 dev_warn(&h->pdev->dev, "cp %p "
1036 "has check condition: aborted command: "
1037 "ASC: 0x%x, ASCQ: 0x%x\n",
1039 cmd->result = DID_SOFT_ERROR << 16;
1042 /* Must be some other type of check condition */
1043 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1045 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1046 "Returning result: 0x%x, "
1047 "cmd=[%02x %02x %02x %02x %02x "
1048 "%02x %02x %02x %02x %02x %02x "
1049 "%02x %02x %02x %02x %02x]\n",
1050 cp, sense_key, asc, ascq,
1052 cmd->cmnd[0], cmd->cmnd[1],
1053 cmd->cmnd[2], cmd->cmnd[3],
1054 cmd->cmnd[4], cmd->cmnd[5],
1055 cmd->cmnd[6], cmd->cmnd[7],
1056 cmd->cmnd[8], cmd->cmnd[9],
1057 cmd->cmnd[10], cmd->cmnd[11],
1058 cmd->cmnd[12], cmd->cmnd[13],
1059 cmd->cmnd[14], cmd->cmnd[15]);
1064 /* Problem was not a check condition
1065 * Pass it up to the upper layers...
1067 if (ei->ScsiStatus) {
1068 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1069 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1070 "Returning result: 0x%x\n",
1072 sense_key, asc, ascq,
1074 } else { /* scsi status is zero??? How??? */
1075 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1076 "Returning no connection.\n", cp),
1078 /* Ordinarily, this case should never happen,
1079 * but there is a bug in some released firmware
1080 * revisions that allows it to happen if, for
1081 * example, a 4100 backplane loses power and
1082 * the tape drive is in it. We assume that
1083 * it's a fatal error of some kind because we
1084 * can't show that it wasn't. We will make it
1085 * look like selection timeout since that is
1086 * the most common reason for this to occur,
1087 * and it's severe enough.
1090 cmd->result = DID_NO_CONNECT << 16;
1094 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1096 case CMD_DATA_OVERRUN:
1097 dev_warn(&h->pdev->dev, "cp %p has"
1098 " completed with data overrun "
1102 /* print_bytes(cp, sizeof(*cp), 1, 0);
1104 /* We get CMD_INVALID if you address a non-existent device
1105 * instead of a selection timeout (no response). You will
1106 * see this if you yank out a drive, then try to access it.
1107 * This is kind of a shame because it means that any other
1108 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1109 * missing target. */
1110 cmd->result = DID_NO_CONNECT << 16;
1113 case CMD_PROTOCOL_ERR:
1114 dev_warn(&h->pdev->dev, "cp %p has "
1115 "protocol error \n", cp);
1117 case CMD_HARDWARE_ERR:
1118 cmd->result = DID_ERROR << 16;
1119 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1121 case CMD_CONNECTION_LOST:
1122 cmd->result = DID_ERROR << 16;
1123 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1126 cmd->result = DID_ABORT << 16;
1127 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1128 cp, ei->ScsiStatus);
1130 case CMD_ABORT_FAILED:
1131 cmd->result = DID_ERROR << 16;
1132 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1134 case CMD_UNSOLICITED_ABORT:
1135 cmd->result = DID_RESET << 16;
1136 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1140 cmd->result = DID_TIME_OUT << 16;
1141 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1144 cmd->result = DID_ERROR << 16;
1145 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1146 cp, ei->CommandStatus);
1148 cmd->scsi_done(cmd);
1152 static int hpsa_scsi_detect(struct ctlr_info *h)
1154 struct Scsi_Host *sh;
1157 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1164 sh->max_channel = 3;
1165 sh->max_cmd_len = MAX_COMMAND_SIZE;
1166 sh->max_lun = HPSA_MAX_LUN;
1167 sh->max_id = HPSA_MAX_LUN;
1168 sh->can_queue = h->nr_cmds;
1169 sh->cmd_per_lun = h->nr_cmds;
1170 sh->sg_tablesize = h->maxsgentries;
1172 sh->hostdata[0] = (unsigned long) h;
1173 sh->irq = h->intr[PERF_MODE_INT];
1174 sh->unique_id = sh->irq;
1175 error = scsi_add_host(sh, &h->pdev->dev);
1182 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1183 " failed for controller %d\n", h->ctlr);
1187 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1188 " failed for controller %d\n", h->ctlr);
1192 static void hpsa_pci_unmap(struct pci_dev *pdev,
1193 struct CommandList *c, int sg_used, int data_direction)
1196 union u64bit addr64;
1198 for (i = 0; i < sg_used; i++) {
1199 addr64.val32.lower = c->SG[i].Addr.lower;
1200 addr64.val32.upper = c->SG[i].Addr.upper;
1201 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, data_direction);
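/* hpsa_map_one: DMA-map a single contiguous buffer and describe it in SG
 * entry 0 of the command, or mark the command as carrying no data when
 * buflen is zero.
 */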
1206 static void hpsa_map_one(struct pci_dev *pdev,
1207 struct CommandList *cp,
1214 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1215 cp->Header.SGList = 0;
1216 cp->Header.SGTotal = 0;
1220 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1221 cp->SG[0].Addr.lower =
1222 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1223 cp->SG[0].Addr.upper =
1224 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1225 cp->SG[0].Len = buflen;
1226 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1227 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1230 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1231 struct CommandList *c)
1233 DECLARE_COMPLETION_ONSTACK(wait);
1236 enqueue_cmd_and_start_io(h, c);
1237 wait_for_completion(&wait);
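/* Issue a command synchronously, retrying a bounded number of times if the
 * target returns a unit attention condition, then unmap the data buffer.
 */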
1240 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1241 struct CommandList *c, int data_direction)
1243 int retry_count = 0;
1246 memset(c->err_info, 0, sizeof(c->err_info));
1247 hpsa_scsi_do_simple_cmd_core(h, c);
1249 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1250 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1253 static void hpsa_scsi_interpret_error(struct CommandList *cp)
1255 struct ErrorInfo *ei;
1256 struct device *d = &cp->h->pdev->dev;
1259 switch (ei->CommandStatus) {
1260 case CMD_TARGET_STATUS:
1261 dev_warn(d, "cmd %p has completed with errors\n", cp);
1262 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1264 if (ei->ScsiStatus == 0)
1265 dev_warn(d, "SCSI status is abnormally zero. "
1266 "(probably indicates selection timeout "
1267 "reported incorrectly due to a known "
1268 "firmware bug, circa July, 2001.)\n");
1270 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1271 dev_info(d, "UNDERRUN\n");
1273 case CMD_DATA_OVERRUN:
1274 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1277 /* controller unfortunately reports SCSI passthru's
1278 * to non-existent targets as invalid commands.
1280 dev_warn(d, "cp %p is reported invalid (probably means "
1281 "target device no longer present)\n", cp);
1282 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1286 case CMD_PROTOCOL_ERR:
1287 dev_warn(d, "cp %p has protocol error \n", cp);
1289 case CMD_HARDWARE_ERR:
1290 /* cmd->result = DID_ERROR << 16; */
1291 dev_warn(d, "cp %p had hardware error\n", cp);
1293 case CMD_CONNECTION_LOST:
1294 dev_warn(d, "cp %p had connection lost\n", cp);
1297 dev_warn(d, "cp %p was aborted\n", cp);
1299 case CMD_ABORT_FAILED:
1300 dev_warn(d, "cp %p reports abort failed\n", cp);
1302 case CMD_UNSOLICITED_ABORT:
1303 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1306 dev_warn(d, "cp %p timed out\n", cp);
1309 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1314 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1315 unsigned char page, unsigned char *buf,
1316 unsigned char bufsize)
1319 struct CommandList *c;
1320 struct ErrorInfo *ei;
1322 c = cmd_special_alloc(h);
1324 if (c == NULL) { /* trouble... */
1325 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1329 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1330 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1332 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1333 hpsa_scsi_interpret_error(c);
1336 cmd_special_free(h, c);
1340 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1343 struct CommandList *c;
1344 struct ErrorInfo *ei;
1346 c = cmd_special_alloc(h);
1348 if (c == NULL) { /* trouble... */
1349 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1353 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1354 hpsa_scsi_do_simple_cmd_core(h, c);
1355 /* no unmap needed here because no data xfer. */
1358 if (ei->CommandStatus != 0) {
1359 hpsa_scsi_interpret_error(c);
1362 cmd_special_free(h, c);
1366 static void hpsa_get_raid_level(struct ctlr_info *h,
1367 unsigned char *scsi3addr, unsigned char *raid_level)
1372 *raid_level = RAID_UNKNOWN;
1373 buf = kzalloc(64, GFP_KERNEL);
1376 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1378 *raid_level = buf[8];
1379 if (*raid_level > RAID_UNKNOWN)
1380 *raid_level = RAID_UNKNOWN;
1385 /* Get the device id from inquiry page 0x83 */
1386 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1387 unsigned char *device_id, int buflen)
1394 buf = kzalloc(64, GFP_KERNEL);
1397 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1399 memcpy(device_id, &buf[8], buflen);
1404 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1405 struct ReportLUNdata *buf, int bufsize,
1406 int extended_response)
1409 struct CommandList *c;
1410 unsigned char scsi3addr[8];
1411 struct ErrorInfo *ei;
1413 c = cmd_special_alloc(h);
1414 if (c == NULL) { /* trouble... */
1415 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1418 /* address the controller */
1419 memset(scsi3addr, 0, sizeof(scsi3addr));
1420 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1421 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1422 if (extended_response)
1423 c->Request.CDB[1] = extended_response;
1424 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1426 if (ei->CommandStatus != 0 &&
1427 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1428 hpsa_scsi_interpret_error(c);
1431 cmd_special_free(h, c);
1435 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1436 struct ReportLUNdata *buf,
1437 int bufsize, int extended_response)
1439 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1442 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1443 struct ReportLUNdata *buf, int bufsize)
1445 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1448 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1449 int bus, int target, int lun)
1452 device->target = target;
1456 static int hpsa_update_device_info(struct ctlr_info *h,
1457 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1459 #define OBDR_TAPE_INQ_SIZE 49
1460 unsigned char *inq_buff;
1462 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1466 /* Do an inquiry to the device to see what it is. */
1467 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1468 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1469 /* Inquiry failed (msg printed already) */
1470 dev_err(&h->pdev->dev,
1471 "hpsa_update_device_info: inquiry failed\n");
1475 this_device->devtype = (inq_buff[0] & 0x1f);
1476 memcpy(this_device->scsi3addr, scsi3addr, 8);
1477 memcpy(this_device->vendor, &inq_buff[8],
1478 sizeof(this_device->vendor));
1479 memcpy(this_device->model, &inq_buff[16],
1480 sizeof(this_device->model));
1481 memcpy(this_device->revision, &inq_buff[32],
1482 sizeof(this_device->revision));
1483 memset(this_device->device_id, 0,
1484 sizeof(this_device->device_id));
1485 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1486 sizeof(this_device->device_id));
1488 if (this_device->devtype == TYPE_DISK &&
1489 is_logical_dev_addr_mode(scsi3addr))
1490 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1492 this_device->raid_level = RAID_UNKNOWN;
1502 static unsigned char *msa2xxx_model[] = {
1510 static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1514 for (i = 0; msa2xxx_model[i]; i++)
1515 if (strncmp(device->model, msa2xxx_model[i],
1516 strlen(msa2xxx_model[i])) == 0)
1521 /* Helper function to assign bus, target, lun mapping of devices.
1522 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
1523 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
1524 * Logical drive target and lun are assigned at this time, but
1525 * physical device lun and target assignment are deferred (assigned
1526 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
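 * For example, an MSA2xxx logical volume reported with lunid 0x00010005
 * would be placed below at bus 1, target 1, lun 5.
 */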
1528 static void figure_bus_target_lun(struct ctlr_info *h,
1529 u8 *lunaddrbytes, int *bus, int *target, int *lun,
1530 struct hpsa_scsi_dev_t *device)
1534 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1535 /* logical device */
1536 if (unlikely(is_scsi_rev_5(h))) {
1537 /* p1210m, logical drives lun assignments
1538 * match SCSI REPORT LUNS data.
1540 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1543 *lun = (lunid & 0x3fff) + 1;
1546 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1547 if (is_msa2xxx(h, device)) {
1548 /* msa2xxx way, put logicals on bus 1
1549 * and match the target/lun numbers the box reports.
1553 *target = (lunid >> 16) & 0x3fff;
1554 *lun = lunid & 0x00ff;
1556 /* Traditional smart array way. */
1559 *target = lunid & 0x3fff;
1563 /* physical device */
1564 if (is_hba_lunid(lunaddrbytes))
1565 if (unlikely(is_scsi_rev_5(h))) {
1566 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1571 *bus = 3; /* traditional smartarray */
1573 *bus = 2; /* physical disk */
1575 *lun = -1; /* we will fill these in later. */
1580 * If there is no lun 0 on a target, linux won't find any devices.
1581 * For the MSA2xxx boxes, we have to manually detect the enclosure
1582 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1583 * it for some reason. *tmpdevice is the target we're adding,
1584 * this_device is a pointer into the current element of currentsd[]
1585 * that we're building up in update_scsi_devices(), below.
1586 * lunzerobits is a bitmap that tracks which targets already have a lun 0 assigned.
1588 * Returns 1 if an enclosure was added, 0 if not.
1590 static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1591 struct hpsa_scsi_dev_t *tmpdevice,
1592 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1593 int bus, int target, int lun, unsigned long lunzerobits[],
1594 int *nmsa2xxx_enclosures)
1596 unsigned char scsi3addr[8];
1598 if (test_bit(target, lunzerobits))
1599 return 0; /* There is already a lun 0 on this target. */
1601 if (!is_logical_dev_addr_mode(lunaddrbytes))
1602 return 0; /* It's the logical targets that may lack lun 0. */
1604 if (!is_msa2xxx(h, tmpdevice))
1605 return 0; /* It's only the MSA2xxx that have this problem. */
1607 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1610 if (is_hba_lunid(scsi3addr))
1611 return 0; /* Don't add the RAID controller here. */
1613 if (is_scsi_rev_5(h))
1614 return 0; /* p1210m doesn't need to do this. */
1616 #define MAX_MSA2XXX_ENCLOSURES 32
1617 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1618 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1619 "enclosures exceeded. Check your hardware "
1624 memset(scsi3addr, 0, 8);
1625 scsi3addr[3] = target;
1626 if (hpsa_update_device_info(h, scsi3addr, this_device))
1628 (*nmsa2xxx_enclosures)++;
1629 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1630 set_bit(target, lunzerobits);
1635 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1636 * logdev. The number of luns in physdev and logdev are returned in
1637 * *nphysicals and *nlogicals, respectively.
1638 * Returns 0 on success, -1 otherwise.
1640 static int hpsa_gather_lun_info(struct ctlr_info *h,
1642 struct ReportLUNdata *physdev, u32 *nphysicals,
1643 struct ReportLUNdata *logdev, u32 *nlogicals)
1645 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1646 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1649 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1650 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1651 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1652 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1653 *nphysicals - HPSA_MAX_PHYS_LUN);
1654 *nphysicals = HPSA_MAX_PHYS_LUN;
1656 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1657 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1660 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1661 /* Reject Logicals in excess of our max capability. */
1662 if (*nlogicals > HPSA_MAX_LUN) {
1663 dev_warn(&h->pdev->dev,
1664 "maximum logical LUNs (%d) exceeded. "
1665 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1666 *nlogicals - HPSA_MAX_LUN);
1667 *nlogicals = HPSA_MAX_LUN;
1669 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1670 dev_warn(&h->pdev->dev,
1671 "maximum logical + physical LUNs (%d) exceeded. "
1672 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1673 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1674 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1679 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1680 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1681 struct ReportLUNdata *logdev_list)
1683 /* Helper function, figure out where the LUN ID info is coming from
1684 * given index i, lists of physical and logical devices, where in
1685 * the list the raid controller is supposed to appear (first or last)
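 * If the controller appears first (raid_ctlr_position == 0), every other
 * index is shifted up by one; the (raid_ctlr_position == 0) terms below
 * account for that.
 */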
1688 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1689 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1691 if (i == raid_ctlr_position)
1692 return RAID_CTLR_LUNID;
1694 if (i < logicals_start)
1695 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1697 if (i < last_device)
1698 return &logdev_list->LUN[i - nphysicals -
1699 (raid_ctlr_position == 0)][0];
1704 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1706 /* the idea here is we could get notified
1707 * that some devices have changed, so we do a report
1708 * physical luns and report logical luns cmd, and adjust
1709 * our list of devices accordingly.
1711 * The scsi3addr's of devices won't change so long as the
1712 * adapter is not reset. That means we can rescan and
1713 * tell which devices we already know about, vs. new
1714 * devices, vs. disappearing devices.
1716 struct ReportLUNdata *physdev_list = NULL;
1717 struct ReportLUNdata *logdev_list = NULL;
1718 unsigned char *inq_buff = NULL;
1721 u32 ndev_allocated = 0;
1722 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1724 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1725 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1726 int bus, target, lun;
1727 int raid_ctlr_position;
1728 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1730 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1732 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1733 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1734 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1735 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1737 if (!currentsd || !physdev_list || !logdev_list ||
1738 !inq_buff || !tmpdevice) {
1739 dev_err(&h->pdev->dev, "out of memory\n");
1742 memset(lunzerobits, 0, sizeof(lunzerobits));
1744 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1745 logdev_list, &nlogicals))
1748 /* We might see up to 32 MSA2xxx enclosures; actually only 8 of them,
1749 * but each seen 4 times through different paths. The plus 1
1750 * is for the RAID controller.
1752 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1754 /* Allocate the per device structures */
1755 for (i = 0; i < ndevs_to_allocate; i++) {
1756 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1757 if (!currentsd[i]) {
1758 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1759 __FILE__, __LINE__);
1765 if (unlikely(is_scsi_rev_5(h)))
1766 raid_ctlr_position = 0;
1768 raid_ctlr_position = nphysicals + nlogicals;
1770 /* adjust our table of devices */
1771 nmsa2xxx_enclosures = 0;
1772 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1775 /* Figure out where the LUN ID info is coming from */
1776 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1777 i, nphysicals, nlogicals, physdev_list, logdev_list);
1778 /* skip masked physical devices. */
1779 if (lunaddrbytes[3] & 0xC0 &&
1780 i < nphysicals + (raid_ctlr_position == 0))
1783 /* Get device type, vendor, model, device id */
1784 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1785 continue; /* skip it if we can't talk to it. */
1786 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1788 this_device = currentsd[ncurrent];
1791 * For the msa2xxx boxes, we have to insert a LUN 0 which
1792 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1793 * is nonetheless an enclosure device there. We have to
1794 * present that otherwise linux won't find anything if
1795 * there is no lun 0.
1797 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1798 lunaddrbytes, bus, target, lun, lunzerobits,
1799 &nmsa2xxx_enclosures)) {
1801 this_device = currentsd[ncurrent];
1804 *this_device = *tmpdevice;
1805 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1807 switch (this_device->devtype) {
1809 /* We don't *really* support actual CD-ROM devices,
1810 * just "One Button Disaster Recovery" tape drive
1811 * which temporarily pretends to be a CD-ROM drive.
1812 * So we check that the device is really an OBDR tape
1813 * device by checking for "$DR-10" in bytes 43-48 of the inquiry data.
1817 #define OBDR_TAPE_SIG "$DR-10"
1818 strncpy(obdr_sig, &inq_buff[43], 6);
1820 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1821 /* Not OBDR device, ignore it. */
1832 case TYPE_MEDIUM_CHANGER:
1836 /* Only present the Smartarray HBA as a RAID controller.
1837 * If it's a RAID controller other than the HBA itself
1838 * (an external RAID controller, MSA500 or similar), don't present it.
1841 if (!is_hba_lunid(lunaddrbytes))
1848 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1851 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1854 for (i = 0; i < ndev_allocated; i++)
1855 kfree(currentsd[i]);
1858 kfree(physdev_list);
1862 /* hpsa_scatter_gather takes a struct scsi_cmnd (cmd) and does the PCI
1863 * DMA mapping and fills in the scatter gather entries of the hpsa command, cp.
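 * When a request needs more SG entries than fit in the command itself
 * (h->max_cmd_sg_entries), the last slot is turned into a chain descriptor
 * pointing at the per-command chain block set up by
 * hpsa_allocate_sg_chain_blocks(), via hpsa_map_sg_chain_block().
 */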
1866 static int hpsa_scatter_gather(struct ctlr_info *h,
1867 struct CommandList *cp,
1868 struct scsi_cmnd *cmd)
1871 struct scatterlist *sg;
1873 int use_sg, i, sg_index, chained;
1874 struct SGDescriptor *curr_sg;
1876 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1878 use_sg = scsi_dma_map(cmd);
1883 goto sglist_finished;
1888 scsi_for_each_sg(cmd, sg, use_sg, i) {
1889 if (i == h->max_cmd_sg_entries - 1 &&
1890 use_sg > h->max_cmd_sg_entries) {
1892 curr_sg = h->cmd_sg_list[cp->cmdindex];
1895 addr64 = (u64) sg_dma_address(sg);
1896 len = sg_dma_len(sg);
1897 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1898 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1900 curr_sg->Ext = 0; /* we are not chaining */
1904 if (use_sg + chained > h->maxSG)
1905 h->maxSG = use_sg + chained;
1908 cp->Header.SGList = h->max_cmd_sg_entries;
1909 cp->Header.SGTotal = (u16) (use_sg + 1);
1910 hpsa_map_sg_chain_block(h, cp);
1916 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
1917 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
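/* hpsa_scsi_queue_command: SCSI midlayer entry point.  Allocate a CommandList
 * from the pre-allocated pool, copy in the CDB and LUN address, set up the
 * scatter-gather mappings, and submit it to the controller; completion comes
 * back later through the interrupt handler in complete_scsi_command().
 */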
1922 static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1923 void (*done)(struct scsi_cmnd *))
1925 struct ctlr_info *h;
1926 struct hpsa_scsi_dev_t *dev;
1927 unsigned char scsi3addr[8];
1928 struct CommandList *c;
1929 unsigned long flags;
1931 /* Get the ptr to our adapter structure out of cmd->host. */
1932 h = sdev_to_hba(cmd->device);
1933 dev = cmd->device->hostdata;
1935 cmd->result = DID_NO_CONNECT << 16;
1939 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1941 /* Need a lock as this is being allocated from the pool */
1942 spin_lock_irqsave(&h->lock, flags);
1944 spin_unlock_irqrestore(&h->lock, flags);
1945 if (c == NULL) { /* trouble... */
1946 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1947 return SCSI_MLQUEUE_HOST_BUSY;
1950 /* Fill in the command list header */
1952 cmd->scsi_done = done; /* save this for use by completion code */
1954 /* save c in case we have to abort it */
1955 cmd->host_scribble = (unsigned char *) c;
1957 c->cmd_type = CMD_SCSI;
1959 c->Header.ReplyQueue = 0; /* unused in simple mode */
1960 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1961 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
1962 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
1964 /* Fill in the request block... */
1966 c->Request.Timeout = 0;
1967 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1968 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1969 c->Request.CDBLen = cmd->cmd_len;
1970 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1971 c->Request.Type.Type = TYPE_CMD;
1972 c->Request.Type.Attribute = ATTR_SIMPLE;
1973 switch (cmd->sc_data_direction) {
1975 c->Request.Type.Direction = XFER_WRITE;
1977 case DMA_FROM_DEVICE:
1978 c->Request.Type.Direction = XFER_READ;
1981 c->Request.Type.Direction = XFER_NONE;
1983 case DMA_BIDIRECTIONAL:
1984 /* This can happen if a buggy application does a scsi passthru
1985 * and sets both inlen and outlen to non-zero. ( see
1986 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
1989 c->Request.Type.Direction = XFER_RSVD;
1990 /* This is technically wrong, and hpsa controllers should
1991 * reject it with CMD_INVALID, which is the most correct
1992 * response, but non-fibre backends appear to let it
1993 * slide by, and give the same results as if this field
1994 * were set correctly. Either way is acceptable for
1995 * our purposes here.
2001 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2002 cmd->sc_data_direction);
2007 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2009 return SCSI_MLQUEUE_HOST_BUSY;
2011 enqueue_cmd_and_start_io(h, c);
2012 /* the cmd'll come back via intr handler in complete_scsi_command() */
2016 static void hpsa_scan_start(struct Scsi_Host *sh)
2018 struct ctlr_info *h = shost_to_hba(sh);
2019 unsigned long flags;
2021 /* wait until any scan already in progress is finished. */
2023 spin_lock_irqsave(&h->scan_lock, flags);
2024 if (h->scan_finished)
2026 spin_unlock_irqrestore(&h->scan_lock, flags);
2027 wait_event(h->scan_wait_queue, h->scan_finished);
2028 /* Note: We don't need to worry about a race between this
2029 * thread and driver unload because the midlayer will
2030 * have incremented the reference count, so unload won't
2031 * happen if we're in here.
2034 h->scan_finished = 0; /* mark scan as in progress */
2035 spin_unlock_irqrestore(&h->scan_lock, flags);
2037 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2039 spin_lock_irqsave(&h->scan_lock, flags);
2040 h->scan_finished = 1; /* mark scan as finished. */
2041 wake_up_all(&h->scan_wait_queue);
2042 spin_unlock_irqrestore(&h->scan_lock, flags);
2045 static int hpsa_scan_finished(struct Scsi_Host *sh,
2046 unsigned long elapsed_time)
2048 struct ctlr_info *h = shost_to_hba(sh);
2049 unsigned long flags;
2052 spin_lock_irqsave(&h->scan_lock, flags);
2053 finished = h->scan_finished;
2054 spin_unlock_irqrestore(&h->scan_lock, flags);
2058 static int hpsa_change_queue_depth(struct scsi_device *sdev,
2059 int qdepth, int reason)
2061 struct ctlr_info *h = sdev_to_hba(sdev);
2063 if (reason != SCSI_QDEPTH_DEFAULT)
2069 if (qdepth > h->nr_cmds)
2070 qdepth = h->nr_cmds;
2071 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2072 return sdev->queue_depth;
2075 static void hpsa_unregister_scsi(struct ctlr_info *h)
2077 /* we are being forcibly unloaded, and may not refuse. */
2078 scsi_remove_host(h->scsi_host);
2079 scsi_host_put(h->scsi_host);
2080 h->scsi_host = NULL;
2083 static int hpsa_register_scsi(struct ctlr_info *h)
2087 rc = hpsa_scsi_detect(h);
2089 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
2090 " hpsa_scsi_detect(), rc is %d\n", rc);
2094 static int wait_for_device_to_become_ready(struct ctlr_info *h,
2095 unsigned char lunaddr[])
2099 int waittime = 1; /* seconds */
2100 struct CommandList *c;
2102 c = cmd_special_alloc(h);
2104 dev_warn(&h->pdev->dev, "out of memory in "
2105 "wait_for_device_to_become_ready.\n");
2109 /* Send test unit ready until device ready, or give up. */
2110 while (count < HPSA_TUR_RETRY_LIMIT) {
2112 /* Wait for a bit. Do this first, because if we send
2113 * the TUR right away, the reset will just abort it.
2115 msleep(1000 * waittime);
2118 /* Increase wait time with each try, up to a point. */
2119 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2120 waittime = waittime * 2;
2122 /* Send the Test Unit Ready */
2123 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2124 hpsa_scsi_do_simple_cmd_core(h, c);
2125 /* no unmap needed here because no data xfer. */
2127 if (c->err_info->CommandStatus == CMD_SUCCESS)
2130 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2131 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2132 (c->err_info->SenseInfo[2] == NO_SENSE ||
2133 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2136 dev_warn(&h->pdev->dev, "waiting %d secs "
2137 "for device to become ready.\n", waittime);
2138 rc = 1; /* device not ready. */
2142 dev_warn(&h->pdev->dev, "giving up on device.\n");
2144 dev_warn(&h->pdev->dev, "device is ready.\n");
2146 cmd_special_free(h, c);
2150 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
2151 * complaining. Doing a host- or bus-reset can't do anything good here.
2153 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2156 struct ctlr_info *h;
2157 struct hpsa_scsi_dev_t *dev;
2159 /* find the controller to which the command to be aborted was sent */
2160 h = sdev_to_hba(scsicmd->device);
2161 if (h == NULL) /* paranoia */
2163 dev = scsicmd->device->hostdata;
2165 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2166 "device lookup failed.\n");
2169 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2170 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2171 /* send a reset to the SCSI LUN which the command was sent to */
2172 rc = hpsa_send_reset(h, dev->scsi3addr);
2173 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2176 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2181 * For operations that cannot sleep, a command block is allocated at init,
2182 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2183 * which ones are free or in use. Lock must be held when calling this.
2184 * cmd_free() is the complement.
2186 static struct CommandList *cmd_alloc(struct ctlr_info *h)
2188 struct CommandList *c;
2190 union u64bit temp64;
2191 dma_addr_t cmd_dma_handle, err_dma_handle;
2194 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2195 if (i == h->nr_cmds)
2197 } while (test_and_set_bit
2198 (i & (BITS_PER_LONG - 1),
2199 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
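/* i now indexes a free slot in the pre-allocated command pool; the bit
 * just set marks it in use until cmd_free() clears it again.
 */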
2200 c = h->cmd_pool + i;
2201 memset(c, 0, sizeof(*c));
2202 cmd_dma_handle = h->cmd_pool_dhandle
2204 c->err_info = h->errinfo_pool + i;
2205 memset(c->err_info, 0, sizeof(*c->err_info));
2206 err_dma_handle = h->errinfo_pool_dhandle
2207 + i * sizeof(*c->err_info);
2212 INIT_HLIST_NODE(&c->list);
2213 c->busaddr = (u32) cmd_dma_handle;
2214 temp64.val = (u64) err_dma_handle;
2215 c->ErrDesc.Addr.lower = temp64.val32.lower;
2216 c->ErrDesc.Addr.upper = temp64.val32.upper;
2217 c->ErrDesc.Len = sizeof(*c->err_info);
2223 /* For operations that can wait for kmalloc to possibly sleep,
2224 * this routine can be called. Lock need not be held to call
2225 * cmd_special_alloc. cmd_special_free() is the complement.
2227 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2229 struct CommandList *c;
2230 union u64bit temp64;
2231 dma_addr_t cmd_dma_handle, err_dma_handle;
2233 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2236 memset(c, 0, sizeof(*c));
2240 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2243 if (c->err_info == NULL) {
2244 pci_free_consistent(h->pdev,
2245 sizeof(*c), c, cmd_dma_handle);
2248 memset(c->err_info, 0, sizeof(*c->err_info));
2250 INIT_HLIST_NODE(&c->list);
2251 c->busaddr = (u32) cmd_dma_handle;
2252 temp64.val = (u64) err_dma_handle;
2253 c->ErrDesc.Addr.lower = temp64.val32.lower;
2254 c->ErrDesc.Addr.upper = temp64.val32.upper;
2255 c->ErrDesc.Len = sizeof(*c->err_info);
2261 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2265 i = c - h->cmd_pool;
2266 clear_bit(i & (BITS_PER_LONG - 1),
2267 h->cmd_pool_bits + (i / BITS_PER_LONG));
2271 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2273 union u64bit temp64;
2275 temp64.val32.lower = c->ErrDesc.Addr.lower;
2276 temp64.val32.upper = c->ErrDesc.Addr.upper;
2277 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2278 c->err_info, (dma_addr_t) temp64.val);
2279 pci_free_consistent(h->pdev, sizeof(*c),
2280 c, (dma_addr_t) c->busaddr);
2283 #ifdef CONFIG_COMPAT
2285 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2287 IOCTL32_Command_struct __user *arg32 =
2288 (IOCTL32_Command_struct __user *) arg;
2289 IOCTL_Command_struct arg64;
2290 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2295 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2296 sizeof(arg64.LUN_info));
2297 err |= copy_from_user(&arg64.Request, &arg32->Request,
2298 sizeof(arg64.Request));
2299 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2300 sizeof(arg64.error_info));
2301 err |= get_user(arg64.buf_size, &arg32->buf_size);
2302 err |= get_user(cp, &arg32->buf);
2303 arg64.buf = compat_ptr(cp);
2304 err |= copy_to_user(p, &arg64, sizeof(arg64));
2309 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2312 err |= copy_in_user(&arg32->error_info, &p->error_info,
2313 sizeof(arg32->error_info));
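/* Both compat entry points follow the same pattern: the 32-bit user
 * structure is unpacked field by field into a native IOCTL structure
 * staged on the user stack via compat_alloc_user_space(), the regular
 * hpsa_ioctl() does the real work, and error_info is copied back into
 * the caller's 32-bit structure.
 */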
2319 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2322 BIG_IOCTL32_Command_struct __user *arg32 =
2323 (BIG_IOCTL32_Command_struct __user *) arg;
2324 BIG_IOCTL_Command_struct arg64;
2325 BIG_IOCTL_Command_struct __user *p =
2326 compat_alloc_user_space(sizeof(arg64));
2331 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2332 sizeof(arg64.LUN_info));
2333 err |= copy_from_user(&arg64.Request, &arg32->Request,
2334 sizeof(arg64.Request));
2335 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2336 sizeof(arg64.error_info));
2337 err |= get_user(arg64.buf_size, &arg32->buf_size);
2338 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2339 err |= get_user(cp, &arg32->buf);
2340 arg64.buf = compat_ptr(cp);
2341 err |= copy_to_user(p, &arg64, sizeof(arg64));
2346 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2349 err |= copy_in_user(&arg32->error_info, &p->error_info,
2350 sizeof(arg32->error_info));
2356 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2359 case CCISS_GETPCIINFO:
2360 case CCISS_GETINTINFO:
2361 case CCISS_SETINTINFO:
2362 case CCISS_GETNODENAME:
2363 case CCISS_SETNODENAME:
2364 case CCISS_GETHEARTBEAT:
2365 case CCISS_GETBUSTYPES:
2366 case CCISS_GETFIRMVER:
2367 case CCISS_GETDRIVVER:
2368 case CCISS_REVALIDVOLS:
2369 case CCISS_DEREGDISK:
2370 case CCISS_REGNEWDISK:
2372 case CCISS_RESCANDISK:
2373 case CCISS_GETLUNINFO:
2374 return hpsa_ioctl(dev, cmd, arg);
2376 case CCISS_PASSTHRU32:
2377 return hpsa_ioctl32_passthru(dev, cmd, arg);
2378 case CCISS_BIG_PASSTHRU32:
2379 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2382 return -ENOIOCTLCMD;
2387 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2389 struct hpsa_pci_info pciinfo;
2393 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2394 pciinfo.bus = h->pdev->bus->number;
2395 pciinfo.dev_fn = h->pdev->devfn;
2396 pciinfo.board_id = h->board_id;
2397 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2402 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2404 DriverVer_type DriverVer;
2405 unsigned char vmaj, vmin, vsubmin;
2408 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2409 &vmaj, &vmin, &vsubmin);
2411 dev_info(&h->pdev->dev, "driver version string '%s' "
2412 "unrecognized.", HPSA_DRIVER_VERSION);
2417 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
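/* For illustration only: a version string of "1.2.3" would parse to
 * vmaj = 1, vmin = 2, vsubmin = 3 and pack to DriverVer = 0x010203.
 */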
2420 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2425 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2427 IOCTL_Command_struct iocommand;
2428 struct CommandList *c;
2430 union u64bit temp64;
2434 if (!capable(CAP_SYS_RAWIO))
2436 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2438 if ((iocommand.buf_size < 1) &&
2439 (iocommand.Request.Type.Direction != XFER_NONE)) {
2442 if (iocommand.buf_size > 0) {
2443 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2447 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2448 /* Copy the data into the buffer we created */
2449 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2454 memset(buff, 0, iocommand.buf_size);
2455 c = cmd_special_alloc(h);
2460 /* Fill in the command type */
2461 c->cmd_type = CMD_IOCTL_PEND;
2462 /* Fill in Command Header */
2463 c->Header.ReplyQueue = 0; /* unused in simple mode */
2464 if (iocommand.buf_size > 0) { /* buffer to fill */
2465 c->Header.SGList = 1;
2466 c->Header.SGTotal = 1;
2467 } else { /* no buffers to fill */
2468 c->Header.SGList = 0;
2469 c->Header.SGTotal = 0;
2471 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2472 /* use the kernel address of the cmd block for the tag */
2473 c->Header.Tag.lower = c->busaddr;
2475 /* Fill in Request block */
2476 memcpy(&c->Request, &iocommand.Request,
2477 sizeof(c->Request));
2479 /* Fill in the scatter gather information */
2480 if (iocommand.buf_size > 0) {
2481 temp64.val = pci_map_single(h->pdev, buff,
2482 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2483 c->SG[0].Addr.lower = temp64.val32.lower;
2484 c->SG[0].Addr.upper = temp64.val32.upper;
2485 c->SG[0].Len = iocommand.buf_size;
2486 c->SG[0].Ext = 0; /* we are not chaining*/
2488 hpsa_scsi_do_simple_cmd_core(h, c);
2489 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2490 check_ioctl_unit_attention(h, c);
2492 /* Copy the error information out */
2493 memcpy(&iocommand.error_info, c->err_info,
2494 sizeof(iocommand.error_info));
2495 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2497 cmd_special_free(h, c);
2501 if (iocommand.Request.Type.Direction == XFER_READ) {
2502 /* Copy the data out of the buffer we created */
2503 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2505 cmd_special_free(h, c);
2510 cmd_special_free(h, c);
2514 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2516 BIG_IOCTL_Command_struct *ioc;
2517 struct CommandList *c;
2518 unsigned char **buff = NULL;
2519 int *buff_size = NULL;
2520 union u64bit temp64;
2526 BYTE __user *data_ptr;
2530 if (!capable(CAP_SYS_RAWIO))
2532 ioc = (BIG_IOCTL_Command_struct *)
2533 kmalloc(sizeof(*ioc), GFP_KERNEL);
2538 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2542 if ((ioc->buf_size < 1) &&
2543 (ioc->Request.Type.Direction != XFER_NONE)) {
2547 /* Check kmalloc limits using all SGs */
2548 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2552 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2556 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2561 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2566 left = ioc->buf_size;
2567 data_ptr = ioc->buf;
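/* Split the user buffer into chunks of at most ioc->malloc_size bytes,
 * one kernel bounce buffer per chunk; each chunk later becomes one SG
 * entry of the command (at most MAXSGENTRIES chunks, enforced above).
 */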
2569 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2570 buff_size[sg_used] = sz;
2571 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2572 if (buff[sg_used] == NULL) {
2576 if (ioc->Request.Type.Direction == XFER_WRITE) {
2577 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2582 memset(buff[sg_used], 0, sz);
2587 c = cmd_special_alloc(h);
2592 c->cmd_type = CMD_IOCTL_PEND;
2593 c->Header.ReplyQueue = 0;
2595 if (ioc->buf_size > 0) {
2596 c->Header.SGList = sg_used;
2597 c->Header.SGTotal = sg_used;
2599 c->Header.SGList = 0;
2600 c->Header.SGTotal = 0;
2602 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2603 c->Header.Tag.lower = c->busaddr;
2604 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2605 if (ioc->buf_size > 0) {
2607 for (i = 0; i < sg_used; i++) {
2608 temp64.val = pci_map_single(h->pdev, buff[i],
2609 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2610 c->SG[i].Addr.lower = temp64.val32.lower;
2611 c->SG[i].Addr.upper = temp64.val32.upper;
2612 c->SG[i].Len = buff_size[i];
2613 /* we are not chaining */
2617 hpsa_scsi_do_simple_cmd_core(h, c);
2618 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2619 check_ioctl_unit_attention(h, c);
2620 /* Copy the error information out */
2621 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2622 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2623 cmd_special_free(h, c);
2627 if (ioc->Request.Type.Direction == XFER_READ) {
2628 /* Copy the data out of the buffer we created */
2629 BYTE __user *ptr = ioc->buf;
2630 for (i = 0; i < sg_used; i++) {
2631 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2632 cmd_special_free(h, c);
2636 ptr += buff_size[i];
2639 cmd_special_free(h, c);
2643 for (i = 0; i < sg_used; i++)
2652 static void check_ioctl_unit_attention(struct ctlr_info *h,
2653 struct CommandList *c)
2655 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2656 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2657 (void) check_for_unit_attention(h, c);
2662 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2664 struct ctlr_info *h;
2665 void __user *argp = (void __user *)arg;
2667 h = sdev_to_hba(dev);
2670 case CCISS_DEREGDISK:
2671 case CCISS_REGNEWDISK:
2673 hpsa_scan_start(h->scsi_host);
2675 case CCISS_GETPCIINFO:
2676 return hpsa_getpciinfo_ioctl(h, argp);
2677 case CCISS_GETDRIVVER:
2678 return hpsa_getdrivver_ioctl(h, argp);
2679 case CCISS_PASSTHRU:
2680 return hpsa_passthru_ioctl(h, argp);
2681 case CCISS_BIG_PASSTHRU:
2682 return hpsa_big_passthru_ioctl(h, argp);
2688 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2689 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2692 int pci_dir = XFER_NONE;
2694 c->cmd_type = CMD_IOCTL_PEND;
2695 c->Header.ReplyQueue = 0;
2696 if (buff != NULL && size > 0) {
2697 c->Header.SGList = 1;
2698 c->Header.SGTotal = 1;
2700 c->Header.SGList = 0;
2701 c->Header.SGTotal = 0;
2703 c->Header.Tag.lower = c->busaddr;
2704 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2706 c->Request.Type.Type = cmd_type;
2707 if (cmd_type == TYPE_CMD) {
2710 /* are we trying to read a vital product page */
2711 if (page_code != 0) {
2712 c->Request.CDB[1] = 0x01;
2713 c->Request.CDB[2] = page_code;
2715 c->Request.CDBLen = 6;
2716 c->Request.Type.Attribute = ATTR_SIMPLE;
2717 c->Request.Type.Direction = XFER_READ;
2718 c->Request.Timeout = 0;
2719 c->Request.CDB[0] = HPSA_INQUIRY;
2720 c->Request.CDB[4] = size & 0xFF;
2722 case HPSA_REPORT_LOG:
2723 case HPSA_REPORT_PHYS:
2724 /* Talking to the controller, so it's a physical command:
2725 mode = 00, target = 0.  Nothing to write.
2727 c->Request.CDBLen = 12;
2728 c->Request.Type.Attribute = ATTR_SIMPLE;
2729 c->Request.Type.Direction = XFER_READ;
2730 c->Request.Timeout = 0;
2731 c->Request.CDB[0] = cmd;
2732 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2733 c->Request.CDB[7] = (size >> 16) & 0xFF;
2734 c->Request.CDB[8] = (size >> 8) & 0xFF;
2735 c->Request.CDB[9] = size & 0xFF;
2737 case HPSA_CACHE_FLUSH:
2738 c->Request.CDBLen = 12;
2739 c->Request.Type.Attribute = ATTR_SIMPLE;
2740 c->Request.Type.Direction = XFER_WRITE;
2741 c->Request.Timeout = 0;
2742 c->Request.CDB[0] = BMIC_WRITE;
2743 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2745 case TEST_UNIT_READY:
2746 c->Request.CDBLen = 6;
2747 c->Request.Type.Attribute = ATTR_SIMPLE;
2748 c->Request.Type.Direction = XFER_NONE;
2749 c->Request.Timeout = 0;
2752 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2756 } else if (cmd_type == TYPE_MSG) {
2759 case HPSA_DEVICE_RESET_MSG:
2760 c->Request.CDBLen = 16;
2761 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2762 c->Request.Type.Attribute = ATTR_SIMPLE;
2763 c->Request.Type.Direction = XFER_NONE;
2764 c->Request.Timeout = 0; /* Don't time out */
2765 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2766 c->Request.CDB[1] = 0x03; /* Reset target above */
2767 /* If bytes 4-7 are zero, it means reset the */
2769 c->Request.CDB[4] = 0x00;
2770 c->Request.CDB[5] = 0x00;
2771 c->Request.CDB[6] = 0x00;
2772 c->Request.CDB[7] = 0x00;
2776 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2781 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2785 switch (c->Request.Type.Direction) {
2787 pci_dir = PCI_DMA_FROMDEVICE;
2790 pci_dir = PCI_DMA_TODEVICE;
2793 pci_dir = PCI_DMA_NONE;
2796 pci_dir = PCI_DMA_BIDIRECTIONAL;
2799 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2805 * Map (physical) PCI mem into (virtual) kernel space
2807 static void __iomem *remap_pci_mem(ulong base, ulong size)
2809 ulong page_base = ((ulong) base) & PAGE_MASK;
2810 ulong page_offs = ((ulong) base) - page_base;
2811 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2813 return page_remapped ? (page_remapped + page_offs) : NULL;
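/* Example (addresses illustrative): for base = 0xf7901250 and 4k pages,
 * page_base is 0xf7901000 and page_offs is 0x250; the whole page range
 * is ioremap()ed and the returned pointer is advanced by 0x250, so the
 * caller sees a mapping of exactly the address it asked for.
 */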
2816 /* Takes cmds off the submission queue and sends them to the hardware,
2817 * then puts them on the queue of cmds waiting for completion.
2819 static void start_io(struct ctlr_info *h)
2821 struct CommandList *c;
2823 while (!hlist_empty(&h->reqQ)) {
2824 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2825 /* can't do anything if fifo is full */
2826 if ((h->access.fifo_full(h))) {
2827 dev_warn(&h->pdev->dev, "fifo full\n");
2831 /* Get the first entry from the Request Q */
2835 /* Tell the controller to execute the command */
2836 h->access.submit_command(h, c);
2838 /* Put job onto the completed Q */
2843 static inline unsigned long get_next_completion(struct ctlr_info *h)
2845 return h->access.command_completed(h);
2848 static inline bool interrupt_pending(struct ctlr_info *h)
2850 return h->access.intr_pending(h);
2853 static inline long interrupt_not_for_us(struct ctlr_info *h)
2855 return !(h->msi_vector || h->msix_vector) &&
2856 ((h->access.intr_pending(h) == 0) ||
2857 (h->interrupts_enabled == 0));
2860 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
2863 if (unlikely(tag_index >= h->nr_cmds)) {
2864 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2870 static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
2873 if (likely(c->cmd_type == CMD_SCSI))
2874 complete_scsi_command(c, 0, raw_tag);
2875 else if (c->cmd_type == CMD_IOCTL_PEND)
2876 complete(c->waiting);
2879 static inline u32 hpsa_tag_contains_index(u32 tag)
2881 #define DIRECT_LOOKUP_BIT 0x10
2882 return tag & DIRECT_LOOKUP_BIT;
2885 static inline u32 hpsa_tag_to_index(u32 tag)
2887 #define DIRECT_LOOKUP_SHIFT 5
2888 return tag >> DIRECT_LOOKUP_SHIFT;
2891 static inline u32 hpsa_tag_discard_error_bits(u32 tag)
2893 #define HPSA_ERROR_BITS 0x03
2894 return tag & ~HPSA_ERROR_BITS;
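/* Tag layout implied by the three helpers above: bits 0-1 carry error
 * status (HPSA_ERROR_BITS), bit 4 is the "direct lookup" flag
 * (DIRECT_LOOKUP_BIT), and the command-pool index begins at bit 5
 * (DIRECT_LOOKUP_SHIFT).  Illustration: a raw tag of 0x71 has the
 * direct-lookup bit set, error bits 0x1, and decodes to pool index 3.
 */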
2897 /* process completion of an indexed ("direct lookup") command */
2898 static inline u32 process_indexed_cmd(struct ctlr_info *h,
2902 struct CommandList *c;
2904 tag_index = hpsa_tag_to_index(raw_tag);
2905 if (bad_tag(h, tag_index, raw_tag))
2906 return next_command(h);
2907 c = h->cmd_pool + tag_index;
2908 finish_cmd(c, raw_tag);
2909 return next_command(h);
2912 /* process completion of a non-indexed command */
2913 static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
2917 struct CommandList *c = NULL;
2918 struct hlist_node *tmp;
2920 tag = hpsa_tag_discard_error_bits(raw_tag);
2921 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2922 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
2923 finish_cmd(c, raw_tag);
2924 return next_command(h);
2927 bad_tag(h, h->nr_cmds + 1, raw_tag);
2928 return next_command(h);
2931 static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2933 struct ctlr_info *h = dev_id;
2934 unsigned long flags;
2937 if (interrupt_not_for_us(h))
2939 spin_lock_irqsave(&h->lock, flags);
2940 raw_tag = get_next_completion(h);
2941 while (raw_tag != FIFO_EMPTY) {
2942 if (hpsa_tag_contains_index(raw_tag))
2943 raw_tag = process_indexed_cmd(h, raw_tag);
2945 raw_tag = process_nonindexed_cmd(h, raw_tag);
2947 spin_unlock_irqrestore(&h->lock, flags);
2951 /* Send a message CDB to the firmware. */
2952 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2956 struct CommandListHeader CommandHeader;
2957 struct RequestBlock Request;
2958 struct ErrDescriptor ErrorDescriptor;
2960 struct Command *cmd;
2961 static const size_t cmd_sz = sizeof(*cmd) +
2962 sizeof(cmd->ErrorDescriptor);
2964 uint32_t paddr32, tag;
2965 void __iomem *vaddr;
2968 vaddr = pci_ioremap_bar(pdev, 0);
2972 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
2973 * CCISS commands, so they must be allocated from the lower 4GiB of
2976 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2982 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
2988 /* This must fit, because of the 32-bit consistent DMA mask. Also,
2989 * although there's no guarantee, we assume that the address is at
2990 * least 4-byte aligned (most likely, it's page-aligned).
2994 cmd->CommandHeader.ReplyQueue = 0;
2995 cmd->CommandHeader.SGList = 0;
2996 cmd->CommandHeader.SGTotal = 0;
2997 cmd->CommandHeader.Tag.lower = paddr32;
2998 cmd->CommandHeader.Tag.upper = 0;
2999 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3001 cmd->Request.CDBLen = 16;
3002 cmd->Request.Type.Type = TYPE_MSG;
3003 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3004 cmd->Request.Type.Direction = XFER_NONE;
3005 cmd->Request.Timeout = 0; /* Don't time out */
3006 cmd->Request.CDB[0] = opcode;
3007 cmd->Request.CDB[1] = type;
3008 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3009 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3010 cmd->ErrorDescriptor.Addr.upper = 0;
3011 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3013 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3015 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3016 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
3017 if (hpsa_tag_discard_error_bits(tag) == paddr32)
3019 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3024 /* we leak the DMA buffer here ... no choice since the controller could
3025 * still complete the command.
3027 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3028 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3033 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3035 if (tag & HPSA_ERROR_BIT) {
3036 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3041 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3046 #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
3047 #define hpsa_noop(p) hpsa_message(p, 3, 0)
3049 static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
3051 /* the #defines are stolen from drivers/pci/msi.h. */
3052 #define msi_control_reg(base) (base + PCI_MSI_FLAGS)
3053 #define PCI_MSIX_FLAGS_ENABLE (1 << 15)
3058 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
3060 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3061 if (control & PCI_MSI_FLAGS_ENABLE) {
3062 dev_info(&pdev->dev, "resetting MSI\n");
3063 pci_write_config_word(pdev, msi_control_reg(pos),
3064 control & ~PCI_MSI_FLAGS_ENABLE);
3068 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3070 pci_read_config_word(pdev, msi_control_reg(pos), &control);
3071 if (control & PCI_MSIX_FLAGS_ENABLE) {
3072 dev_info(&pdev->dev, "resetting MSI-X\n");
3073 pci_write_config_word(pdev, msi_control_reg(pos),
3074 control & ~PCI_MSIX_FLAGS_ENABLE);
3081 /* This does a hard reset of the controller using PCI power management
3084 static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
3086 u16 pmcsr, saved_config_space[32];
3089 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3091 /* This is very nearly the same thing as
3093 * pci_save_state(pci_dev);
3094 * pci_set_power_state(pci_dev, PCI_D3hot);
3095 * pci_set_power_state(pci_dev, PCI_D0);
3096 * pci_restore_state(pci_dev);
3098 * but we can't use these nice canned kernel routines on
3099 * kexec, because they also check the MSI/MSI-X state in PCI
3100 * configuration space and do the wrong thing when it is
3101 * set/cleared. Also, the pci_save/restore_state functions
3102 * violate the ordering requirements for restoring the
3103 * configuration space from the CCISS document (see the
3104 * comment below). So we roll our own ....
3107 for (i = 0; i < 32; i++)
3108 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
3110 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3113 "hpsa_reset_controller: PCI PM not supported\n");
3117 /* Quoting from the Open CISS Specification: "The Power
3118 * Management Control/Status Register (CSR) controls the power
3119 * state of the device. The normal operating state is D0,
3120 * CSR=00h. The software off state is D3, CSR=03h. To reset
3121 * the controller, place the interface device in D3 then to
3122 * D0, this causes a secondary PCI reset which will reset the
3126 /* enter the D3hot power management state */
3127 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3128 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3130 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3134 /* enter the D0 power management state */
3135 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3137 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3141 /* Restore the PCI configuration space. The Open CISS
3142 * Specification says, "Restore the PCI Configuration
3143 * Registers, offsets 00h through 60h. It is important to
3144 * restore the command register, 16-bits at offset 04h,
3145 * last. Do not restore the configuration status register,
3146 * 16-bits at offset 06h." Note that the offset is 2*i.
3148 for (i = 0; i < 32; i++) {
3149 if (i == 2 || i == 3)
3151 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
3154 pci_write_config_word(pdev, 4, saved_config_space[2]);
3160 * We cannot read the structure directly, for portability we must use
3162 * This is for debug only.
3164 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
3170 dev_info(dev, "Controller Configuration information\n");
3171 dev_info(dev, "------------------------------------\n");
3172 for (i = 0; i < 4; i++)
3173 temp_name[i] = readb(&(tb->Signature[i]));
3174 temp_name[4] = '\0';
3175 dev_info(dev, " Signature = %s\n", temp_name);
3176 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3177 dev_info(dev, " Transport methods supported = 0x%x\n",
3178 readl(&(tb->TransportSupport)));
3179 dev_info(dev, " Transport methods active = 0x%x\n",
3180 readl(&(tb->TransportActive)));
3181 dev_info(dev, " Requested transport Method = 0x%x\n",
3182 readl(&(tb->HostWrite.TransportRequest)));
3183 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3184 readl(&(tb->HostWrite.CoalIntDelay)));
3185 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3186 readl(&(tb->HostWrite.CoalIntCount)));
3187 dev_info(dev, " Max outstanding commands = %d\n",
3188 readl(&(tb->CmdsOutMax)));
3189 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3190 for (i = 0; i < 16; i++)
3191 temp_name[i] = readb(&(tb->ServerName[i]));
3192 temp_name[16] = '\0';
3193 dev_info(dev, " Server Name = %s\n", temp_name);
3194 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3195 readl(&(tb->HeartBeat)));
3196 #endif /* HPSA_DEBUG */
3199 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3201 int i, offset, mem_type, bar_type;
3203 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3206 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3207 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3208 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3211 mem_type = pci_resource_flags(pdev, i) &
3212 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3214 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3215 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3216 offset += 4; /* 32 bit */
3218 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3221 default: /* reserved in PCI 2.2 */
3222 dev_warn(&pdev->dev,
3223 "base address is invalid\n");
3228 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3234 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
3235 * controllers that are capable. If not, we use IO-APIC mode.
3238 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
3240 #ifdef CONFIG_PCI_MSI
3242 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3246 /* Some boards advertise MSI but don't really support it */
3247 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3248 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
3249 goto default_int_mode;
3250 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3251 dev_info(&h->pdev->dev, "MSIX\n");
3252 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
3254 h->intr[0] = hpsa_msix_entries[0].vector;
3255 h->intr[1] = hpsa_msix_entries[1].vector;
3256 h->intr[2] = hpsa_msix_entries[2].vector;
3257 h->intr[3] = hpsa_msix_entries[3].vector;
3262 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
3263 "available\n", err);
3264 goto default_int_mode;
3266 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
3268 goto default_int_mode;
3271 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3272 dev_info(&h->pdev->dev, "MSI\n");
3273 if (!pci_enable_msi(h->pdev))
3276 dev_warn(&h->pdev->dev, "MSI init failed\n");
3279 #endif /* CONFIG_PCI_MSI */
3280 /* if we get here we're going to use the default interrupt mode */
3281 h->intr[PERF_MODE_INT] = h->pdev->irq;
3284 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3287 u32 subsystem_vendor_id, subsystem_device_id;
3289 subsystem_vendor_id = pdev->subsystem_vendor;
3290 subsystem_device_id = pdev->subsystem_device;
3291 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3292 subsystem_vendor_id;
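/* The board id is thus the PCI subsystem device id in the high 16 bits
 * and the subsystem vendor id in the low 16 bits; e.g. the P600's
 * 0x3225103C (see hpsa_p600_dma_prefetch_quirk() below) is subsystem
 * device 0x3225, subsystem vendor 0x103C.
 */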
3294 for (i = 0; i < ARRAY_SIZE(products); i++)
3295 if (*board_id == products[i].board_id)
3298 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3299 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3301 dev_warn(&pdev->dev, "unrecognized board ID: "
3302 "0x%08x, ignoring.\n", *board_id);
3305 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3308 static inline bool hpsa_board_disabled(struct pci_dev *pdev)
3312 (void) pci_read_config_word(pdev, PCI_COMMAND, &command);
3313 return ((command & PCI_COMMAND_MEMORY) == 0);
3316 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3317 unsigned long *memory_bar)
3321 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
3322 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3323 /* addressing mode bits already removed */
3324 *memory_bar = pci_resource_start(pdev, i);
3325 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3329 dev_warn(&pdev->dev, "no memory BAR found\n");
3333 static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
3338 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3339 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3340 if (scratchpad == HPSA_FIRMWARE_READY)
3342 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3344 dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
3348 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3349 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3352 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3353 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3354 *cfg_base_addr &= (u32) 0x0000ffff;
3355 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3356 if (*cfg_base_addr_index == -1) {
3357 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3363 static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
3367 u64 cfg_base_addr_index;
3371 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3372 &cfg_base_addr_index, &cfg_offset);
3375 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
3376 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
3379 /* Find performant mode table. */
3380 trans_offset = readl(&h->cfgtable->TransMethodOffset);
3381 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3382 cfg_base_addr_index)+cfg_offset+trans_offset,
3383 sizeof(*h->transtable));
3389 /* Interrogate the hardware for some limits:
3390 * max commands, max SG elements without chaining, and with chaining,
3391 * SG chain block size, etc.
3393 static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3395 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3396 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3397 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3399 * Limit in-command s/g elements to 32 to save dma'able memory.
3400 * However, the spec says if 0, use 31
3402 h->max_cmd_sg_entries = 31;
3403 if (h->maxsgentries > 512) {
3404 h->max_cmd_sg_entries = 32;
3405 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3406 h->maxsgentries--; /* save one for chain pointer */
3408 h->maxsgentries = 31; /* default to traditional values */
3413 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3415 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3416 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3417 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3418 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3419 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3425 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3426 static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3431 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3433 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3437 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
3438 * in a prefetch beyond physical memory.
3440 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3444 if (h->board_id != 0x3225103C)
3446 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3447 dma_prefetch |= 0x8000;
3448 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3451 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
3455 /* under certain very rare conditions, this can take a while.
3456 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3457 * as we enter this code.)
3459 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3460 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3462 /* delay and try again */
3467 static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3471 trans_support = readl(&(h->cfgtable->TransportSupport));
3472 if (!(trans_support & SIMPLE_MODE))
3475 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3476 /* Update the field, and then ring the doorbell */
3477 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3478 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3479 hpsa_wait_for_mode_change_ack(h);
3480 print_cfg_table(&h->pdev->dev, h->cfgtable);
3481 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3482 dev_warn(&h->pdev->dev,
3483 "unable to get board into simple mode\n");
3489 static int __devinit hpsa_pci_init(struct ctlr_info *h)
3491 int prod_index, err;
3493 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3496 h->product_name = products[prod_index].product_name;
3497 h->access = *(products[prod_index].access);
3499 if (hpsa_board_disabled(h->pdev)) {
3500 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
3503 err = pci_enable_device(h->pdev);
3505 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
3509 err = pci_request_regions(h->pdev, "hpsa");
3511 dev_err(&h->pdev->dev,
3512 "cannot obtain PCI resources, aborting\n");
3515 hpsa_interrupt_mode(h);
3516 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3518 goto err_out_free_res;
3519 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3522 goto err_out_free_res;
3524 err = hpsa_wait_for_board_ready(h);
3526 goto err_out_free_res;
3527 err = hpsa_find_cfgtables(h);
3529 goto err_out_free_res;
3530 hpsa_find_board_params(h);
3532 if (!hpsa_CISS_signature_present(h)) {
3534 goto err_out_free_res;
3536 hpsa_enable_scsi_prefetch(h);
3537 hpsa_p600_dma_prefetch_quirk(h);
3538 err = hpsa_enter_simple_mode(h);
3540 goto err_out_free_res;
3545 iounmap(h->transtable);
3547 iounmap(h->cfgtable);
3551 * Deliberately omit pci_disable_device(): it does something nasty to
3552 * Smart Array controllers that pci_enable_device does not undo
3554 pci_release_regions(h->pdev);
3558 static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3562 #define HBA_INQUIRY_BYTE_COUNT 64
3563 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3564 if (!h->hba_inquiry_data)
3566 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3567 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3569 kfree(h->hba_inquiry_data);
3570 h->hba_inquiry_data = NULL;
3574 static int __devinit hpsa_init_one(struct pci_dev *pdev,
3575 const struct pci_device_id *ent)
3579 struct ctlr_info *h;
3581 if (number_of_controllers == 0)
3582 printk(KERN_INFO DRIVER_NAME "\n");
3583 if (reset_devices) {
3584 /* Reset the controller with a PCI power-cycle */
3585 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3588 /* Some devices (notably the HP Smart Array 5i Controller)
3589 need a little pause here */
3590 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3592 /* Now try to get the controller to respond to a no-op */
3593 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3594 if (hpsa_noop(pdev) == 0)
3597 dev_warn(&pdev->dev, "no-op failed%s\n",
3598 (i < 11 ? "; re-trying" : ""));
3602 /* Command structures must be aligned on a 32-byte boundary because
3603 * the 5 lower bits of the address are used by the hardware and by
3604 * the driver.  See comments in hpsa.h for more info.
3606 #define COMMANDLIST_ALIGNMENT 32
3607 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
3608 h = kzalloc(sizeof(*h), GFP_KERNEL);
3613 h->busy_initializing = 1;
3614 INIT_HLIST_HEAD(&h->cmpQ);
3615 INIT_HLIST_HEAD(&h->reqQ);
3616 rc = hpsa_pci_init(h);
3620 sprintf(h->devname, "hpsa%d", number_of_controllers);
3621 h->ctlr = number_of_controllers;
3622 number_of_controllers++;
3624 /* configure PCI DMA stuff */
3625 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3629 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3633 dev_err(&pdev->dev, "no suitable DMA available\n");
3638 /* make sure the board interrupts are off */
3639 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3640 rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
3641 IRQF_DISABLED, h->devname, h);
3643 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3644 h->intr[PERF_MODE_INT], h->devname);
3648 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
3649 h->devname, pdev->device,
3650 h->intr[PERF_MODE_INT], dac ? "" : " not");
3653 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3654 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3655 h->cmd_pool = pci_alloc_consistent(h->pdev,
3656 h->nr_cmds * sizeof(*h->cmd_pool),
3657 &(h->cmd_pool_dhandle));
3658 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3659 h->nr_cmds * sizeof(*h->errinfo_pool),
3660 &(h->errinfo_pool_dhandle));
3661 if ((h->cmd_pool_bits == NULL)
3662 || (h->cmd_pool == NULL)
3663 || (h->errinfo_pool == NULL)) {
3664 dev_err(&pdev->dev, "out of memory");
3668 if (hpsa_allocate_sg_chain_blocks(h))
3670 spin_lock_init(&h->lock);
3671 spin_lock_init(&h->scan_lock);
3672 init_waitqueue_head(&h->scan_wait_queue);
3673 h->scan_finished = 1; /* no scan currently in progress */
3675 pci_set_drvdata(pdev, h);
3676 memset(h->cmd_pool_bits, 0,
3677 ((h->nr_cmds + BITS_PER_LONG -
3678 1) / BITS_PER_LONG) * sizeof(unsigned long));
3682 /* Turn the interrupts on so we can service requests */
3683 h->access.set_intr_mask(h, HPSA_INTR_ON);
3685 hpsa_put_ctlr_into_performant_mode(h);
3686 hpsa_hba_inquiry(h);
3687 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3688 h->busy_initializing = 0;
3692 hpsa_free_sg_chain_blocks(h);
3693 kfree(h->cmd_pool_bits);
3695 pci_free_consistent(h->pdev,
3696 h->nr_cmds * sizeof(struct CommandList),
3697 h->cmd_pool, h->cmd_pool_dhandle);
3698 if (h->errinfo_pool)
3699 pci_free_consistent(h->pdev,
3700 h->nr_cmds * sizeof(struct ErrorInfo),
3702 h->errinfo_pool_dhandle);
3703 free_irq(h->intr[PERF_MODE_INT], h);
3706 h->busy_initializing = 0;
3711 static void hpsa_flush_cache(struct ctlr_info *h)
3714 struct CommandList *c;
3716 flush_buf = kzalloc(4, GFP_KERNEL);
3720 c = cmd_special_alloc(h);
3722 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3725 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3726 RAID_CTLR_LUNID, TYPE_CMD);
3727 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3728 if (c->err_info->CommandStatus != 0)
3729 dev_warn(&h->pdev->dev,
3730 "error flushing cache on controller\n");
3731 cmd_special_free(h, c);
3736 static void hpsa_shutdown(struct pci_dev *pdev)
3738 struct ctlr_info *h;
3740 h = pci_get_drvdata(pdev);
3741 /* Send the flush cache command and then turn board interrupts off,
3742 * so that all data in the battery-backed write cache is committed
3743 * to disk before the controller is shut down.
3745 hpsa_flush_cache(h);
3746 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3747 free_irq(h->intr[PERF_MODE_INT], h);
3748 #ifdef CONFIG_PCI_MSI
3750 pci_disable_msix(h->pdev);
3751 else if (h->msi_vector)
3752 pci_disable_msi(h->pdev);
3753 #endif /* CONFIG_PCI_MSI */
3756 static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3758 struct ctlr_info *h;
3760 if (pci_get_drvdata(pdev) == NULL) {
3761 dev_err(&pdev->dev, "unable to remove device\n");
3764 h = pci_get_drvdata(pdev);
3765 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3766 hpsa_shutdown(pdev);
3768 iounmap(h->transtable);
3769 iounmap(h->cfgtable);
3770 hpsa_free_sg_chain_blocks(h);
3771 pci_free_consistent(h->pdev,
3772 h->nr_cmds * sizeof(struct CommandList),
3773 h->cmd_pool, h->cmd_pool_dhandle);
3774 pci_free_consistent(h->pdev,
3775 h->nr_cmds * sizeof(struct ErrorInfo),
3776 h->errinfo_pool, h->errinfo_pool_dhandle);
3777 pci_free_consistent(h->pdev, h->reply_pool_size,
3778 h->reply_pool, h->reply_pool_dhandle);
3779 kfree(h->cmd_pool_bits);
3780 kfree(h->blockFetchTable);
3781 kfree(h->hba_inquiry_data);
3783 * Deliberately omit pci_disable_device(): it does something nasty to
3784 * Smart Array controllers that pci_enable_device does not undo
3786 pci_release_regions(pdev);
3787 pci_set_drvdata(pdev, NULL);
3791 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3792 __attribute__((unused)) pm_message_t state)
3797 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3802 static struct pci_driver hpsa_pci_driver = {
3804 .probe = hpsa_init_one,
3805 .remove = __devexit_p(hpsa_remove_one),
3806 .id_table = hpsa_pci_device_id, /* id_table */
3807 .shutdown = hpsa_shutdown,
3808 .suspend = hpsa_suspend,
3809 .resume = hpsa_resume,
3812 /* Fill in bucket_map[], given nsgs (the max number of
3813 * scatter gather elements supported) and bucket[],
3814 * which is an array of 8 integers. The bucket[] array
3815 * contains 8 different DMA transfer sizes (in 16
3816 * byte increments) which the controller uses to fetch
3817 * commands. This function fills in bucket_map[], which
3818 * maps a given number of scatter gather elements to one of
3819 * the 8 DMA transfer sizes. The point of it is to allow the
3820 * controller to only do as much DMA as needed to fetch the
3821 * command, with the DMA transfer size encoded in the lower
3822 * bits of the command address.
3824 static void calc_bucket_map(int bucket[], int num_buckets,
3825 int nsgs, int *bucket_map)
3829 /* even a command with 0 SGs requires 4 blocks */
3830 #define MINIMUM_TRANSFER_BLOCKS 4
3831 #define NUM_BUCKETS 8
3832 /* Note, bucket_map must have nsgs+1 entries. */
3833 for (i = 0; i <= nsgs; i++) {
3834 /* Compute size of a command with i SG entries */
3835 size = i + MINIMUM_TRANSFER_BLOCKS;
3836 b = num_buckets; /* Assume the biggest bucket */
3837 /* Find the bucket that is just big enough */
3838 for (j = 0; j < 8; j++) {
3839 if (bucket[j] >= size) {
3844 /* for a command with i SG entries, use bucket b. */
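/* Example with the bft[] values hpsa_enter_performant_mode() passes in
 * below (5, 6, 8, 10, 12, 20, 28, max_sg_entries + 4 = 36) and nsgs = 32:
 * a command with i = 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7
 * sixteen-byte blocks, so the first bucket that fits is bucket[2] = 8,
 * and bucket_map[3] records that bucket's index.
 */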
3849 static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
3852 unsigned long register_value;
3854 /* This is a bit complicated.  There are 8 registers on
3855 * the controller which we write to, to tell it the 8 different
3856 * sizes of commands that it may see.  It's a way of
3857 * reducing the DMA done to fetch each command. Encoded into
3858 * each command's tag are 3 bits which communicate to the controller
3859 * which of the eight sizes that command fits within. The size of
3860 * each command depends on how many scatter gather entries there are.
3861 * Each SG entry requires 16 bytes. The eight registers are programmed
3862 * with the number of 16-byte blocks a command of that size requires.
3863 * The smallest command possible requires 5 such 16-byte blocks;
3864 * the largest command possible requires MAXSGENTRIES + 4 16-byte
3865 * blocks. Note, this only extends to the SG entries contained
3866 * within the command block, and does not extend to chained blocks
3867 * of SG elements. bft[] contains the eight values we write to
3868 * the registers. They are not evenly distributed, but have more
3869 * sizes for small commands, and fewer sizes for larger commands.
3871 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
3872 BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
3873 /* 5 = 1 s/g entry or 4k
3874 * 6 = 2 s/g entry or 8k
3875 * 8 = 4 s/g entry or 16k
3876 * 10 = 6 s/g entry or 24k
3879 h->reply_pool_wraparound = 1; /* spec: init to 1 */
3881 /* Controller spec: zero out this buffer. */
3882 memset(h->reply_pool, 0, h->reply_pool_size);
3883 h->reply_pool_head = h->reply_pool;
3885 bft[7] = h->max_sg_entries + 4;
3886 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
3887 for (i = 0; i < 8; i++)
3888 writel(bft[i], &h->transtable->BlockFetch[i]);
3890 /* size of controller ring buffer */
3891 writel(h->max_commands, &h->transtable->RepQSize);
3892 writel(1, &h->transtable->RepQCount);
3893 writel(0, &h->transtable->RepQCtrAddrLow32);
3894 writel(0, &h->transtable->RepQCtrAddrHigh32);
3895 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
3896 writel(0, &h->transtable->RepQAddr0High32);
3897 writel(CFGTBL_Trans_Performant,
3898 &(h->cfgtable->HostWrite.TransportRequest));
3899 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3900 hpsa_wait_for_mode_change_ack(h);
3901 register_value = readl(&(h->cfgtable->TransportActive));
3902 if (!(register_value & CFGTBL_Trans_Performant)) {
3903 dev_warn(&h->pdev->dev, "unable to get board into"
3904 " performant mode\n");
3909 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
3913 trans_support = readl(&(h->cfgtable->TransportSupport));
3914 if (!(trans_support & PERFORMANT_MODE))
3917 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3918 h->max_sg_entries = 32;
3919 /* Performant mode ring buffer and supporting data structures */
3920 h->reply_pool_size = h->max_commands * sizeof(u64);
3921 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
3922 &(h->reply_pool_dhandle));
3924 /* Need a block fetch table for performant mode */
3925 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
3926 sizeof(u32)), GFP_KERNEL);
3928 if ((h->reply_pool == NULL)
3929 || (h->blockFetchTable == NULL))
3932 hpsa_enter_performant_mode(h);
3934 /* Change the access methods to the performant access methods */
3935 h->access = SA5_performant_access;
3936 h->transMethod = CFGTBL_Trans_Performant;
3942 pci_free_consistent(h->pdev, h->reply_pool_size,
3943 h->reply_pool, h->reply_pool_dhandle);
3944 kfree(h->blockFetchTable);
3948 * This is it.  Register the PCI driver information for the cards we control;
3949 * the OS will call our registered routines when it finds one of our cards.
3951 static int __init hpsa_init(void)
3953 return pci_register_driver(&hpsa_pci_driver);
3956 static void __exit hpsa_cleanup(void)
3958 pci_unregister_driver(&hpsa_pci_driver);
3961 module_init(hpsa_init);
3962 module_exit(hpsa_cleanup);