2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
57 #include <linux/config.h>
59 #include <linux/init.h>
60 #include <linux/types.h>
61 #include <linux/errno.h>
62 #include <linux/kernel.h>
63 #include <linux/ioport.h>
64 #include <linux/delay.h>
65 #include <linux/pci.h>
66 #include <linux/wait.h>
67 #include <linux/spinlock.h>
68 #include <linux/sched.h>
69 #include <linux/interrupt.h>
70 #include <linux/blkdev.h>
71 #include <linux/firmware.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
76 #include <asm/processor.h>
77 #include <scsi/scsi.h>
78 #include <scsi/scsi_host.h>
79 #include <scsi/scsi_tcq.h>
80 #include <scsi/scsi_eh.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_request.h>
/* List of all adapters managed by this driver; guarded by ipr_driver_lock */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
/* Backing variables for the module parameters declared further below */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* error-log verbosity */
static unsigned int ipr_max_speed = 1;		/* index into ipr_max_bus_speeds[] */
static int ipr_testmode = 0;			/* allow unsupported configs (dangerous) */
static unsigned int ipr_fastfail = 0;		/* reduce timeouts and retries */
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;	/* seconds to wait for IOA operational */
static unsigned int ipr_enable_cache = 1;	/* enable adapter non-volatile write cache */
static unsigned int ipr_debug = 0;		/* extra debug logging */
/* Serializes access to driver-global state such as ipr_ioa_head */
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips.
 * Register values are byte offsets into the adapter's MMIO space;
 * cache_line_size is the value programmed into PCI config space. */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.cache_line_size = 0x20,
		/* interrupt mask/sense/clear register offsets */
		.set_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_mask_reg = 0x00230,
		.sense_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_reg = 0x00228,
		.sense_interrupt_reg = 0x00224,
		.ioarrin_reg = 0x00404,		/* command-post (IOARRIN) register */
		.sense_uproc_interrupt_reg = 0x00214,
		.set_uproc_interrupt_reg = 0x00214,
		.clr_uproc_interrupt_reg = 0x00218
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00288,
		.clr_interrupt_mask_reg = 0x0028C,
		.sense_interrupt_mask_reg = 0x00288,
		.clr_interrupt_reg = 0x00284,
		.sense_interrupt_reg = 0x00280,
		.ioarrin_reg = 0x00504,
		.sense_uproc_interrupt_reg = 0x00290,
		.set_uproc_interrupt_reg = 0x00290,
		.clr_uproc_interrupt_reg = 0x00294
/* Maps PCI vendor/device IDs to the matching chip register layout above */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
/* SCSI bus rate limits indexed by the ipr_max_speed module parameter (0-2) */
static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
143 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
144 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
145 module_param_named(max_speed, ipr_max_speed, uint, 0);
146 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
147 module_param_named(log_level, ipr_log_level, uint, 0);
148 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
149 module_param_named(testmode, ipr_testmode, int, 0);
150 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
151 module_param_named(fastfail, ipr_fastfail, int, 0);
152 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
153 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
154 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
155 module_param_named(enable_cache, ipr_enable_cache, int, 0);
156 MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
157 module_param_named(debug, ipr_debug, int, 0);
158 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
159 MODULE_LICENSE("GPL");
160 MODULE_VERSION(IPR_DRIVER_VERSION);
/* Human-readable device end states for GPDD (general purpose device driver)
 * error reporting; indexed by the end-state code from the adapter. */
static const char *ipr_gpdd_dev_end_states[] = {
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Command not started"

/* Device bus phase strings for GPDD error reporting
 * (entries follow; not visible in this extract). */
static const char *ipr_gpdd_dev_bus_phases[] = {

/* A constant array of IOASCs/URCs/Error Messages */
/* Table mapping adapter IOASC codes to log-control flags and user-visible
 * error strings; entry 0 is the catch-all used for unknown IOASCs
 * (see ipr_get_error()).  Strings beginning with a 4-digit URC are
 * service reference codes. */
struct ipr_error_table_t ipr_error_table[] = {
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	"FFFE: Soft device bus error recovered by the IOA"},
	"FFF9: Device sector reassign successful"},
	"FFF7: Media error recovered by device rewrite procedures"},
	"7001: IOA sector reassignment successful"},
	"FFF9: Soft media error. Sector reassignment recommended"},
	"FFF7: Media error recovered by IOA rewrite procedures"},
	"FF3D: Soft PCI bus error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the device"},
	"FF3D: Soft IOA error recovered by the IOA"},
	"FFFA: Undefined device response recovered by the IOA"},
	"FFF6: Device bus error, message or command phase"},
	"FFF6: Failure prediction threshold exceeded"},
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	"Synchronization required"},
	/* NOTE(review): "No ready" below looks like a typo for "Not ready"
	 * (compare the following entry) — confirm against firmware URC text
	 * before changing a user-visible string. */
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	"FFF3: Disk media format bad"},
	"3002: Addressed device failed to respond to selection"},
	"3100: Device bus error"},
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	"9000: IOA reserved area data check"},
	"9001: IOA reserved area invalid data pattern"},
	"9002: IOA reserved area LRC error"},
	"102E: Out of alternate sectors for disk storage"},
	"FFF4: Data transfer underlength error"},
	"FFF4: Data transfer overlength error"},
	"3400: Logical unit failure"},
	"FFF4: Device microcode is corrupt"},
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	"FFF4: Disk device problem"},
	"8150: Permanent IOA failure"},
	"3010: Disk device returned wrong response to IOA"},
	"8151: IOA microcode error"},
	"Device bus status error"},
	"8157: IOA error requiring IOA reset to recover"},
	"Message reject received from the device"},
	"8008: A permanent cache battery pack failure occurred"},
	"9090: Disk unit has been modified after the last known status"},
	"9081: IOA detected device error"},
	"9082: IOA detected device error"},
	"3110: Device bus error, message or command phase"},
	"9091: Incorrect hardware configuration change has been detected"},
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"9031: Array protection temporarily suspended, protection resuming"},
	"9040: Array protection temporarily suspended, protection resuming"},
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	"FFFB: SCSI bus was reset by another initiator"},
	"3029: A device replacement has occurred"},
	"9051: IOA cache data exists for a missing or failed device"},
	"9025: Disk unit is not supported at its physical location"},
	"3020: IOA detected a SCSI bus configuration error"},
	"3150: SCSI bus configuration error"},
	"9041: Array protection temporarily suspended"},
	"9042: Corrupt array parity detected on specified device"},
	"9030: Array no longer protected due to missing or failed disk unit"},
	"9032: Array exposed but still protected"},
	"Failure due to other device"},
	"9008: IOA does not support functions expected by devices"},
	"9010: Cache data associated with attached devices cannot be found"},
	"9011: Cache data belongs to devices other than those attached"},
	"9020: Array missing 2 or more devices with only 1 device present"},
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	"9022: Exposed array is missing a required device"},
	"9023: Array member(s) not at required physical locations"},
	"9024: Array not functional due to present hardware configuration"},
	"9026: Array not functional due to present hardware configuration"},
	"9027: Array is missing a device and parity is out of sync"},
	"9028: Maximum number of arrays already exist"},
	"9050: Required cache data cannot be located for a disk unit"},
	"9052: Cache data exists for a device that has been modified"},
	"9054: IOA resources not available due to previous problems"},
	"9092: Disk unit requires initialization before use"},
	"9029: Incorrect hardware configuration change has been detected"},
	"9060: One or more disk pairs are missing from an array"},
	"9061: One or more disks are missing from an array"},
	"9062: One or more disks are missing from an array"},
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
/* Known SCSI enclosure services (SES) devices and the maximum bus speed
 * (in MB/s) each supports.  The middle field is a per-byte product-ID
 * compare mask; presumably 'X' positions must match the product ID and
 * '*' positions are wildcards — verify against the table-lookup code. */
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 * Function Prototypes
 *
 * Forward declarations for routines referenced before their definitions
 * (e.g. the HCAM done handlers and the reset state machine).
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace entry type (start/finish)
 * @add_data:	additional data
 *
 * Records the command's opcode, resource handle and a caller-supplied
 * datum into the adapter's in-memory trace ring.
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* NOTE(review): no wrap/mask on trace_index is visible here — confirm
	 * the index type or a mask bounds it within ioa_cfg->trace[]. */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;	/* timestamp in jiffies */
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
/* Tracing disabled: compile the hook away to a no-op */
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
441 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
442 * @ipr_cmd: ipr command struct
447 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
449 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
450 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
452 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
453 ioarcb->write_data_transfer_length = 0;
454 ioarcb->read_data_transfer_length = 0;
455 ioarcb->write_ioadl_len = 0;
456 ioarcb->read_ioadl_len = 0;
458 ioasa->residual_data_len = 0;
460 ipr_cmd->scsi_cmd = NULL;
461 ipr_cmd->sense_buffer[0] = 0;
462 ipr_cmd->dma_use_sg = 0;
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Performs a full (re)initialization: clears reusable state, then resets
 * the scratch area, sibling link, and per-command timer.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
481 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
482 * @ioa_cfg: ioa config struct
485 * pointer to ipr command struct
488 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
490 struct ipr_cmnd *ipr_cmd;
492 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
493 list_del(&ipr_cmd->queue);
494 ipr_init_ipr_cmnd(ipr_cmd);
500 * ipr_unmap_sglist - Unmap scatterlist if mapped
501 * @ioa_cfg: ioa config struct
502 * @ipr_cmd: ipr command struct
507 static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
508 struct ipr_cmnd *ipr_cmd)
510 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
512 if (ipr_cmd->dma_use_sg) {
513 if (scsi_cmd->use_sg > 0) {
514 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
516 scsi_cmd->sc_data_direction);
518 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
519 scsi_cmd->request_bufflen,
520 scsi_cmd->sc_data_direction);
526 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
527 * @ioa_cfg: ioa config struct
528 * @clr_ints: interrupts to clear
530 * This function masks all interrupts on the adapter, then clears the
531 * interrupts specified in the mask
536 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
539 volatile u32 int_reg;
541 /* Stop new interrupts */
542 ioa_cfg->allow_interrupts = 0;
544 /* Set interrupt mask to stop all new interrupts */
545 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
547 /* Clear any pending interrupts */
548 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
549 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
553 * ipr_save_pcix_cmd_reg - Save PCI-X command register
554 * @ioa_cfg: ioa config struct
557 * 0 on success / -EIO on failure
559 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
561 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
563 if (pcix_cmd_reg == 0) {
564 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
568 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
569 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
570 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
574 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
579 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
580 * @ioa_cfg: ioa config struct
583 * 0 on success / -EIO on failure
585 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
587 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
590 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
591 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
592 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
596 dev_err(&ioa_cfg->pdev->dev,
597 "Failed to setup PCI-X command register\n");
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 * It flags the command as failed, releases its DMA mapping, completes
 * it back to the mid-layer, and returns the block to the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);	/* report host-level error */

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.  Each pending command is
 * removed from the queue, stamped with an IOA-was-reset IOASC, and
 * completed via its done handler (redirected to ipr_scsi_eh_done for
 * mid-layer originated commands).
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_cmnd *ipr_cmd, *temp;

	/* safe iteration: done handlers re-queue the command blocks */
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);	/* cancel the command timeout */
		ipr_cmd->done(ipr_cmd);
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value (in jiffies from now)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	/* Arm the per-command timeout before posting to hardware */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Post the IOARCB address to the adapter to start the op.
	 * NOTE(review): a memory barrier (mb()) is expected between list/timer
	 * setup and this MMIO post — verify it was not lost from this copy. */
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
694 * ipr_internal_cmd_done - Op done function for an internally generated op.
695 * @ipr_cmd: ipr command struct
697 * This function is the op done function for an internally generated,
698 * blocking op. It simply wakes the sleeping thread.
703 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
705 if (ipr_cmd->sibling)
706 ipr_cmd->sibling = NULL;
708 complete(&ipr_cmd->completion);
712 * ipr_send_blocking_cmd - Send command and sleep on its completion.
713 * @ipr_cmd: ipr command struct
714 * @timeout_func: function to invoke if command times out
720 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
721 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
724 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
726 init_completion(&ipr_cmd->completion);
727 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
729 spin_unlock_irq(ioa_cfg->host->host_lock);
730 wait_for_completion(&ipr_cmd->completion);
731 spin_lock_irq(ioa_cfg->host->host_lock);
735 * ipr_send_hcam - Send an HCAM to the adapter.
736 * @ioa_cfg: ioa config struct
738 * @hostrcb: hostrcb struct
740 * This function will send a Host Controlled Async command to the adapter.
741 * If HCAMs are currently not allowed to be issued to the adapter, it will
742 * place the hostrcb on the free queue.
747 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
748 struct ipr_hostrcb *hostrcb)
750 struct ipr_cmnd *ipr_cmd;
751 struct ipr_ioarcb *ioarcb;
753 if (ioa_cfg->allow_cmds) {
754 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
755 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
756 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
758 ipr_cmd->u.hostrcb = hostrcb;
759 ioarcb = &ipr_cmd->ioarcb;
761 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
762 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
763 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
764 ioarcb->cmd_pkt.cdb[1] = type;
765 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
766 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
768 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
769 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
770 ipr_cmd->ioadl[0].flags_and_data_len =
771 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
772 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
774 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
775 ipr_cmd->done = ipr_process_ccn;
777 ipr_cmd->done = ipr_process_error;
779 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
782 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
783 ioa_cfg->regs.ioarrin_reg);
785 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Resets per-resource bookkeeping flags before the entry is (re)used for
 * a newly reported device.
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
	res->needs_sync_complete = 1;	/* issue sync-complete before next use */
	res->del_from_ml = 0;		/* not pending mid-layer removal */
	res->resetting_device = 0;
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Looks up the resource named in the CCN hostrcb; allocates a new resource
 * entry from the free list if it is not yet known (re-issuing the HCAM if
 * no entries remain), refreshes the config table entry, and schedules
 * mid-layer add/remove work as appropriate before re-arming the HCAM.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	/* Match on resource address against known resources */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {

	if (list_empty(&ioa_cfg->free_res_q)) {
		/* Out of resource entries: just re-arm the HCAM */
		ipr_send_hcam(ioa_cfg,
			      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,

	res = list_entry(ioa_cfg->free_res_q.next,
			 struct ipr_resource_entry, queue);

	list_del(&res->queue);
	ipr_init_res_entry(res);
	list_add_tail(&res->queue, &ioa_cfg->used_res_q);

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		/* Device removed: detach from any sdev and queue ML removal */
		res->sdev->hostdata = NULL;
		res->del_from_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		/* New device with no sdev yet: queue mid-layer add */
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);

	/* Re-arm the config change HCAM */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
867 * ipr_process_ccn - Op done function for a CCN.
868 * @ipr_cmd: ipr command struct
870 * This function is the op done function for a configuration
871 * change notification host controlled async from the adapter.
876 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
878 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
879 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
880 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
882 list_del(&hostrcb->queue);
883 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
886 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
887 dev_err(&ioa_cfg->pdev->dev,
888 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
890 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
892 ipr_handle_config_change(ioa_cfg, hostrcb);
897 * ipr_log_vpd - Log the passed VPD to the error log.
898 * @vpd: vendor/product id/sn struct
903 static void ipr_log_vpd(struct ipr_vpd *vpd)
905 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
906 + IPR_SERIAL_NUM_LEN];
908 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
909 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
911 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
912 ipr_err("Vendor/Product ID: %s\n", buffer);
914 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
915 buffer[IPR_SERIAL_NUM_LEN] = '\0';
916 ipr_err(" Serial Number: %s\n", buffer);
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Prints the current vs. expected cache/adapter pairing from a type-02
 * error hostrcb, plus the raw additional IOA data words.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	/* What is attached now */
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	/* What the cache directory says should be attached */
	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Walks the per-device entries of a type-03 error hostrcb and prints each
 * device's location, old/new VPD, and additional IOA data words.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	/* One logged entry per affected device */
	for (i = 0; i < errors_logged; i++, dev_entry++) {

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Prints the array's RAID level and location, then each populated array
 * member (skipping entries whose serial number is all '0' padding) with
 * its current and expected physical locations.
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	/* all-'0' serial number marks an unused member slot */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	/* 18 member slots; sparse slots are skipped by the zero-sn test.
	 * NOTE(review): confirm 18 matches the hostrcb layout constant. */
	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

	/* second bank of member entries */
	array_entry = error->array_member2;
1054 * ipr_log_generic_error - Log an adapter error.
1055 * @ioa_cfg: ioa config struct
1056 * @hostrcb: hostrcb struct
1061 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1062 struct ipr_hostrcb *hostrcb)
1065 int ioa_data_len = be32_to_cpu(hostrcb->hcam.length);
1067 if (ioa_data_len == 0)
1070 for (i = 0; i < ioa_data_len / 4; i += 4) {
1071 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1072 be32_to_cpu(hostrcb->hcam.u.raw.data[i]),
1073 be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]),
1074 be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]),
1075 be32_to_cpu(hostrcb->hcam.u.raw.data[i+3]));
1080 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
1083 * This function will return the index of into the ipr_error_table
1084 * for the specified IOASC. If the IOASC is not in the table,
1085 * 0 will be returned, which points to the entry used for unknown errors.
1088 * index into the ipr_error_table
1090 static u32 ipr_get_error(u32 ioasc)
1094 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1095 if (ipr_error_table[i].ioasc == ioasc)
1102 * ipr_handle_log_data - Log an adapter error.
1103 * @ioa_cfg: ioa config struct
1104 * @hostrcb: hostrcb struct
1106 * This function logs an adapter error to the system.
1111 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1112 struct ipr_hostrcb *hostrcb)
1117 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1120 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1121 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1123 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1125 if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1126 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1127 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1128 scsi_report_bus_reset(ioa_cfg->host,
1129 hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1132 error_index = ipr_get_error(ioasc);
1134 if (!ipr_error_table[error_index].log_hcam)
1137 if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1138 ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1139 "%s\n", ipr_error_table[error_index].error);
1141 dev_err(&ioa_cfg->pdev->dev, "%s\n",
1142 ipr_error_table[error_index].error);
1145 /* Set indication we have logged an error */
1146 ioa_cfg->errors_logged++;
1148 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1150 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1151 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1153 switch (hostrcb->hcam.overlay_id) {
1154 case IPR_HOST_RCB_OVERLAY_ID_2:
1155 ipr_log_cache_error(ioa_cfg, hostrcb);
1157 case IPR_HOST_RCB_OVERLAY_ID_3:
1158 ipr_log_config_error(ioa_cfg, hostrcb);
1160 case IPR_HOST_RCB_OVERLAY_ID_4:
1161 case IPR_HOST_RCB_OVERLAY_ID_6:
1162 ipr_log_array_error(ioa_cfg, hostrcb);
1164 case IPR_HOST_RCB_OVERLAY_ID_1:
1165 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1167 ipr_log_generic_error(ioa_cfg, hostrcb);
1173 * ipr_process_error - Op done function for an adapter error log.
1174 * @ipr_cmd: ipr command struct
1176 * This function is the op done function for an error log host
1177 * controlled async from the adapter. It will log the error and
1178 * send the HCAM back to the adapter.
1183 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1185 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1186 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1187 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1189 list_del(&hostrcb->queue);
1190 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1193 ipr_handle_log_data(ioa_cfg, hostrcb);
1194 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1195 dev_err(&ioa_cfg->pdev->dev,
1196 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1199 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.  Runs in timer context; takes the host lock itself.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	/* Capture adapter dump data if one was being waited for */
	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	/* Avoid re-entering reset unless this command drives the reset */
	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1235 * ipr_oper_timeout - Adapter timed out transitioning to operational
1236 * @ipr_cmd: ipr command struct
1238 * This function blocks host requests and initiates an
/* adapter reset when the IOA fails to become operational in time.
 * Return value: none */
1244 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1246 unsigned long lock_flags = 0;
1247 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1250 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1252 ioa_cfg->errors_logged++;
1253 dev_err(&ioa_cfg->pdev->dev,
1254 "Adapter timed out transitioning to operational.\n");
/* If a dump was pending, have the upcoming reset path collect it. */
1256 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1257 ioa_cfg->sdt_state = GET_DUMP;
1259 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* Burn the remaining retries so a hopeless adapter is given up quickly. */
1261 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1262 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1270 * ipr_reset_reload - Reset/Reload the IOA
1271 * @ioa_cfg: ioa config struct
1272 * @shutdown_type: shutdown type
1274 * This function resets the adapter and re-initializes it.
1275 * This function assumes that all new host commands have been stopped.
/* NOTE(review): called with the host lock held; the lock is dropped while
 * sleeping on reset_wait_q and re-acquired afterwards. */
1279 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1280 enum ipr_shutdown_type shutdown_type)
/* Kick off a reset unless one is already in progress, then wait for it. */
1282 if (!ioa_cfg->in_reset_reload)
1283 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1285 spin_unlock_irq(ioa_cfg->host->host_lock);
1286 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1287 spin_lock_irq(ioa_cfg->host->host_lock);
1289 /* If we got hit with a host reset while we were already resetting
1290 the adapter for some reason, and the reset failed. */
1291 if (ioa_cfg->ioa_is_dead) {
1300 * ipr_find_ses_entry - Find matching SES in SES table
1301 * @res: resource entry struct of SES
/* Return value: */
1304 * pointer to SES table entry / NULL on failure
1306 static const struct ipr_ses_table_entry *
1307 ipr_find_ses_entry(struct ipr_resource_entry *res)
1310 const struct ipr_ses_table_entry *ste = ipr_ses_table;
/* Compare the device's SCSI inquiry product ID against every table entry;
 * an 'X' in the compare mask changes how that byte is matched. */
1312 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1313 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1314 if (ste->compare_product_id_byte[j] == 'X') {
1315 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
/* Every byte of the product ID matched -- this is our entry. */
1323 if (matches == IPR_PROD_ID_LEN)
1331 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1332 * @ioa_cfg: ioa config struct
1334 * @bus_width: bus width
/* Return value: */
1337 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1338 * For a 2-byte wide SCSI bus, the maximum transfer speed is
1339 * twice the maximum transfer rate (e.g. for a wide enabled bus,
1340 * max 160MHz = max 320MB/sec).
1342 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1344 struct ipr_resource_entry *res;
1345 const struct ipr_ses_table_entry *ste;
/* Start from the theoretical maximum for this bus width ... */
1346 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1348 /* Loop through each config table entry in the config table buffer */
1349 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* ... and clamp it down per the speed limit of any SES on the same bus. */
1350 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1353 if (bus != res->cfgte.res_addr.bus)
1356 if (!(ste = ipr_find_ses_entry(res)))
1359 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1362 return max_xfer_rate;
1366 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1367 * @ioa_cfg: ioa config struct
1368 * @max_delay: max delay in micro-seconds to wait
1370 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
/* Return value: */
1373 * 0 on success / other on failure
1375 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1377 volatile u32 pcii_reg;
1380 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
1381 while (delay < max_delay) {
1382 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1384 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1387 /* udelay cannot be used if delay is more than a few milliseconds */
1388 if ((delay / 1000) > MAX_UDELAY_MS)
1389 mdelay(delay / 1000);
1399 * ipr_get_ldump_data_section - Dump IOA memory
1400 * @ioa_cfg: ioa config struct
1401 * @start_addr: adapter address to dump
1402 * @dest: destination kernel buffer
1403 * @length_in_words: length to dump in 4 byte words
/* Return value: */
1406 * 0 on success / -EIO on failure
/* NOTE(review): implements the LDUMP mailbox handshake -- each 32-bit word is
 * exchanged via the IOA mailbox, paced by IO-debug ack/clear interrupt bits. */
1408 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1410 __be32 *dest, u32 length_in_words)
1412 volatile u32 temp_pcii_reg;
1415 /* Write IOA interrupt reg starting LDUMP state */
1416 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1417 ioa_cfg->regs.set_uproc_interrupt_reg);
1419 /* Wait for IO debug acknowledge */
1420 if (ipr_wait_iodbg_ack(ioa_cfg,
1421 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1422 dev_err(&ioa_cfg->pdev->dev,
1423 "IOA dump long data transfer timeout\n");
1427 /* Signal LDUMP interlocked - clear IO debug ack */
1428 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1429 ioa_cfg->regs.clr_interrupt_reg);
1431 /* Write Mailbox with starting address */
1432 writel(start_addr, ioa_cfg->ioa_mailbox);
1434 /* Signal address valid - clear IOA Reset alert */
1435 writel(IPR_UPROCI_RESET_ALERT,
1436 ioa_cfg->regs.clr_uproc_interrupt_reg);
/* Transfer one 32-bit word per handshake round trip. */
1438 for (i = 0; i < length_in_words; i++) {
1439 /* Wait for IO debug acknowledge */
1440 if (ipr_wait_iodbg_ack(ioa_cfg,
1441 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1442 dev_err(&ioa_cfg->pdev->dev,
1443 "IOA dump short data transfer timeout\n");
1447 /* Read data from mailbox and increment destination pointer */
1448 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1451 /* For all but the last word of data, signal data received */
1452 if (i < (length_in_words - 1)) {
1453 /* Signal dump data received - Clear IO debug Ack */
1454 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1455 ioa_cfg->regs.clr_interrupt_reg);
1459 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1460 writel(IPR_UPROCI_RESET_ALERT,
1461 ioa_cfg->regs.set_uproc_interrupt_reg);
1463 writel(IPR_UPROCI_IO_DEBUG_ALERT,
1464 ioa_cfg->regs.clr_uproc_interrupt_reg);
1466 /* Signal dump data received - Clear IO debug Ack */
1467 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1468 ioa_cfg->regs.clr_interrupt_reg);
1470 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1471 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1473 readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1475 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1485 #ifdef CONFIG_SCSI_IPR_DUMP
1487 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1488 * @ioa_cfg: ioa config struct
1489 * @pci_address: adapter address
1490 * @length: length of data to copy
1492 * Copy data from PCI adapter to kernel buffer.
1493 * Note: length MUST be a 4 byte multiple
/* Return value: */
1495 * 0 on success / other on failure
/* NOTE(review): returns the number of bytes actually copied; the caller
 * compares that against the requested length. */
1497 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1498 unsigned long pci_address, u32 length)
1500 int bytes_copied = 0;
1501 int cur_len, rc, rem_len, rem_page_len;
1503 unsigned long lock_flags = 0;
1504 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Copy page-sized chunks until the request or the overall dump cap is met. */
1506 while (bytes_copied < length &&
1507 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
/* Allocate a fresh destination page when the current one is full (or none yet). */
1508 if (ioa_dump->page_offset >= PAGE_SIZE ||
1509 ioa_dump->page_offset == 0) {
1510 page = (__be32 *)__get_free_page(GFP_ATOMIC);
1514 return bytes_copied;
1517 ioa_dump->page_offset = 0;
1518 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1519 ioa_dump->next_page_index++;
1521 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Chunk size = what's left of the request vs. what's left of the page. */
1523 rem_len = length - bytes_copied;
1524 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1525 cur_len = min(rem_len, rem_page_len);
1527 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out early if the dump was aborted underneath us. */
1528 if (ioa_cfg->sdt_state == ABORT_DUMP) {
1531 rc = ipr_get_ldump_data_section(ioa_cfg,
1532 pci_address + bytes_copied,
1533 &page[ioa_dump->page_offset / 4],
1534 (cur_len / sizeof(u32)));
1536 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1539 ioa_dump->page_offset += cur_len;
1540 bytes_copied += cur_len;
1548 return bytes_copied;
1552 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1553 * @hdr: dump entry header struct
/* Return value: none */
1558 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
/* Fill in the fields common to every dump entry. */
1560 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1562 hdr->offset = sizeof(*hdr);
1563 hdr->status = IPR_DUMP_STATUS_SUCCESS;
1567 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1568 * @ioa_cfg: ioa config struct
1569 * @driver_dump: driver dump struct
/* Return value: none */
1574 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1575 struct ipr_driver_dump *driver_dump)
1577 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1579 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1580 driver_dump->ioa_type_entry.hdr.len =
1581 sizeof(struct ipr_dump_ioa_type_entry) -
1582 sizeof(struct ipr_dump_entry_header);
1583 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1584 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1585 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack the firmware version from inquiry page 3 VPD:
 * major | card type | minor[0] | minor[1], one byte each. */
1586 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1587 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1588 ucode_vpd->minor_release[1];
1589 driver_dump->hdr.num_entries++;
1593 * ipr_dump_version_data - Fill in the driver version in the dump.
1594 * @ioa_cfg: ioa config struct
1595 * @driver_dump: driver dump struct
/* Return value: none */
1600 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1601 struct ipr_driver_dump *driver_dump)
1603 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1604 driver_dump->version_entry.hdr.len =
1605 sizeof(struct ipr_dump_version_entry) -
1606 sizeof(struct ipr_dump_entry_header);
1607 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1608 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
/* Record the compile-time driver version string. */
1609 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1610 driver_dump->hdr.num_entries++;
1614 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1615 * @ioa_cfg: ioa config struct
1616 * @driver_dump: driver dump struct
/* Return value: none */
1621 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1622 struct ipr_driver_dump *driver_dump)
1624 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1625 driver_dump->trace_entry.hdr.len =
1626 sizeof(struct ipr_dump_trace_entry) -
1627 sizeof(struct ipr_dump_entry_header);
1628 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1629 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
/* Snapshot the in-memory driver trace buffer into the dump. */
1630 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1631 driver_dump->hdr.num_entries++;
1635 * ipr_dump_location_data - Fill in the IOA location in the dump.
1636 * @ioa_cfg: ioa config struct
1637 * @driver_dump: driver dump struct
/* Return value: none */
1642 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1643 struct ipr_driver_dump *driver_dump)
1645 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1646 driver_dump->location_entry.hdr.len =
1647 sizeof(struct ipr_dump_location_entry) -
1648 sizeof(struct ipr_dump_entry_header);
1649 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1650 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* Use the PCI device's bus id as the physical location string. */
1651 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1652 driver_dump->hdr.num_entries++;
1656 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1657 * @ioa_cfg: ioa config struct
1658 * @dump: dump struct
/* Return value: none.
 * Gathers driver-side entries (version, location, type, trace) plus the IOA
 * memory regions described by the adapter's Smart Dump Table (SDT). */
1663 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1665 unsigned long start_addr, sdt_word;
1666 unsigned long lock_flags = 0;
1667 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1668 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1669 u32 num_entries, start_off, end_off;
1670 u32 bytes_to_copy, bytes_copied, rc;
1671 struct ipr_sdt *sdt;
1676 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only proceed if a dump was actually requested. */
1678 if (ioa_cfg->sdt_state != GET_DUMP) {
1679 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* The mailbox holds the SDT start address; sanity-check its format. */
1683 start_addr = readl(ioa_cfg->ioa_mailbox);
1685 if (!ipr_sdt_is_fmt2(start_addr)) {
1686 dev_err(&ioa_cfg->pdev->dev,
1687 "Invalid dump table format: %lx\n", start_addr);
1688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1692 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1694 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1696 /* Initialize the overall dump header */
1697 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1698 driver_dump->hdr.num_entries = 1;
1699 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1700 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1701 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1702 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Driver-side entries; each helper bumps hdr.num_entries. */
1704 ipr_dump_version_data(ioa_cfg, driver_dump);
1705 ipr_dump_location_data(ioa_cfg, driver_dump);
1706 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1707 ipr_dump_trace_data(ioa_cfg, driver_dump);
1709 /* Update dump_header */
1710 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1712 /* IOA Dump entry */
1713 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1714 ioa_dump->format = IPR_SDT_FMT2;
1715 ioa_dump->hdr.len = 0;
1716 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1717 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1719 /* First entries in sdt are actually a list of dump addresses and
1720 lengths to gather the real dump data. sdt represents the pointer
1721 to the ioa generated dump table. Dump data will be extracted based
1722 on entries in this table */
1723 sdt = &ioa_dump->sdt;
1725 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1726 sizeof(struct ipr_sdt) / sizeof(__be32));
1728 /* Smart Dump table is ready to use and the first entry is valid */
1729 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1730 dev_err(&ioa_cfg->pdev->dev,
1731 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
1732 rc, be32_to_cpu(sdt->hdr.state));
1733 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1734 ioa_cfg->sdt_state = DUMP_OBTAINED;
1735 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1739 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Never walk past our fixed-size local SDT copy. */
1741 if (num_entries > IPR_NUM_SDT_ENTRIES)
1742 num_entries = IPR_NUM_SDT_ENTRIES;
1744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1746 for (i = 0; i < num_entries; i++) {
/* Stop (and mark the dump partial) once the size cap is exceeded. */
1747 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1748 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1752 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1753 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1754 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1755 end_off = be32_to_cpu(sdt->entry[i].end_offset);
1757 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1758 bytes_to_copy = end_off - start_off;
/* Skip implausibly large sections rather than blow the cap on one entry. */
1759 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1760 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1764 /* Copy data from adapter to driver buffers */
1765 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1768 ioa_dump->hdr.len += bytes_copied;
1770 if (bytes_copied != bytes_to_copy) {
1771 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1778 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1780 /* Update dump_header */
1781 driver_dump->hdr.len += ioa_dump->hdr.len;
1783 ioa_cfg->sdt_state = DUMP_OBTAINED;
1788 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
1792 * ipr_release_dump - Free adapter dump memory
1793 * @kref: kref struct
/* kref release callback: frees the dump pages and clears the ioa_cfg dump
 * state. Return value: none */
1798 static void ipr_release_dump(struct kref *kref)
1800 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
1801 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1802 unsigned long lock_flags = 0;
/* Detach the dump from the adapter under the host lock before freeing. */
1806 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1807 ioa_cfg->dump = NULL;
1808 ioa_cfg->sdt_state = INACTIVE;
1809 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1811 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1812 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1819 * ipr_worker_thread - Worker thread
1820 * @data: ioa config struct
1822 * Called at task level from a work thread. This function takes care
1823 * of adding and removing device from the mid-layer as configuration
1824 * changes are detected by the adapter.
/* Return value: none */
1829 static void ipr_worker_thread(void *data)
1831 unsigned long lock_flags;
1832 struct ipr_resource_entry *res;
1833 struct scsi_device *sdev;
1834 struct ipr_dump *dump;
1835 struct ipr_ioa_cfg *ioa_cfg = data;
1836 u8 bus, target, lun;
1840 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* A pending dump request takes priority over device add/remove work. */
1842 if (ioa_cfg->sdt_state == GET_DUMP) {
1843 dump = ioa_cfg->dump;
1845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference while dumping without the lock, then drop it. */
1848 kref_get(&dump->kref);
1849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1850 ipr_get_ioa_dump(ioa_cfg, dump);
1851 kref_put(&dump->kref, ipr_release_dump);
1853 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1854 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1855 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1856 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device add/remove is deferred until the adapter allows it. */
1863 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1864 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Remove from the mid-layer any resource flagged for deletion. The lock is
 * dropped around scsi_remove_device, which can sleep. */
1868 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1869 if (res->del_from_ml && res->sdev) {
1872 if (!scsi_device_get(sdev)) {
1874 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1875 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1876 scsi_remove_device(sdev);
1877 scsi_device_put(sdev);
1878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Register newly discovered resources with the mid-layer. */
1885 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1886 if (res->add_to_ml) {
1887 bus = res->cfgte.res_addr.bus;
1888 target = res->cfgte.res_addr.target;
1889 lun = res->cfgte.res_addr.lun;
1890 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1891 scsi_add_device(ioa_cfg->host, bus, target, lun);
1892 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Notify userspace that the host configuration changed. */
1898 kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1902 #ifdef CONFIG_SCSI_IPR_TRACE
1904 * ipr_read_trace - Dump the adapter trace
1905 * @kobj: kobject struct
1908 * @count: buffer size
/* Return value: */
1911 * number of bytes printed to buffer
1913 static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1914 loff_t off, size_t count)
1916 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
1917 struct Scsi_Host *shost = class_to_shost(cdev);
1918 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1919 unsigned long lock_flags = 0;
1920 int size = IPR_TRACE_SIZE;
1921 char *src = (char *)ioa_cfg->trace;
/* Clamp the read to the end of the trace buffer. */
1925 if (off + count > size) {
/* Copy under the host lock so the trace is not updated mid-read. */
1930 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1931 memcpy(buf, &src[off], count);
1932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the driver trace buffer. */
1936 static struct bin_attribute ipr_trace_attr = {
1942 .read = ipr_read_trace,
/* Maps each write-cache state to the keyword shown/accepted via sysfs. */
1946 static const struct {
1947 enum ipr_cache_state state;
1949 } cache_state [] = {
1950 { CACHE_NONE, "none" },
1951 { CACHE_DISABLED, "disabled" },
1952 { CACHE_ENABLED, "enabled" }
1956 * ipr_show_write_caching - Show the write caching attribute
1957 * @class_dev: class device struct
/* Return value: */
1961 * number of bytes printed to buffer
1963 static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
1965 struct Scsi_Host *shost = class_to_shost(class_dev);
1966 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1967 unsigned long lock_flags = 0;
/* Translate the current cache state to its sysfs keyword. */
1970 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1971 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
1972 if (cache_state[i].state == ioa_cfg->cache_state) {
1973 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
1977 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1983 * ipr_store_write_caching - Enable/disable adapter write cache
1984 * @class_dev: class_device struct
1986 * @count: buffer size
1988 * This function will enable/disable adapter write cache.
/* Return value: */
1991 * count on success / other on failure
1993 static ssize_t ipr_store_write_caching(struct class_device *class_dev,
1994 const char *buf, size_t count)
1996 struct Scsi_Host *shost = class_to_shost(class_dev);
1997 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1998 unsigned long lock_flags = 0;
1999 enum ipr_cache_state new_state = CACHE_INVALID;
/* Privileged operation; also meaningless on adapters with no write cache. */
2002 if (!capable(CAP_SYS_ADMIN))
2004 if (ioa_cfg->cache_state == CACHE_NONE)
/* Parse the requested state keyword against the cache_state table. */
2007 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2008 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2009 new_state = cache_state[i].state;
/* Only explicit enable/disable requests are acted upon. */
2014 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2017 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2018 if (ioa_cfg->cache_state == new_state) {
2019 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2023 ioa_cfg->cache_state = new_state;
2024 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2025 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
/* The new setting takes effect through an adapter reset; wait for it. */
2026 if (!ioa_cfg->in_reset_reload)
2027 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2029 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute: read/write control of the adapter write cache. */
2034 static struct class_device_attribute ipr_ioa_cache_attr = {
2036 .name = "write_cache",
2037 .mode = S_IRUGO | S_IWUSR,
2039 .show = ipr_show_write_caching,
2040 .store = ipr_store_write_caching
2044 * ipr_show_fw_version - Show the firmware version
2045 * @class_dev: class device struct
/* Return value: */
2049 * number of bytes printed to buffer
2051 static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2053 struct Scsi_Host *shost = class_to_shost(class_dev);
2054 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2055 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2056 unsigned long lock_flags = 0;
/* Format the four version bytes from inquiry page 3 VPD as hex. */
2059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2060 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2061 ucode_vpd->major_release, ucode_vpd->card_type,
2062 ucode_vpd->minor_release[0],
2063 ucode_vpd->minor_release[1]);
2064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute: read-only firmware version. */
2068 static struct class_device_attribute ipr_fw_version_attr = {
2070 .name = "fw_version",
2073 .show = ipr_show_fw_version,
2077 * ipr_show_log_level - Show the adapter's error logging level
2078 * @class_dev: class device struct
/* Return value: */
2082 * number of bytes printed to buffer
2084 static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2086 struct Scsi_Host *shost = class_to_shost(class_dev);
2087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2088 unsigned long lock_flags = 0;
2091 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2092 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2093 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2098 * ipr_store_log_level - Change the adapter's error logging level
2099 * @class_dev: class device struct
/* Return value: */
2103 * number of bytes printed to buffer
2105 static ssize_t ipr_store_log_level(struct class_device *class_dev,
2106 const char *buf, size_t count)
2108 struct Scsi_Host *shost = class_to_shost(class_dev);
2109 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2110 unsigned long lock_flags = 0;
/* Parse the new level as a base-10 integer; no range validation is done. */
2112 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2113 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute: read/write error logging level. */
2118 static struct class_device_attribute ipr_log_level_attr = {
2120 .name = "log_level",
2121 .mode = S_IRUGO | S_IWUSR,
2123 .show = ipr_show_log_level,
2124 .store = ipr_store_log_level
2128 * ipr_store_diagnostics - IOA Diagnostics interface
2129 * @class_dev: class_device struct
2131 * @count: buffer size
2133 * This function will reset the adapter and wait a reasonable
2134 * amount of time for any errors that the adapter might log.
/* Return value: */
2137 * count on success / other on failure
2139 static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2140 const char *buf, size_t count)
2142 struct Scsi_Host *shost = class_to_shost(class_dev);
2143 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2144 unsigned long lock_flags = 0;
2147 if (!capable(CAP_SYS_ADMIN))
/* Let any in-flight reset finish before starting the diagnostic reset. */
2150 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Clear the error counter so only errors from this run are counted. */
2152 ioa_cfg->errors_logged = 0;
2153 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2155 if (ioa_cfg->in_reset_reload) {
2156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2157 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2159 /* Wait for a second for any errors to be logged */
2162 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Fail the diagnostic if the adapter is still resetting or logged errors. */
2166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2167 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute: write-triggered adapter diagnostics. */
2174 static struct class_device_attribute ipr_diagnostics_attr = {
2176 .name = "run_diagnostics",
2179 .store = ipr_store_diagnostics
2183 * ipr_show_adapter_state - Show the adapter's state
2184 * @class_dev: class device struct
/* Return value: */
2188 * number of bytes printed to buffer
2190 static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2192 struct Scsi_Host *shost = class_to_shost(class_dev);
2193 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2194 unsigned long lock_flags = 0;
/* Report "offline" when the adapter has been declared dead, else "online". */
2197 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2198 if (ioa_cfg->ioa_is_dead)
2199 len = snprintf(buf, PAGE_SIZE, "offline\n");
2201 len = snprintf(buf, PAGE_SIZE, "online\n");
2202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2207 * ipr_store_adapter_state - Change adapter state
2208 * @class_dev: class_device struct
2210 * @count: buffer size
2212 * This function will change the adapter's state.
/* Return value: */
2215 * count on success / other on failure
2217 static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2218 const char *buf, size_t count)
2220 struct Scsi_Host *shost = class_to_shost(class_dev);
2221 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2222 unsigned long lock_flags;
2225 if (!capable(CAP_SYS_ADMIN))
/* Writing "online" to a dead adapter clears failure state and retries
 * bringup via a full reset. */
2228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2229 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2230 ioa_cfg->ioa_is_dead = 0;
2231 ioa_cfg->reset_retries = 0;
2232 ioa_cfg->in_ioa_bringdown = 0;
2233 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2235 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2236 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute: read/write adapter online/offline state. */
2241 static struct class_device_attribute ipr_ioa_state_attr = {
2244 .mode = S_IRUGO | S_IWUSR,
2246 .show = ipr_show_adapter_state,
2247 .store = ipr_store_adapter_state
2251 * ipr_store_reset_adapter - Reset the adapter
2252 * @class_dev: class_device struct
2254 * @count: buffer size
2256 * This function will reset the adapter.
/* Return value: */
2259 * count on success / other on failure
2261 static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2262 const char *buf, size_t count)
2264 struct Scsi_Host *shost = class_to_shost(class_dev);
2265 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2266 unsigned long lock_flags;
2269 if (!capable(CAP_SYS_ADMIN))
/* Start a normal-shutdown reset unless one is already running, then wait. */
2272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2273 if (!ioa_cfg->in_reset_reload)
2274 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2276 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute: write-triggered adapter reset. */
2281 static struct class_device_attribute ipr_ioa_reset_attr = {
2283 .name = "reset_host",
2286 .store = ipr_store_reset_adapter
2290 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2291 * @buf_len: buffer length
2293 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2294 * list to use for microcode download
/* Return value: */
2297 * pointer to sglist / NULL on failure
2299 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2301 int sg_size, order, bsize_elem, num_elem, i, j;
2302 struct ipr_sglist *sglist;
2303 struct scatterlist *scatterlist;
2306 /* Get the minimum size per scatter/gather element */
2307 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2309 /* Get the actual size per element */
2310 order = get_order(sg_size);
2312 /* Determine the actual number of bytes per element */
2313 bsize_elem = PAGE_SIZE * (1 << order);
2315 /* Determine the actual number of sg entries needed */
/* Round the element count up so a partial tail still gets an entry. */
2316 if (buf_len % bsize_elem)
2317 num_elem = (buf_len / bsize_elem) + 1;
2319 num_elem = buf_len / bsize_elem;
2321 /* Allocate a scatter/gather list for the DMA */
/* The struct already holds one scatterlist entry, hence num_elem - 1. */
2322 sglist = kzalloc(sizeof(struct ipr_sglist) +
2323 (sizeof(struct scatterlist) * (num_elem - 1)),
2326 if (sglist == NULL) {
2331 scatterlist = sglist->scatterlist;
2333 sglist->order = order;
2334 sglist->num_sg = num_elem;
2336 /* Allocate a bunch of sg elements */
2337 for (i = 0; i < num_elem; i++) {
2338 page = alloc_pages(GFP_KERNEL, order);
2342 /* Free up what we already allocated */
/* Unwind the partial allocation on failure to avoid leaking pages. */
2343 for (j = i - 1; j >= 0; j--)
2344 __free_pages(scatterlist[j].page, order);
2349 scatterlist[i].page = page;
2356 * ipr_free_ucode_buffer - Frees a microcode download buffer
2357 * @p_dnld: scatter/gather list pointer
2359 * Free a DMA'able ucode download buffer previously allocated with
2360 * ipr_alloc_ucode_buffer
/* Return value: none */
2365 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
/* Release every page group backing the scatter/gather list. */
2369 for (i = 0; i < sglist->num_sg; i++)
2370 __free_pages(sglist->scatterlist[i].page, sglist->order);
2376 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
2377 * @sglist: scatter/gather list pointer
2378 * @buffer: buffer pointer
2379 * @len: buffer length
2381 * Copy a microcode image from a user buffer into a buffer allocated by
2382 * ipr_alloc_ucode_buffer
/* Return value: */
2385 * 0 on success / other on failure
2387 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2388 u8 *buffer, u32 len)
2390 int bsize_elem, i, result = 0;
2391 struct scatterlist *scatterlist;
2394 /* Determine the actual number of bytes per element */
2395 bsize_elem = PAGE_SIZE * (1 << sglist->order);
2397 scatterlist = sglist->scatterlist;
/* Copy all whole elements, mapping each sg page into the kernel briefly. */
2399 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2400 kaddr = kmap(scatterlist[i].page);
2401 memcpy(kaddr, buffer, bsize_elem);
2402 kunmap(scatterlist[i].page);
2404 scatterlist[i].length = bsize_elem;
/* Copy the partial tail element, if any. */
2412 if (len % bsize_elem) {
2413 kaddr = kmap(scatterlist[i].page);
2414 memcpy(kaddr, buffer, len % bsize_elem);
2415 kunmap(scatterlist[i].page);
2417 scatterlist[i].length = len % bsize_elem;
2420 sglist->buffer_len = len;
2425 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2426 * @ipr_cmd: ipr command struct
2427 * @sglist: scatter/gather list
2429 * Builds a microcode download IOA data list (IOADL).
/* Return value: none */
2432 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2433 struct ipr_sglist *sglist)
2435 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2436 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2437 struct scatterlist *scatterlist = sglist->scatterlist;
/* Mark the transfer as a write and record its total length. */
2440 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2441 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2442 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2443 ioarcb->write_ioadl_len =
2444 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* One IOADL descriptor per mapped scatter/gather element. */
2446 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2447 ioadl[i].flags_and_data_len =
2448 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2450 cpu_to_be32(sg_dma_address(&scatterlist[i]));
/* Flag the final descriptor so the adapter knows where the list ends. */
2453 ioadl[i-1].flags_and_data_len |=
2454 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2458 * ipr_update_ioa_ucode - Update IOA's microcode
2459 * @ioa_cfg: ioa config struct
2460 * @sglist: scatter/gather list
2462 * Initiate an adapter reset to update the IOA's microcode
2465 * 0 on success / -EIO on failure
2467 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2468 struct ipr_sglist *sglist)
2470 unsigned long lock_flags;
2472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be in flight: ucode_sglist is the busy flag */
2474 if (ioa_cfg->ucode_sglist) {
2475 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2476 dev_err(&ioa_cfg->pdev->dev,
2477 "Microcode download already in progress\n");
/* Map the buffer for DMA before handing it to the reset job */
2481 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2482 sglist->num_sg, DMA_TO_DEVICE);
2484 if (!sglist->num_dma_sg) {
2485 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2486 dev_err(&ioa_cfg->pdev->dev,
2487 "Failed to map microcode download buffer!\n");
/* The reset path performs the actual download; publish the sglist,
 * kick off a normal-shutdown reset, then sleep until reset/reload
 * completes before clearing the busy flag. */
2491 ioa_cfg->ucode_sglist = sglist;
2492 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2493 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2494 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2496 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2497 ioa_cfg->ucode_sglist = NULL;
2498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2503 * ipr_store_update_fw - Update the firmware on the adapter
2504 * @class_dev: class_device struct
2506 * @count: buffer size
2508 * This function will update the firmware on the adapter.
2511 * count on success / other on failure
2513 static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2514 const char *buf, size_t count)
2516 struct Scsi_Host *shost = class_to_shost(class_dev);
2517 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2518 struct ipr_ucode_image_header *image_hdr;
2519 const struct firmware *fw_entry;
2520 struct ipr_sglist *sglist;
2523 int len, result, dnld_size;
/* Firmware download is a privileged operation */
2525 if (!capable(CAP_SYS_ADMIN))
/* The sysfs write carries the firmware file name; strip the trailing
 * newline before handing it to request_firmware() */
2528 len = snprintf(fname, 99, "%s", buf);
2529 fname[len-1] = '\0';
2531 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2532 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2536 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
/* Sanity-check the image: header must fit in the file, and the image's
 * card type must match this adapter's VPD card type (when present) */
2538 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2539 (ioa_cfg->vpd_cbs->page3_data.card_type &&
2540 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2541 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2542 release_firmware(fw_entry);
/* Download payload starts right after the image header */
2546 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2547 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2548 sglist = ipr_alloc_ucode_buffer(dnld_size);
2551 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2552 release_firmware(fw_entry);
2556 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2559 dev_err(&ioa_cfg->pdev->dev,
2560 "Microcode buffer copy to DMA buffer failed\n");
2564 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
/* Common cleanup for success and failure paths */
2569 ipr_free_ucode_buffer(sglist);
2570 release_firmware(fw_entry);
/* sysfs "update_fw" attribute: writing a firmware file name here
 * triggers an adapter microcode download via ipr_store_update_fw() */
2574 static struct class_device_attribute ipr_update_fw_attr = {
2576 .name = "update_fw",
2579 .store = ipr_store_update_fw
/* All per-adapter (shost) sysfs attributes exported by this driver */
2582 static struct class_device_attribute *ipr_ioa_attrs[] = {
2583 &ipr_fw_version_attr,
2584 &ipr_log_level_attr,
2585 &ipr_diagnostics_attr,
2586 &ipr_ioa_state_attr,
2587 &ipr_ioa_reset_attr,
2588 &ipr_update_fw_attr,
2589 &ipr_ioa_cache_attr,
2593 #ifdef CONFIG_SCSI_IPR_DUMP
2595 * ipr_read_dump - Dump the adapter
2596 * @kobj: kobject struct
2599 * @count: buffer size
2602 * number of bytes printed to buffer
2604 static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2605 loff_t off, size_t count)
2607 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2608 struct Scsi_Host *shost = class_to_shost(cdev);
2609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2610 struct ipr_dump *dump;
2611 unsigned long lock_flags = 0;
2616 if (!capable(CAP_SYS_ADMIN))
2619 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2620 dump = ioa_cfg->dump;
/* A dump can only be read once the adapter has produced it */
2622 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference so the dump can't be freed while we copy from it */
2626 kref_get(&dump->kref);
2627 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2629 if (off > dump->driver_dump.hdr.len) {
2630 kref_put(&dump->kref, ipr_release_dump);
/* Clamp the read to the end of the dump */
2634 if (off + count > dump->driver_dump.hdr.len) {
2635 count = dump->driver_dump.hdr.len - off;
/* The dump image is three consecutive regions: the driver dump header,
 * the IOA dump header, then the page array of IOA data. Copy from each
 * region in turn, translating 'off' into a region-relative offset. */
2639 if (count && off < sizeof(dump->driver_dump)) {
2640 if (off + count > sizeof(dump->driver_dump))
2641 len = sizeof(dump->driver_dump) - off;
2644 src = (u8 *)&dump->driver_dump + off;
2645 memcpy(buf, src, len);
2651 off -= sizeof(dump->driver_dump);
2653 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2654 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2655 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2658 src = (u8 *)&dump->ioa_dump + off;
2659 memcpy(buf, src, len);
2665 off -= offsetof(struct ipr_ioa_dump, ioa_data);
/* IOA data lives in discontiguous pages: never copy across a page
 * boundary in one memcpy */
2668 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2669 len = PAGE_ALIGN(off) - off;
2672 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2673 src += off & ~PAGE_MASK;
2674 memcpy(buf, src, len);
2680 kref_put(&dump->kref, ipr_release_dump);
2685 * ipr_alloc_dump - Prepare for adapter dump
2686 * @ioa_cfg: ioa config struct
2689 * 0 on success / other on failure
2691 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2693 struct ipr_dump *dump;
2694 unsigned long lock_flags = 0;
2697 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2700 ipr_err("Dump memory allocation failed\n");
2704 kref_init(&dump->kref);
2705 dump->ioa_cfg = ioa_cfg;
2707 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Refuse if a dump is already pending or in progress */
2709 if (INACTIVE != ioa_cfg->sdt_state) {
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2715 ioa_cfg->dump = dump;
2716 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
/* If the adapter is already dead, schedule the worker to take the
 * dump now rather than waiting for a reset */
2717 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2718 ioa_cfg->dump_taken = 1;
2719 schedule_work(&ioa_cfg->work_q);
2721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2728 * ipr_free_dump - Free adapter dump memory
2729 * @ioa_cfg: ioa config struct
2732 * 0 on success / other on failure
2734 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2736 struct ipr_dump *dump;
2737 unsigned long lock_flags = 0;
2741 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2742 dump = ioa_cfg->dump;
2744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Detach the dump under the lock, then drop our reference; the dump
 * memory is actually freed by ipr_release_dump when the last kref goes */
2748 ioa_cfg->dump = NULL;
2749 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2751 kref_put(&dump->kref, ipr_release_dump);
2758 * ipr_write_dump - Setup dump state of adapter
2759 * @kobj: kobject struct
2762 * @count: buffer size
2765 * number of bytes printed to buffer
2767 static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2768 loff_t off, size_t count)
2770 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2771 struct Scsi_Host *shost = class_to_shost(cdev);
2772 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2775 if (!capable(CAP_SYS_ADMIN))
/* Writing '1' arms dump collection; '0' releases a collected dump */
2779 rc = ipr_alloc_dump(ioa_cfg);
2780 else if (buf[0] == '0')
2781 rc = ipr_free_dump(ioa_cfg);
/* Binary sysfs file exposing the adapter dump (root read/write only) */
2791 static struct bin_attribute ipr_dump_attr = {
2794 .mode = S_IRUSR | S_IWUSR,
2797 .read = ipr_read_dump,
2798 .write = ipr_write_dump
/* Stub used when CONFIG_SCSI_IPR_DUMP is not set */
2801 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
2805 * ipr_change_queue_depth - Change the device's queue depth
2806 * @sdev: scsi device struct
2807 * @qdepth: depth to set
2812 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
/* Keep the current tag type; only the depth changes. Returns the depth
 * actually in effect after adjustment. */
2814 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2815 return sdev->queue_depth;
2819 * ipr_change_queue_type - Change the device's queue type
2820 * @dsev: scsi device struct
2821 * @tag_type: type of tags to use
2824 * actual queue type set
2826 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2828 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2829 struct ipr_resource_entry *res;
2830 unsigned long lock_flags = 0;
2832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2833 res = (struct ipr_resource_entry *)sdev->hostdata;
/* Tagged queuing is only supported on generic SCSI devices that
 * report tagged support */
2836 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2838 * We don't bother quiescing the device here since the
2839 * adapter firmware does it for us.
2841 scsi_set_tag_type(sdev, tag_type);
2844 scsi_activate_tcq(sdev, sdev->queue_depth);
2846 scsi_deactivate_tcq(sdev, sdev->queue_depth);
2852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2857 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2858 * @dev: device struct
2862 * number of bytes printed to buffer
2864 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2866 struct scsi_device *sdev = to_scsi_device(dev);
2867 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2868 struct ipr_resource_entry *res;
2869 unsigned long lock_flags = 0;
2870 ssize_t len = -ENXIO;
/* Read res under the host lock; -ENXIO if the device has no resource */
2872 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2873 res = (struct ipr_resource_entry *)sdev->hostdata;
2875 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2876 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs "adapter_handle" attribute, read-only */
2880 static struct device_attribute ipr_adapter_handle_attr = {
2882 .name = "adapter_handle",
2885 .show = ipr_show_adapter_handle
/* Per-device sysfs attributes exported by this driver */
2888 static struct device_attribute *ipr_dev_attrs[] = {
2889 &ipr_adapter_handle_attr,
2894 * ipr_biosparam - Return the HSC mapping
2895 * @sdev: scsi device struct
2896 * @block_device: block device pointer
2897 * @capacity: capacity of the device
2898 * @parm: Array containing returned HSC values.
2900 * This function generates the HSC parms that fdisk uses.
2901 * We want to make sure we return something that places partitions
2902 * on 4k boundaries for best performance with the IOA.
2907 static int ipr_biosparam(struct scsi_device *sdev,
2908 struct block_device *block_device,
2909 sector_t capacity, int *parm)
/* Fixed 128 heads x 32 sectors geometry; cylinders derived from
 * capacity via sector_div (64-bit safe division) */
2917 cylinders = capacity;
2918 sector_div(cylinders, (128 * 32));
2923 parm[2] = cylinders;
2929 * ipr_slave_destroy - Unconfigure a SCSI device
2930 * @sdev: scsi device struct
2935 static void ipr_slave_destroy(struct scsi_device *sdev)
2937 struct ipr_resource_entry *res;
2938 struct ipr_ioa_cfg *ioa_cfg;
2939 unsigned long lock_flags = 0;
2941 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
/* Break the sdev <-> resource-entry association under the host lock */
2943 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2944 res = (struct ipr_resource_entry *) sdev->hostdata;
2946 sdev->hostdata = NULL;
2949 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2953 * ipr_slave_configure - Configure a SCSI device
2954 * @sdev: scsi device struct
2956 * This function configures the specified scsi device.
2961 static int ipr_slave_configure(struct scsi_device *sdev)
2963 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2964 struct ipr_resource_entry *res;
2965 unsigned long lock_flags = 0;
2967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2968 res = sdev->hostdata;
/* Advanced-function DASD is presented as a RAID member; hide AF DASD
 * and the IOA resource itself from upper-layer drivers */
2970 if (ipr_is_af_dasd_device(res))
2971 sdev->type = TYPE_RAID;
2972 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
2973 sdev->scsi_level = 4;
2974 sdev->no_uld_attach = 1;
/* Volume sets get longer timeouts and a larger transfer limit */
2976 if (ipr_is_vset_device(res)) {
2977 sdev->timeout = IPR_VSET_RW_TIMEOUT;
2978 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
2980 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
2981 sdev->allow_restart = 1;
2982 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
2984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2989 * ipr_slave_alloc - Prepare for commands to a device.
2990 * @sdev: scsi device struct
2992 * This function saves a pointer to the resource entry
2993 * in the scsi device struct if the device exists. We
2994 * can then use this pointer in ipr_queuecommand when
2995 * handling new commands.
2998 * 0 on success / -ENXIO if device does not exist
3000 static int ipr_slave_alloc(struct scsi_device *sdev)
3002 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3003 struct ipr_resource_entry *res;
3004 unsigned long lock_flags;
3007 sdev->hostdata = NULL;
3009 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Match this sdev against the adapter's in-use resource table by
 * bus/target/lun address */
3011 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3012 if ((res->cfgte.res_addr.bus == sdev->channel) &&
3013 (res->cfgte.res_addr.target == sdev->id) &&
3014 (res->cfgte.res_addr.lun == sdev->lun)) {
3018 sdev->hostdata = res;
3019 res->needs_sync_complete = 1;
3025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3031 * ipr_eh_host_reset - Reset the host adapter
3032 * @scsi_cmd: scsi command struct
3037 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3039 struct ipr_ioa_cfg *ioa_cfg;
3043 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3045 dev_err(&ioa_cfg->pdev->dev,
3046 "Adapter being reset as a result of error recovery.\n");
/* If a dump was requested, the reset path should capture it */
3048 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3049 ioa_cfg->sdt_state = GET_DUMP;
3051 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/* EH entry point: wraps the reset in the host lock as required by
 * the SCSI midlayer error-handler calling convention */
3057 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3061 spin_lock_irq(cmd->device->host->host_lock);
3062 rc = __ipr_eh_host_reset(cmd);
3063 spin_unlock_irq(cmd->device->host->host_lock);
3069 * ipr_eh_dev_reset - Reset the device
3070 * @scsi_cmd: scsi command struct
3072 * This function issues a device reset to the affected device.
3073 * A LUN reset will be sent to the device first. If that does
3074 * not work, a target reset will be sent.
3079 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3081 struct ipr_cmnd *ipr_cmd;
3082 struct ipr_ioa_cfg *ioa_cfg;
3083 struct ipr_resource_entry *res;
3084 struct ipr_cmd_pkt *cmd_pkt;
3088 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3089 res = scsi_cmd->device->hostdata;
/* Device resets are only meaningful for generic SCSI and vset devices */
3091 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3095 * If we are currently going through reset/reload, return failed. This will force the
3096 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3099 if (ioa_cfg->in_reset_reload)
3101 if (ioa_cfg->ioa_is_dead)
/* Redirect completion of any ops still pending on this device so the
 * EH done handler runs when they return */
3104 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3105 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3106 if (ipr_cmd->scsi_cmd)
3107 ipr_cmd->done = ipr_scsi_eh_done;
3111 res->resetting_device = 1;
/* Build and send a synchronous IPR_RESET_DEVICE IOA command */
3113 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3115 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3116 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3117 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3118 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3120 ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3121 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3123 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3125 res->resetting_device = 0;
3127 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Any sense key in the IOASC means the reset failed */
3130 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
/* EH entry point: host-lock wrapper around __ipr_eh_dev_reset */
3133 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3137 spin_lock_irq(cmd->device->host->host_lock);
3138 rc = __ipr_eh_dev_reset(cmd);
3139 spin_unlock_irq(cmd->device->host->host_lock);
3145 * ipr_bus_reset_done - Op done function for bus reset.
3146 * @ipr_cmd: ipr command struct
3148 * This function is the op done function for a bus reset
3153 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3155 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3156 struct ipr_resource_entry *res;
/* Tell the midlayer which bus was reset, identified by matching the
 * command's resource handle against the resource table */
3159 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3160 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3161 sizeof(res->cfgte.res_handle))) {
3162 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3168 * If abort has not completed, indicate the reset has, else call the
3169 * abort's done function to wake the sleeping eh thread
3171 if (ipr_cmd->sibling->sibling)
3172 ipr_cmd->sibling->sibling = NULL;
3174 ipr_cmd->sibling->done(ipr_cmd->sibling);
3176 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3181 * ipr_abort_timeout - An abort task has timed out
3182 * @ipr_cmd: ipr command struct
3184 * This function handles when an abort task times out. If this
3185 * happens we issue a bus reset since we have resources tied
3186 * up that must be freed before returning to the midlayer.
3191 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3193 struct ipr_cmnd *reset_cmd;
3194 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3195 struct ipr_cmd_pkt *cmd_pkt;
3196 unsigned long lock_flags = 0;
3199 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out if the abort already completed or an adapter reset is
 * underway (which will clean up for us) */
3200 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3205 ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
/* Escalate to a bus reset; link the two commands via 'sibling' so
 * ipr_bus_reset_done can wake the sleeping abort */
3206 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3207 ipr_cmd->sibling = reset_cmd;
3208 reset_cmd->sibling = ipr_cmd;
3209 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3210 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3211 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3212 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3213 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3215 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221 * ipr_cancel_op - Cancel specified op
3222 * @scsi_cmd: scsi command struct
3224 * This function cancels specified op.
3229 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3231 struct ipr_cmnd *ipr_cmd;
3232 struct ipr_ioa_cfg *ioa_cfg;
3233 struct ipr_resource_entry *res;
3234 struct ipr_cmd_pkt *cmd_pkt;
3239 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3240 res = scsi_cmd->device->hostdata;
3242 /* If we are currently going through reset/reload, return failed.
3243 * This will force the mid-layer to call ipr_eh_host_reset,
3244 * which will then go to sleep and wait for the reset to complete
3246 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3248 if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
/* Redirect the op's completion to the EH done handler if it is
 * still pending on the adapter */
3251 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3252 if (ipr_cmd->scsi_cmd == scsi_cmd) {
3253 ipr_cmd->done = ipr_scsi_eh_done;
/* Issue a synchronous CANCEL ALL REQUESTS to the device's handle */
3262 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3263 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3264 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3265 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3266 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3267 ipr_cmd->u.sdev = scsi_cmd->device;
3269 ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3270 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3271 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3274 * If the abort task timed out and we sent a bus reset, we will get
3275 * one the following responses to the abort
3277 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3282 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3283 res->needs_sync_complete = 1;
3286 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3290 * ipr_eh_abort - Abort a single op
3291 * @scsi_cmd: scsi command struct
3296 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3298 unsigned long flags;
/* EH abort entry point: take the host lock and delegate to
 * ipr_cancel_op, which does the real work */
3303 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3304 rc = ipr_cancel_op(scsi_cmd);
3305 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3312 * ipr_handle_other_interrupt - Handle "other" interrupts
3313 * @ioa_cfg: ioa config struct
3314 * @int_reg: interrupt register
3317 * IRQ_NONE / IRQ_HANDLED
3319 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3320 volatile u32 int_reg)
3322 irqreturn_t rc = IRQ_HANDLED;
/* Adapter transitioned to operational: mask+clear the interrupt and
 * advance the in-progress reset job */
3324 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3325 /* Mask the interrupt */
3326 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3328 /* Clear the interrupt */
3329 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3330 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3332 list_del(&ioa_cfg->reset_cmd->queue);
3333 del_timer(&ioa_cfg->reset_cmd->timer);
3334 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* Otherwise treat it as an adapter error: record a unit check,
 * optionally arm dump capture, then reset the adapter */
3336 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3337 ioa_cfg->ioa_unit_checked = 1;
3339 dev_err(&ioa_cfg->pdev->dev,
3340 "Permanent IOA failure. 0x%08X\n", int_reg);
3342 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3343 ioa_cfg->sdt_state = GET_DUMP;
3345 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3346 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3353 * ipr_isr - Interrupt service routine
3355 * @devp: pointer to ioa config struct
3356 * @regs: pt_regs struct
3359 * IRQ_NONE / IRQ_HANDLED
3361 static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3363 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3364 unsigned long lock_flags = 0;
3365 volatile u32 int_reg, int_mask_reg;
3368 struct ipr_cmnd *ipr_cmd;
3369 irqreturn_t rc = IRQ_NONE;
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3373 /* If interrupts are disabled, ignore the interrupt */
3374 if (!ioa_cfg->allow_interrupts) {
3375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3379 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3380 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3382 /* If an interrupt on the adapter did not occur, ignore it */
3383 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Drain the host request/response queue (HRRQ): entries are valid
 * while their toggle bit matches ours */
3391 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3392 ioa_cfg->toggle_bit) {
3394 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3395 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside the command-block table means the adapter is
 * confused: log it and reset the adapter */
3397 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3398 ioa_cfg->errors_logged++;
3399 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3401 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3402 ioa_cfg->sdt_state = GET_DUMP;
3404 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3409 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3411 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3413 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
/* Complete the op: dequeue, stop its timeout timer, run done() */
3415 list_del(&ipr_cmd->queue);
3416 del_timer(&ipr_cmd->timer);
3417 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ pointer, wrapping and flipping the toggle bit
 * at the end of the ring */
3421 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3422 ioa_cfg->hrrq_curr++;
3424 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3425 ioa_cfg->toggle_bit ^= 1u;
3429 if (ipr_cmd != NULL) {
3430 /* Clear the PCI interrupt */
3431 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3432 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* Nothing in the HRRQ: must have been a non-command interrupt */
3437 if (unlikely(rc == IRQ_NONE))
3438 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3446 * @ioa_cfg: ioa config struct
3447 * @ipr_cmd: ipr command struct
3450 * 0 on success / -1 on failure
3452 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3453 struct ipr_cmnd *ipr_cmd)
3456 struct scatterlist *sglist;
3458 u32 ioadl_flags = 0;
3459 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3460 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3461 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3463 length = scsi_cmd->request_bufflen;
/* Scatter/gather path: map the midlayer's sg list and build one
 * IOADL descriptor per mapped element */
3468 if (scsi_cmd->use_sg) {
3469 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3470 scsi_cmd->request_buffer,
3472 scsi_cmd->sc_data_direction);
/* Direction decides which half of the IOARCB gets the transfer
 * length and IOADL length */
3474 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3475 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3476 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3477 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3478 ioarcb->write_ioadl_len =
3479 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3480 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3481 ioadl_flags = IPR_IOADL_FLAGS_READ;
3482 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3483 ioarcb->read_ioadl_len =
3484 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3487 sglist = scsi_cmd->request_buffer;
3489 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3490 ioadl[i].flags_and_data_len =
3491 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3493 cpu_to_be32(sg_dma_address(&sglist[i]));
/* Flag the last descriptor; zero mapped elements means the map failed */
3496 if (likely(ipr_cmd->dma_use_sg)) {
3497 ioadl[i-1].flags_and_data_len |=
3498 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3501 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
/* Single-buffer path: map the flat request buffer and emit one
 * IOADL descriptor covering the whole transfer */
3503 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3504 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3505 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3506 ioarcb->write_data_transfer_length = cpu_to_be32(length);
3507 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3508 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3509 ioadl_flags = IPR_IOADL_FLAGS_READ;
3510 ioarcb->read_data_transfer_length = cpu_to_be32(length);
3511 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3514 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3515 scsi_cmd->request_buffer, length,
3516 scsi_cmd->sc_data_direction);
3518 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3519 ipr_cmd->dma_use_sg = 1;
3520 ioadl[0].flags_and_data_len =
3521 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3522 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3525 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3532 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3533 * @scsi_cmd: scsi command struct
3538 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
/* Default to untagged; map the midlayer's tag message, when present,
 * to the corresponding IPR task-attribute flag */
3541 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3543 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3545 case MSG_SIMPLE_TAG:
3546 rc = IPR_FLAGS_LO_SIMPLE_TASK;
3549 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3551 case MSG_ORDERED_TAG:
3552 rc = IPR_FLAGS_LO_ORDERED_TASK;
3561 * ipr_erp_done - Process completion of ERP for a device
3562 * @ipr_cmd: ipr command struct
3564 * This function copies the sense buffer into the scsi_cmd
3565 * struct and pushes the scsi_done function.
3570 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3572 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3573 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3574 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3575 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* If the request sense itself failed, fail the original command;
 * otherwise hand the gathered sense data back to the midlayer */
3577 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3578 scsi_cmd->result |= (DID_ERROR << 16);
3579 ipr_sdev_err(scsi_cmd->device,
3580 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3582 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3583 SCSI_SENSE_BUFFERSIZE);
3587 res->needs_sync_complete = 1;
/* Release DMA mappings, recycle the command block, complete the op */
3590 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3591 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3592 scsi_cmd->scsi_done(scsi_cmd);
3596 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3597 * @ipr_cmd: ipr command struct
3602 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3604 struct ipr_ioarcb *ioarcb;
3605 struct ipr_ioasa *ioasa;
3607 ioarcb = &ipr_cmd->ioarcb;
3608 ioasa = &ipr_cmd->ioasa;
/* Wipe the command packet and transfer-length fields so the block can
 * be reused for an ERP request without stale state */
3610 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3611 ioarcb->write_data_transfer_length = 0;
3612 ioarcb->read_data_transfer_length = 0;
3613 ioarcb->write_ioadl_len = 0;
3614 ioarcb->read_ioadl_len = 0;
3616 ioasa->residual_data_len = 0;
3620 * ipr_erp_request_sense - Send request sense to a device
3621 * @ipr_cmd: ipr command struct
3623 * This function sends a request sense to a device as a result
3624 * of a check condition.
3629 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3631 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3632 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Previous ERP step failed outright: finish ERP without sense data */
3634 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3635 ipr_erp_done(ipr_cmd);
3639 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build a REQUEST SENSE CDB reading SCSI_SENSE_BUFFERSIZE bytes */
3641 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3642 cmd_pkt->cdb[0] = REQUEST_SENSE;
3643 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3644 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3645 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3646 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
/* Single-element IOADL pointing at the command block's own DMA-able
 * sense buffer */
3648 ipr_cmd->ioadl[0].flags_and_data_len =
3649 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3650 ipr_cmd->ioadl[0].address =
3651 cpu_to_be32(ipr_cmd->sense_buffer_dma);
3653 ipr_cmd->ioarcb.read_ioadl_len =
3654 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3655 ipr_cmd->ioarcb.read_data_transfer_length =
3656 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3658 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3659 IPR_REQUEST_SENSE_TIMEOUT * 2);
3663 * ipr_erp_cancel_all - Send cancel all to a device
3664 * @ipr_cmd: ipr command struct
3666 * This function sends a cancel all to a device to clear the
3667 * queue. If we are running TCQ on the device, QERR is set to 1,
3668 * which means all outstanding ops have been dropped on the floor.
3669 * Cancel all will return them to us.
3674 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3676 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3677 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3678 struct ipr_cmd_pkt *cmd_pkt;
3682 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Untagged devices have nothing queued to cancel: go straight to the
 * request-sense step of ERP */
3684 if (!scsi_get_tag_type(scsi_cmd->device)) {
3685 ipr_erp_request_sense(ipr_cmd);
3689 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3690 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3691 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* On completion, continue ERP with a request sense */
3693 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3694 IPR_CANCEL_ALL_TIMEOUT);
3698 * ipr_dump_ioasa - Dump contents of IOASA
3699 * @ioa_cfg: ioa config struct
3700 * @ipr_cmd: ipr command struct
3702 * This function is invoked by the interrupt handler when ops
3703 * fail. It will log the IOASA if appropriate. Only called
3709 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3710 struct ipr_cmnd *ipr_cmd)
3715 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3716 __be32 *ioasa_data = (__be32 *)ioasa;
3719 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3724 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3727 error_index = ipr_get_error(ioasc);
/* At default verbosity, skip errors the IOA already logged itself and
 * errors the table marks as not worth logging */
3729 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3730 /* Don't log an error if the IOA already logged one */
3731 if (ioasa->ilid != 0)
3734 if (ipr_error_table[error_index].log_ioasa == 0)
3738 ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3739 ipr_error_table[error_index].error);
/* For GPDD-class errors, also report the device end state and the
 * bus phase, when the indices fall inside the lookup tables */
3741 if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3742 (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3743 ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3744 "Device End state: %s Phase: %s\n",
3745 ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3746 ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
/* Clamp the hex dump to the IOASA structure size */
3749 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3750 data_len = sizeof(struct ipr_ioasa);
3752 data_len = be16_to_cpu(ioasa->ret_stat_len);
3754 ipr_err("IOASA Dump:\n");
3756 for (i = 0; i < data_len / 4; i += 4) {
3757 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3758 be32_to_cpu(ioasa_data[i]),
3759 be32_to_cpu(ioasa_data[i+1]),
3760 be32_to_cpu(ioasa_data[i+2]),
3761 be32_to_cpu(ioasa_data[i+3]));
3766 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3768 * @sense_buf: sense data buffer
3773 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3776 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3777 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3778 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3779 u32 ioasc = be32_to_cpu(ioasa->ioasc);
3781 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense data. */
3783 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3786 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/*
 * Volume sets with a failing LBA that does not fit in the 4-byte
 * information field of fixed-format sense get descriptor-format
 * (0x72) sense with an 8-byte information descriptor instead.
 */
3788 if (ipr_is_vset_device(res) &&
3789 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3790 ioasa->u.vset.failing_lba_hi != 0) {
3791 sense_buf[0] = 0x72;
3792 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3793 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3794 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3798 sense_buf[9] = 0x0A;
3799 sense_buf[10] = 0x80;
/* High 32 bits of the failing LBA, big-endian in the descriptor. */
3801 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3803 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3804 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3805 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3806 sense_buf[15] = failing_lba & 0x000000ff;
/* Low 32 bits of the failing LBA. */
3808 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3810 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3811 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3812 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3813 sense_buf[19] = failing_lba & 0x000000ff;
/* All other cases use fixed-format (0x70) sense. */
3815 sense_buf[0] = 0x70;
3816 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3817 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3818 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3820 /* Illegal request */
3821 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3822 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3823 sense_buf[7] = 10; /* additional length */
3825 /* IOARCB was in error */
3826 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3827 sense_buf[15] = 0xC0;
3828 else /* Parameter data was invalid */
3829 sense_buf[15] = 0x80;
/* Sense-key-specific field pointer, split into high and low bytes. */
3832 ((IPR_FIELD_POINTER_MASK &
3833 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3835 (IPR_FIELD_POINTER_MASK &
3836 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
/* Medium error: report the failing LBA in the information field. */
3838 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3839 if (ipr_is_vset_device(res))
3840 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3842 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3844 sense_buf[0] |= 0x80; /* Or in the Valid bit */
3845 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3846 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3847 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3848 sense_buf[6] = failing_lba & 0x000000ff;
3851 sense_buf[7] = 6; /* additional length */
3857 * ipr_erp_start - Process an error response for a SCSI op
3858 * @ioa_cfg: ioa config struct
3859 * @ipr_cmd: ipr command struct
3861 * This function determines whether or not to initiate ERP
3862 * on the affected device.
3867 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3868 struct ipr_cmnd *ipr_cmd)
3870 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3871 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3872 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3875 ipr_scsi_eh_done(ipr_cmd);
3879 if (ipr_is_gscsi(res))
3880 ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3882 ipr_gen_sense(ipr_cmd);
/* Map the masked IOASC onto a SCSI mid-layer result code. */
3884 switch (ioasc & IPR_IOASC_IOASC_MASK) {
3885 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3886 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3888 case IPR_IOASC_IR_RESOURCE_HANDLE:
3889 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3891 case IPR_IOASC_HW_SEL_TIMEOUT:
3892 scsi_cmd->result |= (DID_NO_CONNECT << 16);
3893 res->needs_sync_complete = 1;
3895 case IPR_IOASC_SYNC_REQUIRED:
3897 res->needs_sync_complete = 1;
3898 scsi_cmd->result |= (DID_IMM_RETRY << 16);
3900 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3901 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3903 case IPR_IOASC_BUS_WAS_RESET:
3904 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3906 * Report the bus reset and ask for a retry. The device
3907 * will give CC/UA the next command.
3909 if (!res->resetting_device)
3910 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3911 scsi_cmd->result |= (DID_ERROR << 16);
3912 res->needs_sync_complete = 1;
/*
 * Pass the device's SAM status straight back; a CHECK CONDITION
 * kicks off ERP (cancel-all, then autosense) instead of completing.
 */
3914 case IPR_IOASC_HW_DEV_BUS_STATUS:
3915 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3916 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3917 ipr_erp_cancel_all(ipr_cmd);
3920 res->needs_sync_complete = 1;
3922 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* Default: generic error; non-vset devices get a sync complete. */
3925 scsi_cmd->result |= (DID_ERROR << 16);
3926 if (!ipr_is_vset_device(res))
3927 res->needs_sync_complete = 1;
/* Completion: unmap DMA, recycle the command block, notify mid-layer. */
3931 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3932 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3933 scsi_cmd->scsi_done(scsi_cmd);
3937 * ipr_scsi_done - mid-layer done function
3938 * @ipr_cmd: ipr command struct
3940 * This function is invoked by the interrupt handler for
3941 * ops generated by the SCSI mid-layer
3946 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
3948 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3949 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3950 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Propagate the residual byte count reported by the adapter. */
3952 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
/* Fast path: success (sense key 0) completes directly; otherwise ERP. */
3954 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
3955 ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3956 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3957 scsi_cmd->scsi_done(scsi_cmd);
3959 ipr_erp_start(ioa_cfg, ipr_cmd);
3963 * ipr_save_ioafp_mode_select - Save adapters mode select data
3964 * @ioa_cfg: ioa config struct
3965 * @scsi_cmd: scsi command struct
3967 * This function saves mode select data for the adapter to
3968 * use following an adapter reset.
3971 * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
3973 static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
3974 struct scsi_cmnd *scsi_cmd)
/* Lazily allocate the save buffer; kept for the life of the adapter. */
3976 if (!ioa_cfg->saved_mode_pages) {
3977 ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages),
3979 if (!ioa_cfg->saved_mode_pages) {
3980 dev_err(&ioa_cfg->pdev->dev,
3981 "IOA mode select buffer allocation failed\n");
3982 return SCSI_MLQUEUE_HOST_BUSY;
/*
 * NOTE(review): the copy length is CDB byte 4 (MODE SELECT(6)
 * parameter list length, up to 255) taken as-is — confirm
 * sizeof(struct ipr_mode_pages) can hold 255 bytes, or clamp.
 */
3986 memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
3987 ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
3992 * ipr_queuecommand - Queue a mid-layer request
3993 * @scsi_cmd: scsi command struct
3994 * @done: done function
3996 * This function queues a request generated by the mid-layer.
4000 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4001 * SCSI_MLQUEUE_HOST_BUSY if host is busy
4003 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4004 void (*done) (struct scsi_cmnd *))
4006 struct ipr_ioa_cfg *ioa_cfg;
4007 struct ipr_resource_entry *res;
4008 struct ipr_ioarcb *ioarcb;
4009 struct ipr_cmnd *ipr_cmd;
4012 scsi_cmd->scsi_done = done;
4013 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4014 res = scsi_cmd->device->hostdata;
4015 scsi_cmd->result = (DID_OK << 16);
4018 * We are currently blocking all devices due to a host reset
4019 * We have told the host to stop giving us new requests, but
4020 * ERP ops don't count. FIXME
4022 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4023 return SCSI_MLQUEUE_HOST_BUSY;
4026 * FIXME - Create scsi_set_host_offline interface
4027 * and the ioa_is_dead check can be removed
4029 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4030 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4031 scsi_cmd->result = (DID_NO_CONNECT << 16);
4032 scsi_cmd->scsi_done(scsi_cmd);
/* Build the IOARCB for this op from the mid-layer command. */
4036 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4037 ioarcb = &ipr_cmd->ioarcb;
4038 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4040 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4041 ipr_cmd->scsi_cmd = scsi_cmd;
4042 ioarcb->res_handle = res->cfgte.res_handle;
4043 ipr_cmd->done = ipr_scsi_done;
4044 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
/* Extra flags only apply to generic SCSI devices and volume sets. */
4046 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4047 if (scsi_cmd->underflow == 0)
4048 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
/* One-shot sync-complete flag set by earlier error handling. */
4050 if (res->needs_sync_complete) {
4051 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4052 res->needs_sync_complete = 0;
4055 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4056 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4057 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4058 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-specific opcodes (>= 0xC0) are routed as IOA commands. */
4061 if (scsi_cmd->cmnd[0] >= 0xC0 &&
4062 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4063 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* MODE SELECT to the IOA itself is saved for replay after reset. */
4065 if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4066 rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4068 if (likely(rc == 0))
4069 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Kick the adapter by writing the IOARCB bus address to IOARRIN. */
4071 if (likely(rc == 0)) {
4073 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4074 ioa_cfg->regs.ioarrin_reg);
/* Failure: return the command block to the free list and back off. */
4076 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4077 return SCSI_MLQUEUE_HOST_BUSY;
4084 * ipr_info - Get information about the card/driver
4085 * @scsi_host: scsi host struct
4088 * pointer to buffer with description string
4090 static const char * ipr_ioa_info(struct Scsi_Host *host)
/*
 * NOTE(review): this returns a static buffer. host_lock serializes
 * per-adapter callers, but the buffer is shared by ALL adapters, so
 * concurrent calls on different hosts can race — confirm acceptable.
 */
4092 static char buffer[512];
4093 struct ipr_ioa_cfg *ioa_cfg;
4094 unsigned long lock_flags = 0;
4096 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4098 spin_lock_irqsave(host->host_lock, lock_flags);
4099 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4100 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI mid-layer host template: entry points and limits for ipr hosts. */
4105 static struct scsi_host_template driver_template = {
4106 .module = THIS_MODULE,
4108 .info = ipr_ioa_info,
4109 .queuecommand = ipr_queuecommand,
4110 .eh_abort_handler = ipr_eh_abort,
4111 .eh_device_reset_handler = ipr_eh_dev_reset,
4112 .eh_host_reset_handler = ipr_eh_host_reset,
4113 .slave_alloc = ipr_slave_alloc,
4114 .slave_configure = ipr_slave_configure,
4115 .slave_destroy = ipr_slave_destroy,
4116 .change_queue_depth = ipr_change_queue_depth,
4117 .change_queue_type = ipr_change_queue_type,
4118 .bios_param = ipr_biosparam,
4119 .can_queue = IPR_MAX_COMMANDS,
4121 .sg_tablesize = IPR_MAX_SGLIST,
4122 .max_sectors = IPR_IOA_MAX_SECTORS,
4123 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4124 .use_clustering = ENABLE_CLUSTERING,
4125 .shost_attrs = ipr_ioa_attrs,
4126 .sdev_attrs = ipr_dev_attrs,
4127 .proc_name = IPR_NAME
4130 #ifdef CONFIG_PPC_PSERIES
/* PVR values of processors on which early-revision 5702s are blocked. */
4131 static const u16 ipr_blocked_processors[] = {
4143 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4144 * @ioa_cfg: ioa cfg struct
4146 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4147 * certain pSeries hardware. This function determines if the given
4148 * adapter is in one of these confgurations or not.
4151 * 1 if adapter is not supported / 0 if adapter is supported
4153 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Only the 5702 adapter type is subject to this restriction. */
4158 if (ioa_cfg->type == 0x5702) {
4159 if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4160 &rev_id) == PCIBIOS_SUCCESSFUL) {
4162 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4163 if (__is_processor(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported. */
4172 #define ipr_invalid_adapter(ioa_cfg) 0
4176 * ipr_ioa_bringdown_done - IOA bring down completion.
4177 * @ipr_cmd: ipr command struct
4179 * This function processes the completion of an adapter bring down.
4180 * It wakes any reset sleepers.
4185 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4190 ioa_cfg->in_reset_reload = 0;
4191 ioa_cfg->reset_retries = 0;
4192 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4193 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around the mid-layer unblock call (may sleep/schedule). */
4195 spin_unlock_irq(ioa_cfg->host->host_lock);
4196 scsi_unblock_requests(ioa_cfg->host);
4197 spin_lock_irq(ioa_cfg->host->host_lock);
4200 return IPR_RC_JOB_RETURN;
4204 * ipr_ioa_reset_done - IOA reset completion.
4205 * @ipr_cmd: ipr command struct
4207 * This function processes the completion of an adapter reset.
4208 * It schedules any necessary mid-layer add/removes and
4209 * wakes any reset sleepers.
4214 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4217 struct ipr_resource_entry *res;
4218 struct ipr_hostrcb *hostrcb, *temp;
4222 ioa_cfg->in_reset_reload = 0;
4223 ioa_cfg->allow_cmds = 1;
4224 ioa_cfg->reset_cmd = NULL;
/* Schedule the worker to add/remove devices discovered by the reset. */
4226 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4227 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4232 schedule_work(&ioa_cfg->work_q);
/* Re-arm the free HCAM buffers: log-data first, then config-change. */
4234 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4235 list_del(&hostrcb->queue);
4236 if (i++ < IPR_NUM_LOG_HCAMS)
4237 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4239 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4242 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4244 ioa_cfg->reset_retries = 0;
4245 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4246 wake_up_all(&ioa_cfg->reset_wait_q);
/* Unblock requests with the lock dropped; re-block if another reset started. */
4248 spin_unlock_irq(ioa_cfg->host->host_lock);
4249 scsi_unblock_requests(ioa_cfg->host);
4250 spin_lock_irq(ioa_cfg->host->host_lock);
4252 if (!ioa_cfg->allow_cmds)
4253 scsi_block_requests(ioa_cfg->host);
4256 return IPR_RC_JOB_RETURN;
4260 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4261 * @supported_dev: supported device struct
4262 * @vpids: vendor product id struct
4267 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4268 struct ipr_std_inq_vpids *vpids)
/* Zero the record, copy in the vendor/product IDs, fill in fixed fields. */
4270 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4271 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4272 supported_dev->num_records = 1;
4273 supported_dev->data_length =
4274 cpu_to_be16(sizeof(struct ipr_supported_device));
4275 supported_dev->reserved = 0;
4279 * ipr_set_supported_devs - Send Set Supported Devices for a device
4280 * @ipr_cmd: ipr command struct
4282 * This function send a Set Supported Devices to the adapter
4285 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4287 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4289 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4290 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4291 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4292 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4293 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* When no more DASD resources remain, fall through to reset-done. */
4295 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume iteration from the resource saved in ipr_cmd->u.res. */
4297 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4298 if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4301 ipr_cmd->u.res = res;
4302 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4304 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4305 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4306 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4308 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4309 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4310 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
/* Single write IOADL element pointing at the shared VPD control block. */
4312 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4313 sizeof(struct ipr_supported_device));
4314 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4315 offsetof(struct ipr_misc_cbs, supp_dev));
4316 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4317 ioarcb->write_data_transfer_length =
4318 cpu_to_be32(sizeof(struct ipr_supported_device));
4320 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4321 IPR_SET_SUP_DEVICE_TIMEOUT);
/* Re-enter this step on completion to handle the next device. */
4323 ipr_cmd->job_step = ipr_set_supported_devs;
4324 return IPR_RC_JOB_RETURN;
4327 return IPR_RC_JOB_CONTINUE;
4331 * ipr_setup_write_cache - Disable write cache if needed
4332 * @ipr_cmd: ipr command struct
4334 * This function sets up adapters write cache to desired setting
4337 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4339 static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4341 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Next step walks used_res_q from its head via u.res. */
4343 ipr_cmd->job_step = ipr_set_supported_devs;
4344 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4345 struct ipr_resource_entry, queue);
/* Only a "prepare for normal" shutdown is sent when cache is disabled. */
4347 if (ioa_cfg->cache_state != CACHE_DISABLED)
4348 return IPR_RC_JOB_CONTINUE;
4350 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4351 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4352 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4353 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4355 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4357 return IPR_RC_JOB_RETURN;
4361 * ipr_get_mode_page - Locate specified mode page
4362 * @mode_pages: mode page buffer
4363 * @page_code: page code to find
4364 * @len: minimum required length for mode page
4367 * pointer to mode page / NULL on failure
4369 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4370 u32 page_code, u32 len)
4372 struct ipr_mode_page_hdr *mode_hdr;
4376 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Remaining bytes of page data: total length minus the 4-byte header
 * remainder and the block descriptors. */
4379 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4380 mode_hdr = (struct ipr_mode_page_hdr *)
4381 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk page headers until the code matches and the page is big enough. */
4384 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4385 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4389 page_length = (sizeof(struct ipr_mode_page_hdr) +
4390 mode_hdr->page_length);
4391 length -= page_length;
4392 mode_hdr = (struct ipr_mode_page_hdr *)
4393 ((unsigned long)mode_hdr + page_length);
4400 * ipr_check_term_power - Check for term power errors
4401 * @ioa_cfg: ioa config struct
4402 * @mode_pages: IOAFP mode pages buffer
4404 * Check the IOAFP's mode page 28 for term power errors
4409 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4410 struct ipr_mode_pages *mode_pages)
4414 struct ipr_dev_bus_entry *bus;
4415 struct ipr_mode_page28 *mode_page;
/*
 * NOTE(review): ipr_get_mode_page() returns NULL on failure but the
 * result is dereferenced below without a check — confirm page 0x28 is
 * guaranteed present in this buffer, or add a NULL guard.
 */
4417 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4418 sizeof(struct ipr_mode_page28));
4420 entry_length = mode_page->entry_length;
4422 bus = mode_page->bus;
4424 for (i = 0; i < mode_page->num_entries; i++) {
4425 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4426 dev_err(&ioa_cfg->pdev->dev,
4427 "Term power is absent on scsi bus %d\n",
/* Entries are variable length; advance by the reported entry size. */
4431 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4436 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4437 * @ioa_cfg: ioa config struct
4439 * Looks through the config table checking for SES devices. If
4440 * the SES device is in the SES table indicating a maximum SCSI
4441 * bus speed, the speed is limited for the bus.
4446 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
/* Per bus: take the SES-imposed cap if it is below the current limit. */
4451 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4452 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4453 ioa_cfg->bus_attr[i].bus_width);
4455 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4456 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4461 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4462 * @ioa_cfg: ioa config struct
4463 * @mode_pages: mode page 28 buffer
4465 * Updates mode page 28 based on driver configuration
4470 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4471 struct ipr_mode_pages *mode_pages)
4473 int i, entry_length;
4474 struct ipr_dev_bus_entry *bus;
4475 struct ipr_bus_attributes *bus_attr;
4476 struct ipr_mode_page28 *mode_page;
/* NOTE(review): no NULL check on the page lookup before dereference —
 * confirm page 0x28 is always present here. */
4478 mode_page = ipr_get_mode_page(mode_pages, 0x28,
4479 sizeof(struct ipr_mode_page28));
4481 entry_length = mode_page->entry_length;
4483 /* Loop for each device bus entry */
4484 for (i = 0, bus = mode_page->bus;
4485 i < mode_page->num_entries;
4486 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
/* NOTE(review): check uses '>' — a bus number equal to
 * IPR_MAX_NUM_BUSES is accepted; confirm intended bound. */
4487 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4488 dev_err(&ioa_cfg->pdev->dev,
4489 "Invalid resource address reported: 0x%08X\n",
4490 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver's per-bus attributes into the mode page entry. */
4494 bus_attr = &ioa_cfg->bus_attr[i];
4495 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4496 bus->bus_width = bus_attr->bus_width;
4497 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4498 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4499 if (bus_attr->qas_enabled)
4500 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4502 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4507 * ipr_build_mode_select - Build a mode select command
4508 * @ipr_cmd: ipr command struct
4509 * @res_handle: resource handle to send command to
4510 * @parm: Byte 2 of Mode Sense command
4511 * @dma_addr: DMA buffer address
4512 * @xfer_len: data transfer length
4517 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4518 __be32 res_handle, u8 parm, u32 dma_addr,
4521 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4522 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* MODE SELECT(6): parm goes in CDB byte 1, length in byte 4. */
4524 ioarcb->res_handle = res_handle;
4525 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4526 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4527 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4528 ioarcb->cmd_pkt.cdb[1] = parm;
4529 ioarcb->cmd_pkt.cdb[4] = xfer_len;
/* One write IOADL element describing the parameter buffer. */
4531 ioadl->flags_and_data_len =
4532 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4533 ioadl->address = cpu_to_be32(dma_addr);
4534 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4535 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4539 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4540 * @ipr_cmd: ipr command struct
4542 * This function sets up the SCSI bus attributes and sends
4543 * a Mode Select for Page 28 to activate them.
4548 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4551 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
/* Prefer pages saved from a prior user MODE SELECT over freshly
 * sensed data; otherwise modify the sensed page 28 in place. */
4555 if (ioa_cfg->saved_mode_pages) {
4556 memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4557 ioa_cfg->saved_mode_page_len);
4558 length = ioa_cfg->saved_mode_page_len;
4560 ipr_scsi_bus_speed_limit(ioa_cfg);
4561 ipr_check_term_power(ioa_cfg, mode_pages);
4562 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4563 length = mode_pages->hdr.length + 1;
/* Mode data length is reserved (zero) in MODE SELECT parameter data. */
4564 mode_pages->hdr.length = 0;
4567 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4568 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4571 ipr_cmd->job_step = ipr_setup_write_cache;
4572 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4575 return IPR_RC_JOB_RETURN;
4579 * ipr_build_mode_sense - Builds a mode sense command
4580 * @ipr_cmd: ipr command struct
4581 * @res: resource entry struct
4582 * @parm: Byte 2 of mode sense command
4583 * @dma_addr: DMA address of mode sense buffer
4584 * @xfer_len: Size of DMA buffer
4589 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4591 u8 parm, u32 dma_addr, u8 xfer_len)
4593 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4594 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
/* MODE SENSE(6): page code in CDB byte 2, allocation length in byte 4. */
4596 ioarcb->res_handle = res_handle;
4597 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4598 ioarcb->cmd_pkt.cdb[2] = parm;
4599 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4600 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
/* One read IOADL element describing the receive buffer. */
4602 ioadl->flags_and_data_len =
4603 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4604 ioadl->address = cpu_to_be32(dma_addr);
4605 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4606 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4610 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4611 * @ipr_cmd: ipr command struct
4613 * This function send a Page 28 mode sense to the IOA to
4614 * retrieve SCSI bus attributes.
4619 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Sense page 0x28 into the shared VPD control-block DMA area. */
4624 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4625 0x28, ioa_cfg->vpd_cbs_dma +
4626 offsetof(struct ipr_misc_cbs, mode_pages),
4627 sizeof(struct ipr_mode_pages));
4629 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4631 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4634 return IPR_RC_JOB_RETURN;
4638 * ipr_init_res_table - Initialize the resource table
4639 * @ipr_cmd: ipr command struct
4641 * This function looks through the existing resource table, comparing
4642 * it with the config table. This function will take care of old/new
4643 * devices and schedule adding/removing them from the mid-layer
4647 * IPR_RC_JOB_CONTINUE
4649 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4651 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4652 struct ipr_resource_entry *res, *temp;
4653 struct ipr_config_table_entry *cfgte;
4658 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4659 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Move every known resource to a temporary list, then re-match. */
4661 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4662 list_move_tail(&res->queue, &old_res);
4664 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4665 cfgte = &ioa_cfg->cfg_table->dev[i];
/* Match on resource address: existing entries go back to used_res_q. */
4668 list_for_each_entry_safe(res, temp, &old_res, queue) {
4669 if (!memcmp(&res->cfgte.res_addr,
4670 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4671 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* Not previously known: allocate a free entry for the new device. */
4678 if (list_empty(&ioa_cfg->free_res_q)) {
4679 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4684 res = list_entry(ioa_cfg->free_res_q.next,
4685 struct ipr_resource_entry, queue);
4686 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4687 ipr_init_res_entry(res);
/* Refresh the cached config table entry in either case. */
4692 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
/* Anything left on old_res vanished from the config table. */
4695 list_for_each_entry_safe(res, temp, &old_res, queue) {
4697 res->del_from_ml = 1;
4698 res->sdev->hostdata = NULL;
4699 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4701 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4705 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4708 return IPR_RC_JOB_CONTINUE;
4712 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4713 * @ipr_cmd: ipr command struct
4715 * This function sends a Query IOA Configuration command
4716 * to the adapter to retrieve the IOA configuration table.
4721 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4723 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4724 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4725 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4726 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
/* Firmware version comes from the page 3 inquiry done just before. */
4729 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4730 ucode_vpd->major_release, ucode_vpd->card_type,
4731 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4732 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4733 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 7-8 carry the table length, big-endian. */
4735 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4736 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4737 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4739 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4740 ioarcb->read_data_transfer_length =
4741 cpu_to_be32(sizeof(struct ipr_config_table));
4743 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4744 ioadl->flags_and_data_len =
4745 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4747 ipr_cmd->job_step = ipr_init_res_table;
4749 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4752 return IPR_RC_JOB_RETURN;
4756 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4757 * @ipr_cmd: ipr command struct
4759 * This utility function sends an inquiry to the adapter.
4764 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4765 u32 dma_addr, u8 xfer_len)
4767 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4768 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
/* Standard INQUIRY CDB: EVPD flags in byte 1, page code in byte 2. */
4771 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4772 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4774 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4775 ioarcb->cmd_pkt.cdb[1] = flags;
4776 ioarcb->cmd_pkt.cdb[2] = page;
4777 ioarcb->cmd_pkt.cdb[4] = xfer_len;
4779 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4780 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4782 ioadl->address = cpu_to_be32(dma_addr);
4783 ioadl->flags_and_data_len =
4784 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4786 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4791 * ipr_inquiry_page_supported - Is the given inquiry page supported
4792 * @page0: inquiry page 0 buffer
4795 * This function determines if the specified inquiry page is supported.
4798 * 1 if page is supported / 0 if not
4800 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
/* Bound the scan by both the reported length and our array size. */
4804 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
4805 if (page0->page[i] == page)
4812 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4813 * @ipr_cmd: ipr command struct
4815 * This function sends a Page 3 inquiry to the adapter
4816 * to retrieve software VPD information.
4819 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4821 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4823 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4824 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
/* No VPD page 1 support implies the adapter has no write cache. */
4828 if (!ipr_inquiry_page_supported(page0, 1))
4829 ioa_cfg->cache_state = CACHE_NONE;
4831 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4833 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4834 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4835 sizeof(struct ipr_inquiry_page3));
4838 return IPR_RC_JOB_RETURN;
4842 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
4843 * @ipr_cmd: ipr command struct
4845 * This function sends a Page 0 inquiry to the adapter
4846 * to retrieve supported inquiry pages.
4849 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4851 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
4853 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4858 /* Grab the type out of the VPD and store it away */
/* First 4 chars of the product ID are the hex adapter type (e.g. 5702). */
4859 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4861 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4863 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4865 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
4866 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
4867 sizeof(struct ipr_inquiry_page0));
4870 return IPR_RC_JOB_RETURN;
4874 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4875 * @ipr_cmd: ipr command struct
4877 * This function sends a standard inquiry to the adapter.
4882 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4884 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4887 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
/* flags=0, page=0: standard (non-EVPD) inquiry into the shared VPD area. */
4889 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4890 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4891 sizeof(struct ipr_ioa_vpd));
4894 return IPR_RC_JOB_RETURN;
4898 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4899 * @ipr_cmd: ipr command struct
4901 * This function send an Identify Host Request Response Queue
4902 * command to establish the HRRQ with the adapter.
/* NOTE(review): "indentify" is a typo for "identify" in the function
 * name; not renamed here since external callers use this symbol. */
4907 static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4909 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4910 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4913 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4915 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4916 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* CDB bytes 2-5: HRRQ DMA address, big-endian; bytes 7-8: queue size. */
4918 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4919 ioarcb->cmd_pkt.cdb[2] =
4920 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4921 ioarcb->cmd_pkt.cdb[3] =
4922 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4923 ioarcb->cmd_pkt.cdb[4] =
4924 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4925 ioarcb->cmd_pkt.cdb[5] =
4926 ((u32) ioa_cfg->host_rrq_dma) & 0xff;
4927 ioarcb->cmd_pkt.cdb[7] =
4928 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4929 ioarcb->cmd_pkt.cdb[8] =
4930 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4932 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4934 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4937 return IPR_RC_JOB_RETURN;
4941 * ipr_reset_timer_done - Adapter reset timer function
4942 * @ipr_cmd: ipr command struct
4944 * Description: This function is used in adapter reset processing
4945 * for timing events. If the reset_cmd pointer in the IOA
4946 * config struct is not this adapter's we are doing nested
4947 * resets and fail_all_ops will take care of freeing the
4953 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
4955 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4956 unsigned long lock_flags = 0;
4958 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only advance the job if this command still owns the reset. */
4960 if (ioa_cfg->reset_cmd == ipr_cmd) {
4961 list_del(&ipr_cmd->queue);
4962 ipr_cmd->done(ipr_cmd);
4965 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4969 * ipr_reset_start_timer - Start a timer for adapter reset job
4970 * @ipr_cmd: ipr command struct
4971 * @timeout: timeout value
4973 * Description: This function is used in adapter reset processing
4974 * for timing events. If the reset_cmd pointer in the IOA
4975 * config struct is not this adapter's we are doing nested
4976 * resets and fail_all_ops will take care of freeing the
4982 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
4983 unsigned long timeout)
4985 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
4986 ipr_cmd->done = ipr_reset_ioa_job;
4988 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
4989 ipr_cmd->timer.expires = jiffies + timeout;
4990 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
4991 add_timer(&ipr_cmd->timer);
4995 * ipr_init_ioa_mem - Initialize ioa_cfg control block
4996 * @ioa_cfg: ioa cfg struct
5001 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5003 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5005 /* Initialize Host RRQ pointers */
5006 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5007 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5008 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5009 ioa_cfg->toggle_bit = 1;
5011 /* Zero out config table */
5012 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5016 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5017 * @ipr_cmd: ipr command struct
5019 * This function reinitializes some control blocks and
5020 * enables destructive diagnostics on the adapter.
5025 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5027 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5028 volatile u32 int_reg;
5031 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5032 ipr_init_ioa_mem(ioa_cfg);
5034 ioa_cfg->allow_interrupts = 1;
5035 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5037 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5038 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5039 ioa_cfg->regs.clr_interrupt_mask_reg);
5040 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5041 return IPR_RC_JOB_CONTINUE;
5044 /* Enable destructive diagnostics on IOA */
5045 writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
5047 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5048 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5050 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5052 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5053 ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5054 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5055 ipr_cmd->done = ipr_reset_ioa_job;
5056 add_timer(&ipr_cmd->timer);
5057 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5060 return IPR_RC_JOB_RETURN;
5064 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5065 * @ipr_cmd: ipr command struct
5067 * This function is invoked when an adapter dump has run out
5068 * of processing time.
5071 * IPR_RC_JOB_CONTINUE
5073 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5075 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5077 if (ioa_cfg->sdt_state == GET_DUMP)
5078 ioa_cfg->sdt_state = ABORT_DUMP;
5080 ipr_cmd->job_step = ipr_reset_alert;
5082 return IPR_RC_JOB_CONTINUE;
5086 * ipr_unit_check_no_data - Log a unit check/no data error log
5087 * @ioa_cfg: ioa config struct
5089 * Logs an error indicating the adapter unit checked, but for some
5090 * reason, we were unable to fetch the unit check buffer.
5095 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5097 ioa_cfg->errors_logged++;
5098 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5102 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5103 * @ioa_cfg: ioa config struct
5105 * Fetches the unit check buffer from the adapter by clocking the data
5106 * through the mailbox register.
5111 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5113 unsigned long mailbox;
5114 struct ipr_hostrcb *hostrcb;
5115 struct ipr_uc_sdt sdt;
5118 mailbox = readl(ioa_cfg->ioa_mailbox);
5120 if (!ipr_sdt_is_fmt2(mailbox)) {
5121 ipr_unit_check_no_data(ioa_cfg);
5125 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5126 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5127 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5129 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5130 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5131 ipr_unit_check_no_data(ioa_cfg);
5135 /* Find length of the first sdt entry (UC buffer) */
5136 length = (be32_to_cpu(sdt.entry[0].end_offset) -
5137 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5139 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5140 struct ipr_hostrcb, queue);
5141 list_del(&hostrcb->queue);
5142 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5144 rc = ipr_get_ldump_data_section(ioa_cfg,
5145 be32_to_cpu(sdt.entry[0].bar_str_offset),
5146 (__be32 *)&hostrcb->hcam,
5147 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5150 ipr_handle_log_data(ioa_cfg, hostrcb);
5152 ipr_unit_check_no_data(ioa_cfg);
5154 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5158 * ipr_reset_restore_cfg_space - Restore PCI config space.
5159 * @ipr_cmd: ipr command struct
5161 * Description: This function restores the saved PCI config space of
5162 * the adapter, fails all outstanding ops back to the callers, and
5163 * fetches the dump/unit check if applicable to this reset.
5166 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5168 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5170 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5174 pci_unblock_user_cfg_access(ioa_cfg->pdev);
5175 rc = pci_restore_state(ioa_cfg->pdev);
5177 if (rc != PCIBIOS_SUCCESSFUL) {
5178 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5179 return IPR_RC_JOB_CONTINUE;
5182 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5183 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5184 return IPR_RC_JOB_CONTINUE;
5187 ipr_fail_all_ops(ioa_cfg);
5189 if (ioa_cfg->ioa_unit_checked) {
5190 ioa_cfg->ioa_unit_checked = 0;
5191 ipr_get_unit_check_buffer(ioa_cfg);
5192 ipr_cmd->job_step = ipr_reset_alert;
5193 ipr_reset_start_timer(ipr_cmd, 0);
5194 return IPR_RC_JOB_RETURN;
5197 if (ioa_cfg->in_ioa_bringdown) {
5198 ipr_cmd->job_step = ipr_ioa_bringdown_done;
5200 ipr_cmd->job_step = ipr_reset_enable_ioa;
5202 if (GET_DUMP == ioa_cfg->sdt_state) {
5203 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5204 ipr_cmd->job_step = ipr_reset_wait_for_dump;
5205 schedule_work(&ioa_cfg->work_q);
5206 return IPR_RC_JOB_RETURN;
5211 return IPR_RC_JOB_CONTINUE;
5215 * ipr_reset_start_bist - Run BIST on the adapter.
5216 * @ipr_cmd: ipr command struct
5218 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5221 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5223 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5225 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5229 pci_block_user_cfg_access(ioa_cfg->pdev);
5230 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5232 if (rc != PCIBIOS_SUCCESSFUL) {
5233 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5234 rc = IPR_RC_JOB_CONTINUE;
5236 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5237 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5238 rc = IPR_RC_JOB_RETURN;
5246 * ipr_reset_allowed - Query whether or not IOA can be reset
5247 * @ioa_cfg: ioa config struct
5250 * 0 if reset not allowed / non-zero if reset is allowed
5252 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5254 volatile u32 temp_reg;
5256 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5257 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5261 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5262 * @ipr_cmd: ipr command struct
5264 * Description: This function waits for adapter permission to run BIST,
5265 * then runs BIST. If the adapter does not give permission after a
5266 * reasonable time, we will reset the adapter anyway. The impact of
5267 * resetting the adapter without warning the adapter is the risk of
5268 * losing the persistent error log on the adapter. If the adapter is
5269 * reset while it is writing to the flash on the adapter, the flash
5270 * segment will have bad ECC and be zeroed.
5273 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5275 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5277 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5278 int rc = IPR_RC_JOB_RETURN;
5280 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5281 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5282 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5284 ipr_cmd->job_step = ipr_reset_start_bist;
5285 rc = IPR_RC_JOB_CONTINUE;
5292 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5293 * @ipr_cmd: ipr command struct
5295 * Description: This function alerts the adapter that it will be reset.
5296 * If memory space is not currently enabled, proceed directly
5297 * to running BIST on the adapter. The timer must always be started
5298 * so we guarantee we do not run BIST from ipr_isr.
5303 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5305 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5310 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5312 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5313 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5314 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5315 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5317 ipr_cmd->job_step = ipr_reset_start_bist;
5320 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5321 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5324 return IPR_RC_JOB_RETURN;
5328 * ipr_reset_ucode_download_done - Microcode download completion
5329 * @ipr_cmd: ipr command struct
5331 * Description: This function unmaps the microcode download buffer.
5334 * IPR_RC_JOB_CONTINUE
5336 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5339 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5341 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5342 sglist->num_sg, DMA_TO_DEVICE);
5344 ipr_cmd->job_step = ipr_reset_alert;
5345 return IPR_RC_JOB_CONTINUE;
5349 * ipr_reset_ucode_download - Download microcode to the adapter
5350 * @ipr_cmd: ipr command struct
5352 * Description: This function checks to see if it there is microcode
5353 * to download to the adapter. If there is, a download is performed.
5356 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5358 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5360 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5361 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5364 ipr_cmd->job_step = ipr_reset_alert;
5367 return IPR_RC_JOB_CONTINUE;
5369 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5370 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5371 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5372 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5373 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5374 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5375 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5377 ipr_build_ucode_ioadl(ipr_cmd, sglist);
5378 ipr_cmd->job_step = ipr_reset_ucode_download_done;
5380 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5381 IPR_WRITE_BUFFER_TIMEOUT);
5384 return IPR_RC_JOB_RETURN;
5388 * ipr_reset_shutdown_ioa - Shutdown the adapter
5389 * @ipr_cmd: ipr command struct
5391 * Description: This function issues an adapter shutdown of the
5392 * specified type to the specified adapter as part of the
5393 * adapter reset job.
5396 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5398 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5401 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5402 unsigned long timeout;
5403 int rc = IPR_RC_JOB_CONTINUE;
5406 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5407 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5408 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5409 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5410 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5412 if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5413 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5414 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5415 timeout = IPR_INTERNAL_TIMEOUT;
5417 timeout = IPR_SHUTDOWN_TIMEOUT;
5419 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5421 rc = IPR_RC_JOB_RETURN;
5422 ipr_cmd->job_step = ipr_reset_ucode_download;
5424 ipr_cmd->job_step = ipr_reset_alert;
5431 * ipr_reset_ioa_job - Adapter reset job
5432 * @ipr_cmd: ipr command struct
5434 * Description: This function is the job router for the adapter reset job.
5439 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5442 unsigned long scratch = ipr_cmd->u.scratch;
5443 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5446 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5448 if (ioa_cfg->reset_cmd != ipr_cmd) {
5450 * We are doing nested adapter resets and this is
5451 * not the current reset job.
5453 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5457 if (IPR_IOASC_SENSE_KEY(ioasc)) {
5458 dev_err(&ioa_cfg->pdev->dev,
5459 "0x%02X failed with IOASC: 0x%08X\n",
5460 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5462 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5463 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5467 ipr_reinit_ipr_cmnd(ipr_cmd);
5468 ipr_cmd->u.scratch = scratch;
5469 rc = ipr_cmd->job_step(ipr_cmd);
5470 } while(rc == IPR_RC_JOB_CONTINUE);
5474 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5475 * @ioa_cfg: ioa config struct
5476 * @job_step: first job step of reset job
5477 * @shutdown_type: shutdown type
5479 * Description: This function will initiate the reset of the given adapter
5480 * starting at the selected job step.
5481 * If the caller needs to wait on the completion of the reset,
5482 * the caller must sleep on the reset_wait_q.
5487 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5488 int (*job_step) (struct ipr_cmnd *),
5489 enum ipr_shutdown_type shutdown_type)
5491 struct ipr_cmnd *ipr_cmd;
5493 ioa_cfg->in_reset_reload = 1;
5494 ioa_cfg->allow_cmds = 0;
5495 scsi_block_requests(ioa_cfg->host);
5497 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5498 ioa_cfg->reset_cmd = ipr_cmd;
5499 ipr_cmd->job_step = job_step;
5500 ipr_cmd->u.shutdown_type = shutdown_type;
5502 ipr_reset_ioa_job(ipr_cmd);
5506 * ipr_initiate_ioa_reset - Initiate an adapter reset
5507 * @ioa_cfg: ioa config struct
5508 * @shutdown_type: shutdown type
5510 * Description: This function will initiate the reset of the given adapter.
5511 * If the caller needs to wait on the completion of the reset,
5512 * the caller must sleep on the reset_wait_q.
5517 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5518 enum ipr_shutdown_type shutdown_type)
5520 if (ioa_cfg->ioa_is_dead)
5523 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5524 ioa_cfg->sdt_state = ABORT_DUMP;
5526 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5527 dev_err(&ioa_cfg->pdev->dev,
5528 "IOA taken offline - error recovery failed\n");
5530 ioa_cfg->reset_retries = 0;
5531 ioa_cfg->ioa_is_dead = 1;
5533 if (ioa_cfg->in_ioa_bringdown) {
5534 ioa_cfg->reset_cmd = NULL;
5535 ioa_cfg->in_reset_reload = 0;
5536 ipr_fail_all_ops(ioa_cfg);
5537 wake_up_all(&ioa_cfg->reset_wait_q);
5539 spin_unlock_irq(ioa_cfg->host->host_lock);
5540 scsi_unblock_requests(ioa_cfg->host);
5541 spin_lock_irq(ioa_cfg->host->host_lock);
5544 ioa_cfg->in_ioa_bringdown = 1;
5545 shutdown_type = IPR_SHUTDOWN_NONE;
5549 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5554 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5555 * @ioa_cfg: ioa cfg struct
5557 * Description: This is the second phase of adapter intialization
5558 * This function takes care of initilizing the adapter to the point
5559 * where it can accept new commands.
5562 * 0 on sucess / -EIO on failure
/*
 * NOTE(review): several lines of this function (the rc declaration and
 * its error assignments, plus the closing return) appear to have been
 * dropped from this copy of the file -- verify against upstream ipr.c.
 */
5564 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5567 unsigned long host_lock_flags = 0;
5570 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5571 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
/* Kick off adapter init with ipr_reset_enable_ioa as the first step */
5572 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5574 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
/* Block until the reset/reload sequence completes */
5575 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5576 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5578 if (ioa_cfg->ioa_is_dead) {
/* presumably rc = -EIO here (dropped line) -- confirm upstream */
5580 } else if (ipr_invalid_adapter(ioa_cfg)) {
5584 dev_err(&ioa_cfg->pdev->dev,
5585 "Adapter not supported in this hardware configuration.\n");
5588 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5595 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5596 * @ioa_cfg: ioa config struct
5601 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5605 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5606 if (ioa_cfg->ipr_cmnd_list[i])
5607 pci_pool_free(ioa_cfg->ipr_cmd_pool,
5608 ioa_cfg->ipr_cmnd_list[i],
5609 ioa_cfg->ipr_cmnd_list_dma[i]);
5611 ioa_cfg->ipr_cmnd_list[i] = NULL;
5614 if (ioa_cfg->ipr_cmd_pool)
5615 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
5617 ioa_cfg->ipr_cmd_pool = NULL;
5621 * ipr_free_mem - Frees memory allocated for an adapter
5622 * @ioa_cfg: ioa cfg struct
5627 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5631 kfree(ioa_cfg->res_entries);
5632 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5633 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5634 ipr_free_cmd_blks(ioa_cfg);
5635 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5636 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5637 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5639 ioa_cfg->cfg_table_dma);
5641 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5642 pci_free_consistent(ioa_cfg->pdev,
5643 sizeof(struct ipr_hostrcb),
5644 ioa_cfg->hostrcb[i],
5645 ioa_cfg->hostrcb_dma[i]);
5648 ipr_free_dump(ioa_cfg);
5649 kfree(ioa_cfg->saved_mode_pages);
5650 kfree(ioa_cfg->trace);
5654 * ipr_free_all_resources - Free all allocated resources for an adapter.
5655 * @ipr_cmd: ipr command struct
5657 * This function frees all allocated resources for the
5658 * specified adapter.
5663 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5665 struct pci_dev *pdev = ioa_cfg->pdev;
5668 free_irq(pdev->irq, ioa_cfg);
5669 iounmap(ioa_cfg->hdw_dma_regs);
5670 pci_release_regions(pdev);
5671 ipr_free_mem(ioa_cfg);
5672 scsi_host_put(ioa_cfg->host);
5673 pci_disable_device(pdev);
5678 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5679 * @ioa_cfg: ioa config struct
5682 * 0 on success / -ENOMEM on allocation failure
5684 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5686 struct ipr_cmnd *ipr_cmd;
5687 struct ipr_ioarcb *ioarcb;
5688 dma_addr_t dma_addr;
5691 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
5692 sizeof(struct ipr_cmnd), 8, 0);
5694 if (!ioa_cfg->ipr_cmd_pool)
5697 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5698 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5701 ipr_free_cmd_blks(ioa_cfg);
5705 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5706 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5707 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5709 ioarcb = &ipr_cmd->ioarcb;
5710 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5711 ioarcb->host_response_handle = cpu_to_be32(i << 2);
5712 ioarcb->write_ioadl_addr =
5713 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5714 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5715 ioarcb->ioasa_host_pci_addr =
5716 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5717 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5718 ipr_cmd->cmd_index = i;
5719 ipr_cmd->ioa_cfg = ioa_cfg;
5720 ipr_cmd->sense_buffer_dma = dma_addr +
5721 offsetof(struct ipr_cmnd, sense_buffer);
5723 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5730 * ipr_alloc_mem - Allocate memory for an adapter
5731 * @ioa_cfg: ioa config struct
5734 * 0 on success / non-zero for error
/*
 * NOTE(review): allocates, in order: resource entries, misc VPD control
 * blocks, command blocks, host RRQ, config table, HCAM hostrcbs, and
 * the trace buffer; errors unwind through the goto chain below. Some
 * label and return lines appear to have been dropped from this copy --
 * verify the goto targets against upstream ipr.c before building.
 */
5736 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5738 struct pci_dev *pdev = ioa_cfg->pdev;
5739 int i, rc = -ENOMEM;
5742 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
5743 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5745 if (!ioa_cfg->res_entries)
5748 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5749 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5751 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5752 sizeof(struct ipr_misc_cbs),
5753 &ioa_cfg->vpd_cbs_dma);
5755 if (!ioa_cfg->vpd_cbs)
5756 goto out_free_res_entries;
5758 if (ipr_alloc_cmd_blks(ioa_cfg))
5759 goto out_free_vpd_cbs;
5761 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5762 sizeof(u32) * IPR_NUM_CMD_BLKS,
5763 &ioa_cfg->host_rrq_dma);
5765 if (!ioa_cfg->host_rrq)
5766 goto out_ipr_free_cmd_blocks;
5768 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5769 sizeof(struct ipr_config_table),
5770 &ioa_cfg->cfg_table_dma);
5772 if (!ioa_cfg->cfg_table)
5773 goto out_free_host_rrq;
5775 for (i = 0; i < IPR_NUM_HCAMS; i++) {
5776 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5777 sizeof(struct ipr_hostrcb),
5778 &ioa_cfg->hostrcb_dma[i]);
5780 if (!ioa_cfg->hostrcb[i])
5781 goto out_free_hostrcb_dma;
/* hostrcb_dma points at the hcam payload within the DMA buffer */
5783 ioa_cfg->hostrcb[i]->hostrcb_dma =
5784 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5785 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5788 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
5789 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5791 if (!ioa_cfg->trace)
5792 goto out_free_hostrcb_dma;
/* Error unwind: free in reverse order of allocation */
5799 out_free_hostrcb_dma:
5801 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5802 ioa_cfg->hostrcb[i],
5803 ioa_cfg->hostrcb_dma[i]);
5805 pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5806 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5808 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5809 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5810 out_ipr_free_cmd_blocks:
5811 ipr_free_cmd_blks(ioa_cfg);
5813 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5814 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5815 out_free_res_entries:
5816 kfree(ioa_cfg->res_entries);
5821 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5822 * @ioa_cfg: ioa config struct
5827 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5831 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5832 ioa_cfg->bus_attr[i].bus = i;
5833 ioa_cfg->bus_attr[i].qas_enabled = 0;
5834 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5835 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5836 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5838 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5843 * ipr_init_ioa_cfg - Initialize IOA config struct
5844 * @ioa_cfg: ioa config struct
5845 * @host: scsi host struct
5846 * @pdev: PCI dev struct
5851 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5852 struct Scsi_Host *host, struct pci_dev *pdev)
5854 const struct ipr_interrupt_offsets *p;
5855 struct ipr_interrupts *t;
5858 ioa_cfg->host = host;
5859 ioa_cfg->pdev = pdev;
5860 ioa_cfg->log_level = ipr_log_level;
5861 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5862 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5863 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5864 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5865 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5866 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5867 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5868 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5870 INIT_LIST_HEAD(&ioa_cfg->free_q);
5871 INIT_LIST_HEAD(&ioa_cfg->pending_q);
5872 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5873 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5874 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5875 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5876 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5877 init_waitqueue_head(&ioa_cfg->reset_wait_q);
5878 ioa_cfg->sdt_state = INACTIVE;
5879 if (ipr_enable_cache)
5880 ioa_cfg->cache_state = CACHE_ENABLED;
5882 ioa_cfg->cache_state = CACHE_DISABLED;
5884 ipr_initialize_bus_attr(ioa_cfg);
5886 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5887 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5888 host->max_channel = IPR_MAX_BUS_TO_SCAN;
5889 host->unique_id = host->host_no;
5890 host->max_cmd_len = IPR_MAX_CDB_LEN;
5891 pci_set_drvdata(pdev, ioa_cfg);
5893 p = &ioa_cfg->chip_cfg->regs;
5895 base = ioa_cfg->hdw_dma_regs;
5897 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5898 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5899 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5900 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5901 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5902 t->ioarrin_reg = base + p->ioarrin_reg;
5903 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5904 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5905 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5909 * ipr_get_chip_cfg - Find adapter chip configuration
5910 * @dev_id: PCI device id struct
5913 * ptr to chip config on success / NULL on failure
5915 static const struct ipr_chip_cfg_t * __devinit
5916 ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5920 if (dev_id->driver_data)
5921 return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5923 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5924 if (ipr_chip[i].vendor == dev_id->vendor &&
5925 ipr_chip[i].device == dev_id->device)
5926 return ipr_chip[i].cfg;
5931 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5932 * @pdev: PCI device struct
5933 * @dev_id: PCI device id struct
5936 * 0 on success / non-zero on failure
/*
 * NOTE(review): many error-path lines (goto targets, rc assignments,
 * closing braces) appear to have been dropped from this copy -- verify
 * the full error unwind against upstream ipr.c before building.
 */
5938 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
5939 const struct pci_device_id *dev_id)
5941 struct ipr_ioa_cfg *ioa_cfg;
5942 struct Scsi_Host *host;
5943 unsigned long ipr_regs_pci;
5944 void __iomem *ipr_regs;
5945 u32 rc = PCIBIOS_SUCCESSFUL;
5949 if ((rc = pci_enable_device(pdev))) {
5950 dev_err(&pdev->dev, "Cannot enable adapter\n");
5954 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg lives in the SCSI host's hostdata area */
5956 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
5959 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
5964 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
5965 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
5967 ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
5969 if (!ioa_cfg->chip_cfg) {
5970 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
5971 dev_id->vendor, dev_id->device);
5972 goto out_scsi_host_put;
5975 ipr_regs_pci = pci_resource_start(pdev, 0);
5977 rc = pci_request_regions(pdev, IPR_NAME);
5980 "Couldn't register memory range of registers\n");
5981 goto out_scsi_host_put;
5984 ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
5988 "Couldn't map memory range of registers\n");
5990 goto out_release_regions;
5993 ioa_cfg->hdw_dma_regs = ipr_regs;
5994 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
/* mailbox offset is chip specific */
5995 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
5997 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
5999 pci_set_master(pdev);
6001 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6003 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6007 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6008 ioa_cfg->chip_cfg->cache_line_size);
6010 if (rc != PCIBIOS_SUCCESSFUL) {
6011 dev_err(&pdev->dev, "Write of cache line size failed\n");
6016 /* Save away PCI config space for use following IOA reset */
6017 rc = pci_save_state(pdev);
6019 if (rc != PCIBIOS_SUCCESSFUL) {
6020 dev_err(&pdev->dev, "Failed to save PCI config space\n");
6025 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6028 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6031 rc = ipr_alloc_mem(ioa_cfg);
6034 "Couldn't allocate enough memory for device driver!\n");
/* Mask everything except the transition-to-operational interrupt */
6038 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6039 rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6042 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Register this adapter on the global driver list */
6047 spin_lock(&ipr_driver_lock);
6048 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6049 spin_unlock(&ipr_driver_lock);
/* Error unwind */
6056 ipr_free_mem(ioa_cfg);
6059 out_release_regions:
6060 pci_release_regions(pdev);
6062 scsi_host_put(host);
6064 pci_disable_device(pdev);
6069 * ipr_scan_vsets - Scans for VSET devices
6070 * @ioa_cfg: ioa config struct
6072 * Description: Since the VSET resources do not follow SAM in that we can have
6073 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
6078 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6082 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6083 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6084 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6088 * ipr_initiate_ioa_bringdown - Bring down an adapter
6089 * @ioa_cfg: ioa config struct
6090 * @shutdown_type: shutdown type
6092 * Description: This function will initiate bringing down the adapter.
6093 * This consists of issuing an IOA shutdown to the adapter
6094 * to flush the cache, and running BIST.
6095 * If the caller needs to wait on the completion of the reset,
6096 * the caller must sleep on the reset_wait_q.
6101 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6102 enum ipr_shutdown_type shutdown_type)
6105 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6106 ioa_cfg->sdt_state = ABORT_DUMP;
6107 ioa_cfg->reset_retries = 0;
6108 ioa_cfg->in_ioa_bringdown = 1;
6109 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6114 * __ipr_remove - Remove a single adapter
6115 * @pdev: pci device struct
6117 * Adapter hot plug remove entry point.
6122 static void __ipr_remove(struct pci_dev *pdev)
6124 unsigned long host_lock_flags = 0;
6125 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6128 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6129 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6131 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6132 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6133 flush_scheduled_work();
6134 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6136 spin_lock(&ipr_driver_lock);
6137 list_del(&ioa_cfg->queue);
6138 spin_unlock(&ipr_driver_lock);
6140 if (ioa_cfg->sdt_state == ABORT_DUMP)
6141 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6142 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6144 ipr_free_all_resources(ioa_cfg);
6150 * ipr_remove - IOA hot plug remove entry point
6151 * @pdev: pci device struct
6153 * Adapter hot plug remove entry point.
/*
 * NOTE(review): removes the sysfs trace/dump files, unregisters the
 * SCSI host, then (presumably via __ipr_remove, dropped here) tears
 * the adapter down. The second arguments of the two remove_*_file
 * calls were lost in extraction -- confirm against upstream ipr.c.
 */
6158 static void ipr_remove(struct pci_dev *pdev)
6160 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6164 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6166 ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6168 scsi_remove_host(ioa_cfg->host);
6176 * ipr_probe - Adapter hot plug add entry point
6179 * 0 on success / non-zero on failure
6181 static int __devinit ipr_probe(struct pci_dev *pdev,
6182 const struct pci_device_id *dev_id)
6184 struct ipr_ioa_cfg *ioa_cfg;
6187 rc = ipr_probe_ioa(pdev, dev_id);
6192 ioa_cfg = pci_get_drvdata(pdev);
6193 rc = ipr_probe_ioa_part2(ioa_cfg);
6200 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6207 rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6211 scsi_remove_host(ioa_cfg->host);
6216 rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6220 ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6222 scsi_remove_host(ioa_cfg->host);
6227 scsi_scan_host(ioa_cfg->host);
6228 ipr_scan_vsets(ioa_cfg);
6229 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6230 ioa_cfg->allow_ml_add_del = 1;
6231 ioa_cfg->host->max_channel = IPR_VSET_BUS;
6232 schedule_work(&ioa_cfg->work_q);
6237 * ipr_shutdown - Shutdown handler.
6238 * @pdev: pci device struct
6240 * This function is invoked upon system shutdown/reboot. It will issue
6241 * an adapter shutdown to the adapter to flush the write cache.
6246 static void ipr_shutdown(struct pci_dev *pdev)
6248 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6249 unsigned long lock_flags = 0;
6251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6252 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6254 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6257 static struct pci_device_id ipr_pci_table[] __devinitdata = {
6258 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6259 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6260 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6261 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6262 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6263 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6264 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6265 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6266 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6267 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6268 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6269 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6270 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6271 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6272 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6273 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6274 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6275 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6276 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6277 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6278 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6279 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6280 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6281 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6282 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6283 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6284 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6287 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6289 static struct pci_driver ipr_driver = {
6291 .id_table = ipr_pci_table,
6293 .remove = ipr_remove,
6294 .shutdown = ipr_shutdown,
6298 * ipr_init - Module entry point
6301 * 0 on success / negative value on failure
6303 static int __init ipr_init(void)
6305 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6306 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6308 return pci_module_init(&ipr_driver);
6312 * ipr_exit - Module unload
6314 * Module unload entry point.
6319 static void __exit ipr_exit(void)
6321 pci_unregister_driver(&ipr_driver);
6324 module_init(ipr_init);
6325 module_exit(ipr_exit);