[SCSI] zfcp: fix deadlock caused by shared work queue tasks
author Swen Schillig <swen@vnet.ibm.com>
Wed, 1 Oct 2008 10:42:22 +0000 (12:42 +0200)
committer James Bottomley <James.Bottomley@HansenPartnership.com>
Fri, 3 Oct 2008 17:11:55 +0000 (12:11 -0500)
Each adapter reopen automatically triggers a scan_port task, which
waits for the ERP to finish before doing any further processing.
Since the initial device setup enqueues adapter, port and LUN as
individual ERP actions, this processing would only start once
everything else is done. Unfortunately, the port_reopen requires yet
another scheduled work item to finish, and that item is queued after
the automatic scan_port -> deadlock!

This fix creates a dedicated work queue for ERP-based nameserver requests.
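
For illustration only, here is a minimal sketch (not part of the patch;
the module and all names such as demo_wq, scan_work_fn and erp_work_fn
are hypothetical) of why two dependent work items must not share one
single-threaded queue, and how a dedicated queue, as introduced here
with zfcp_data.work_queue, breaks the dependency:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

static struct workqueue_struct *demo_wq;	/* dedicated queue */
static DECLARE_COMPLETION(erp_done);

/* Stands in for scan_port: blocks until the "ERP" signals completion. */
static void scan_work_fn(struct work_struct *work)
{
	wait_for_completion(&erp_done);
}

/* Stands in for the ERP-triggered work (e.g. gid_pn_work). */
static void erp_work_fn(struct work_struct *work)
{
	complete(&erp_done);
}

static DECLARE_WORK(scan_work, scan_work_fn);
static DECLARE_WORK(erp_work, erp_work_fn);

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	/*
	 * Deadlocking variant: both items on the same single-threaded
	 * queue.  scan_work runs first, blocks its worker thread, and
	 * erp_work queued behind it can never run:
	 *
	 *	queue_work(demo_wq, &scan_work);
	 *	queue_work(demo_wq, &erp_work);
	 *
	 * Working variant: the dependency runs on a separate queue, so
	 * it is not stuck behind the blocked item.
	 */
	schedule_work(&scan_work);		/* shared kernel queue */
	queue_work(demo_wq, &erp_work);		/* dedicated queue */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

This mirrors the situation described above: with only the shared queue,
the blocked scan_port item starves the very work it is waiting for.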

Signed-off-by: Swen Schillig <swen@vnet.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_fsf.c

index b9984648aca6ed696a270eee76985f7950b01096..05f3de64f6a39bb22c4a1bd39955be899e520bb7 100644 (file)
@@ -168,6 +168,8 @@ static int __init zfcp_module_init(void)
        if (!zfcp_data.gid_pn_cache)
                goto out_gid_cache;
 
+       zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
+
        INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
        sema_init(&zfcp_data.config_sema, 1);
        rwlock_init(&zfcp_data.config_lock);
index 7fa8937c8c58d52a5377d874567912514c009f66..73eb41580f25c40fe3dc41861703de5c9f9f2bb8 100644 (file)
@@ -602,6 +602,7 @@ struct zfcp_data {
        struct kmem_cache       *fsf_req_qtcb_cache;
        struct kmem_cache       *sr_buffer_cache;
        struct kmem_cache       *gid_pn_cache;
+       struct workqueue_struct *work_queue;
 };
 
 /* struct used by memory pools for fsf_requests */
index 174b38fe7623fcd35e73598436b159c51f4ceea8..f5ebeb7ca2be5baa72f8b5f58c69a017fb89b9d6 100644 (file)
@@ -869,7 +869,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
                if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
                        return zfcp_erp_open_ptp_port(act);
                if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
-                       schedule_work(&port->gid_pn_work);
+                       queue_work(zfcp_data.work_queue, &port->gid_pn_work);
                        return ZFCP_ERP_CONTINUES;
                }
        case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
@@ -1209,7 +1209,7 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
        atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
        INIT_WORK(&p->work, zfcp_erp_scsi_scan);
        p->unit = unit;
-       schedule_work(&p->work);
+       queue_work(zfcp_data.work_queue, &p->work);
 }
 
 static void zfcp_erp_rport_register(struct zfcp_port *port)
index f073fff0868fa03f5df3ea20dd7255612fc4a2a8..600ef5711acb6a36050c490d0995c668e97b710a 100644 (file)
@@ -329,7 +329,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
        zfcp_fsf_req_free(req);
 
        atomic_inc(&adapter->stat_miss);
-       schedule_work(&adapter->stat_work);
+       queue_work(zfcp_data.work_queue, &adapter->stat_work);
 }
 
 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)