/* drivers/scsi/qla2xxx/qla_target.c */
1 /*
2  *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3  *
4  *  based on qla2x00t.c code:
5  *
6  *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7  *  Copyright (C) 2004 - 2005 Leonid Stoljar
8  *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9  *  Copyright (C) 2006 - 2010 ID7 Ltd.
10  *
11  *  Forward port and refactoring to modern qla2xxx and target/configfs
12  *
13  *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
14  *
15  *  This program is free software; you can redistribute it and/or
16  *  modify it under the terms of the GNU General Public License
17  *  as published by the Free Software Foundation, version 2
18  *  of the License.
19  *
20  *  This program is distributed in the hope that it will be useful,
21  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
22  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23  *  GNU General Public License for more details.
24  */
25
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/blkdev.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/delay.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <asm/unaligned.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41
42 #include "qla_def.h"
43 #include "qla_target.h"
44
45 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
46 module_param(qlini_mode, charp, S_IRUGO);
47 MODULE_PARM_DESC(qlini_mode,
48         "Determines when initiator mode will be enabled. Possible values: "
49         "\"exclusive\" - initiator mode will be enabled on load, "
50         "disabled on enabling target mode and then on disabling target mode "
51         "enabled back; "
52         "\"disabled\" - initiator mode will never be enabled; "
53         "\"enabled\" (default) - initiator mode will always stay enabled.");
54
55 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
56
/*
 * From scsi/fc/fc_fcp.h
 *
 * FCP RSP_CODE values carried in the FCP_RSP_INFO field, used when
 * building task-management responses.  Values are defined by the
 * FC-FCP specification (mirrored here from scsi/fc/fc_fcp.h).
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,		/* TMF completed successfully */
	FCP_DATA_LEN_INVALID = 1,	/* burst length mismatch */
	FCP_CMND_FIELDS_INVALID = 2,	/* reserved FCP_CMND field set */
	FCP_DATA_PARAM_MISMATCH = 3,	/* data ro/length mismatch */
	FCP_TMF_REJECTED = 4,		/* TMF not supported/rejected */
	FCP_TMF_FAILED = 5,		/* TMF failed */
	FCP_TMF_INVALID_LUN = 9,	/* TMF addressed unknown LUN */
};
69
70 /*
71  * fc_pri_ta from scsi/fc/fc_fcp.h
72  */
73 #define FCP_PTA_SIMPLE      0   /* simple task attribute */
74 #define FCP_PTA_HEADQ       1   /* head of queue task attribute */
75 #define FCP_PTA_ORDERED     2   /* ordered task attribute */
76 #define FCP_PTA_ACA         4   /* auto. contingent allegiance */
77 #define FCP_PTA_MASK        7   /* mask for task attribute field */
78 #define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
79 #define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
80
81 /*
82  * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
83  * must be called under HW lock and could unlock/lock it inside.
84  * It isn't an issue, since in the current implementation on the time when
85  * those functions are called:
86  *
87  *   - Either context is IRQ and only IRQ handler can modify HW data,
88  *     including rings related fields,
89  *
90  *   - Or access to target mode variables from struct qla_tgt doesn't
91  *     cross those functions boundaries, except tgt_stop, which
92  *     additionally protected by irq_cmd_count.
93  */
94 /* Predefs for callbacks handed to qla2xxx LLD */
95 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
96         struct atio_from_isp *pkt);
97 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
98 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
99         int fn, void *iocb, int flags);
100 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
101         *cmd, struct atio_from_isp *atio, int ha_locked);
102 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
103         struct qla_tgt_srr_imm *imm, int ha_lock);
104 /*
105  * Global Variables
106  */
107 static struct kmem_cache *qla_tgt_cmd_cachep;
108 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
109 static mempool_t *qla_tgt_mgmt_cmd_mempool;
110 static struct workqueue_struct *qla_tgt_wq;
111 static DEFINE_MUTEX(qla_tgt_mutex);
112 static LIST_HEAD(qla_tgt_glist);
113
114 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
115 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
116         struct qla_tgt *tgt,
117         const uint8_t *port_name)
118 {
119         struct qla_tgt_sess *sess;
120
121         list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
122                 if (!memcmp(sess->port_name, port_name, WWN_SIZE))
123                         return sess;
124         }
125
126         return NULL;
127 }
128
129 /* Might release hw lock, then reaquire!! */
130 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
131 {
132         /* Send marker if required */
133         if (unlikely(vha->marker_needed != 0)) {
134                 int rc = qla2x00_issue_marker(vha, vha_locked);
135                 if (rc != QLA_SUCCESS) {
136                         ql_dbg(ql_dbg_tgt, vha, 0xe03d,
137                             "qla_target(%d): issue_marker() failed\n",
138                             vha->vp_idx);
139                 }
140                 return rc;
141         }
142         return QLA_SUCCESS;
143 }
144
145 static inline
146 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
147         uint8_t *d_id)
148 {
149         struct qla_hw_data *ha = vha->hw;
150         uint8_t vp_idx;
151
152         if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
153                 return NULL;
154
155         if (vha->d_id.b.al_pa == d_id[2])
156                 return vha;
157
158         BUG_ON(ha->tgt.tgt_vp_map == NULL);
159         vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
160         if (likely(test_bit(vp_idx, ha->vp_idx_map)))
161                 return ha->tgt.tgt_vp_map[vp_idx].vha;
162
163         return NULL;
164 }
165
166 static inline
167 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
168         uint16_t vp_idx)
169 {
170         struct qla_hw_data *ha = vha->hw;
171
172         if (vha->vp_idx == vp_idx)
173                 return vha;
174
175         BUG_ON(ha->tgt.tgt_vp_map == NULL);
176         if (likely(test_bit(vp_idx, ha->vp_idx_map)))
177                 return ha->tgt.tgt_vp_map[vp_idx].vha;
178
179         return NULL;
180 }
181
/*
 * Route an incoming ATIO packet to the scsi_qla_host (base host or NPIV
 * vport) it is addressed to, then hand it to qlt_24xx_atio_pkt() on that
 * host.  Packets that cannot be routed are logged and dropped.
 */
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		/* New SCSI command: route by destination FC address (d_id). */
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		/*
		 * A vp_index of 0xFF / nport_handle of 0xFFFF means the
		 * notify is not bound to a specific vport; keep the base
		 * host in that case.
		 */
		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}
235
236 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
237 {
238         switch (pkt->entry_type) {
239         case CTIO_TYPE7:
240         {
241                 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
242                 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
243                     entry->vp_index);
244                 if (unlikely(!host)) {
245                         ql_dbg(ql_dbg_tgt, vha, 0xe041,
246                             "qla_target(%d): Response pkt (CTIO_TYPE7) "
247                             "received, with unknown vp_index %d\n",
248                             vha->vp_idx, entry->vp_index);
249                         break;
250                 }
251                 qlt_response_pkt(host, pkt);
252                 break;
253         }
254
255         case IMMED_NOTIFY_TYPE:
256         {
257                 struct scsi_qla_host *host = vha;
258                 struct imm_ntfy_from_isp *entry =
259                     (struct imm_ntfy_from_isp *)pkt;
260
261                 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
262                 if (unlikely(!host)) {
263                         ql_dbg(ql_dbg_tgt, vha, 0xe042,
264                             "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
265                             "received, with unknown vp_index %d\n",
266                             vha->vp_idx, entry->u.isp24.vp_index);
267                         break;
268                 }
269                 qlt_response_pkt(host, pkt);
270                 break;
271         }
272
273         case NOTIFY_ACK_TYPE:
274         {
275                 struct scsi_qla_host *host = vha;
276                 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
277
278                 if (0xFF != entry->u.isp24.vp_index) {
279                         host = qlt_find_host_by_vp_idx(vha,
280                             entry->u.isp24.vp_index);
281                         if (unlikely(!host)) {
282                                 ql_dbg(ql_dbg_tgt, vha, 0xe043,
283                                     "qla_target(%d): Response "
284                                     "pkt (NOTIFY_ACK_TYPE) "
285                                     "received, with unknown "
286                                     "vp_index %d\n", vha->vp_idx,
287                                     entry->u.isp24.vp_index);
288                                 break;
289                         }
290                 }
291                 qlt_response_pkt(host, pkt);
292                 break;
293         }
294
295         case ABTS_RECV_24XX:
296         {
297                 struct abts_recv_from_24xx *entry =
298                     (struct abts_recv_from_24xx *)pkt;
299                 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
300                     entry->vp_index);
301                 if (unlikely(!host)) {
302                         ql_dbg(ql_dbg_tgt, vha, 0xe044,
303                             "qla_target(%d): Response pkt "
304                             "(ABTS_RECV_24XX) received, with unknown "
305                             "vp_index %d\n", vha->vp_idx, entry->vp_index);
306                         break;
307                 }
308                 qlt_response_pkt(host, pkt);
309                 break;
310         }
311
312         case ABTS_RESP_24XX:
313         {
314                 struct abts_resp_to_24xx *entry =
315                     (struct abts_resp_to_24xx *)pkt;
316                 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
317                     entry->vp_index);
318                 if (unlikely(!host)) {
319                         ql_dbg(ql_dbg_tgt, vha, 0xe045,
320                             "qla_target(%d): Response pkt "
321                             "(ABTS_RECV_24XX) received, with unknown "
322                             "vp_index %d\n", vha->vp_idx, entry->vp_index);
323                         break;
324                 }
325                 qlt_response_pkt(host, pkt);
326                 break;
327         }
328
329         default:
330                 qlt_response_pkt(vha, pkt);
331                 break;
332         }
333
334 }
335
/*
 * Deferred-work handler that finishes session teardown started by
 * qlt_unreg_sess(): releases the fabric-module nexus, frees the
 * qla_tgt_sess and wakes anyone waiting for the target's session
 * count to drop to zero (e.g. target stop).
 */
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	/* NOTE(review): this decrement/wakeup is not under hardware_lock
	 * here — presumably the waiter re-checks sess_count; confirm. */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}
363
/* ha->hardware_lock supposed to be held on entry */
/*
 * Unlink a session from the target's lists (and the deletion list, if it
 * was scheduled for deletion) and defer the actual free to
 * qlt_free_session_done() via schedule_work(), since freeing involves
 * fabric-module callbacks that must not run under the hw lock.
 */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	/* If queued on del_sess_list, drop it from there too. */
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
379
/* ha->hardware_lock supposed to be held on entry */
/*
 * Handle a reset-type immediate notify: locate the session addressed by
 * the IOCB's nport_handle and issue the requested task-management
 * function (mcmd) against it.
 *
 * The iocb is viewed both as an imm_ntfy_from_isp (for the loop id) and
 * as an atio_from_isp (for the LUN) — the two layouts alias the same
 * firmware entry.
 *
 * Returns 0 on success, -ESRCH if no matching session exists, or the
 * return of qlt_issue_task_mgmt().
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* 0xFFFF addresses all sessions (a global event); this
		 * path is currently disabled. */
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port "
	    "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
	    "mcmd %x, loop_id %d)\n", vha->host_no, sess,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
448
449 /* ha->hardware_lock supposed to be held on entry */
450 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
451         bool immediate)
452 {
453         struct qla_tgt *tgt = sess->tgt;
454         uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
455
456         if (sess->deleted)
457                 return;
458
459         ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
460             "Scheduling sess %p for deletion\n", sess);
461         list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
462         sess->deleted = 1;
463
464         if (immediate)
465                 dev_loss_tmo = 0;
466
467         sess->expires = jiffies + dev_loss_tmo * HZ;
468
469         ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
470             "qla_target(%d): session for port %02x:%02x:%02x:"
471             "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
472             "deletion in %u secs (expires: %lu) immed: %d\n",
473             sess->vha->vp_idx,
474             sess->port_name[0], sess->port_name[1],
475             sess->port_name[2], sess->port_name[3],
476             sess->port_name[4], sess->port_name[5],
477             sess->port_name[6], sess->port_name[7],
478             sess->loop_id, dev_loss_tmo, sess->expires, immediate);
479
480         if (immediate)
481                 schedule_delayed_work(&tgt->sess_del_work, 0);
482         else
483                 schedule_delayed_work(&tgt->sess_del_work,
484                     jiffies - sess->expires);
485 }
486
/* ha->hardware_lock supposed to be held on entry */
/*
 * Schedule every session of the target for immediate deletion.
 * Safe to iterate without _safe variant: scheduling only adds the
 * session to del_sess_list, it does not unlink it from sess_list.
 *
 * NOTE(review): the local_only parameter is currently unused in this
 * function body — presumably intended to restrict the sweep to local
 * sessions; confirm against callers.
 */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}
497
/*
 * Resolve the firmware loop id for the port with FC address s_id
 * (s_id[0]=domain, s_id[1]=area, s_id[2]=al_pa) by fetching the
 * firmware's list of logged-in devices and scanning it.
 *
 * Returns 0 and stores the id in *loop_id on success, -ENOMEM if the
 * DMA buffer cannot be allocated, or -1 if the mailbox command fails
 * or no matching entry exists.
 */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	/* Entries are variable-size: step by ha->gid_list_info_size,
	 * not sizeof(*gid). */
	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
546
547 static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
548         struct qla_tgt_sess *sess)
549 {
550         struct qla_hw_data *ha = vha->hw;
551         struct qla_port_24xx_data *pmap24;
552         bool res, found = false;
553         int rc, i;
554         uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
555         uint16_t entries;
556         void *pmap;
557         int pmap_len;
558         fc_port_t *fcport;
559         int global_resets;
560         unsigned long flags;
561
562 retry:
563         global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
564
565         rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
566         if (rc != QLA_SUCCESS) {
567                 res = false;
568                 goto out;
569         }
570
571         pmap24 = pmap;
572         entries = pmap_len/sizeof(*pmap24);
573
574         for (i = 0; i < entries; ++i) {
575                 if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
576                         loop_id = le16_to_cpu(pmap24[i].loop_id);
577                         found = true;
578                         break;
579                 }
580         }
581
582         kfree(pmap);
583
584         if (!found) {
585                 res = false;
586                 goto out;
587         }
588
589         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
590             "qlt_check_fcport_exist(): loop_id %d", loop_id);
591
592         fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
593         if (fcport == NULL) {
594                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
595                     "qla_target(%d): Allocation of tmp FC port failed",
596                     vha->vp_idx);
597                 res = false;
598                 goto out;
599         }
600
601         fcport->loop_id = loop_id;
602
603         rc = qla2x00_get_port_database(vha, fcport, 0);
604         if (rc != QLA_SUCCESS) {
605                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
606                     "qla_target(%d): Failed to retrieve fcport "
607                     "information -- get_port_database() returned %x "
608                     "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
609                 res = false;
610                 goto out_free_fcport;
611         }
612
613         if (global_resets !=
614             atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
615                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
616                     "qla_target(%d): global reset during session discovery"
617                     " (counter was %d, new %d), retrying",
618                     vha->vp_idx, global_resets,
619                     atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
620                 goto retry;
621         }
622
623         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
624             "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
625             "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
626             sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
627             fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
628
629         spin_lock_irqsave(&ha->hardware_lock, flags);
630         ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
631                                 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
632         spin_unlock_irqrestore(&ha->hardware_lock, flags);
633
634         res = true;
635
636 out_free_fcport:
637         kfree(fcport);
638
639 out:
640         return res;
641 }
642
/* ha->hardware_lock supposed to be held on entry */
/*
 * Take a session back off the deferred-deletion list.  The session
 * must currently be marked deleted.
 */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}
651
652 static void qlt_del_sess_work_fn(struct delayed_work *work)
653 {
654         struct qla_tgt *tgt = container_of(work, struct qla_tgt,
655             sess_del_work);
656         struct scsi_qla_host *vha = tgt->vha;
657         struct qla_hw_data *ha = vha->hw;
658         struct qla_tgt_sess *sess;
659         unsigned long flags;
660
661         spin_lock_irqsave(&ha->hardware_lock, flags);
662         while (!list_empty(&tgt->del_sess_list)) {
663                 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
664                     del_list_entry);
665                 if (time_after_eq(jiffies, sess->expires)) {
666                         bool cancel;
667
668                         qlt_undelete_sess(sess);
669
670                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
671                         cancel = qlt_check_fcport_exist(vha, sess);
672
673                         if (cancel) {
674                                 if (sess->deleted) {
675                                         /*
676                                          * sess was again deleted while we were
677                                          * discovering it
678                                          */
679                                         spin_lock_irqsave(&ha->hardware_lock,
680                                             flags);
681                                         continue;
682                                 }
683
684                                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
685                                     "qla_target(%d): cancel deletion of "
686                                     "session for port %02x:%02x:%02x:%02x:%02x:"
687                                     "%02x:%02x:%02x (loop ID %d), because "
688                                     " it isn't deleted by firmware",
689                                     vha->vp_idx, sess->port_name[0],
690                                     sess->port_name[1], sess->port_name[2],
691                                     sess->port_name[3], sess->port_name[4],
692                                     sess->port_name[5], sess->port_name[6],
693                                     sess->port_name[7], sess->loop_id);
694                         } else {
695                                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
696                                     "Timeout: sess %p about to be deleted\n",
697                                     sess);
698                                 ha->tgt.tgt_ops->shutdown_sess(sess);
699                                 ha->tgt.tgt_ops->put_sess(sess);
700                         }
701
702                         spin_lock_irqsave(&ha->hardware_lock, flags);
703                 } else {
704                         schedule_delayed_work(&tgt->sess_del_work,
705                             jiffies - sess->expires);
706                         break;
707                 }
708         }
709         spin_unlock_irqrestore(&ha->hardware_lock, flags);
710 }
711
712 /*
713  * Adds an extra ref to allow to drop hw lock after adding sess to the list.
714  * Caller must put it.
715  */
716 static struct qla_tgt_sess *qlt_create_sess(
717         struct scsi_qla_host *vha,
718         fc_port_t *fcport,
719         bool local)
720 {
721         struct qla_hw_data *ha = vha->hw;
722         struct qla_tgt_sess *sess;
723         unsigned long flags;
724         unsigned char be_sid[3];
725
726         /* Check to avoid double sessions */
727         spin_lock_irqsave(&ha->hardware_lock, flags);
728         list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
729                                 sess_list_entry) {
730                 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
731                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
732                             "Double sess %p found (s_id %x:%x:%x, "
733                             "loop_id %d), updating to d_id %x:%x:%x, "
734                             "loop_id %d", sess, sess->s_id.b.domain,
735                             sess->s_id.b.al_pa, sess->s_id.b.area,
736                             sess->loop_id, fcport->d_id.b.domain,
737                             fcport->d_id.b.al_pa, fcport->d_id.b.area,
738                             fcport->loop_id);
739
740                         if (sess->deleted)
741                                 qlt_undelete_sess(sess);
742
743                         kref_get(&sess->se_sess->sess_kref);
744                         ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
745                                                 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
746
747                         if (sess->local && !local)
748                                 sess->local = 0;
749                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
750
751                         return sess;
752                 }
753         }
754         spin_unlock_irqrestore(&ha->hardware_lock, flags);
755
756         sess = kzalloc(sizeof(*sess), GFP_KERNEL);
757         if (!sess) {
758                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
759                     "qla_target(%u): session allocation failed, "
760                     "all commands from port %02x:%02x:%02x:%02x:"
761                     "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
762                     fcport->port_name[0], fcport->port_name[1],
763                     fcport->port_name[2], fcport->port_name[3],
764                     fcport->port_name[4], fcport->port_name[5],
765                     fcport->port_name[6], fcport->port_name[7]);
766
767                 return NULL;
768         }
769         sess->tgt = ha->tgt.qla_tgt;
770         sess->vha = vha;
771         sess->s_id = fcport->d_id;
772         sess->loop_id = fcport->loop_id;
773         sess->local = local;
774
775         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
776             "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
777             sess, ha->tgt.qla_tgt);
778
779         be_sid[0] = sess->s_id.b.domain;
780         be_sid[1] = sess->s_id.b.area;
781         be_sid[2] = sess->s_id.b.al_pa;
        /*
         * Determine if this fc_port->port_name is allowed to access
         * target mode using explicit NodeACLs+MappedLUNs, or using
         * TPG demo mode.  If this is successful a target mode FC nexus
         * is created.
         */
788         if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
789             &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
790                 kfree(sess);
791                 return NULL;
792         }
        /*
         * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
         * access across ->hardware_lock reacquire.
         */
797         kref_get(&sess->se_sess->sess_kref);
798
799         sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
800         BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
801         memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
802
803         spin_lock_irqsave(&ha->hardware_lock, flags);
804         list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
805         ha->tgt.qla_tgt->sess_count++;
806         spin_unlock_irqrestore(&ha->hardware_lock, flags);
807
808         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
809             "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
810             "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
811             " completion %ssupported) added\n",
812             vha->vp_idx, local ?  "local " : "", fcport->port_name[0],
813             fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
814             fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
815             fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
816             sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
817             "" : "not ");
818
819         return sess;
820 }
821
822 /*
823  * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
824  */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;

        /* Target mode not in use on this host -- nothing to track. */
        if (!vha->hw->tgt.tgt_ops)
                return;

        /* Only remote initiator ports get a target-mode session. */
        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                /* Target is being stopped; don't create or revive sessions. */
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
                /*
                 * No session yet: drop the spinlock and create one under
                 * tgt_mutex, since qlt_create_sess() may sleep.  On success
                 * it returns the session with an extra sess_kref held.
                 */
                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                mutex_lock(&ha->tgt.tgt_mutex);
                sess = qlt_create_sess(vha, fcport, false);
                mutex_unlock(&ha->tgt.tgt_mutex);

                spin_lock_irqsave(&ha->hardware_lock, flags);
        } else {
                /* Balanced by the put_sess() at the bottom. */
                kref_get(&sess->se_sess->sess_kref);

                if (sess->deleted) {
                        /* Port returned before its delayed deletion ran. */
                        qlt_undelete_sess(sess);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
                            "qla_target(%u): %ssession for port %02x:"
                            "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
                            "reappeared\n", vha->vp_idx, sess->local ? "local "
                            : "", sess->port_name[0], sess->port_name[1],
                            sess->port_name[2], sess->port_name[3],
                            sess->port_name[4], sess->port_name[5],
                            sess->port_name[6], sess->port_name[7],
                            sess->loop_id);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
                            "Reappeared sess %p\n", sess);
                }
                ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
                                        (fcport->flags & FCF_CONF_COMP_SUPPORTED));
        }

        /* A session created as provisional ("local") is now confirmed. */
        if (sess && sess->local) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
                    "qla_target(%u): local session for "
                    "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
                    "(loop ID %d) became global\n", vha->vp_idx,
                    fcport->port_name[0], fcport->port_name[1],
                    fcport->port_name[2], fcport->port_name[3],
                    fcport->port_name[4], fcport->port_name[5],
                    fcport->port_name[6], fcport->port_name[7],
                    sess->loop_id);
                sess->local = 0;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        /*
         * NOTE(review): sess may be NULL here if qlt_create_sess() failed;
         * this assumes ->put_sess() tolerates NULL -- verify.
         */
        ha->tgt.tgt_ops->put_sess(sess);
}
891
void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;

        /* Target mode not configured on this host. */
        if (!vha->hw->tgt.tgt_ops)
                return;

        /* Only initiator ports have target-mode sessions to tear down. */
        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                /* Shutdown path cleans up all sessions itself. */
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

        /*
         * Mark the session local and queue it for delayed deletion rather
         * than freeing it now; qlt_fc_port_added() can revive it if the
         * port reappears before the deletion work runs.
         */
        sess->local = 1;
        qlt_schedule_sess_for_deletion(sess, false);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
922
923 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
924 {
925         struct qla_hw_data *ha = tgt->ha;
926         unsigned long flags;
927         int res;
928         /*
929          * We need to protect against race, when tgt is freed before or
930          * inside wake_up()
931          */
932         spin_lock_irqsave(&ha->hardware_lock, flags);
933         ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
934             "tgt %p, empty(sess_list)=%d sess_count=%d\n",
935             tgt, list_empty(&tgt->sess_list), tgt->sess_count);
936         res = (tgt->sess_count == 0);
937         spin_unlock_irqrestore(&ha->hardware_lock, flags);
938
939         return res;
940 }
941
942 /* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;

        /* Phase 1 must only be entered once per target instance. */
        if (tgt->tgt_stop || tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
                    "Already in tgt->tgt_stop or tgt_stopped state\n");
                dump_stack();
                return;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
            vha->host_no, vha);
        /*
         * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
         * Lock is needed, because we still can get an incoming packet.
         */
        mutex_lock(&ha->tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stop = 1;
        qlt_clear_tgt_db(tgt, true);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&ha->tgt.tgt_mutex);

        /* Make sure all scheduled session deletions have finished. */
        flush_delayed_work(&tgt->sess_del_work);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
            "Waiting for sess works (tgt %p)", tgt);
        /*
         * Drain sess_works_list.  flush_scheduled_work() may sleep, so the
         * lock is dropped around it and the list re-checked afterwards.
         */
        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        while (!list_empty(&tgt->sess_works_list)) {
                spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
                flush_scheduled_work();
                spin_lock_irqsave(&tgt->sess_work_lock, flags);
        }
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
            "Waiting for tgt %p: list_empty(sess_list)=%d "
            "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
            tgt->sess_count);

        /* Block until every session has dropped its reference. */
        wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

        /* Big hammer */
        if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
                qlt_disable_vha(vha);

        /* Wait for sessions to clear out (just in case) */
        wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
995 EXPORT_SYMBOL(qlt_stop_phase1);
996
997 /* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;

        /* Phase 2 is a no-op once the target is fully stopped. */
        if (tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
                    "Already in tgt->tgt_stopped state\n");
                dump_stack();
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
            "Waiting for %d IRQ commands to complete (tgt %p)",
            tgt->irq_cmd_count, tgt);

        /*
         * Busy-wait for in-flight IRQ-context commands to drain, dropping
         * hardware_lock each iteration so the IRQ path can make progress.
         */
        mutex_lock(&ha->tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (tgt->irq_cmd_count != 0) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                udelay(2);
                spin_lock_irqsave(&ha->hardware_lock, flags);
        }
        /* Transition from "stopping" to "stopped". */
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&ha->tgt.tgt_mutex);

        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
            tgt);
}
1029 EXPORT_SYMBOL(qlt_stop_phase2);
1030
1031 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1032 static void qlt_release(struct qla_tgt *tgt)
1033 {
1034         struct qla_hw_data *ha = tgt->ha;
1035
1036         if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1037                 qlt_stop_phase2(tgt);
1038
1039         ha->tgt.qla_tgt = NULL;
1040
1041         ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
1042             "Release of tgt %p finished\n", tgt);
1043
1044         kfree(tgt);
1045 }
1046
1047 /* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
        const void *param, unsigned int param_size)
{
        struct qla_tgt_sess_work_param *prm;
        unsigned long flags;

        /* GFP_ATOMIC: caller holds ha->hardware_lock. */
        prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
        if (!prm) {
                ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
                    "qla_target(%d): Unable to create session "
                    "work, command will be refused", 0);
                return -ENOMEM;
        }

        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
            "Scheduling work (type %d, prm %p)"
            " to find session for param %p (size %d, tgt %p)\n",
            type, prm, param, param_size, tgt);

        /*
         * Snapshot the caller's IOCB so it survives until the work runs.
         * NOTE(review): assumes param_size <= sizeof(prm->tm_iocb) --
         * verify for any new call site.
         */
        prm->type = type;
        memcpy(&prm->tm_iocb, param, param_size);

        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        schedule_work(&tgt->sess_work);

        return 0;
}
1078
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
/*
 * Build and queue a NOTIFY_ACK IOCB replying to immediate notify @ntfy.
 * Only the srr_* arguments are consumed here; @add_flags, @resp_code and
 * @resp_code_valid are not referenced by this 24xx path.  On allocation
 * failure the ack is simply not sent.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *ntfy,
        uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
        uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
        struct qla_hw_data *ha = vha->hw;
        request_t *pkt;
        struct nack_to_isp *nack;

        ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (!pkt) {
                ql_dbg(ql_dbg_tgt, vha, 0xe049,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", vha->vp_idx, __func__);
                return;
        }

        /* Count outstanding notify acks for the completion path. */
        if (ha->tgt.qla_tgt != NULL)
                ha->tgt.qla_tgt->notify_ack_expected++;

        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;

        /* Echo the identifying fields of the original immediate notify. */
        nack = (struct nack_to_isp *)pkt;
        nack->ox_id = ntfy->ox_id;

        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
                nack->u.isp24.flags = ntfy->u.isp24.flags &
                        __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
        }
        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
        nack->u.isp24.status = ntfy->u.isp24.status;
        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
        nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
        nack->u.isp24.srr_reject_code = srr_reject_code;
        nack->u.isp24.srr_reject_code_expl = srr_explan;
        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

        ql_dbg(ql_dbg_tgt, vha, 0xe005,
            "qla_target(%d): Sending 24xx Notify Ack %d\n",
            vha->vp_idx, nack->u.isp24.status);

        qla2x00_start_iocbs(vha, vha->req);
}
1137
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
/*
 * Build and queue an ABTS response for the received @abts: a BA_ACC when
 * @status is FCP_TMF_CMPL, a BA_RJT otherwise.  @ids_reversed selects
 * whether the s_id/d_id pair is copied straight through (the incoming
 * entry already has them reversed) or swapped for a normal reply to the
 * initiator.  On allocation failure the response is not sent.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, uint32_t status,
        bool ids_reversed)
{
        struct qla_hw_data *ha = vha->hw;
        struct abts_resp_to_24xx *resp;
        uint32_t f_ctl;
        uint8_t *p;

        ql_dbg(ql_dbg_tgt, vha, 0xe006,
            "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
            ha, abts, status);

        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
        if (!resp) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04a,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet", vha->vp_idx, __func__);
                return;
        }

        resp->entry_type = ABTS_RESP_24XX;
        resp->entry_count = 1;
        resp->nport_handle = abts->nport_handle;
        resp->vp_index = vha->vp_idx;
        resp->sof_type = abts->sof_type;
        resp->exchange_address = abts->exchange_address;
        resp->fcp_hdr_le = abts->fcp_hdr_le;
        /* Copy the three low-order bytes of f_ctl into the LE header. */
        f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
            F_CTL_LAST_SEQ | F_CTL_END_SEQ |
            F_CTL_SEQ_INITIATIVE);
        p = (uint8_t *)&f_ctl;
        resp->fcp_hdr_le.f_ctl[0] = *p++;
        resp->fcp_hdr_le.f_ctl[1] = *p++;
        resp->fcp_hdr_le.f_ctl[2] = *p;
        if (ids_reversed) {
                resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
                resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
                resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
                resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
                resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
                resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
        } else {
                /* Normal reply: swap source and destination IDs. */
                resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
                resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
                resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
                resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
                resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
                resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
        }
        resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
        if (status == FCP_TMF_CMPL) {
                /* Abort succeeded: basic accept (BA_ACC) payload. */
                resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
                resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
                resp->payload.ba_acct.low_seq_cnt = 0x0000;
                resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
                resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
                resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
        } else {
                /* Abort failed/rejected: basic reject (BA_RJT) payload. */
                resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
                resp->payload.ba_rjt.reason_code =
                        BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
                /* Other bytes are zero */
        }

        /* Count outstanding ABTS responses for the completion path. */
        ha->tgt.qla_tgt->abts_resp_expected++;

        qla2x00_start_iocbs(vha, vha->req);
}
1214
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
/*
 * Send a TERM EXCHANGE CTIO7 for the exchange named in @entry, then
 * re-send the ABTS response (with IDs already reversed) via
 * qlt_24xx_send_abts_resp().
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
        struct abts_resp_from_24xx_fw *entry)
{
        struct ctio7_to_24xx *ctio;

        ql_dbg(ql_dbg_tgt, vha, 0xe007,
            "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04b,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", vha->vp_idx, __func__);
                return;
        }

        /*
         * The @entry we were handed is the firmware's response to an ABTS
         * response we generated earlier, so its ID fields are already
         * reversed: d_id names the initiator.
         */

        ctio->entry_type = CTIO_TYPE7;
        ctio->entry_count = 1;
        ctio->nport_handle = entry->nport_handle;
        ctio->handle = QLA_TGT_SKIP_HANDLE |    CTIO_COMPLETION_HANDLE_MARK;
        ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = vha->vp_idx;
        ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
        ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
        ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
        ctio->exchange_addr = entry->exchange_addr_to_abort;
        /* Terminate the exchange rather than completing it. */
        ctio->u.status1.flags =
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_TERMINATE);
        ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

        qla2x00_start_iocbs(vha, vha->req);

        /* Retry the ABTS response, keeping the reversed IDs as-is. */
        qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
            FCP_TMF_CMPL, true);
}
1262
1263 /* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
        struct qla_hw_data *ha = vha->hw;
        struct se_session *se_sess = sess->se_sess;
        struct qla_tgt_mgmt_cmd *mcmd;
        struct se_cmd *se_cmd;
        u32 lun = 0;
        int rc;
        bool found_lun = false;

        /*
         * Find the outstanding command whose tag matches the exchange being
         * aborted, to learn which LUN the TMR must be issued against.
         */
        spin_lock(&se_sess->sess_cmd_lock);
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
                struct qla_tgt_cmd *cmd =
                        container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
                if (cmd->tag == abts->exchange_addr_to_abort) {
                        lun = cmd->unpacked_lun;
                        found_lun = true;
                        break;
                }
        }
        spin_unlock(&se_sess->sess_cmd_lock);

        /* No command with that tag is outstanding -- nothing to abort. */
        if (!found_lun)
                return -ENOENT;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
            vha->vp_idx, abts->exchange_addr_to_abort);

        /* GFP_ATOMIC: ha->hardware_lock is held by the caller. */
        mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
        if (mcmd == NULL) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
                    "qla_target(%d): %s: Allocation of ABORT cmd failed",
                    vha->vp_idx, __func__);
                return -ENOMEM;
        }
        memset(mcmd, 0, sizeof(*mcmd));

        /* Keep the original ABTS so the response can be built later. */
        mcmd->sess = sess;
        memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

        /* Hand the abort to the target core as an ABORT_TASK TMR. */
        rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
            abts->exchange_addr_to_abort);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
                    "qla_target(%d):  tgt_ops->handle_tmr()"
                    " failed: %d", vha->vp_idx, rc);
                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
                return -EFAULT;
        }

        return 0;
}
1318
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        uint32_t tag = abts->exchange_addr_to_abort;
        uint8_t s_id[3];
        int rc;

        /* Only full-exchange aborts are handled; reject sequence aborts. */
        if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
                    "qla_target(%d): ABTS: Abort Sequence not "
                    "supported\n", vha->vp_idx);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }

        if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
                    "qla_target(%d): ABTS: Unknown Exchange "
                    "Address received\n", vha->vp_idx);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
            "qla_target(%d): task abort (s_id=%x:%x:%x, "
            "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
            abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
            le32_to_cpu(abts->fcp_hdr_le.parameter));

        /*
         * Reverse the s_id bytes from the LE header into the byte order
         * find_sess_by_s_id() expects -- presumably wire (big-endian)
         * order; verify against that helper.
         */
        s_id[0] = abts->fcp_hdr_le.s_id[2];
        s_id[1] = abts->fcp_hdr_le.s_id[1];
        s_id[2] = abts->fcp_hdr_le.s_id[0];

        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
        if (!sess) {
                /*
                 * No session yet; defer the abort to process context where
                 * the session may be found/created.  If that cannot be
                 * scheduled, reject the ABTS immediately.
                 */
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
                    "qla_target(%d): task abort for non-existant session\n",
                    vha->vp_idx);
                rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
                    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
                if (rc != 0) {
                        qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
                            false);
                }
                return;
        }

        rc = __qlt_24xx_handle_abts(vha, abts, sess);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
                    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
                    vha->vp_idx, rc);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }
}
1380
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
/*
 * Send a CTIO7 carrying @resp_code as the task-management response for
 * @mcmd.  NOTE: the first parameter is named "ha" but is actually a
 * scsi_qla_host pointer.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
        struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
        struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
        struct ctio7_to_24xx *ctio;

        ql_dbg(ql_dbg_tgt, ha, 0xe008,
            "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
            ha, atio, resp_code);

        /* Send marker if required */
        if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
                return;

        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, ha, 0xe04c,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", ha->vp_idx, __func__);
                return;
        }

        ctio->entry_type = CTIO_TYPE7;
        ctio->entry_count = 1;
        ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
        ctio->nport_handle = mcmd->sess->loop_id;
        ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = ha->vp_idx;
        /* Reverse the original ATIO's s_id bytes into initiator_id. */
        ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
        ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        ctio->exchange_addr = atio->u.isp24.exchange_addr;
        ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_SEND_STATUS);
        ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
        /* 8 bytes of response info, with the TM response code in byte 0. */
        ctio->u.status1.scsi_status =
            __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
        ctio->u.status1.response_len = __constant_cpu_to_le16(8);
        ctio->u.status1.sense_data[0] = resp_code;

        qla2x00_start_iocbs(ha, ha->req);
}
1427
/*
 * Return a management command descriptor to qla_tgt_mgmt_cmd_mempool;
 * counterpart of the mempool_alloc() in __qlt_24xx_handle_abts().
 */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
1432 EXPORT_SYMBOL(qlt_free_mcmd);
1433
1434 /* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
        struct scsi_qla_host *vha = mcmd->sess->vha;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
            "TM response mcmd (%p) status %#x state %#x",
            mcmd, mcmd->fc_tm_rsp, mcmd->flags);

        spin_lock_irqsave(&ha->hardware_lock, flags);
        /*
         * Reply in the form matching how the TMR arrived: a notify ack for
         * immediate-notify based TMRs, an ABTS response for aborts, or a
         * CTIO carrying the response code otherwise.
         */
        if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
                qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
                    0, 0, 0, 0, 0, 0);
        else {
                if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
                        qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
                            mcmd->fc_tm_rsp, false);
                else
                        qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
                            mcmd->fc_tm_rsp);
        }
        /*
         * Make the callback for ->free_mcmd() to queue_work() and invoke
         * target_put_sess_cmd() to drop cmd_kref to 1.  The final
         * target_put_sess_cmd() call will be made from TFO->check_stop_free()
         * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
         * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
         * qlt_xmit_tm_rsp() returns here..
         */
        ha->tgt.tgt_ops->free_mcmd(mcmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
1468 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
1469
1470 /* No locks */
/*
 * DMA-map the command's scatterlist and compute how many extra request-ring
 * entries (continuation IOCBs) the data segments require.  On success fills
 * prm->sg/seg_cnt, bumps prm->req_cnt and sets cmd->sg_mapped; returns 0.
 * Returns -1 if the PCI mapping failed.
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
        struct qla_tgt_cmd *cmd = prm->cmd;

        BUG_ON(cmd->sg_cnt == 0);

        prm->sg = (struct scatterlist *)cmd->sg;
        prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
            cmd->sg_cnt, cmd->dma_data_direction);
        if (unlikely(prm->seg_cnt == 0))
                goto out_err;

        prm->cmd->sg_mapped = 1;

        /*
         * If greater than four sg entries then we need to allocate
         * the continuation entries
         */
        if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
                prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
                    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

        ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
            prm->seg_cnt, prm->req_cnt);
        return 0;

out_err:
        ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
            "qla_target(%d): PCI mapping failed: sg_cnt=%d",
            0, prm->cmd->sg_cnt);
        return -1;
}
1503
1504 static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
1505         struct qla_tgt_cmd *cmd)
1506 {
1507         struct qla_hw_data *ha = vha->hw;
1508
1509         BUG_ON(!cmd->sg_mapped);
1510         pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1511         cmd->sg_mapped = 0;
1512 }
1513
1514 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1515         uint32_t req_cnt)
1516 {
1517         struct qla_hw_data *ha = vha->hw;
1518         device_reg_t __iomem *reg = ha->iobase;
1519         uint32_t cnt;
1520
1521         if (vha->req->cnt < (req_cnt + 2)) {
1522                 cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
1523
1524                 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
1525                     "Request ring circled: cnt=%d, vha->->ring_index=%d, "
1526                     "vha->req->cnt=%d, req_cnt=%d\n", cnt,
1527                     vha->req->ring_index, vha->req->cnt, req_cnt);
1528                 if  (vha->req->ring_index < cnt)
1529                         vha->req->cnt = cnt - vha->req->ring_index;
1530                 else
1531                         vha->req->cnt = vha->req->length -
1532                             (vha->req->ring_index - cnt);
1533         }
1534
1535         if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1536                 ql_dbg(ql_dbg_tgt, vha, 0xe00b,
1537                     "qla_target(%d): There is no room in the "
1538                     "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1539                     "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
1540                     vha->req->cnt, req_cnt);
1541                 return -EAGAIN;
1542         }
1543         vha->req->cnt -= req_cnt;
1544
1545         return 0;
1546 }
1547
1548 /*
1549  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1550  */
1551 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1552 {
1553         /* Adjust ring index. */
1554         vha->req->ring_index++;
1555         if (vha->req->ring_index == vha->req->length) {
1556                 vha->req->ring_index = 0;
1557                 vha->req->ring_ptr = vha->req->ring;
1558         } else {
1559                 vha->req->ring_ptr++;
1560         }
1561         return (cont_entry_t *)vha->req->ring_ptr;
1562 }
1563
1564 /* ha->hardware_lock supposed to be held on entry */
1565 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
1566 {
1567         struct qla_hw_data *ha = vha->hw;
1568         uint32_t h;
1569
1570         h = ha->tgt.current_handle;
1571         /* always increment cmd handle */
1572         do {
1573                 ++h;
1574                 if (h > DEFAULT_OUTSTANDING_COMMANDS)
1575                         h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
1576                 if (h == ha->tgt.current_handle) {
1577                         ql_dbg(ql_dbg_tgt, vha, 0xe04e,
1578                             "qla_target(%d): Ran out of "
1579                             "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1580                         h = QLA_TGT_NULL_HANDLE;
1581                         break;
1582                 }
1583         } while ((h == QLA_TGT_NULL_HANDLE) ||
1584             (h == QLA_TGT_SKIP_HANDLE) ||
1585             (ha->tgt.cmds[h-1] != NULL));
1586
1587         if (h != QLA_TGT_NULL_HANDLE)
1588                 ha->tgt.current_handle = h;
1589
1590         return h;
1591 }
1592
/*
 * ha->hardware_lock supposed to be held on entry.
 *
 * Build a CTIO type 7 IOCB for prm->cmd in the current request-ring entry
 * and register the command under a freshly allocated handle in
 * ha->tgt.cmds[] so the completion interrupt can find it again.
 * Returns 0 on success, -EAGAIN when no command handle is free.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	/* Claim the current ring entry and clear it before filling it in. */
	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	/* S_ID byte order is reversed between the FC header and the CTIO. */
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	/* attr << 9: presumably the task-attribute bits of the CTIO7 flags
	 * word — confirm against the firmware interface spec. */
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}
1638
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 *
 * Emit continuation IOCBs for the S/G segments that did not fit into the
 * initial CTIO, consuming prm->sg / prm->seg_cnt as it goes.  Uses
 * CONTINUE_A64_TYPE entries when 64-bit addressing is enabled, otherwise
 * 32-bit CONTINUE_TYPE entries.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of
		 * 64-bit specific fields used for 32-bit
		 * addressing. Cast to (cont_entry_t *) for
		 * that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			/* Low DMA address dword first ... */
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				/* ... high dword only for A64 entries ... */
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			/* ... then the segment length. */
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
1705
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 *
 * Fill the data-segment descriptors of the CTIO7 in prm->pkt from the
 * mapped S/G list (prm->sg / prm->seg_cnt).  Segments that do not fit in
 * the CTIO itself (more than datasegs_per_cmd) spill over into
 * continuation IOCBs via qlt_load_cont_data_segments().
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer: zero the first descriptor and stop. */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			/* High address dword only when 64-bit DMA is on. */
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	/* Any remaining segments go into continuation IOCBs. */
	qlt_load_cont_data_segments(prm, vha);
}
1768
1769 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
1770 {
1771         return cmd->bufflen > 0;
1772 }
1773
/*
 * Called without ha->hardware_lock held.
 *
 * Prepare the transmit-parameter block @prm for sending a response for
 * @cmd: terminate aborted commands, fill in sense/status/residual fields,
 * PCI-map the data buffer for data-bearing transfers, and store in
 * *full_req_cnt the total number of request-ring entries the response
 * will consume (including an optional extra status CTIO).
 *
 * Returns 0 on success, QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED if the command
 * was aborted (in which case @cmd may already have been freed!), -EFAULT
 * if the marker could not be issued, or -EAGAIN if PCI mapping failed.
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	/* Initialize the parameter block with the defaults for a
	 * single-entry, status-only response. */
	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	/* DMA-map the data buffer; updates prm->seg_cnt and prm->req_cnt. */
	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if  (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	/* Translate the core's residual accounting into SCSI status bits. */
	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				/* Data plus sense/non-good status needs an
				 * extra status-only CTIO. */
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
1868
1869 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1870         struct qla_tgt_cmd *cmd, int sending_sense)
1871 {
1872         if (ha->tgt.enable_class_2)
1873                 return 0;
1874
1875         if (sending_sense)
1876                 return cmd->conf_compl_supported;
1877         else
1878                 return ha->tgt.enable_explicit_conf &&
1879                     cmd->conf_compl_supported;
1880 }
1881
1882 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
1883 /*
1884  *  Original taken from the XFS code
1885  */
/*
 *  Original taken from the XFS code
 *
 *  Debug-only pseudo-random number source used to inject artificial SRR
 *  conditions.  Implements a Lehmer generator with the Park-Miller
 *  "minimal standard" constants (multiplier 16807, modulus 2^31 - 1),
 *  using Schrage's decomposition (127773 / 2836) to avoid overflow.
 *  State is shared and protected by a local spinlock; seeded from
 *  jiffies on first use.
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
1912
/*
 * Debug-only SRR fault injection: randomly shrink @cmd's data buffer
 * (tail and/or head) or drop the data phase entirely, to provoke the
 * initiator into issuing Sequence Retransmission Requests.  Only active
 * under CONFIG_QLA_TGT_DEBUG_SRR, and only for FCP READ (DMA_FROM_DEVICE)
 * commands.
 */
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	/* ~1% of multi-segment reads: truncate the buffer tail to a random
	 * number of whole S/G segments. */
	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	/* ~1% of reads: cut off the buffer head at a random byte offset
	 * (offset 0 degenerates to dropping the data phase). */
	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
1966 #else
1967 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1968 {}
1969 #endif
1970
/*
 * Fill the status part of an already-built CTIO7: SEND_STATUS flag,
 * residual and SCSI status, optional explicit-confirmation flags, and —
 * when valid sense data is present — switch the entry from status mode 0
 * to status mode 1 and copy the sense bytes (as big-endian dwords) into
 * the IOCB.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	/* Clamp sense length to what fits in the status-mode-1 IOCB. */
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explict_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explict_conf:
		/* Sense present: switch the entry to status mode 1. */
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		/* Copy sense data dword-by-dword in big-endian order; any
		 * trailing bytes (len % 4) are dropped. */
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			static int q;
			if (q < 10) {
				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
				    "qla_target(%d): %d bytes of sense "
				    "lost", prm->tgt->ha->vp_idx,
				    prm->sense_buffer_len % 4);
				q++;
			}
		}
#endif
	} else {
		/* No sense: still use status mode 1, with empty sense data. */
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
2036
/*
 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon.
 *
 * Builds and queues the CTIO7 (plus an optional extra status-only CTIO
 * when data and sense/non-good status must both be sent), then kicks the
 * request ring.  Returns 0 on success or a negative errno when the
 * response could not be queued.
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		/* Aborted command was already terminated (and possibly
		 * freed) by qlt_pre_xmit_response(); report success. */
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;


	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			/* Status fits in the same CTIO as the data. */
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there is sufficient
			 * amount of request entries to not drop HW lock in
			 * req_pkt().
			 */
			struct ctio7_to_24xx *ctio =
				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

			ql_dbg(ql_dbg_tgt, vha, 0xe019,
			    "Building additional status packet\n");

			/* Clone the data CTIO, then strip it down to a
			 * status-only entry. */
			memcpy(ctio, pkt, sizeof(*ctio));
			ctio->entry_count = 1;
			ctio->dseg_count = 0;
			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
			    CTIO7_FLAGS_DATA_IN);

			/* Real finish is ctio_m1's finish */
			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
			pkt->u.status0.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_DONT_RET_CTIO);
			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
			    &prm);
			pr_debug("Status CTIO7: %p\n", ctio);
		}
	} else
		qlt_24xx_init_ctio_to_isp(pkt, &prm);


	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */

	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
	    pkt, scsi_status);

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return 0;

out_unmap_unlock:
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
2154 EXPORT_SYMBOL(qlt_xmit_response);
2155
/*
 * Queue a CTIO7 that tells the HBA to start receiving write data for
 * @cmd and move the command to QLA_TGT_STATE_NEED_DATA.  Called without
 * ha->hardware_lock held; takes and releases it internally.
 *
 * Returns 0 on success, -EIO if the marker could not be issued, or
 * -EAGAIN if PCI mapping / ring reservation / packet build failed.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	struct ctio7_to_24xx *pkt;
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = cmd->tgt;
	struct qla_tgt_prm prm;
	unsigned long flags;
	int res = 0;

	memset(&prm, 0, sizeof(prm));
	prm.cmd = cmd;
	prm.tgt = tgt;
	prm.sg = NULL;
	prm.req_cnt = 1;

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EIO;

	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
	    (int)vha->vp_idx);

	/* Calculate number of entries and segments required */
	if (qlt_pci_map_calc_cnt(&prm) != 0)
		return -EAGAIN;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does F/W have an IOCBs for this request */
	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
	if (res != 0)
		goto out_unlock_free_unmap;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unlock_free_unmap;
	pkt = (struct ctio7_to_24xx *)prm.pkt;
	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
	    CTIO7_FLAGS_STATUS_MODE_0);
	qlt_load_data_segments(&prm, vha);

	cmd->state = QLA_TGT_STATE_NEED_DATA;

	qla2x00_start_iocbs(vha, vha->req);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;

out_unlock_free_unmap:
	/* Undo the PCI mapping on any failure after mapping succeeded. */
	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}
2212 EXPORT_SYMBOL(qlt_rdy_to_xfer);
2213
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx.
 *
 * Builds a CTIO7 with CTIO7_FLAGS_TERMINATE to abort the exchange
 * described by @atio.  Returns 1 if @cmd was in a fully-processed state
 * (caller should then free it), 0 otherwise, or -ENOMEM if no IOCB
 * could be allocated. */
static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd,
	struct atio_from_isp *atio)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	int ret = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (pkt == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe050,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return -ENOMEM;
	}

	if (cmd != NULL) {
		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
			ql_dbg(ql_dbg_tgt, vha, 0xe051,
			    "qla_target(%d): Terminating cmd %p with "
			    "incorrect state %d\n", vha->vp_idx, cmd,
			    cmd->state);
		} else
			ret = 1;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	/* S_ID byte order is reversed between the FC header and the CTIO. */
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

	/* Most likely, it isn't needed */
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
	return ret;
}
2272
2273 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2274         struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2275 {
2276         unsigned long flags;
2277         int rc;
2278
2279         if (qlt_issue_marker(vha, ha_locked) < 0)
2280                 return;
2281
2282         if (ha_locked) {
2283                 rc = __qlt_send_term_exchange(vha, cmd, atio);
2284                 goto done;
2285         }
2286         spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2287         rc = __qlt_send_term_exchange(vha, cmd, atio);
2288         spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2289 done:
2290         if (rc == 1) {
2291                 if (!ha_locked && !in_interrupt())
2292                         msleep(250); /* just in case */
2293
2294                 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2295         }
2296 }
2297
/*
 * Release a target command back to its slab cache.  The caller must have
 * DMA-unmapped the command's S/G list first (sg_mapped clear).
 */
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
        BUG_ON(cmd->sg_mapped);

        /* free_sg is set when qlt_set_data_offset() replaced cmd->sg. */
        if (unlikely(cmd->free_sg))
                kfree(cmd->sg);
        kmem_cache_free(qla_tgt_cmd_cachep, cmd);
}
EXPORT_SYMBOL(qlt_free_cmd);
2307
2308 /* ha->hardware_lock supposed to be held on entry */
static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd, void *ctio)
{
        struct qla_tgt_srr_ctio *sc;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
        struct qla_tgt_srr_imm *imm;

        /*
         * Bump the CTIO-side SRR id; it must pair with an immediate notify
         * carrying the same id (tgt->imm_srr_id) before srr_work can run.
         */
        tgt->ctio_srr_id++;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
            "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);

        if (!ctio) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
                    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
                    vha->vp_idx);
                return -EINVAL;
        }

        sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
        if (sc != NULL) {
                sc->cmd = cmd;
                /* IRQ is already OFF */
                spin_lock(&tgt->srr_lock);
                sc->srr_id = tgt->ctio_srr_id;
                list_add_tail(&sc->srr_list_entry,
                    &tgt->srr_ctio_list);
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
                    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
                /*
                 * If the immediate-notify half with the same id already
                 * arrived, schedule srr_work to process the complete pair.
                 */
                if (tgt->imm_srr_id == tgt->ctio_srr_id) {
                        int found = 0;
                        list_for_each_entry(imm, &tgt->srr_imm_list,
                            srr_list_entry) {
                                if (imm->srr_id == sc->srr_id) {
                                        found = 1;
                                        break;
                                }
                        }
                        if (found) {
                                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
                                    "Scheduling srr work\n");
                                schedule_work(&tgt->srr_work);
                        } else {
                                /*
                                 * Ids match but no IMM entry is queued: the
                                 * pairing is broken, so drop this CTIO SRR.
                                 */
                                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
                                    "qla_target(%d): imm_srr_id "
                                    "== ctio_srr_id (%d), but there is no "
                                    "corresponding SRR IMM, deleting CTIO "
                                    "SRR %p\n", vha->vp_idx,
                                    tgt->ctio_srr_id, sc);
                                list_del(&sc->srr_list_entry);
                                spin_unlock(&tgt->srr_lock);

                                kfree(sc);
                                return -EINVAL;
                        }
                }
                spin_unlock(&tgt->srr_lock);
        } else {
                struct qla_tgt_srr_imm *ti;

                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
                    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
                    vha->vp_idx);
                /*
                 * Allocation failed: reject and free any queued IMM SRR with
                 * the same id, since it can never be paired with a CTIO now.
                 */
                spin_lock(&tgt->srr_lock);
                list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
                    srr_list_entry) {
                        if (imm->srr_id == tgt->ctio_srr_id) {
                                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
                                    "IMM SRR %p deleted (id %d)\n",
                                    imm, imm->srr_id);
                                list_del(&imm->srr_list_entry);
                                qlt_reject_free_srr_imm(vha, imm, 1);
                        }
                }
                spin_unlock(&tgt->srr_lock);

                return -ENOMEM;
        }

        return 0;
}
2391
2392 /*
2393  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2394  */
2395 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2396         struct qla_tgt_cmd *cmd, uint32_t status)
2397 {
2398         int term = 0;
2399
2400         if (ctio != NULL) {
2401                 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2402                 term = !(c->flags &
2403                     __constant_cpu_to_le16(OF_TERM_EXCH));
2404         } else
2405                 term = 1;
2406
2407         if (term)
2408                 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2409
2410         return term;
2411 }
2412
2413 /* ha->hardware_lock supposed to be held on entry */
2414 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2415         uint32_t handle)
2416 {
2417         struct qla_hw_data *ha = vha->hw;
2418
2419         handle--;
2420         if (ha->tgt.cmds[handle] != NULL) {
2421                 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2422                 ha->tgt.cmds[handle] = NULL;
2423                 return cmd;
2424         } else
2425                 return NULL;
2426 }
2427
2428 /* ha->hardware_lock supposed to be held on entry */
2429 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2430         uint32_t handle, void *ctio)
2431 {
2432         struct qla_tgt_cmd *cmd = NULL;
2433
2434         /* Clear out internal marks */
2435         handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
2436             CTIO_INTERMEDIATE_HANDLE_MARK);
2437
2438         if (handle != QLA_TGT_NULL_HANDLE) {
2439                 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
2440                         ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
2441                             "SKIP_HANDLE CTIO\n");
2442                         return NULL;
2443                 }
2444                 /* handle-1 is actually used */
2445                 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
2446                         ql_dbg(ql_dbg_tgt, vha, 0xe052,
2447                             "qla_target(%d): Wrong handle %x received\n",
2448                             vha->vp_idx, handle);
2449                         return NULL;
2450                 }
2451                 cmd = qlt_get_cmd(vha, handle);
2452                 if (unlikely(cmd == NULL)) {
2453                         ql_dbg(ql_dbg_tgt, vha, 0xe053,
2454                             "qla_target(%d): Suspicious: unable to "
2455                             "find the command with handle %x\n", vha->vp_idx,
2456                             handle);
2457                         return NULL;
2458                 }
2459         } else if (ctio != NULL) {
2460                 /* We can't get loop ID from CTIO7 */
2461                 ql_dbg(ql_dbg_tgt, vha, 0xe054,
2462                     "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2463                     "support NULL handles\n", vha->vp_idx);
2464                 return NULL;
2465         }
2466
2467         return cmd;
2468 }
2469
2470 /*
2471  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2472  */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
        uint32_t status, void *ctio)
{
        struct qla_hw_data *ha = vha->hw;
        struct se_cmd *se_cmd;
        struct target_core_fabric_ops *tfo;
        struct qla_tgt_cmd *cmd;

        ql_dbg(ql_dbg_tgt, vha, 0xe01e,
            "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
            vha->vp_idx, ctio, status, handle);

        /* Intermediate CTIOs carry no final status; only log failures. */
        if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
                /* That could happen only in case of an error/reset/abort */
                if (status != CTIO_SUCCESS) {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
                            "Intermediate CTIO received"
                            " (status %x)\n", status);
                }
                return;
        }

        /* Resolve the firmware handle back to the owning command. */
        cmd = qlt_ctio_to_cmd(vha, handle, ctio);
        if (cmd == NULL)
                return;

        se_cmd = &cmd->se_cmd;
        tfo = se_cmd->se_tfo;

        if (cmd->sg_mapped)
                qlt_unmap_sg(vha, cmd);

        if (unlikely(status != CTIO_SUCCESS)) {
                switch (status & 0xFFFF) {
                case CTIO_LIP_RESET:
                case CTIO_TARGET_RESET:
                case CTIO_ABORTED:
                case CTIO_TIMEOUT:
                case CTIO_INVALID_RX_ID:
                        /* They are OK */
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
                            "qla_target(%d): CTIO with "
                            "status %#x received, state %x, se_cmd %p, "
                            "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
                            "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
                            status, cmd->state, se_cmd);
                        break;

                case CTIO_PORT_LOGGED_OUT:
                case CTIO_PORT_UNAVAILABLE:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
                            "qla_target(%d): CTIO with PORT LOGGED "
                            "OUT (29) or PORT UNAVAILABLE (28) status %x "
                            "received (state %x, se_cmd %p)\n", vha->vp_idx,
                            status, cmd->state, se_cmd);
                        break;

                case CTIO_SRR_RECEIVED:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
                            "qla_target(%d): CTIO with SRR_RECEIVED"
                            " status %x received (state %x, se_cmd %p)\n",
                            vha->vp_idx, status, cmd->state, se_cmd);
                        /*
                         * On success the SRR machinery now owns the command;
                         * do not free it here.  Fall through to termination
                         * only if queueing the SRR failed.
                         */
                        if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
                                break;
                        else
                                return;

                default:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
                            "qla_target(%d): CTIO with error status "
                            "0x%x received (state %x, se_cmd %p\n",
                            vha->vp_idx, status, cmd->state, se_cmd);
                        break;
                }

                /*
                 * NEED_DATA commands are completed below via handle_data();
                 * every other failed command gets its exchange terminated.
                 * If termination freed the command, stop here.
                 */
                if (cmd->state != QLA_TGT_STATE_NEED_DATA)
                        if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
                                return;
        }

        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
                ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
        } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
                int rx_status = 0;

                cmd->state = QLA_TGT_STATE_DATA_IN;

                if (unlikely(status != CTIO_SUCCESS))
                        rx_status = -EIO;
                else
                        cmd->write_data_transferred = 1;

                ql_dbg(ql_dbg_tgt, vha, 0xe020,
                    "Data received, context %x, rx_status %d\n",
                    0x0, rx_status);

                /* Hand the write data to the fabric; it owns cmd from here. */
                ha->tgt.tgt_ops->handle_data(cmd);
                return;
        } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
                    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
        } else {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
                    "qla_target(%d): A command in state (%d) should "
                    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
        }

        if (unlikely(status != CTIO_SUCCESS)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
                dump_stack();
        }

        ha->tgt.tgt_ops->free_cmd(cmd);
}
2587
2588 /* ha->hardware_lock supposed to be held on entry */
2589 /* called via callback from qla2xxx */
void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;

        /*
         * NOTE(review): likely() marks "target mode disabled" as the common
         * case -- presumably because initiator-only is the default qlini_mode;
         * confirm the hint direction is intentional.
         */
        if (likely(tgt == NULL)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe021,
                    "CTIO, but target mode not enabled"
                    " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
                return;
        }

        /*
         * Bracket the completion with irq_cmd_count; presumably the target
         * stop path waits for this to drain -- verify against qlt_stop_phase.
         */
        tgt->irq_cmd_count++;
        qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
        tgt->irq_cmd_count--;
}
2606
2607 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2608         uint8_t task_codes)
2609 {
2610         int fcp_task_attr;
2611
2612         switch (task_codes) {
2613         case ATIO_SIMPLE_QUEUE:
2614                 fcp_task_attr = MSG_SIMPLE_TAG;
2615                 break;
2616         case ATIO_HEAD_OF_QUEUE:
2617                 fcp_task_attr = MSG_HEAD_TAG;
2618                 break;
2619         case ATIO_ORDERED_QUEUE:
2620                 fcp_task_attr = MSG_ORDERED_TAG;
2621                 break;
2622         case ATIO_ACA_QUEUE:
2623                 fcp_task_attr = MSG_ACA_TAG;
2624                 break;
2625         case ATIO_UNTAGGED:
2626                 fcp_task_attr = MSG_SIMPLE_TAG;
2627                 break;
2628         default:
2629                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
2630                     "qla_target: unknown task code %x, use ORDERED instead\n",
2631                     task_codes);
2632                 fcp_task_attr = MSG_ORDERED_TAG;
2633                 break;
2634         }
2635
2636         return fcp_task_attr;
2637 }
2638
2639 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
2640                                         uint8_t *);
2641 /*
2642  * Process context for I/O path into tcm_qla2xxx code
2643  */
static void qlt_do_work(struct work_struct *work)
{
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
        scsi_qla_host_t *vha = cmd->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = ha->tgt.qla_tgt;
        struct qla_tgt_sess *sess = NULL;
        struct atio_from_isp *atio = &cmd->atio;
        unsigned char *cdb;
        unsigned long flags;
        uint32_t data_length;
        int ret, fcp_task_attr, data_dir, bidi = 0;

        if (tgt->tgt_stop)
                goto out_term;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
            atio->u.isp24.fcp_hdr.s_id);
        /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
        if (sess)
                kref_get(&sess->se_sess->sess_kref);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (unlikely(!sess)) {
                uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
                        "qla_target(%d): Unable to find wwn login"
                        " (s_id %x:%x:%x), trying to create it manually\n",
                        vha->vp_idx, s_id[0], s_id[1], s_id[2]);

                /* Multi-entry ATIOs cannot be handled here; drop them. */
                if (atio->u.raw.entry_count > 1) {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
                                "Dropping multy entry cmd %p\n", cmd);
                        goto out_term;
                }

                mutex_lock(&ha->tgt.tgt_mutex);
                sess = qlt_make_local_sess(vha, s_id);
                /* sess has an extra creation ref. */
                mutex_unlock(&ha->tgt.tgt_mutex);

                if (!sess)
                        goto out_term;
        }

        cmd->sess = sess;
        cmd->loop_id = sess->loop_id;
        cmd->conf_compl_supported = sess->conf_compl_supported;

        cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
        cmd->tag = atio->u.isp24.exchange_addr;
        cmd->unpacked_lun = scsilun_to_int(
            (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

        /* Derive the DMA direction from the FCP_CMND read/write bits. */
        if (atio->u.isp24.fcp_cmnd.rddata &&
            atio->u.isp24.fcp_cmnd.wrdata) {
                bidi = 1;
                data_dir = DMA_TO_DEVICE;
        } else if (atio->u.isp24.fcp_cmnd.rddata)
                data_dir = DMA_FROM_DEVICE;
        else if (atio->u.isp24.fcp_cmnd.wrdata)
                data_dir = DMA_TO_DEVICE;
        else
                data_dir = DMA_NONE;

        fcp_task_attr = qlt_get_fcp_task_attr(vha,
            atio->u.isp24.fcp_cmnd.task_attr);
        /* The 32-bit data length follows the (possibly empty) additional CDB. */
        data_length = be32_to_cpu(get_unaligned((uint32_t *)
            &atio->u.isp24.fcp_cmnd.add_cdb[
            atio->u.isp24.fcp_cmnd.add_cdb_len]));

        ql_dbg(ql_dbg_tgt, vha, 0xe022,
            "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
            cmd, cmd->unpacked_lun, cmd->tag);

        ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
            fcp_task_attr, data_dir, bidi);
        if (ret != 0)
                goto out_term;
        /*
         * Drop the extra session reference taken above (or the creation
         * reference from qlt_make_local_sess()).
         */
        ha->tgt.tgt_ops->put_sess(sess);
        return;

out_term:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
        /*
         * cmd has not sent to target yet, so pass NULL as the second
         * argument to qlt_send_term_exchange() and free the memory here.
         */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
        kmem_cache_free(qla_tgt_cmd_cachep, cmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        if (sess)
                ha->tgt.tgt_ops->put_sess(sess);
}
2744
2745 /* ha->hardware_lock supposed to be held on entry */
2746 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2747         struct atio_from_isp *atio)
2748 {
2749         struct qla_hw_data *ha = vha->hw;
2750         struct qla_tgt *tgt = ha->tgt.qla_tgt;
2751         struct qla_tgt_cmd *cmd;
2752
2753         if (unlikely(tgt->tgt_stop)) {
2754                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
2755                     "New command while device %p is shutting down\n", tgt);
2756                 return -EFAULT;
2757         }
2758
2759         cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
2760         if (!cmd) {
2761                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
2762                     "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
2763                 return -ENOMEM;
2764         }
2765
2766         INIT_LIST_HEAD(&cmd->cmd_list);
2767
2768         memcpy(&cmd->atio, atio, sizeof(*atio));
2769         cmd->state = QLA_TGT_STATE_NEW;
2770         cmd->tgt = ha->tgt.qla_tgt;
2771         cmd->vha = vha;
2772
2773         INIT_WORK(&cmd->work, qlt_do_work);
2774         queue_work(qla_tgt_wq, &cmd->work);
2775         return 0;
2776
2777 }
2778
2779 /* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
        int fn, void *iocb, int flags)
{
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_mgmt_cmd *mcmd;
        int res;
        uint8_t tmr_func;

        /* Management commands come from a dedicated mempool. */
        mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
        if (!mcmd) {
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
                    "qla_target(%d): Allocation of management "
                    "command failed, some commands and their data could "
                    "leak\n", vha->vp_idx);
                return -ENOMEM;
        }
        memset(mcmd, 0, sizeof(*mcmd));
        mcmd->sess = sess;

        /* Keep the originating IOCB so the completion can be acked later. */
        if (iocb) {
                memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
                    sizeof(mcmd->orig_iocb.imm_ntfy));
        }
        mcmd->tmr_func = fn;
        mcmd->flags = flags;

        /* Translate the driver TM function code to the core TMR code. */
        switch (fn) {
        case QLA_TGT_CLEAR_ACA:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
                    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
                tmr_func = TMR_CLEAR_ACA;
                break;

        case QLA_TGT_TARGET_RESET:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
                    "qla_target(%d): TARGET_RESET received\n",
                    sess->vha->vp_idx);
                tmr_func = TMR_TARGET_WARM_RESET;
                break;

        case QLA_TGT_LUN_RESET:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
                    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
                tmr_func = TMR_LUN_RESET;
                break;

        case QLA_TGT_CLEAR_TS:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
                    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
                tmr_func = TMR_CLEAR_TASK_SET;
                break;

        case QLA_TGT_ABORT_TS:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
                    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
                tmr_func = TMR_ABORT_TASK_SET;
                break;
#if 0
        case QLA_TGT_ABORT_ALL:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
                    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
                    sess->vha->vp_idx);
                tmr_func = 0;
                break;

        case QLA_TGT_ABORT_ALL_SESS:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
                    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
                    sess->vha->vp_idx);
                tmr_func = 0;
                break;

        case QLA_TGT_NEXUS_LOSS_SESS:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
                    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
                    sess->vha->vp_idx);
                tmr_func = 0;
                break;

        case QLA_TGT_NEXUS_LOSS:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
                    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
                tmr_func = 0;
                break;
#endif
        default:
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
                    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
                    sess->vha->vp_idx, fn);
                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
                return -ENOSYS;
        }

        /* On success the fabric layer owns mcmd and frees it on completion. */
        res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
        if (res != 0) {
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
                    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
                    sess->vha->vp_idx, res);
                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
                return -EFAULT;
        }

        return 0;
}
2885
2886 /* ha->hardware_lock supposed to be held on entry */
2887 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2888 {
2889         struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2890         struct qla_hw_data *ha = vha->hw;
2891         struct qla_tgt *tgt;
2892         struct qla_tgt_sess *sess;
2893         uint32_t lun, unpacked_lun;
2894         int lun_size, fn;
2895
2896         tgt = ha->tgt.qla_tgt;
2897
2898         lun = a->u.isp24.fcp_cmnd.lun;
2899         lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
2900         fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
2901         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2902             a->u.isp24.fcp_hdr.s_id);
2903         unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2904
2905         if (!sess) {
2906                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
2907                     "qla_target(%d): task mgmt fn 0x%x for "
2908                     "non-existant session\n", vha->vp_idx, fn);
2909                 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
2910                     sizeof(struct atio_from_isp));
2911         }
2912
2913         return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2914 }
2915
2916 /* ha->hardware_lock supposed to be held on entry */
2917 static int __qlt_abort_task(struct scsi_qla_host *vha,
2918         struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
2919 {
2920         struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2921         struct qla_hw_data *ha = vha->hw;
2922         struct qla_tgt_mgmt_cmd *mcmd;
2923         uint32_t lun, unpacked_lun;
2924         int rc;
2925
2926         mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2927         if (mcmd == NULL) {
2928                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
2929                     "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
2930                     vha->vp_idx, __func__);
2931                 return -ENOMEM;
2932         }
2933         memset(mcmd, 0, sizeof(*mcmd));
2934
2935         mcmd->sess = sess;
2936         memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2937             sizeof(mcmd->orig_iocb.imm_ntfy));
2938
2939         lun = a->u.isp24.fcp_cmnd.lun;
2940         unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2941
2942         rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
2943             le16_to_cpu(iocb->u.isp2x.seq_id));
2944         if (rc != 0) {
2945                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
2946                     "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2947                     vha->vp_idx, rc);
2948                 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2949                 return -EFAULT;
2950         }
2951
2952         return 0;
2953 }
2954
2955 /* ha->hardware_lock supposed to be held on entry */
2956 static int qlt_abort_task(struct scsi_qla_host *vha,
2957         struct imm_ntfy_from_isp *iocb)
2958 {
2959         struct qla_hw_data *ha = vha->hw;
2960         struct qla_tgt_sess *sess;
2961         int loop_id;
2962
2963         loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
2964
2965         sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
2966         if (sess == NULL) {
2967                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
2968                     "qla_target(%d): task abort for unexisting "
2969                     "session\n", vha->vp_idx);
2970                 return qlt_sched_sess_work(ha->tgt.qla_tgt,
2971                     QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
2972         }
2973
2974         return __qlt_abort_task(vha, iocb, sess);
2975 }
2976
2977 /*
2978  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
2979  */
2980 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
2981         struct imm_ntfy_from_isp *iocb)
2982 {
2983         struct qla_hw_data *ha = vha->hw;
2984         int res = 0;
2985
2986         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
2987             "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
2988             " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
2989             iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
2990             iocb->u.isp24.status_subcode);
2991
2992         switch (iocb->u.isp24.status_subcode) {
2993         case ELS_PLOGI:
2994         case ELS_FLOGI:
2995         case ELS_PRLI:
2996         case ELS_LOGO:
2997         case ELS_PRLO:
2998                 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
2999                 break;
3000         case ELS_PDISC:
3001         case ELS_ADISC:
3002         {
3003                 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3004                 if (tgt->link_reinit_iocb_pending) {
3005                         qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3006                             0, 0, 0, 0, 0, 0);
3007                         tgt->link_reinit_iocb_pending = 0;
3008                 }
3009                 res = 1; /* send notify ack */
3010                 break;
3011         }
3012
3013         default:
3014                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3015                     "qla_target(%d): Unsupported ELS command %x "
3016                     "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
3017                 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3018                 break;
3019         }
3020
3021         return res;
3022 }
3023
static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
        struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
        size_t first_offset = 0, rem_offset = offset, tmp = 0;
        int i, sg_srr_cnt, bufflen = 0;

        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
            "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
            "cmd->sg_cnt: %u, direction: %d\n",
            cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

        /*
         * FIXME: Reject non zero SRR relative offset until we can test
         * this code properly.
         */
        pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
        return -1;

        /*
         * NOTE: everything below is currently unreachable because of the
         * unconditional return above; it is retained for when the FIXME
         * is resolved.
         */
        if (!cmd->sg || !cmd->sg_cnt) {
                ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
                    "Missing cmd->sg or zero cmd->sg_cnt in"
                    " qla_tgt_set_data_offset\n");
                return -EINVAL;
        }
        /*
         * Walk the current cmd->sg list until we locate the new sg_srr_start
         */
        for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
                ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
                    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
                    i, sg, sg_page(sg), sg->length, sg->offset);

                if ((sg->length + tmp) > offset) {
                        /* Offset lands inside this entry. */
                        first_offset = rem_offset;
                        sg_srr_start = sg;
                        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
                            "Found matching sg[%d], using %p as sg_srr_start, "
                            "and using first_offset: %zu\n", i, sg,
                            first_offset);
                        break;
                }
                tmp += sg->length;
                rem_offset -= sg->length;
        }

        if (!sg_srr_start) {
                ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
                    "Unable to locate sg_srr_start for offset: %u\n", offset);
                return -EINVAL;
        }
        sg_srr_cnt = (cmd->sg_cnt - i);

        sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
        if (!sg_srr) {
                ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
                    "Unable to allocate sgp\n");
                return -ENOMEM;
        }
        sg_init_table(sg_srr, sg_srr_cnt);
        sgp = &sg_srr[0];
        /*
         * Walk the remaining list for sg_srr_start, mapping to the newly
         * allocated sg_srr taking first_offset into account.
         */
        for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
                if (first_offset) {
                        /* First entry: skip the already-transferred bytes. */
                        sg_set_page(sgp, sg_page(sg),
                            (sg->length - first_offset), first_offset);
                        first_offset = 0;
                } else {
                        sg_set_page(sgp, sg_page(sg), sg->length, 0);
                }
                bufflen += sgp->length;

                sgp = sg_next(sgp);
                if (!sgp)
                        break;
        }

        /* Install the trimmed list; free_sg tells qlt_free_cmd() to kfree it. */
        cmd->sg = sg_srr;
        cmd->sg_cnt = sg_srr_cnt;
        cmd->bufflen = bufflen;
        cmd->offset += offset;
        cmd->free_sg = 1;

        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
            cmd->sg_cnt);
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
            cmd->bufflen);
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
            cmd->offset);

        if (cmd->sg_cnt < 0)
                BUG();

        if (cmd->bufflen < 0)
                BUG();

        return 0;
}
3125
3126 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3127         uint32_t srr_rel_offs, int *xmit_type)
3128 {
3129         int res = 0, rel_offs;
3130
3131         rel_offs = srr_rel_offs - cmd->offset;
3132         ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3133             srr_rel_offs, rel_offs);
3134
3135         *xmit_type = QLA_TGT_XMIT_ALL;
3136
3137         if (rel_offs < 0) {
3138                 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3139                     "qla_target(%d): SRR rel_offs (%d) < 0",
3140                     cmd->vha->vp_idx, rel_offs);
3141                 res = -1;
3142         } else if (rel_offs == cmd->bufflen)
3143                 *xmit_type = QLA_TGT_XMIT_STATUS;
3144         else if (rel_offs > 0)
3145                 res = qlt_set_data_offset(cmd, rel_offs);
3146
3147         return res;
3148 }
3149
/* No locks, thread context */
/*
 * Process one matched SRR pair: the immediate notify from the initiator
 * (@imm) and the CTIO that completed with SRR status (@sctio).  Either
 * accepts the SRR with a notify ack and re-drives the affected phase of
 * the command, or rejects it and terminates the exchange.
 */
static void qlt_handle_srr(struct scsi_qla_host *vha,
	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
{
	struct imm_ntfy_from_isp *ntfy =
	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_cmd *cmd = sctio->cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	unsigned long flags;
	int xmit_type = 0, resp = 0;
	uint32_t offset;
	uint16_t srr_ui;

	/* SRR IU fields: what to retransmit, and from which relative offset */
	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
	srr_ui = ntfy->u.isp24.srr_ui;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
	    cmd, srr_ui);

	switch (srr_ui) {
	case SRR_IU_STATUS:
		/* Initiator only wants the status IU again: ack and resend */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		qlt_send_notify_ack(vha, ntfy,
		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		xmit_type = QLA_TGT_XMIT_STATUS;
		resp = 1;
		break;
	case SRR_IU_DATA_IN:
		/* Retransmission of read data needs a valid SGL to read from */
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
			    "Unable to process SRR_IU_DATA_IN due to"
			    " missing cmd->sg, state: %d\n", cmd->state);
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			/* Data retransmit is only honored for GOOD status */
			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
			    "Rejecting SRR_IU_DATA_IN with non GOOD "
			    "scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			/* Shift the SGL to the requested relative offset */
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			resp = 1;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
			    "qla_target(%d): SRR for in data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	case SRR_IU_DATA_OUT:
		/* Initiator will resend write data: re-arm the data-out phase */
		if (!cmd->sg || !cmd->sg_cnt) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
			    "Unable to process SRR_IU_DATA_OUT due to"
			    " missing cmd->sg\n");
			dump_stack();
			goto out_reject;
		}
		if (se_cmd->scsi_status != 0) {
			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
			    "Rejecting SRR_IU_DATA_OUT"
			    " with non GOOD scsi_status\n");
			goto out_reject;
		}
		cmd->bufflen = se_cmd->data_length;

		if (qlt_has_data(cmd)) {
			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
				goto out_reject;
			spin_lock_irqsave(&ha->hardware_lock, flags);
			qlt_send_notify_ack(vha, ntfy,
			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			/* Only re-post buffers if data remains to transfer */
			if (xmit_type & QLA_TGT_XMIT_DATA)
				qlt_rdy_to_xfer(cmd);
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
			    "qla_target(%d): SRR for out data for cmd "
			    "without them (tag %d, SCSI status %d), "
			    "reject", vha->vp_idx, cmd->tag,
			    cmd->se_cmd.scsi_status);
			goto out_reject;
		}
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
		    "qla_target(%d): Unknown srr_ui value %x",
		    vha->vp_idx, srr_ui);
		goto out_reject;
	}

	/* Transmit response in case of status and data-in cases */
	if (resp)
		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);

	return;

out_reject:
	/* Reject the SRR toward the initiator, then clean up the command */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		/*
		 * Command was still waiting for write data; presumably the
		 * state flip lets the normal completion path finish it --
		 * NOTE(review): confirm against the CTIO completion handler.
		 */
		cmd->state = QLA_TGT_STATE_DATA_IN;
		dump_stack();
	} else
		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
3272
3273 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3274         struct qla_tgt_srr_imm *imm, int ha_locked)
3275 {
3276         struct qla_hw_data *ha = vha->hw;
3277         unsigned long flags = 0;
3278
3279         if (!ha_locked)
3280                 spin_lock_irqsave(&ha->hardware_lock, flags);
3281
3282         qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3283             NOTIFY_ACK_SRR_FLAGS_REJECT,
3284             NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3285             NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3286
3287         if (!ha_locked)
3288                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3289
3290         kfree(imm);
3291 }
3292
/*
 * SRR workqueue handler: pair up queued CTIO SRR entries with their
 * matching immediate-notify SRR entries (by srr_id) and process each
 * pair via qlt_handle_srr().  Runs in thread context without the
 * hardware lock held.
 */
static void qlt_handle_srr_work(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_tgt_srr_ctio *sctio;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
	    tgt);

restart:
	spin_lock_irqsave(&tgt->srr_lock, flags);
	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
		struct qla_tgt_srr_imm *imm, *i, *ti;
		struct qla_tgt_cmd *cmd;
		struct se_cmd *se_cmd;

		imm = NULL;
		/*
		 * Find the IMM SRR with the same id; any duplicates are
		 * protocol violations and get rejected/freed on the spot.
		 */
		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
						srr_list_entry) {
			if (i->srr_id == sctio->srr_id) {
				list_del(&i->srr_list_entry);
				if (imm) {
					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
					  "qla_target(%d): There must be "
					  "only one IMM SRR per CTIO SRR "
					  "(IMM SRR %p, id %d, CTIO %p\n",
					  vha->vp_idx, i, i->srr_id, sctio);
					qlt_reject_free_srr_imm(tgt->vha, i, 0);
				} else
					imm = i;
			}
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
		    sctio->srr_id);

		if (imm == NULL) {
			/* No partner yet; leave the CTIO queued and move on */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
			    "Not found matching IMM for SRR CTIO (id %d)\n",
			    sctio->srr_id);
			continue;
		} else
			list_del(&sctio->srr_list_entry);

		/* Drop the lock for the (sleepable) SRR processing below */
		spin_unlock_irqrestore(&tgt->srr_lock, flags);

		cmd = sctio->cmd;
		/*
		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
		 * logic..
		 */
		cmd->offset = 0;
		if (cmd->free_sg) {
			/* Drop a privately allocated SGL from a previous SRR */
			kfree(cmd->sg);
			cmd->sg = NULL;
			cmd->free_sg = 0;
		}
		se_cmd = &cmd->se_cmd;

		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "
		    "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);

		qlt_handle_srr(vha, sctio, imm);

		kfree(imm);
		kfree(sctio);
		/* List may have changed while unlocked; rescan from the top */
		goto restart;
	}
	spin_unlock_irqrestore(&tgt->srr_lock, flags);
}
3371
/* ha->hardware_lock supposed to be held on entry */
/*
 * Queue an incoming immediate-notify SRR for thread-context processing.
 * Assigns it the next imm_srr_id and, if its CTIO partner has already
 * arrived (imm_srr_id caught up with ctio_srr_id), schedules srr_work.
 * On allocation failure or a missing CTIO partner, the SRR is rejected.
 */
static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_tgt_srr_imm *imm;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_srr_ctio *sctio;

	/* Monotonic id used to pair this IMM with its CTIO SRR */
	tgt->imm_srr_id++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
	    vha->vp_idx);

	/* GFP_ATOMIC: we are called with hardware_lock held, IRQs off */
	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
	if (imm != NULL) {
		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		imm->srr_id = tgt->imm_srr_id;
		list_add_tail(&imm->srr_list_entry,
		    &tgt->srr_imm_list);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
			/* Both halves should now exist; verify and schedule */
			int found = 0;
			list_for_each_entry(sctio, &tgt->srr_ctio_list,
			    srr_list_entry) {
				if (sctio->srr_id == imm->srr_id) {
					found = 1;
					break;
				}
			}
			if (found) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
				    "Scheduling srr work\n");
				schedule_work(&tgt->srr_work);
			} else {
				/* Ids match but no CTIO partner: bail out */
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
				    "qla_target(%d): imm_srr_id "
				    "== ctio_srr_id (%d), but there is no "
				    "corresponding SRR CTIO, deleting IMM "
				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
				    imm);
				list_del(&imm->srr_list_entry);

				kfree(imm);

				spin_unlock(&tgt->srr_lock);
				goto out_reject;
			}
		}
		spin_unlock(&tgt->srr_lock);
	} else {
		/* Allocation failed: drop any CTIO partner and reject */
		struct qla_tgt_srr_ctio *ts;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
		    "qla_target(%d): Unable to allocate SRR IMM "
		    "entry, SRR request will be rejected\n", vha->vp_idx);

		/* IRQ is already OFF */
		spin_lock(&tgt->srr_lock);
		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
		    srr_list_entry) {
			if (sctio->srr_id == tgt->imm_srr_id) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
				    "CTIO SRR %p deleted (id %d)\n",
				    sctio, sctio->srr_id);
				list_del(&sctio->srr_list_entry);
				qlt_send_term_exchange(vha, sctio->cmd,
				    &sctio->cmd->atio, 1);
				kfree(sctio);
			}
		}
		spin_unlock(&tgt->srr_lock);
		goto out_reject;
	}

	return;

out_reject:
	/* hardware_lock is still held here, as required by the ack path */
	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
	    NOTIFY_ACK_SRR_FLAGS_REJECT,
	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
}
3460
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
/*
 * Dispatch an immediate notify IOCB from the firmware by status code.
 * Each handler that takes over responsibility for acknowledging the
 * notification (by returning 0, or unconditionally for SRR) clears
 * send_notify_ack; otherwise a generic notify ack is sent at the end.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		/* Abort everything; reset path acks on success (rc == 0) */
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = ha->tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		/* Ack any previously deferred LINK REINIT before replacing it */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires to wait after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		/* Log only; the generic ack below still goes out */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		/* Log only; the generic ack below still goes out */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		/* SRR path always sends its own (accept or reject) ack */
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}
3598
3599 /*
3600  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3601  * This function sends busy to ISP 2xxx or 24xx.
3602  */
3603 static void qlt_send_busy(struct scsi_qla_host *vha,
3604         struct atio_from_isp *atio, uint16_t status)
3605 {
3606         struct ctio7_to_24xx *ctio24;
3607         struct qla_hw_data *ha = vha->hw;
3608         request_t *pkt;
3609         struct qla_tgt_sess *sess = NULL;
3610
3611         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3612             atio->u.isp24.fcp_hdr.s_id);
3613         if (!sess) {
3614                 qlt_send_term_exchange(vha, NULL, atio, 1);
3615                 return;
3616         }
3617         /* Sending marker isn't necessary, since we called from ISR */
3618
3619         pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3620         if (!pkt) {
3621                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
3622                     "qla_target(%d): %s failed: unable to allocate "
3623                     "request packet", vha->vp_idx, __func__);
3624                 return;
3625         }
3626
3627         pkt->entry_count = 1;
3628         pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3629
3630         ctio24 = (struct ctio7_to_24xx *)pkt;
3631         ctio24->entry_type = CTIO_TYPE7;
3632         ctio24->nport_handle = sess->loop_id;
3633         ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
3634         ctio24->vp_index = vha->vp_idx;
3635         ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3636         ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3637         ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3638         ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3639         ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3640             __constant_cpu_to_le16(
3641                 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
3642                 CTIO7_FLAGS_DONT_RET_CTIO);
3643         /*
3644          * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
3645          * if the explicit conformation is used.
3646          */
3647         ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
3648         ctio24->u.status1.scsi_status = cpu_to_le16(status);
3649         ctio24->u.status1.residual = get_unaligned((uint32_t *)
3650             &atio->u.isp24.fcp_cmnd.add_cdb[
3651             atio->u.isp24.fcp_cmnd.add_cdb_len]);
3652         if (ctio24->u.status1.residual != 0)
3653                 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3654
3655         qla2x00_start_iocbs(vha, vha->req);
3656 }
3657
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * Entry point for ATIO-queue packets: dispatches new SCSI commands /
 * task management functions (ATIO_TYPE7) and immediate notifications
 * (IMMED_NOTIFY_TYPE) to their handlers.
 */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		/* Target mode not configured on this host; drop the packet */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	/* Track in-flight IRQ-context work for this target */
	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			/* No usable exchange: push back with QUEUE FULL */
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		/* Regular command vs. task management function */
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					/* Shutting down; silently drop */
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
3760
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
/*
 * qlt_response_pkt() - dispatch one response-queue entry to target-mode code.
 *
 * Switches on pkt->entry_type: CTIO completions (24xx and 2xxx variants),
 * ACCEPT_TGT_IO (2xxx ATIO delivered on the response queue), immediate
 * notifies, NOTIFY_ACK and ABTS request/response handling for 24xx.
 * Unknown entry types are only logged.
 *
 * irq_cmd_count brackets the whole dispatch; presumably the stop/teardown
 * paths use it to wait out in-flight IRQ processing — confirm against
 * qlt_stop_phase1() before relying on that.
 */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	/* Target mode never attached to this HA: nothing to do. */
	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		/* Combine IOCB status (low 16) with entry_status (high 16). */
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		/* Only ATIOs carrying a valid CDB are handed to the core. */
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0], (unsigned long
		    int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
				/* No session for the initiator. */
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				/*
				 * While stopping: terminate the exchange;
				 * otherwise report BUSY so the initiator
				 * retries later.
				 */
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		/* Only expected if we previously queued a notify-ack IOCB. */
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		/* Only expected if we previously queued an ABTS response. */
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				/* 0x1E:0 = "exchange not terminated", see
				 * the race description below. */
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitely
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
3961
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
/*
 * qlt_async_event() - react to a firmware async (mailbox) event for
 * target mode.
 *
 * @code:    MBA_* async event code from the firmware.
 * @vha:     host the event was delivered to.
 * @mailbox: raw mailbox registers; mailbox[0..3] are logged (and mailbox[2]
 *           is decoded for MBA_PORT_UPDATE).
 *
 * Most events are only logged; WAKEUP_THRES schedules an ISP abort and
 * LOOP_UP flushes a pending link-reinit notify-ack.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	/* No fabric module registered: target mode is not in use. */
	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	/* These two events are not meaningful on ISP2100 hardware. */
	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		/* Ask the DPC thread to reset the ISP. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		/* Link is back: ack a deferred link-reinit immediate notify. */
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		/* mailbox[2] carries the login state change code. */
		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}
4063
4064 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4065         uint16_t loop_id)
4066 {
4067         fc_port_t *fcport;
4068         int rc;
4069
4070         fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4071         if (!fcport) {
4072                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4073                     "qla_target(%d): Allocation of tmp FC port failed",
4074                     vha->vp_idx);
4075                 return NULL;
4076         }
4077
4078         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
4079
4080         fcport->loop_id = loop_id;
4081
4082         rc = qla2x00_get_port_database(vha, fcport, 0);
4083         if (rc != QLA_SUCCESS) {
4084                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4085                     "qla_target(%d): Failed to retrieve fcport "
4086                     "information -- get_port_database() returned %x "
4087                     "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4088                 kfree(fcport);
4089                 return NULL;
4090         }
4091
4092         return fcport;
4093 }
4094
4095 /* Must be called under tgt_mutex */
4096 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4097         uint8_t *s_id)
4098 {
4099         struct qla_hw_data *ha = vha->hw;
4100         struct qla_tgt_sess *sess = NULL;
4101         fc_port_t *fcport = NULL;
4102         int rc, global_resets;
4103         uint16_t loop_id = 0;
4104
4105 retry:
4106         global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
4107
4108         rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4109         if (rc != 0) {
4110                 if ((s_id[0] == 0xFF) &&
4111                     (s_id[1] == 0xFC)) {
4112                         /*
4113                          * This is Domain Controller, so it should be
4114                          * OK to drop SCSI commands from it.
4115                          */
4116                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4117                             "Unable to find initiator with S_ID %x:%x:%x",
4118                             s_id[0], s_id[1], s_id[2]);
4119                 } else
4120                         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4121                             "qla_target(%d): Unable to find "
4122                             "initiator with S_ID %x:%x:%x",
4123                             vha->vp_idx, s_id[0], s_id[1],
4124                             s_id[2]);
4125                 return NULL;
4126         }
4127
4128         fcport = qlt_get_port_database(vha, loop_id);
4129         if (!fcport)
4130                 return NULL;
4131
4132         if (global_resets !=
4133             atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
4134                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4135                     "qla_target(%d): global reset during session discovery "
4136                     "(counter was %d, new %d), retrying", vha->vp_idx,
4137                     global_resets,
4138                     atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
4139                 goto retry;
4140         }
4141
4142         sess = qlt_create_sess(vha, fcport, true);
4143
4144         kfree(fcport);
4145         return sess;
4146 }
4147
4148 static void qlt_abort_work(struct qla_tgt *tgt,
4149         struct qla_tgt_sess_work_param *prm)
4150 {
4151         struct scsi_qla_host *vha = tgt->vha;
4152         struct qla_hw_data *ha = vha->hw;
4153         struct qla_tgt_sess *sess = NULL;
4154         unsigned long flags;
4155         uint32_t be_s_id;
4156         uint8_t s_id[3];
4157         int rc;
4158
4159         spin_lock_irqsave(&ha->hardware_lock, flags);
4160
4161         if (tgt->tgt_stop)
4162                 goto out_term;
4163
4164         s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4165         s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4166         s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4167
4168         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4169             (unsigned char *)&be_s_id);
4170         if (!sess) {
4171                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4172
4173                 mutex_lock(&ha->tgt.tgt_mutex);
4174                 sess = qlt_make_local_sess(vha, s_id);
4175                 /* sess has got an extra creation ref */
4176                 mutex_unlock(&ha->tgt.tgt_mutex);
4177
4178                 spin_lock_irqsave(&ha->hardware_lock, flags);
4179                 if (!sess)
4180                         goto out_term;
4181         } else {
4182                 kref_get(&sess->se_sess->sess_kref);
4183         }
4184
4185         if (tgt->tgt_stop)
4186                 goto out_term;
4187
4188         rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
4189         if (rc != 0)
4190                 goto out_term;
4191         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4192
4193         ha->tgt.tgt_ops->put_sess(sess);
4194         return;
4195
4196 out_term:
4197         qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
4198         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4199         if (sess)
4200                 ha->tgt.tgt_ops->put_sess(sess);
4201 }
4202
4203 static void qlt_tmr_work(struct qla_tgt *tgt,
4204         struct qla_tgt_sess_work_param *prm)
4205 {
4206         struct atio_from_isp *a = &prm->tm_iocb2;
4207         struct scsi_qla_host *vha = tgt->vha;
4208         struct qla_hw_data *ha = vha->hw;
4209         struct qla_tgt_sess *sess = NULL;
4210         unsigned long flags;
4211         uint8_t *s_id = NULL; /* to hide compiler warnings */
4212         int rc;
4213         uint32_t lun, unpacked_lun;
4214         int lun_size, fn;
4215         void *iocb;
4216
4217         spin_lock_irqsave(&ha->hardware_lock, flags);
4218
4219         if (tgt->tgt_stop)
4220                 goto out_term;
4221
4222         s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
4223         sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
4224         if (!sess) {
4225                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4226
4227                 mutex_lock(&ha->tgt.tgt_mutex);
4228                 sess = qlt_make_local_sess(vha, s_id);
4229                 /* sess has got an extra creation ref */
4230                 mutex_unlock(&ha->tgt.tgt_mutex);
4231
4232                 spin_lock_irqsave(&ha->hardware_lock, flags);
4233                 if (!sess)
4234                         goto out_term;
4235         } else {
4236                 kref_get(&sess->se_sess->sess_kref);
4237         }
4238
4239         iocb = a;
4240         lun = a->u.isp24.fcp_cmnd.lun;
4241         lun_size = sizeof(lun);
4242         fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4243         unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4244
4245         rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4246         if (rc != 0)
4247                 goto out_term;
4248         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4249
4250         ha->tgt.tgt_ops->put_sess(sess);
4251         return;
4252
4253 out_term:
4254         qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
4255         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4256         if (sess)
4257                 ha->tgt.tgt_ops->put_sess(sess);
4258 }
4259
4260 static void qlt_sess_work_fn(struct work_struct *work)
4261 {
4262         struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
4263         struct scsi_qla_host *vha = tgt->vha;
4264         unsigned long flags;
4265
4266         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
4267
4268         spin_lock_irqsave(&tgt->sess_work_lock, flags);
4269         while (!list_empty(&tgt->sess_works_list)) {
4270                 struct qla_tgt_sess_work_param *prm = list_entry(
4271                     tgt->sess_works_list.next, typeof(*prm),
4272                     sess_works_list_entry);
4273
4274                 /*
4275                  * This work can be scheduled on several CPUs at time, so we
4276                  * must delete the entry to eliminate double processing
4277                  */
4278                 list_del(&prm->sess_works_list_entry);
4279
4280                 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4281
4282                 switch (prm->type) {
4283                 case QLA_TGT_SESS_WORK_ABORT:
4284                         qlt_abort_work(tgt, prm);
4285                         break;
4286                 case QLA_TGT_SESS_WORK_TM:
4287                         qlt_tmr_work(tgt, prm);
4288                         break;
4289                 default:
4290                         BUG_ON(1);
4291                         break;
4292                 }
4293
4294                 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4295
4296                 kfree(prm);
4297         }
4298         spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4299 }
4300
/* Must be called under tgt_host_action_mutex */
/*
 * qlt_add_target() - allocate and initialize the struct qla_tgt for a host
 * and publish it on the global qla_tgt_glist for later lport registration.
 *
 * Returns 0 on success (also when target mode is disabled or unsupported —
 * both are treated as "nothing to do"), -ENOMEM on allocation failure.
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	/* Compiled-out or disabled target mode: silently succeed. */
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	/* A target must not already be attached to this HA. */
	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	/* Advertise target capability on the SCSI host template. */
	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	/* Initialize bookkeeping: session lists, deferred-work items and
	 * the SRR (sequence retransmission request) machinery. */
	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	/* Make the target visible to qlt_lport_register(). */
	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
4363
4364 /* Must be called under tgt_host_action_mutex */
4365 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
4366 {
4367         if (!ha->tgt.qla_tgt)
4368                 return 0;
4369
4370         mutex_lock(&qla_tgt_mutex);
4371         list_del(&ha->tgt.qla_tgt->tgt_list_entry);
4372         mutex_unlock(&qla_tgt_mutex);
4373
4374         ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
4375             vha->host_no, ha);
4376         qlt_release(ha->tgt.qla_tgt);
4377
4378         return 0;
4379 }
4380
4381 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
4382         unsigned char *b)
4383 {
4384         int i;
4385
4386         pr_debug("qla2xxx HW vha->node_name: ");
4387         for (i = 0; i < WWN_SIZE; i++)
4388                 pr_debug("%02x ", vha->node_name[i]);
4389         pr_debug("\n");
4390         pr_debug("qla2xxx HW vha->port_name: ");
4391         for (i = 0; i < WWN_SIZE; i++)
4392                 pr_debug("%02x ", vha->port_name[i]);
4393         pr_debug("\n");
4394
4395         pr_debug("qla2xxx passed configfs WWPN: ");
4396         put_unaligned_be64(wwpn, b);
4397         for (i = 0; i < WWN_SIZE; i++)
4398                 pr_debug("%02x ", b[i]);
4399         pr_debug("\n");
4400 }
4401
4402 /**
4403  * qla_tgt_lport_register - register lport with external module
4404  *
4405  * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
4406  * @wwpn: Passwd FC target WWPN
4407  * @callback:  lport initialization callback for tcm_qla2xxx code
4408  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
4409  */
4410 int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4411         int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
4412 {
4413         struct qla_tgt *tgt;
4414         struct scsi_qla_host *vha;
4415         struct qla_hw_data *ha;
4416         struct Scsi_Host *host;
4417         unsigned long flags;
4418         int rc;
4419         u8 b[WWN_SIZE];
4420
4421         mutex_lock(&qla_tgt_mutex);
4422         list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
4423                 vha = tgt->vha;
4424                 ha = vha->hw;
4425
4426                 host = vha->host;
4427                 if (!host)
4428                         continue;
4429
4430                 if (ha->tgt.tgt_ops != NULL)
4431                         continue;
4432
4433                 if (!(host->hostt->supported_mode & MODE_TARGET))
4434                         continue;
4435
4436                 spin_lock_irqsave(&ha->hardware_lock, flags);
4437                 if (host->active_mode & MODE_TARGET) {
4438                         pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
4439                             host->host_no);
4440                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4441                         continue;
4442                 }
4443                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4444
4445                 if (!scsi_host_get(host)) {
4446                         ql_dbg(ql_dbg_tgt, vha, 0xe068,
4447                             "Unable to scsi_host_get() for"
4448                             " qla2xxx scsi_host\n");
4449                         continue;
4450                 }
4451                 qlt_lport_dump(vha, wwpn, b);
4452
4453                 if (memcmp(vha->port_name, b, WWN_SIZE)) {
4454                         scsi_host_put(host);
4455                         continue;
4456                 }
4457                 /*
4458                  * Setup passed parameters ahead of invoking callback
4459                  */
4460                 ha->tgt.tgt_ops = qla_tgt_ops;
4461                 ha->tgt.target_lport_ptr = target_lport_ptr;
4462                 rc = (*callback)(vha);
4463                 if (rc != 0) {
4464                         ha->tgt.tgt_ops = NULL;
4465                         ha->tgt.target_lport_ptr = NULL;
4466                 }
4467                 mutex_unlock(&qla_tgt_mutex);
4468                 return rc;
4469         }
4470         mutex_unlock(&qla_tgt_mutex);
4471
4472         return -ENODEV;
4473 }
4474 EXPORT_SYMBOL(qlt_lport_register);
4475
4476 /**
4477  * qla_tgt_lport_deregister - Degister lport
4478  *
4479  * @vha:  Registered scsi_qla_host pointer
4480  */
4481 void qlt_lport_deregister(struct scsi_qla_host *vha)
4482 {
4483         struct qla_hw_data *ha = vha->hw;
4484         struct Scsi_Host *sh = vha->host;
4485         /*
4486          * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
4487          */
4488         ha->tgt.target_lport_ptr = NULL;
4489         ha->tgt.tgt_ops = NULL;
4490         /*
4491          * Release the Scsi_Host reference for the underlying qla2xxx host
4492          */
4493         scsi_host_put(sh);
4494 }
4495 EXPORT_SYMBOL(qlt_lport_deregister);
4496
4497 /* Must be called under HW lock */
4498 void qlt_set_mode(struct scsi_qla_host *vha)
4499 {
4500         struct qla_hw_data *ha = vha->hw;
4501
4502         switch (ql2x_ini_mode) {
4503         case QLA2XXX_INI_MODE_DISABLED:
4504         case QLA2XXX_INI_MODE_EXCLUSIVE:
4505                 vha->host->active_mode = MODE_TARGET;
4506                 break;
4507         case QLA2XXX_INI_MODE_ENABLED:
4508                 vha->host->active_mode |= MODE_TARGET;
4509                 break;
4510         default:
4511                 break;
4512         }
4513
4514         if (ha->tgt.ini_mode_force_reverse)
4515                 qla_reverse_ini_mode(vha);
4516 }
4517
4518 /* Must be called under HW lock */
4519 void qlt_clear_mode(struct scsi_qla_host *vha)
4520 {
4521         struct qla_hw_data *ha = vha->hw;
4522
4523         switch (ql2x_ini_mode) {
4524         case QLA2XXX_INI_MODE_DISABLED:
4525                 vha->host->active_mode = MODE_UNKNOWN;
4526                 break;
4527         case QLA2XXX_INI_MODE_EXCLUSIVE:
4528                 vha->host->active_mode = MODE_INITIATOR;
4529                 break;
4530         case QLA2XXX_INI_MODE_ENABLED:
4531                 vha->host->active_mode &= ~MODE_TARGET;
4532                 break;
4533         default:
4534                 break;
4535         }
4536
4537         if (ha->tgt.ini_mode_force_reverse)
4538                 qla_reverse_ini_mode(vha);
4539 }
4540
4541 /*
4542  * qla_tgt_enable_vha - NO LOCK HELD
4543  *
4544  * host_reset, bring up w/ Target Mode Enabled
4545  */
4546 void
4547 qlt_enable_vha(struct scsi_qla_host *vha)
4548 {
4549         struct qla_hw_data *ha = vha->hw;
4550         struct qla_tgt *tgt = ha->tgt.qla_tgt;
4551         unsigned long flags;
4552
4553         if (!tgt) {
4554                 ql_dbg(ql_dbg_tgt, vha, 0xe069,
4555                     "Unable to locate qla_tgt pointer from"
4556                     " struct qla_hw_data\n");
4557                 dump_stack();
4558                 return;
4559         }
4560
4561         spin_lock_irqsave(&ha->hardware_lock, flags);
4562         tgt->tgt_stopped = 0;
4563         qlt_set_mode(vha);
4564         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4565
4566         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4567         qla2xxx_wake_dpc(vha);
4568         qla2x00_wait_for_hba_online(vha);
4569 }
4570 EXPORT_SYMBOL(qlt_enable_vha);
4571
4572 /*
4573  * qla_tgt_disable_vha - NO LOCK HELD
4574  *
4575  * Disable Target Mode and reset the adapter
4576  */
4577 void
4578 qlt_disable_vha(struct scsi_qla_host *vha)
4579 {
4580         struct qla_hw_data *ha = vha->hw;
4581         struct qla_tgt *tgt = ha->tgt.qla_tgt;
4582         unsigned long flags;
4583
4584         if (!tgt) {
4585                 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
4586                     "Unable to locate qla_tgt pointer from"
4587                     " struct qla_hw_data\n");
4588                 dump_stack();
4589                 return;
4590         }
4591
4592         spin_lock_irqsave(&ha->hardware_lock, flags);
4593         qlt_clear_mode(vha);
4594         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4595
4596         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4597         qla2xxx_wake_dpc(vha);
4598         qla2x00_wait_for_hba_online(vha);
4599 }
4600
4601 /*
4602  * Called from qla_init.c:qla24xx_vport_create() contex to setup
4603  * the target mode specific struct scsi_qla_host and struct qla_hw_data
4604  * members.
4605  */
4606 void
4607 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
4608 {
4609         if (!qla_tgt_mode_enabled(vha))
4610                 return;
4611
4612         mutex_init(&ha->tgt.tgt_mutex);
4613         mutex_init(&ha->tgt.tgt_host_action_mutex);
4614
4615         qlt_clear_mode(vha);
4616
4617         /*
4618          * NOTE: Currently the value is kept the same for <24xx and
4619          * >=24xx ISPs. If it is necessary to change it,
4620          * the check should be added for specific ISPs,
4621          * assigning the value appropriately.
4622          */
4623         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
4624 }
4625
4626 void
4627 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
4628 {
4629         /*
4630          * FC-4 Feature bit 0 indicates target functionality to the name server.
4631          */
4632         if (qla_tgt_mode_enabled(vha)) {
4633                 if (qla_ini_mode_enabled(vha))
4634                         ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
4635                 else
4636                         ct_req->req.rff_id.fc4_feature = BIT_0;
4637         } else if (qla_ini_mode_enabled(vha)) {
4638                 ct_req->req.rff_id.fc4_feature = BIT_1;
4639         }
4640 }
4641
4642 /*
4643  * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
4644  * @ha: HA context
4645  *
4646  * Beginning of ATIO ring has initialization control block already built
4647  * by nvram config routine.
4648  *
4649  * Returns 0 on success.
4650  */
4651 void
4652 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
4653 {
4654         struct qla_hw_data *ha = vha->hw;
4655         uint16_t cnt;
4656         struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
4657
4658         if (!qla_tgt_mode_enabled(vha))
4659                 return;
4660
4661         for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
4662                 pkt->u.raw.signature = ATIO_PROCESSED;
4663                 pkt++;
4664         }
4665
4666 }
4667
4668 /*
4669  * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
4670  * @ha: SCSI driver HA context
4671  */
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @ha: SCSI driver HA context
 *
 * Walks the ATIO ring from the current consumer position, hands each
 * packet to qlt_24xx_atio_pkt_all_vps(), retires the entries it spans
 * by stamping them ATIO_PROCESSED, and finally publishes the new
 * consumer index to the hardware.
 *
 * NOTE(review): callers appear to hold ha->hardware_lock (see
 * qla83xx_msix_atio_q below) — confirm this holds for every call site.
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	/* Nothing to do until the host is up. */
	if (!vha->flags.online)
		return;

	/* Loop until we reach an entry we already consumed. */
	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		/* A packet may span several ring entries; retire each one
		 * and advance the consumer pointer, wrapping at the end. */
		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		/* Order the signature stores before the index write below. */
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
4705
/*
 * qlt_24xx_config_rings() - Reset the ATIO queue in/out registers and,
 * on MSI-X-capable ISPs, register the ATIO interrupt vector in the
 * init control block.
 */
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* Zero both queue pointers; read back to flush the writes. */
	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		/* NOTE(review): entry index 2 is assumed to be the ATIO
		 * queue vector (wired to qla83xx_msix_atio_q) — confirm
		 * against the driver's MSI-X setup code. */
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio que.\n",
		    msix->entry);
	}
}
4727
4728 void
4729 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
4730 {
4731         struct qla_hw_data *ha = vha->hw;
4732
4733         if (qla_tgt_mode_enabled(vha)) {
4734                 if (!ha->tgt.saved_set) {
4735                         /* We save only once */
4736                         ha->tgt.saved_exchange_count = nv->exchange_count;
4737                         ha->tgt.saved_firmware_options_1 =
4738                             nv->firmware_options_1;
4739                         ha->tgt.saved_firmware_options_2 =
4740                             nv->firmware_options_2;
4741                         ha->tgt.saved_firmware_options_3 =
4742                             nv->firmware_options_3;
4743                         ha->tgt.saved_set = 1;
4744                 }
4745
4746                 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
4747
4748                 /* Enable target mode */
4749                 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
4750
4751                 /* Disable ini mode, if requested */
4752                 if (!qla_ini_mode_enabled(vha))
4753                         nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
4754
4755                 /* Disable Full Login after LIP */
4756                 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4757                 /* Enable initial LIP */
4758                 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
4759                 /* Enable FC tapes support */
4760                 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4761                 /* Disable Full Login after LIP */
4762                 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4763                 /* Enable target PRLI control */
4764                 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
4765         } else {
4766                 if (ha->tgt.saved_set) {
4767                         nv->exchange_count = ha->tgt.saved_exchange_count;
4768                         nv->firmware_options_1 =
4769                             ha->tgt.saved_firmware_options_1;
4770                         nv->firmware_options_2 =
4771                             ha->tgt.saved_firmware_options_2;
4772                         nv->firmware_options_3 =
4773                             ha->tgt.saved_firmware_options_3;
4774                 }
4775                 return;
4776         }
4777
4778         /* out-of-order frames reassembly */
4779         nv->firmware_options_3 |= BIT_6|BIT_9;
4780
4781         if (ha->tgt.enable_class_2) {
4782                 if (vha->flags.init_done)
4783                         fc_host_supported_classes(vha->host) =
4784                                 FC_COS_CLASS2 | FC_COS_CLASS3;
4785
4786                 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
4787         } else {
4788                 if (vha->flags.init_done)
4789                         fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
4790
4791                 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
4792         }
4793 }
4794
4795 void
4796 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
4797         struct init_cb_24xx *icb)
4798 {
4799         struct qla_hw_data *ha = vha->hw;
4800
4801         if (ha->tgt.node_name_set) {
4802                 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
4803                 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
4804         }
4805 }
4806
/*
 * qlt_81xx_config_nvram_stage1() - Apply or revert target-mode NVRAM
 * firmware options for 81xx-series ISPs.
 * @vha: SCSI qla host
 * @nv: NVRAM image being prepared
 *
 * Mirrors qlt_24xx_config_nvram_stage1() for the 81xx NVRAM layout.
 * On the first pass with target mode active, the original option words
 * are saved once so they can be restored if target mode is disabled.
 */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	/* Target mode compiled out: nothing to adjust. */
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		/* Target mode off: restore the saved initiator options. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
4877
4878 void
4879 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
4880         struct init_cb_81xx *icb)
4881 {
4882         struct qla_hw_data *ha = vha->hw;
4883
4884         if (!QLA_TGT_MODE_ENABLED())
4885                 return;
4886
4887         if (ha->tgt.node_name_set) {
4888                 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
4889                 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
4890         }
4891 }
4892
4893 void
4894 qlt_83xx_iospace_config(struct qla_hw_data *ha)
4895 {
4896         if (!QLA_TGT_MODE_ENABLED())
4897                 return;
4898
4899         ha->msix_count += 1; /* For ATIO Q */
4900 }
4901
4902 int
4903 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
4904         struct sts_entry_24xx *pkt)
4905 {
4906         switch (pkt->entry_type) {
4907         case ABTS_RECV_24XX:
4908         case ABTS_RESP_24XX:
4909         case CTIO_TYPE7:
4910         case NOTIFY_ACK_TYPE:
4911                 return 1;
4912         default:
4913                 return 0;
4914         }
4915 }
4916
4917 void
4918 qlt_modify_vp_config(struct scsi_qla_host *vha,
4919         struct vp_config_entry_24xx *vpmod)
4920 {
4921         if (qla_tgt_mode_enabled(vha))
4922                 vpmod->options_idx1 &= ~BIT_5;
4923         /* Disable ini mode, if requested */
4924         if (!qla_ini_mode_enabled(vha))
4925                 vpmod->options_idx1 &= ~BIT_4;
4926 }
4927
4928 void
4929 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
4930 {
4931         if (!QLA_TGT_MODE_ENABLED())
4932                 return;
4933
4934         if  (ha->mqenable || IS_QLA83XX(ha)) {
4935                 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
4936                 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
4937         } else {
4938                 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
4939                 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
4940         }
4941
4942         mutex_init(&ha->tgt.tgt_mutex);
4943         mutex_init(&ha->tgt.tgt_host_action_mutex);
4944         qlt_clear_mode(base_vha);
4945 }
4946
4947 irqreturn_t
4948 qla83xx_msix_atio_q(int irq, void *dev_id)
4949 {
4950         struct rsp_que *rsp;
4951         scsi_qla_host_t *vha;
4952         struct qla_hw_data *ha;
4953         unsigned long flags;
4954
4955         rsp = (struct rsp_que *) dev_id;
4956         ha = rsp->hw;
4957         vha = pci_get_drvdata(ha->pdev);
4958
4959         spin_lock_irqsave(&ha->hardware_lock, flags);
4960
4961         qlt_24xx_process_atio_queue(vha);
4962         qla24xx_process_response_queue(vha, rsp);
4963
4964         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4965
4966         return IRQ_HANDLED;
4967 }
4968
4969 int
4970 qlt_mem_alloc(struct qla_hw_data *ha)
4971 {
4972         if (!QLA_TGT_MODE_ENABLED())
4973                 return 0;
4974
4975         ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
4976             MAX_MULTI_ID_FABRIC, GFP_KERNEL);
4977         if (!ha->tgt.tgt_vp_map)
4978                 return -ENOMEM;
4979
4980         ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
4981             (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
4982             &ha->tgt.atio_dma, GFP_KERNEL);
4983         if (!ha->tgt.atio_ring) {
4984                 kfree(ha->tgt.tgt_vp_map);
4985                 return -ENOMEM;
4986         }
4987         return 0;
4988 }
4989
4990 void
4991 qlt_mem_free(struct qla_hw_data *ha)
4992 {
4993         if (!QLA_TGT_MODE_ENABLED())
4994                 return;
4995
4996         if (ha->tgt.atio_ring) {
4997                 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
4998                     sizeof(struct atio_from_isp), ha->tgt.atio_ring,
4999                     ha->tgt.atio_dma);
5000         }
5001         kfree(ha->tgt.tgt_vp_map);
5002 }
5003
5004 /* vport_slock to be held by the caller */
5005 void
5006 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
5007 {
5008         if (!QLA_TGT_MODE_ENABLED())
5009                 return;
5010
5011         switch (cmd) {
5012         case SET_VP_IDX:
5013                 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
5014                 break;
5015         case SET_AL_PA:
5016                 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
5017                 break;
5018         case RESET_VP_IDX:
5019                 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
5020                 break;
5021         case RESET_AL_PA:
5022                 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
5023                 break;
5024         }
5025 }
5026
5027 static int __init qlt_parse_ini_mode(void)
5028 {
5029         if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
5030                 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
5031         else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
5032                 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
5033         else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
5034                 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
5035         else
5036                 return false;
5037
5038         return true;
5039 }
5040
/*
 * qlt_init() - Module-init setup of the target-mode slab caches,
 * mempool and workqueue.
 *
 * Returns 1 when initiator mode is being disabled (callers react to
 * that), 0 on plain success, or a negative errno on failure.
 */
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	/* Target mode compiled out: nothing to allocate. */
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	/* Pre-reserve 25 mgmt commands so task management can proceed
	 * even under memory pressure. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

	/* Unwind in reverse allocation order; note that the plain "out"
	 * label frees the first cache (qla_tgt_cmd_cachep). */
out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}
5102
/*
 * qlt_exit() - Module-exit teardown; destroys everything qlt_init()
 * created, in reverse order of allocation.
 */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	/* The workqueue must go first: its work items may still use the
	 * mempool and caches destroyed below. */
	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}