1 /*******************************************************************************
2 * Filename: target_core_transport.c
4 * This file contains the Generic Target Engine Core.
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 * Nicholas A. Bellinger <nab@kernel.org>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 ******************************************************************************/
29 #include <linux/net.h>
30 #include <linux/delay.h>
31 #include <linux/string.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h>
36 #include <linux/kthread.h>
38 #include <linux/cdrom.h>
39 #include <linux/module.h>
40 #include <linux/ratelimit.h>
41 #include <asm/unaligned.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <scsi/scsi_tcq.h>
48 #include <target/target_core_base.h>
49 #include <target/target_core_backend.h>
50 #include <target/target_core_fabric.h>
51 #include <target/target_core_configfs.h>
53 #include "target_core_internal.h"
54 #include "target_core_alua.h"
55 #include "target_core_pr.h"
56 #include "target_core_ua.h"
58 static int sub_api_initialized;
60 static struct workqueue_struct *target_completion_wq;
61 static struct kmem_cache *se_sess_cache;
62 struct kmem_cache *se_ua_cache;
63 struct kmem_cache *t10_pr_reg_cache;
64 struct kmem_cache *t10_alua_lu_gp_cache;
65 struct kmem_cache *t10_alua_lu_gp_mem_cache;
66 struct kmem_cache *t10_alua_tg_pt_gp_cache;
67 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
69 static int transport_generic_write_pending(struct se_cmd *);
70 static int transport_processing_thread(void *param);
71 static void transport_complete_task_attr(struct se_cmd *cmd);
72 static void transport_handle_queue_full(struct se_cmd *cmd,
73 struct se_device *dev);
74 static int transport_generic_get_mem(struct se_cmd *cmd);
75 static void transport_put_cmd(struct se_cmd *cmd);
76 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
77 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
78 static void target_complete_ok_work(struct work_struct *work);
80 int init_se_kmem_caches(void)
82 se_sess_cache = kmem_cache_create("se_sess_cache",
83 sizeof(struct se_session), __alignof__(struct se_session),
86 pr_err("kmem_cache_create() for struct se_session failed\n");
90 se_ua_cache = kmem_cache_create("se_ua_cache",
91 sizeof(struct se_ua), __alignof__(struct se_ua),
94 pr_err("kmem_cache_create() for struct se_ua failed\n");
95 goto out_free_sess_cache;
97 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
98 sizeof(struct t10_pr_registration),
99 __alignof__(struct t10_pr_registration), 0, NULL);
100 if (!t10_pr_reg_cache) {
101 pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
103 goto out_free_ua_cache;
105 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
106 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
108 if (!t10_alua_lu_gp_cache) {
109 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
111 goto out_free_pr_reg_cache;
113 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
114 sizeof(struct t10_alua_lu_gp_member),
115 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
116 if (!t10_alua_lu_gp_mem_cache) {
117 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
119 goto out_free_lu_gp_cache;
121 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
122 sizeof(struct t10_alua_tg_pt_gp),
123 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
124 if (!t10_alua_tg_pt_gp_cache) {
125 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
127 goto out_free_lu_gp_mem_cache;
129 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
130 "t10_alua_tg_pt_gp_mem_cache",
131 sizeof(struct t10_alua_tg_pt_gp_member),
132 __alignof__(struct t10_alua_tg_pt_gp_member),
134 if (!t10_alua_tg_pt_gp_mem_cache) {
135 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_mem_cache failed\n");
137 goto out_free_tg_pt_gp_cache;
140 target_completion_wq = alloc_workqueue("target_completion",
142 if (!target_completion_wq)
143 goto out_free_tg_pt_gp_mem_cache;
147 out_free_tg_pt_gp_mem_cache:
148 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
149 out_free_tg_pt_gp_cache:
150 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
151 out_free_lu_gp_mem_cache:
152 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
153 out_free_lu_gp_cache:
154 kmem_cache_destroy(t10_alua_lu_gp_cache);
155 out_free_pr_reg_cache:
156 kmem_cache_destroy(t10_pr_reg_cache);
158 kmem_cache_destroy(se_ua_cache);
160 kmem_cache_destroy(se_sess_cache);
165 void release_se_kmem_caches(void)
167 destroy_workqueue(target_completion_wq);
168 kmem_cache_destroy(se_sess_cache);
169 kmem_cache_destroy(se_ua_cache);
170 kmem_cache_destroy(t10_pr_reg_cache);
171 kmem_cache_destroy(t10_alua_lu_gp_cache);
172 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
173 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
174 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
177 /* This code ensures unique mib indexes are handed out. */
178 static DEFINE_SPINLOCK(scsi_mib_index_lock);
179 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
182 * Allocate a new row index for the entry type specified
184 u32 scsi_get_new_index(scsi_index_t type)
188 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
190 spin_lock(&scsi_mib_index_lock);
191 new_index = ++scsi_mib_index[type];
192 spin_unlock(&scsi_mib_index_lock);
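/*
 * Illustrative usage (editorial sketch, not part of the original file):
 * callers pass one of the scsi_index_t values from target_core_base.h and
 * receive a unique, monotonically increasing index for that type, e.g.:
 *
 *	u32 dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */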
197 static void transport_init_queue_obj(struct se_queue_obj *qobj)
199 atomic_set(&qobj->queue_cnt, 0);
200 INIT_LIST_HEAD(&qobj->qobj_list);
201 init_waitqueue_head(&qobj->thread_wq);
202 spin_lock_init(&qobj->cmd_queue_lock);
205 void transport_subsystem_check_init(void)
209 if (sub_api_initialized)
212 ret = request_module("target_core_iblock");
214 pr_err("Unable to load target_core_iblock\n");
216 ret = request_module("target_core_file");
218 pr_err("Unable to load target_core_file\n");
220 ret = request_module("target_core_pscsi");
222 pr_err("Unable to load target_core_pscsi\n");
224 ret = request_module("target_core_stgt");
226 pr_err("Unable to load target_core_stgt\n");
228 sub_api_initialized = 1;
232 struct se_session *transport_init_session(void)
234 struct se_session *se_sess;
236 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
238 pr_err("Unable to allocate struct se_session from se_sess_cache\n");
240 return ERR_PTR(-ENOMEM);
242 INIT_LIST_HEAD(&se_sess->sess_list);
243 INIT_LIST_HEAD(&se_sess->sess_acl_list);
244 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
245 INIT_LIST_HEAD(&se_sess->sess_wait_list);
246 spin_lock_init(&se_sess->sess_cmd_lock);
247 kref_init(&se_sess->sess_kref);
251 EXPORT_SYMBOL(transport_init_session);
254 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
256 void __transport_register_session(
257 struct se_portal_group *se_tpg,
258 struct se_node_acl *se_nacl,
259 struct se_session *se_sess,
260 void *fabric_sess_ptr)
262 unsigned char buf[PR_REG_ISID_LEN];
264 se_sess->se_tpg = se_tpg;
265 se_sess->fabric_sess_ptr = fabric_sess_ptr;
267 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions.
269 * Only set for struct se_session's that will actually be moving I/O.
270 * eg: *NOT* discovery sessions.
274 * If the fabric module supports an ISID based TransportID,
275 * save this value in binary from the fabric I_T Nexus now.
277 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
278 memset(&buf[0], 0, PR_REG_ISID_LEN);
279 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
280 &buf[0], PR_REG_ISID_LEN);
281 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
283 kref_get(&se_nacl->acl_kref);
285 spin_lock_irq(&se_nacl->nacl_sess_lock);
287 * The se_nacl->nacl_sess pointer will be set to the
288 * last active I_T Nexus for each struct se_node_acl.
290 se_nacl->nacl_sess = se_sess;
292 list_add_tail(&se_sess->sess_acl_list,
293 &se_nacl->acl_sess_list);
294 spin_unlock_irq(&se_nacl->nacl_sess_lock);
296 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
298 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
299 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
301 EXPORT_SYMBOL(__transport_register_session);
303 void transport_register_session(
304 struct se_portal_group *se_tpg,
305 struct se_node_acl *se_nacl,
306 struct se_session *se_sess,
307 void *fabric_sess_ptr)
311 spin_lock_irqsave(&se_tpg->session_lock, flags);
312 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
313 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
315 EXPORT_SYMBOL(transport_register_session);
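/*
 * Illustrative fabric-side usage (hedged sketch; the my_* names are
 * hypothetical): allocate a session, then register it against the TPG once
 * the I_T nexus is established:
 *
 *	struct se_session *se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(&my_tpg->se_tpg, se_nacl, se_sess,
 *				   my_fabric_conn);
 */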
317 void target_release_session(struct kref *kref)
319 struct se_session *se_sess = container_of(kref,
320 struct se_session, sess_kref);
321 struct se_portal_group *se_tpg = se_sess->se_tpg;
323 se_tpg->se_tpg_tfo->close_session(se_sess);
326 void target_get_session(struct se_session *se_sess)
328 kref_get(&se_sess->sess_kref);
330 EXPORT_SYMBOL(target_get_session);
332 void target_put_session(struct se_session *se_sess)
334 struct se_portal_group *tpg = se_sess->se_tpg;
336 if (tpg->se_tpg_tfo->put_session != NULL) {
337 tpg->se_tpg_tfo->put_session(se_sess);
340 kref_put(&se_sess->sess_kref, target_release_session);
342 EXPORT_SYMBOL(target_put_session);
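/*
 * Illustrative pairing (sketch): a fabric holding a temporary session
 * reference across an asynchronous callout balances the kref explicitly:
 *
 *	target_get_session(se_sess);
 *	... asynchronous work referencing se_sess ...
 *	target_put_session(se_sess);
 */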
344 static void target_complete_nacl(struct kref *kref)
346 struct se_node_acl *nacl = container_of(kref,
347 struct se_node_acl, acl_kref);
349 complete(&nacl->acl_free_comp);
352 void target_put_nacl(struct se_node_acl *nacl)
354 kref_put(&nacl->acl_kref, target_complete_nacl);
357 void transport_deregister_session_configfs(struct se_session *se_sess)
359 struct se_node_acl *se_nacl;
362 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
364 se_nacl = se_sess->se_node_acl;
366 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
367 if (se_nacl->acl_stop == 0)
368 list_del(&se_sess->sess_acl_list);
370 * If the session list is empty, then clear the pointer.
371 * Otherwise, set the struct se_session pointer from the tail
372 * element of the per struct se_node_acl active session list.
374 if (list_empty(&se_nacl->acl_sess_list))
375 se_nacl->nacl_sess = NULL;
377 se_nacl->nacl_sess = container_of(
378 se_nacl->acl_sess_list.prev,
379 struct se_session, sess_acl_list);
381 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
384 EXPORT_SYMBOL(transport_deregister_session_configfs);
386 void transport_free_session(struct se_session *se_sess)
388 kmem_cache_free(se_sess_cache, se_sess);
390 EXPORT_SYMBOL(transport_free_session);
392 void transport_deregister_session(struct se_session *se_sess)
394 struct se_portal_group *se_tpg = se_sess->se_tpg;
395 struct target_core_fabric_ops *se_tfo;
396 struct se_node_acl *se_nacl;
398 bool comp_nacl = true;
401 transport_free_session(se_sess);
404 se_tfo = se_tpg->se_tpg_tfo;
406 spin_lock_irqsave(&se_tpg->session_lock, flags);
407 list_del(&se_sess->sess_list);
408 se_sess->se_tpg = NULL;
409 se_sess->fabric_sess_ptr = NULL;
410 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
413 * Determine if we need to do extra work for this initiator node's
414 * struct se_node_acl if it had been previously dynamically generated.
416 se_nacl = se_sess->se_node_acl;
418 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
419 if (se_nacl && se_nacl->dynamic_node_acl) {
420 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
421 list_del(&se_nacl->acl_list);
422 se_tpg->num_node_acls--;
423 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
424 core_tpg_wait_for_nacl_pr_ref(se_nacl);
425 core_free_device_list_for_node(se_nacl, se_tpg);
426 se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
429 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
432 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
434 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
435 se_tpg->se_tpg_tfo->get_fabric_name());
437 * If the last kref is dropping now for an explicit NodeACL, wake the
438 * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group removal context.
441 if (se_nacl && comp_nacl)
442 target_put_nacl(se_nacl);
444 transport_free_session(se_sess);
446 EXPORT_SYMBOL(transport_deregister_session);
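/*
 * Illustrative teardown order for a fabric's close_session() path (sketch):
 * drop the configfs linkage first, then deregister from the TPG:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */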
449 * Called with cmd->t_state_lock held.
451 static void target_remove_from_state_list(struct se_cmd *cmd)
453 struct se_device *dev = cmd->se_dev;
459 if (cmd->transport_state & CMD_T_BUSY)
462 spin_lock_irqsave(&dev->execute_task_lock, flags);
463 if (cmd->state_active) {
464 list_del(&cmd->state_list);
465 cmd->state_active = false;
467 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
470 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
474 spin_lock_irqsave(&cmd->t_state_lock, flags);
476 * Determine if IOCTL context caller is requesting the stopping of this
477 * command for LUN shutdown purposes.
479 if (cmd->transport_state & CMD_T_LUN_STOP) {
480 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
481 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
483 cmd->transport_state &= ~CMD_T_ACTIVE;
484 if (remove_from_lists)
485 target_remove_from_state_list(cmd);
486 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
488 complete(&cmd->transport_lun_stop_comp);
492 if (remove_from_lists) {
493 target_remove_from_state_list(cmd);
496 * Clear struct se_cmd->se_lun before the handoff to FE.
502 * Determine if frontend context caller is requesting the stopping of
503 * this command for frontend exceptions.
505 if (cmd->transport_state & CMD_T_STOP) {
506 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
508 cmd->se_tfo->get_task_tag(cmd));
510 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
512 complete(&cmd->t_transport_stop_comp);
516 cmd->transport_state &= ~CMD_T_ACTIVE;
517 if (remove_from_lists) {
519 * Some fabric modules like tcm_loop can release
520 * their internally allocated I/O reference now and
523 * Fabric modules are expected to return '1' here if the
524 * se_cmd being passed is released at this point,
525 * or zero if not being released.
527 if (cmd->se_tfo->check_stop_free != NULL) {
528 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
529 return cmd->se_tfo->check_stop_free(cmd);
533 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
537 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
539 return transport_cmd_check_stop(cmd, true);
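/*
 * Illustrative ->check_stop_free() implementation (sketch; tcm_loop follows
 * this pattern): drop the fabric's descriptor reference and return nonzero
 * if the se_cmd was released as a result:
 *
 *	static int my_fabric_check_stop_free(struct se_cmd *se_cmd)
 *	{
 *		return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
 *	}
 */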
542 static void transport_lun_remove_cmd(struct se_cmd *cmd)
544 struct se_lun *lun = cmd->se_lun;
550 spin_lock_irqsave(&cmd->t_state_lock, flags);
551 if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
552 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
553 target_remove_from_state_list(cmd);
555 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
557 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
558 if (!list_empty(&cmd->se_lun_node))
559 list_del_init(&cmd->se_lun_node);
560 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
563 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
565 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
566 transport_lun_remove_cmd(cmd);
568 if (transport_cmd_check_stop_to_fabric(cmd))
571 transport_remove_cmd_from_queue(cmd);
572 transport_put_cmd(cmd);
576 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
579 struct se_device *dev = cmd->se_dev;
580 struct se_queue_obj *qobj = &dev->dev_queue_obj;
584 spin_lock_irqsave(&cmd->t_state_lock, flags);
585 cmd->t_state = t_state;
586 cmd->transport_state |= CMD_T_ACTIVE;
587 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
590 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
592 /* If the cmd is already on the list, remove it before we add it */
593 if (!list_empty(&cmd->se_queue_node))
594 list_del(&cmd->se_queue_node);
596 atomic_inc(&qobj->queue_cnt);
599 list_add(&cmd->se_queue_node, &qobj->qobj_list);
601 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
602 cmd->transport_state |= CMD_T_QUEUED;
603 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
605 wake_up_interruptible(&qobj->thread_wq);
608 static struct se_cmd *
609 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
614 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
615 if (list_empty(&qobj->qobj_list)) {
616 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
619 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
621 cmd->transport_state &= ~CMD_T_QUEUED;
622 list_del_init(&cmd->se_queue_node);
623 atomic_dec(&qobj->queue_cnt);
624 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
629 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
631 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
634 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
635 if (!(cmd->transport_state & CMD_T_QUEUED)) {
636 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
639 cmd->transport_state &= ~CMD_T_QUEUED;
640 atomic_dec(&qobj->queue_cnt);
641 list_del_init(&cmd->se_queue_node);
642 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
645 static void target_complete_failure_work(struct work_struct *work)
647 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
649 transport_generic_request_failure(cmd);
652 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
654 struct se_device *dev = cmd->se_dev;
655 int success = scsi_status == GOOD;
658 cmd->scsi_status = scsi_status;
661 spin_lock_irqsave(&cmd->t_state_lock, flags);
662 cmd->transport_state &= ~CMD_T_BUSY;
664 if (dev && dev->transport->transport_complete) {
665 if (dev->transport->transport_complete(cmd,
666 cmd->t_data_sg) != 0) {
667 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
673 * See if we are waiting to complete for an exception condition.
675 if (cmd->transport_state & CMD_T_REQUEST_STOP) {
676 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
677 complete(&cmd->task_stop_comp);
682 cmd->transport_state |= CMD_T_FAILED;
685 * Check for the case where an explicit ABORT_TASK has been received
686 * and transport_wait_for_tasks() will be waiting for completion.
688 if (cmd->transport_state & CMD_T_ABORTED &&
689 cmd->transport_state & CMD_T_STOP) {
690 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
691 complete(&cmd->t_transport_stop_comp);
693 } else if (cmd->transport_state & CMD_T_FAILED) {
694 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
695 INIT_WORK(&cmd->work, target_complete_failure_work);
697 INIT_WORK(&cmd->work, target_complete_ok_work);
700 cmd->t_state = TRANSPORT_COMPLETE;
701 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
702 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
704 queue_work(target_completion_wq, &cmd->work);
706 EXPORT_SYMBOL(target_complete_cmd);
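/*
 * Illustrative backend usage (sketch): a subsystem plugin completes a
 * command from its I/O done path with a SAM status byte, e.g.:
 *
 *	target_complete_cmd(cmd, GOOD);			(on success)
 *	target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);	(on error)
 */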
708 static void target_add_to_state_list(struct se_cmd *cmd)
710 struct se_device *dev = cmd->se_dev;
713 spin_lock_irqsave(&dev->execute_task_lock, flags);
714 if (!cmd->state_active) {
715 list_add_tail(&cmd->state_list, &dev->state_list);
716 cmd->state_active = true;
718 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
722 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
725 static void target_qf_do_work(struct work_struct *work)
727 struct se_device *dev = container_of(work, struct se_device,
729 LIST_HEAD(qf_cmd_list);
730 struct se_cmd *cmd, *cmd_tmp;
732 spin_lock_irq(&dev->qf_cmd_lock);
733 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
734 spin_unlock_irq(&dev->qf_cmd_lock);
736 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
737 list_del(&cmd->se_qf_node);
738 atomic_dec(&dev->dev_qf_count);
739 smp_mb__after_atomic_dec();
741 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
742 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
743 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
744 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" : "UNKNOWN");
747 transport_add_cmd_to_queue(cmd, cmd->t_state, true);
751 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
753 switch (cmd->data_direction) {
756 case DMA_FROM_DEVICE:
760 case DMA_BIDIRECTIONAL:
769 void transport_dump_dev_state(
770 struct se_device *dev,
774 *bl += sprintf(b + *bl, "Status: ");
775 switch (dev->dev_status) {
776 case TRANSPORT_DEVICE_ACTIVATED:
777 *bl += sprintf(b + *bl, "ACTIVATED");
779 case TRANSPORT_DEVICE_DEACTIVATED:
780 *bl += sprintf(b + *bl, "DEACTIVATED");
782 case TRANSPORT_DEVICE_SHUTDOWN:
783 *bl += sprintf(b + *bl, "SHUTDOWN");
785 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
786 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
787 *bl += sprintf(b + *bl, "OFFLINE");
790 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
794 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
795 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
796 dev->se_sub_dev->se_dev_attrib.block_size,
797 dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
798 *bl += sprintf(b + *bl, " ");
801 void transport_dump_vpd_proto_id(
803 unsigned char *p_buf,
806 unsigned char buf[VPD_TMP_BUF_SIZE];
809 memset(buf, 0, VPD_TMP_BUF_SIZE);
810 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
812 switch (vpd->protocol_identifier) {
814 sprintf(buf+len, "Fibre Channel\n");
817 sprintf(buf+len, "Parallel SCSI\n");
820 sprintf(buf+len, "SSA\n");
823 sprintf(buf+len, "IEEE 1394\n");
826 sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
830 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
833 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
836 sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
840 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
843 sprintf(buf+len, "Unknown 0x%02x\n",
844 vpd->protocol_identifier);
849 strncpy(p_buf, buf, p_buf_len);
855 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
858 * Check if the Protocol Identifier Valid (PIV) bit is set..
860 * from spc3r23.pdf section 7.5.1
862 if (page_83[1] & 0x80) {
863 vpd->protocol_identifier = (page_83[0] & 0xf0);
864 vpd->protocol_identifier_set = 1;
865 transport_dump_vpd_proto_id(vpd, NULL, 0);
868 EXPORT_SYMBOL(transport_set_vpd_proto_id);
870 int transport_dump_vpd_assoc(
872 unsigned char *p_buf,
875 unsigned char buf[VPD_TMP_BUF_SIZE];
879 memset(buf, 0, VPD_TMP_BUF_SIZE);
880 len = sprintf(buf, "T10 VPD Identifier Association: ");
882 switch (vpd->association) {
884 sprintf(buf+len, "addressed logical unit\n");
887 sprintf(buf+len, "target port\n");
890 sprintf(buf+len, "SCSI target device\n");
893 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
899 strncpy(p_buf, buf, p_buf_len);
906 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
909 * The VPD identification association..
911 * from spc3r23.pdf Section 7.6.3.1 Table 297
913 vpd->association = (page_83[1] & 0x30);
914 return transport_dump_vpd_assoc(vpd, NULL, 0);
916 EXPORT_SYMBOL(transport_set_vpd_assoc);
918 int transport_dump_vpd_ident_type(
920 unsigned char *p_buf,
923 unsigned char buf[VPD_TMP_BUF_SIZE];
927 memset(buf, 0, VPD_TMP_BUF_SIZE);
928 len = sprintf(buf, "T10 VPD Identifier Type: ");
930 switch (vpd->device_identifier_type) {
932 sprintf(buf+len, "Vendor specific\n");
935 sprintf(buf+len, "T10 Vendor ID based\n");
938 sprintf(buf+len, "EUI-64 based\n");
941 sprintf(buf+len, "NAA\n");
944 sprintf(buf+len, "Relative target port identifier\n");
947 sprintf(buf+len, "SCSI name string\n");
950 sprintf(buf+len, "Unsupported: 0x%02x\n",
951 vpd->device_identifier_type);
957 if (p_buf_len < strlen(buf)+1)
959 strncpy(p_buf, buf, p_buf_len);
967 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
970 * The VPD identifier type..
972 * from spc3r23.pdf Section 7.6.3.1 Table 298
974 vpd->device_identifier_type = (page_83[1] & 0x0f);
975 return transport_dump_vpd_ident_type(vpd, NULL, 0);
977 EXPORT_SYMBOL(transport_set_vpd_ident_type);
979 int transport_dump_vpd_ident(
981 unsigned char *p_buf,
984 unsigned char buf[VPD_TMP_BUF_SIZE];
987 memset(buf, 0, VPD_TMP_BUF_SIZE);
989 switch (vpd->device_identifier_code_set) {
990 case 0x01: /* Binary */
991 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
992 &vpd->device_identifier[0]);
994 case 0x02: /* ASCII */
995 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
996 &vpd->device_identifier[0]);
998 case 0x03: /* UTF-8 */
999 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1000 &vpd->device_identifier[0]);
1003 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1004 " 0x%02x", vpd->device_identifier_code_set);
1010 strncpy(p_buf, buf, p_buf_len);
1012 pr_debug("%s", buf);
1018 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1020 static const char hex_str[] = "0123456789abcdef";
1021 int j = 0, i = 4; /* offset to start of the identifier */
1024 * The VPD Code Set (encoding)
1026 * from spc3r23.pdf Section 7.6.3.1 Table 296
1028 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1029 switch (vpd->device_identifier_code_set) {
1030 case 0x01: /* Binary */
1031 vpd->device_identifier[j++] =
1032 hex_str[vpd->device_identifier_type];
1033 while (i < (4 + page_83[3])) {
1034 vpd->device_identifier[j++] =
1035 hex_str[(page_83[i] & 0xf0) >> 4];
1036 vpd->device_identifier[j++] =
1037 hex_str[page_83[i] & 0x0f];
1041 case 0x02: /* ASCII */
1042 case 0x03: /* UTF-8 */
1043 while (i < (4 + page_83[3]))
1044 vpd->device_identifier[j++] = page_83[i++];
1050 return transport_dump_vpd_ident(vpd, NULL, 0);
1052 EXPORT_SYMBOL(transport_set_vpd_ident);
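/*
 * Illustrative flow (sketch) for consuming a single INQUIRY EVPD 0x83
 * designation descriptor with the helpers above; page_83 points at the
 * start of one descriptor:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);   (byte 0, bits 7:4, if PIV)
 *	transport_set_vpd_assoc(vpd, page_83);      (byte 1, bits 5:4)
 *	transport_set_vpd_ident_type(vpd, page_83); (byte 1, bits 3:0)
 *	transport_set_vpd_ident(vpd, page_83);      (code set + identifier)
 */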
1054 static void core_setup_task_attr_emulation(struct se_device *dev)
1057 * If this device is from Target_Core_Mod/pSCSI, disable the
1058 * SAM Task Attribute emulation.
1060 * This is currently not available in upstream Linux/SCSI Target
1061 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1063 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1064 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1068 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1069 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1070 " device\n", dev->transport->name,
1071 dev->transport->get_device_rev(dev));
1074 static void scsi_dump_inquiry(struct se_device *dev)
1076 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1080 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1082 for (i = 0; i < 8; i++)
1083 if (wwn->vendor[i] >= 0x20)
1084 buf[i] = wwn->vendor[i];
1088 pr_debug(" Vendor: %s\n", buf);
1090 for (i = 0; i < 16; i++)
1091 if (wwn->model[i] >= 0x20)
1092 buf[i] = wwn->model[i];
1096 pr_debug(" Model: %s\n", buf);
1098 for (i = 0; i < 4; i++)
1099 if (wwn->revision[i] >= 0x20)
1100 buf[i] = wwn->revision[i];
1104 pr_debug(" Revision: %s\n", buf);
1106 device_type = dev->transport->get_device_type(dev);
1107 pr_debug(" Type: %s ", scsi_device_type(device_type));
1108 pr_debug(" ANSI SCSI revision: %02x\n",
1109 dev->transport->get_device_rev(dev));
1112 struct se_device *transport_add_device_to_core_hba(
1114 struct se_subsystem_api *transport,
1115 struct se_subsystem_dev *se_dev,
1117 void *transport_dev,
1118 struct se_dev_limits *dev_limits,
1119 const char *inquiry_prod,
1120 const char *inquiry_rev)
1123 struct se_device *dev;
1125 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1127 pr_err("Unable to allocate memory for struct se_device\n");
1131 transport_init_queue_obj(&dev->dev_queue_obj);
1132 dev->dev_flags = device_flags;
1133 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1134 dev->dev_ptr = transport_dev;
1136 dev->se_sub_dev = se_dev;
1137 dev->transport = transport;
1138 INIT_LIST_HEAD(&dev->dev_list);
1139 INIT_LIST_HEAD(&dev->dev_sep_list);
1140 INIT_LIST_HEAD(&dev->dev_tmr_list);
1141 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1142 INIT_LIST_HEAD(&dev->state_list);
1143 INIT_LIST_HEAD(&dev->qf_cmd_list);
1144 spin_lock_init(&dev->execute_task_lock);
1145 spin_lock_init(&dev->delayed_cmd_lock);
1146 spin_lock_init(&dev->dev_reservation_lock);
1147 spin_lock_init(&dev->dev_status_lock);
1148 spin_lock_init(&dev->se_port_lock);
1149 spin_lock_init(&dev->se_tmr_lock);
1150 spin_lock_init(&dev->qf_cmd_lock);
1151 atomic_set(&dev->dev_ordered_id, 0);
1153 se_dev_set_default_attribs(dev, dev_limits);
1155 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1156 dev->creation_time = get_jiffies_64();
1157 spin_lock_init(&dev->stats_lock);
1159 spin_lock(&hba->device_lock);
1160 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1162 spin_unlock(&hba->device_lock);
1164 * Setup the SAM Task Attribute emulation for struct se_device
1166 core_setup_task_attr_emulation(dev);
1168 * Force PR and ALUA passthrough emulation with internal object use.
1170 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1172 * Setup the Reservations infrastructure for struct se_device
1174 core_setup_reservations(dev, force_pt);
1176 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1178 if (core_setup_alua(dev, force_pt) < 0)
1182 * Startup the struct se_device processing thread
1184 dev->process_thread = kthread_run(transport_processing_thread, dev,
1185 "LIO_%s", dev->transport->name);
1186 if (IS_ERR(dev->process_thread)) {
1187 pr_err("Unable to create kthread: LIO_%s\n",
1188 dev->transport->name);
1192 * Setup work_queue for QUEUE_FULL
1194 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1196 * Preload the initial INQUIRY const values if we are doing
1197 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1198 * passthrough because this is being provided by the backend LLD.
1199 * This is required so that transport_get_inquiry() copies these
1200 * originals once back into DEV_T10_WWN(dev) for the virtual device
1203 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1204 if (!inquiry_prod || !inquiry_rev) {
1205 pr_err("All non TCM/pSCSI plugins require"
1206 " INQUIRY consts\n");
1210 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1211 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1212 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1214 scsi_dump_inquiry(dev);
1218 kthread_stop(dev->process_thread);
1220 spin_lock(&hba->device_lock);
1221 list_del(&dev->dev_list);
1223 spin_unlock(&hba->device_lock);
1225 se_release_vpd_for_dev(dev);
1231 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1233 int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1235 struct se_device *dev = cmd->se_dev;
1237 if (cmd->unknown_data_length) {
1238 cmd->data_length = size;
1239 } else if (size != cmd->data_length) {
1240 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
1241 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1242 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1243 cmd->data_length, size, cmd->t_task_cdb[0]);
1245 cmd->cmd_spdtl = size;
1247 if (cmd->data_direction == DMA_TO_DEVICE) {
1248 pr_err("Rejecting underflow/overflow"
1250 goto out_invalid_cdb_field;
1253 * Reject READ_* or WRITE_* with overflow/underflow for
1254 * type SCF_SCSI_DATA_CDB.
1256 if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
1257 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1258 " CDB on non 512-byte sector setup subsystem"
1259 " plugin: %s\n", dev->transport->name);
1260 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
1261 goto out_invalid_cdb_field;
1264 if (size > cmd->data_length) {
1265 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1266 cmd->residual_count = (size - cmd->data_length);
1268 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1269 cmd->residual_count = (cmd->data_length - size);
1271 cmd->data_length = size;
1276 out_invalid_cdb_field:
1277 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1278 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
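/*
 * Worked example for the residual handling above (editorial sketch): if the
 * fabric announced an expected transfer length of 4096 bytes but the CDB
 * only moves 2048, SCF_UNDERFLOW_BIT is set, residual_count becomes 2048,
 * and cmd->data_length is trimmed to the CDB-derived 2048 bytes.
 */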
1283 * Used by fabric modules containing a local struct se_cmd within their
1284 * fabric dependent per I/O descriptor.
1286 void transport_init_se_cmd(
1288 struct target_core_fabric_ops *tfo,
1289 struct se_session *se_sess,
1293 unsigned char *sense_buffer)
1295 INIT_LIST_HEAD(&cmd->se_lun_node);
1296 INIT_LIST_HEAD(&cmd->se_delayed_node);
1297 INIT_LIST_HEAD(&cmd->se_qf_node);
1298 INIT_LIST_HEAD(&cmd->se_queue_node);
1299 INIT_LIST_HEAD(&cmd->se_cmd_list);
1300 INIT_LIST_HEAD(&cmd->state_list);
1301 init_completion(&cmd->transport_lun_fe_stop_comp);
1302 init_completion(&cmd->transport_lun_stop_comp);
1303 init_completion(&cmd->t_transport_stop_comp);
1304 init_completion(&cmd->cmd_wait_comp);
1305 init_completion(&cmd->task_stop_comp);
1306 spin_lock_init(&cmd->t_state_lock);
1307 cmd->transport_state = CMD_T_DEV_ACTIVE;
1310 cmd->se_sess = se_sess;
1311 cmd->data_length = data_length;
1312 cmd->data_direction = data_direction;
1313 cmd->sam_task_attr = task_attr;
1314 cmd->sense_buffer = sense_buffer;
1316 cmd->state_active = false;
1318 EXPORT_SYMBOL(transport_init_se_cmd);
1320 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1323 * Check if SAM Task Attribute emulation is enabled for this
1324 * struct se_device storage object
1326 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1329 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1330 pr_debug("SAM Task Attribute ACA"
1331 " emulation is not supported\n");
1335 * Used to determine when ORDERED commands should go from
1336 * Dormant to Active status.
1338 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1339 smp_mb__after_atomic_inc();
1340 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1341 cmd->se_ordered_id, cmd->sam_task_attr,
1342 cmd->se_dev->transport->name);
1346 /* target_setup_cmd_from_cdb():
1348 * Called from fabric RX Thread.
1350 int target_setup_cmd_from_cdb(
1354 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
1355 u32 pr_reg_type = 0;
1357 unsigned long flags;
1361 * Ensure that the received CDB is less than the max (252 + 8) bytes
1362 * for VARIABLE_LENGTH_CMD
1364 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1365 pr_err("Received SCSI CDB with command_size: %d that"
1366 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1367 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1368 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1369 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1373 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1374 * allocate the additional extended CDB buffer now.. Otherwise
1375 * setup the pointer from __t_task_cdb to t_task_cdb.
1377 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1378 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1380 if (!cmd->t_task_cdb) {
1381 pr_err("Unable to allocate cmd->t_task_cdb"
1382 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1383 scsi_command_size(cdb),
1384 (unsigned long)sizeof(cmd->__t_task_cdb));
1385 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1386 cmd->scsi_sense_reason =
1387 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1391 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1393 * Copy the original CDB into cmd->t_task_cdb.
1395 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1398 * Check for an existing UNIT ATTENTION condition
1400 if (core_scsi3_ua_check(cmd, cdb) < 0) {
1401 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1402 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
1406 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
1409 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
1410 * The ALUA additional sense code qualifier (ASCQ) is determined
1411 * by the ALUA primary or secondary access state..
1414 pr_debug("[%s]: ALUA TG Port not available, "
1415 "SenseKey: NOT_READY, ASC/ASCQ: "
1417 cmd->se_tfo->get_fabric_name(), alua_ascq);
1419 transport_set_sense_codes(cmd, 0x04, alua_ascq);
1420 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1421 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
1424 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1425 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1430 * Check status for SPC-3 Persistent Reservations
1432 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
1433 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
1434 cmd, cdb, pr_reg_type) != 0) {
1435 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1436 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
1437 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1438 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
1442 * This means the CDB is allowed for the SCSI Initiator port
1443 * when said port is *NOT* holding the legacy SPC-2 or
1444 * SPC-3 Persistent Reservation.
1448 ret = cmd->se_dev->transport->parse_cdb(cmd);
1452 spin_lock_irqsave(&cmd->t_state_lock, flags);
1453 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1454 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1457 * Check for SAM Task Attribute Emulation
1459 if (transport_check_alloc_task_attr(cmd) < 0) {
1460 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1461 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1464 spin_lock(&cmd->se_lun->lun_sep_lock);
1465 if (cmd->se_lun->lun_sep)
1466 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1467 spin_unlock(&cmd->se_lun->lun_sep_lock);
1470 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1473 * Used by fabric module frontends to queue tasks directly.
1474 * May only be called from process context.
1476 int transport_handle_cdb_direct(
1483 pr_err("cmd->se_lun is NULL\n");
1486 if (in_interrupt()) {
1488 pr_err("transport_generic_handle_cdb cannot be called"
1489 " from interrupt context\n");
1493 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
1494 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1495 * in existing usage to ensure that outstanding descriptors are handled
1496 * correctly during shutdown via transport_wait_for_tasks()
1498 * Also, we don't take cmd->t_state_lock here as we only expect
1499 * this to be called for initial descriptor submission.
1501 cmd->t_state = TRANSPORT_NEW_CMD;
1502 cmd->transport_state |= CMD_T_ACTIVE;
1505 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1506 * so follow TRANSPORT_NEW_CMD processing thread context usage
1507 * and call transport_generic_request_failure() if necessary..
1509 ret = transport_generic_new_cmd(cmd);
1511 transport_generic_request_failure(cmd);
1515 EXPORT_SYMBOL(transport_handle_cdb_direct);
1518 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1520 * @se_cmd: command descriptor to submit
1521 * @se_sess: associated se_sess for endpoint
1522 * @cdb: pointer to SCSI CDB
1523 * @sense: pointer to SCSI sense buffer
1524 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1525 * @data_length: fabric expected data transfer length
1526 * @task_attr: SAM task attribute
1527 * @data_dir: DMA data direction
1528 * @flags: flags for command submission from target_sc_flags_tables
1530 * This may only be called from process context, and also currently
1531 * assumes internal allocation of fabric payload buffer by target-core.
1533 void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1534 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1535 u32 data_length, int task_attr, int data_dir, int flags)
1537 struct se_portal_group *se_tpg;
1540 se_tpg = se_sess->se_tpg;
1542 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1543 BUG_ON(in_interrupt());
1545 * Initialize se_cmd for target operation. From this point
1546 * exceptions are handled by sending exception status via
1547 * target_core_fabric_ops->queue_status() callback
1549 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1550 data_length, data_dir, task_attr, sense);
1551 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1552 se_cmd->unknown_data_length = 1;
1554 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1555 * se_sess->sess_cmd_list. A second kref_get here is necessary
1556 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1557 * kref_put() to happen during fabric packet acknowledgement.
1559 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1561 * Signal bidirectional data payloads to target-core
1563 if (flags & TARGET_SCF_BIDI_OP)
1564 se_cmd->se_cmd_flags |= SCF_BIDI;
1566 * Locate se_lun pointer and attach it to struct se_cmd
1568 if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
1569 transport_send_check_condition_and_sense(se_cmd,
1570 se_cmd->scsi_sense_reason, 0);
1571 target_put_sess_cmd(se_sess, se_cmd);
1575 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1577 transport_generic_request_failure(se_cmd);
1582 * Check if we need to delay processing because of ALUA
1583 * Active/NonOptimized primary access state..
1585 core_alua_check_nonop_delay(se_cmd);
1588 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
1589 * for immediate execution of READs, otherwise wait for
1590 * transport_generic_handle_data() to be called for WRITEs
1591 * when fabric has filled the incoming buffer.
1593 transport_handle_cdb_direct(se_cmd);
1596 EXPORT_SYMBOL(target_submit_cmd);
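/*
 * Illustrative fabric submission (sketch; the io/my_* names are
 * hypothetical), from process context after the CDB and expected data
 * length have been parsed out of the fabric packet:
 *
 *	target_submit_cmd(&io->se_cmd, se_sess, io->cdb, io->sense_buf,
 *			  unpacked_lun, data_len, MSG_SIMPLE_TAG,
 *			  DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF);
 */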
1598 static void target_complete_tmr_failure(struct work_struct *work)
1600 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1602 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1603 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1604 transport_generic_free_cmd(se_cmd, 0);
1608 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1611 * @se_cmd: command descriptor to submit
1612 * @se_sess: associated se_sess for endpoint
1613 * @sense: pointer to SCSI sense buffer
1614 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1615 * @fabric_context: fabric context for TMR req
1616 * @tm_type: Type of TM request
1617 * @gfp: gfp type for caller
1618 * @tag: referenced task tag for TMR_ABORT_TASK
1619 * @flags: submit cmd flags
1621 * Callable from all contexts.
1624 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1625 unsigned char *sense, u32 unpacked_lun,
1626 void *fabric_tmr_ptr, unsigned char tm_type,
1627 gfp_t gfp, unsigned int tag, int flags)
1629 struct se_portal_group *se_tpg;
1632 se_tpg = se_sess->se_tpg;
1635 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1636 0, DMA_NONE, MSG_SIMPLE_TAG, sense);
1638 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1639 * allocation failure.
1641 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1645 if (tm_type == TMR_ABORT_TASK)
1646 se_cmd->se_tmr_req->ref_task_tag = tag;
1648 /* See target_submit_cmd for commentary */
1649 target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
1651 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1654 * For callback during failure handling, push this work off
1655 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1657 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1658 schedule_work(&se_cmd->work);
1661 transport_generic_handle_tmr(se_cmd);
1664 EXPORT_SYMBOL(target_submit_tmr);
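/*
 * Illustrative ABORT_TASK submission (sketch; tmr_io is hypothetical):
 * abort the command with referenced task tag 'tag' on 'unpacked_lun':
 *
 *	target_submit_tmr(&tmr_io->se_cmd, se_sess, tmr_io->sense_buf,
 *			  unpacked_lun, tmr_io->fabric_tmr, TMR_ABORT_TASK,
 *			  GFP_KERNEL, tag, TARGET_SCF_ACK_KREF);
 */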
1667 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1668 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1669 * complete setup in TCM process context w/ TFO->new_cmd_map().
1671 int transport_generic_handle_cdb_map(
1676 pr_err("cmd->se_lun is NULL\n");
1680 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1683 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1685 /* transport_generic_handle_data():
1689 int transport_generic_handle_data(
1693 * For the software fabric case, we assume the nexus is being
1694 * failed/shutdown when signals are pending from the kthread context
1695 * caller, so we return a failure. For the HW target mode case running
1696 * in interrupt code, the signal_pending() check is skipped.
1698 if (!in_interrupt() && signal_pending(current))
1701 * If the received CDB has already been ABORTED by the generic
1702 * target engine, we now call transport_check_aborted_status()
1703 * to queue any delayed TASK_ABORTED status for the received CDB to the
1704 * fabric module as we are expecting no further incoming DATA OUT
1705 * sequences at this point.
1707 if (transport_check_aborted_status(cmd, 1) != 0)
1710 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1713 EXPORT_SYMBOL(transport_generic_handle_data);
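/*
 * Illustrative usage (sketch): a fabric calls this from its DATA-OUT
 * completion path once the full WRITE payload has been received:
 *
 *	if (all_data_received)
 *		transport_generic_handle_data(&io->se_cmd);
 */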
1715 /* transport_generic_handle_tmr():
1719 int transport_generic_handle_tmr(
1722 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1725 EXPORT_SYMBOL(transport_generic_handle_tmr);
1729 * If the cmd is active, request it to be stopped and sleep until it has completed.
1731 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1733 bool was_active = false;
1735 if (cmd->transport_state & CMD_T_BUSY) {
1736 cmd->transport_state |= CMD_T_REQUEST_STOP;
1737 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1739 pr_debug("cmd %p waiting to complete\n", cmd);
1740 wait_for_completion(&cmd->task_stop_comp);
1741 pr_debug("cmd %p stopped successfully\n", cmd);
1743 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1744 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1745 cmd->transport_state &= ~CMD_T_BUSY;
1753 * Handle SAM-esque emulation for generic transport request failures.
1755 void transport_generic_request_failure(struct se_cmd *cmd)
1759 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1760 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1761 cmd->t_task_cdb[0]);
1762 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1763 cmd->se_tfo->get_cmd_state(cmd),
1764 cmd->t_state, cmd->scsi_sense_reason);
1765 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1766 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1767 (cmd->transport_state & CMD_T_STOP) != 0,
1768 (cmd->transport_state & CMD_T_SENT) != 0);
1771 * For SAM Task Attribute emulation for failed struct se_cmd
1773 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1774 transport_complete_task_attr(cmd);
1776 switch (cmd->scsi_sense_reason) {
1777 case TCM_NON_EXISTENT_LUN:
1778 case TCM_UNSUPPORTED_SCSI_OPCODE:
1779 case TCM_INVALID_CDB_FIELD:
1780 case TCM_INVALID_PARAMETER_LIST:
1781 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1782 case TCM_UNKNOWN_MODE_PAGE:
1783 case TCM_WRITE_PROTECTED:
1784 case TCM_CHECK_CONDITION_ABORT_CMD:
1785 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1786 case TCM_CHECK_CONDITION_NOT_READY:
1788 case TCM_RESERVATION_CONFLICT:
1790 * No SENSE Data payload for this case, set SCSI Status
1791 * and queue the response to $FABRIC_MOD.
1793 * Uses linux/include/scsi/scsi.h SAM status codes defs
1795 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1797 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1798 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1801 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1804 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1805 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1806 cmd->orig_fe_lun, 0x2C,
1807 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1809 ret = cmd->se_tfo->queue_status(cmd);
1810 if (ret == -EAGAIN || ret == -ENOMEM)
1814 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1815 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1816 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1820 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1821 * make the call to transport_send_check_condition_and_sense()
1822 * directly. Otherwise expect the fabric to make the call to
1823 * transport_send_check_condition_and_sense() after handling
1824 * possible unsolicited write data payloads.
1826 ret = transport_send_check_condition_and_sense(cmd,
1827 cmd->scsi_sense_reason, 0);
1828 if (ret == -EAGAIN || ret == -ENOMEM)
1832 transport_lun_remove_cmd(cmd);
1833 if (!transport_cmd_check_stop_to_fabric(cmd))
1838 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1839 transport_handle_queue_full(cmd, cmd->se_dev);
1841 EXPORT_SYMBOL(transport_generic_request_failure);
1843 static void __target_execute_cmd(struct se_cmd *cmd)
1847 spin_lock_irq(&cmd->t_state_lock);
1848 cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
1849 spin_unlock_irq(&cmd->t_state_lock);
1851 if (cmd->execute_cmd)
1852 error = cmd->execute_cmd(cmd);
1855 spin_lock_irq(&cmd->t_state_lock);
1856 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1857 spin_unlock_irq(&cmd->t_state_lock);
1859 transport_generic_request_failure(cmd);
1863 static void target_execute_cmd(struct se_cmd *cmd)
1865 struct se_device *dev = cmd->se_dev;
1868 * Determine if IOCTL context caller is requesting the stopping of this
1869 * command for LUN shutdown purposes.
1871 spin_lock_irq(&cmd->t_state_lock);
1872 if (cmd->transport_state & CMD_T_LUN_STOP) {
1873 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
1874 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
1876 cmd->transport_state &= ~CMD_T_ACTIVE;
1877 spin_unlock_irq(&cmd->t_state_lock);
1878 complete(&cmd->transport_lun_stop_comp);
1882 * Determine if frontend context caller is requesting the stopping of
1883 * this command for frontend exceptions.
1885 if (cmd->transport_state & CMD_T_STOP) {
1886 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
1888 cmd->se_tfo->get_task_tag(cmd));
1890 spin_unlock_irq(&cmd->t_state_lock);
1891 complete(&cmd->t_transport_stop_comp);
1895 cmd->t_state = TRANSPORT_PROCESSING;
1896 spin_unlock_irq(&cmd->t_state_lock);
1898 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1902 * Check for the existence of HEAD_OF_QUEUE, and if true add the passed
1903 * struct se_cmd to the front of the queue.
1905 switch (cmd->sam_task_attr) {
1907 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
1908 "se_ordered_id: %u\n",
1909 cmd->t_task_cdb[0], cmd->se_ordered_id);
1911 case MSG_ORDERED_TAG:
1912 atomic_inc(&dev->dev_ordered_sync);
1913 smp_mb__after_atomic_inc();
1915 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
1916 " se_ordered_id: %u\n",
1917 cmd->t_task_cdb[0], cmd->se_ordered_id);
1920 * Execute an ORDERED command if no other older commands
1921 * exist that need to be completed first.
1923 if (!atomic_read(&dev->simple_cmds))
1928 * For SIMPLE and UNTAGGED Task Attribute commands
1930 atomic_inc(&dev->simple_cmds);
1931 smp_mb__after_atomic_inc();
1935 if (atomic_read(&dev->dev_ordered_sync) != 0) {
1936 spin_lock(&dev->delayed_cmd_lock);
1937 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1938 spin_unlock(&dev->delayed_cmd_lock);
1940 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1941 " delayed CMD list, se_ordered_id: %u\n",
1942 cmd->t_task_cdb[0], cmd->sam_task_attr,
1943 cmd->se_ordered_id);
1949 * Otherwise, no ORDERED task attributes exist..
1951 __target_execute_cmd(cmd);
1955 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
1957 static int transport_get_sense_data(struct se_cmd *cmd)
1959 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
1960 struct se_device *dev = cmd->se_dev;
1961 unsigned long flags;
1964 WARN_ON(!cmd->se_lun);
1969 spin_lock_irqsave(&cmd->t_state_lock, flags);
1970 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
1971 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1975 if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
1978 if (!dev->transport->get_sense_buffer) {
1979 pr_err("dev->transport->get_sense_buffer is NULL\n");
1983 sense_buffer = dev->transport->get_sense_buffer(cmd);
1984 if (!sense_buffer) {
1985 pr_err("ITT 0x%08x cmd %p: Unable to locate"
1986 " sense buffer for task with sense\n",
1987 cmd->se_tfo->get_task_tag(cmd), cmd);
1991 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1993 offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
1995 memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
1997 /* Automatically padded */
1998 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
2000 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
2001 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
2005 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2010 * Process all commands up to the last received ORDERED task attribute which
2011 * requires another blocking boundary
2013 static void target_restart_delayed_cmds(struct se_device *dev)
2018 spin_lock(&dev->delayed_cmd_lock);
2019 if (list_empty(&dev->delayed_cmd_list)) {
2020 spin_unlock(&dev->delayed_cmd_lock);
2024 cmd = list_entry(dev->delayed_cmd_list.next,
2025 struct se_cmd, se_delayed_node);
2026 list_del(&cmd->se_delayed_node);
2027 spin_unlock(&dev->delayed_cmd_lock);
2029 __target_execute_cmd(cmd);
2031 if (cmd->sam_task_attr == MSG_ORDERED_TAG)
2037 * Called from I/O completion to determine which dormant/delayed
2038 * and ordered cmds need to have their tasks added to the execution queue.
2040 static void transport_complete_task_attr(struct se_cmd *cmd)
2042 struct se_device *dev = cmd->se_dev;
2044 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
2045 atomic_dec(&dev->simple_cmds);
2046 smp_mb__after_atomic_dec();
2047 dev->dev_cur_ordered_id++;
2048 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
2049 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
2050 cmd->se_ordered_id);
2051 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
2052 dev->dev_cur_ordered_id++;
2053 pr_debug("Incremented dev_cur_ordered_id: %u for"
2054 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
2055 cmd->se_ordered_id);
2056 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
2057 atomic_dec(&dev->dev_ordered_sync);
2058 smp_mb__after_atomic_dec();
2060 dev->dev_cur_ordered_id++;
2061 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
2062 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
2065 target_restart_delayed_cmds(dev);
2068 static void transport_complete_qf(struct se_cmd *cmd)
2072 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2073 transport_complete_task_attr(cmd);
2075 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2076 ret = cmd->se_tfo->queue_status(cmd);
2081 switch (cmd->data_direction) {
2082 case DMA_FROM_DEVICE:
2083 ret = cmd->se_tfo->queue_data_in(cmd);
2086 if (cmd->t_bidi_data_sg) {
2087 ret = cmd->se_tfo->queue_data_in(cmd);
2091 /* Fall through for DMA_TO_DEVICE */
2093 ret = cmd->se_tfo->queue_status(cmd);
2101 transport_handle_queue_full(cmd, cmd->se_dev);
2104 transport_lun_remove_cmd(cmd);
2105 transport_cmd_check_stop_to_fabric(cmd);
2108 static void transport_handle_queue_full(
2110 struct se_device *dev)
2112 spin_lock_irq(&dev->qf_cmd_lock);
2113 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2114 atomic_inc(&dev->dev_qf_count);
2115 smp_mb__after_atomic_inc();
2116 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2118 schedule_work(&cmd->se_dev->qf_work_queue);
2121 static void target_complete_ok_work(struct work_struct *work)
2123 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2124 int reason = 0, ret;
2127 * Check if we need to move delayed/dormant tasks from cmds on the
2128 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task Attribute.
2131 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2132 transport_complete_task_attr(cmd);
2134 * Check to schedule QUEUE_FULL work, or execute an existing
2135 * cmd->transport_qf_callback()
2137 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2138 schedule_work(&cmd->se_dev->qf_work_queue);
2141 * Check if we need to retrieve a sense buffer from
2142 * the struct se_cmd in question.
2144 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2145 if (transport_get_sense_data(cmd) < 0)
2146 reason = TCM_NON_EXISTENT_LUN;
2148 if (cmd->scsi_status) {
2149 ret = transport_send_check_condition_and_sense(
2151 if (ret == -EAGAIN || ret == -ENOMEM)
2154 transport_lun_remove_cmd(cmd);
2155 transport_cmd_check_stop_to_fabric(cmd);
2160 * Check for a callback, used by, amongst other things,
2161 * XDWRITE_READ_10 emulation.
2163 if (cmd->transport_complete_callback)
2164 cmd->transport_complete_callback(cmd);
2166 switch (cmd->data_direction) {
2167 case DMA_FROM_DEVICE:
2168 spin_lock(&cmd->se_lun->lun_sep_lock);
2169 if (cmd->se_lun->lun_sep) {
2170 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
2173 spin_unlock(&cmd->se_lun->lun_sep_lock);
2175 ret = cmd->se_tfo->queue_data_in(cmd);
2176 if (ret == -EAGAIN || ret == -ENOMEM)
2180 spin_lock(&cmd->se_lun->lun_sep_lock);
2181 if (cmd->se_lun->lun_sep) {
2182 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
2185 spin_unlock(&cmd->se_lun->lun_sep_lock);
2187 * Check if we need to send READ payload for BIDI-COMMAND
2189 if (cmd->t_bidi_data_sg) {
2190 spin_lock(&cmd->se_lun->lun_sep_lock);
2191 if (cmd->se_lun->lun_sep) {
2192 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
2195 spin_unlock(&cmd->se_lun->lun_sep_lock);
2196 ret = cmd->se_tfo->queue_data_in(cmd);
2197 if (ret == -EAGAIN || ret == -ENOMEM)
2201 /* Fall through for DMA_TO_DEVICE */
2203 ret = cmd->se_tfo->queue_status(cmd);
2204 if (ret == -EAGAIN || ret == -ENOMEM)
2211 transport_lun_remove_cmd(cmd);
2212 transport_cmd_check_stop_to_fabric(cmd);
2216 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2217 " data_direction: %d\n", cmd, cmd->data_direction);
2218 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2219 transport_handle_queue_full(cmd, cmd->se_dev);
static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		return;

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}
/**
 * transport_release_cmd - free a command
 * @cmd:       command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
 */
static void transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	if (cmd->check_release != 0) {
		target_put_sess_cmd(cmd->se_sess, cmd);
		return;
	}
	cmd->se_tfo->release_cmd(cmd);
}
/**
 * transport_put_cmd - release a reference to a command
 * @cmd:       command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static void transport_put_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (atomic_read(&cmd->t_fe_count)) {
		if (!atomic_dec_and_test(&cmd->t_fe_count))
			goto out_busy;
	}

	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	transport_free_pages(cmd);
	transport_release_cmd(cmd);
	return;
out_busy:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
/*
 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
 * allocating in the core.
 * @cmd:  Associated se_cmd descriptor
 * @sgl:  SGL style memory for TCM WRITE / READ
 * @sgl_count: Number of SGL elements
 * @sgl_bidi: SGL style memory for TCM BIDI READ
 * @sgl_bidi_count: Number of BIDI READ SGL elements
 *
 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *sgl,
	u32 sgl_count,
	struct scatterlist *sgl_bidi,
	u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;

	if (sgl_bidi && sgl_bidi_count) {
		cmd->t_bidi_data_sg = sgl_bidi;
		cmd->t_bidi_data_nents = sgl_bidi_count;
	}
	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
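
/*
 * Example usage (illustrative sketch): a fabric module that has already
 * mapped its payload into a scatterlist hands it to the core instead of
 * letting transport_generic_new_cmd() allocate pages:
 *
 *	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *					       NULL, 0);
 *	if (ret < 0)
 *		return ret;
 *
 * After this call the core treats the memory as fabric-owned, and
 * transport_free_pages() will leave it alone on release.
 */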
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	BUG_ON(!sg);
	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;
	else if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);
void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
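
/*
 * Example usage (illustrative): emulation code that needs a linear view of
 * the data buffer, e.g. to parse a parameter list, must pair the two calls
 * so the single-page kmap() or multi-page vmap() taken above is released:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *	if (!buf)
 *		return -ENOMEM;
 *	... read or fill buf[0 .. cmd->data_length - 1] ...
 *	transport_kunmap_data_sg(cmd);
 */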
static int
transport_generic_get_mem(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;
	unsigned int nents;
	struct page *page;
	gfp_t zero_flag;
	int i = 0;

	nents = DIV_ROUND_UP(length, PAGE_SIZE);
	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
	if (!cmd->t_data_sg)
		return -ENOMEM;

	cmd->t_data_nents = nents;
	sg_init_table(cmd->t_data_sg, nents);

	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&cmd->t_data_sg[i]));
	}
	kfree(cmd->t_data_sg);
	cmd->t_data_sg = NULL;
	return -ENOMEM;
}
/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/* Workaround for handling zero-length control CDBs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->t_state = TRANSPORT_COMPLETE;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irq(&cmd->t_state_lock);

		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
			u8 ua_asc = 0, ua_ascq = 0;

			core_scsi3_ua_clear_for_request_sense(cmd,
					&ua_asc, &ua_ascq);
		}

		INIT_WORK(&cmd->work, target_complete_ok_work);
		queue_work(target_completion_wq, &cmd->work);
		return 0;
	}

	atomic_inc(&cmd->t_fe_count);

	/*
	 * For WRITEs, let the fabric know its buffer is ready.
	 *
	 * The command will be added to the execution queue after its write
	 * data has arrived.
	 *
	 * Everything else but a WRITE, add the command to the execution queue.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction == DMA_TO_DEVICE)
		return transport_generic_write_pending(cmd);
	target_execute_cmd(cmd);
	return 0;

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
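
/*
 * Illustrative submit path (hedged sketch, not a verbatim fabric): after CDB
 * setup a fabric typically hands the descriptor off as
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *	if (ret < 0)
 *		transport_generic_request_failure(se_cmd);
 *
 * For DMA_TO_DEVICE the core calls ->write_pending() and execution starts
 * only once the WRITE payload has arrived; for reads and control CDBs,
 * target_execute_cmd() runs immediately as above.
 */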
/*	transport_generic_process_write():
 *
 *	Called once all expected WRITE payload has been received from the
 *	fabric, to kick off backend execution of the command.
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
	target_execute_cmd(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}
static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
	 * from HW target mode interrupt code.  This is safe to be called
	 * with remove_from_lists false before the cmd->se_tfo->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, false);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;
	else if (ret < 0)
		return ret;

	return 1;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);

		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		transport_put_cmd(cmd);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
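
/*
 * Example (illustrative): a fabric response-completion callback normally
 * finishes with
 *
 *	transport_generic_free_cmd(se_cmd, 0);
 *
 * passing wait_for_tasks=1 only from contexts that must block until the
 * backend has stopped touching the command, e.g. connection teardown.
 */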
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
			bool ack_kref)
{
	unsigned long flags;

	kref_init(&se_cmd->cmd_kref);
	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		kref_get(&se_cmd->cmd_kref);
		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
	se_cmd->check_release = 1;
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_get_sess_cmd);
static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	se_cmd->se_tfo->release_cmd(se_cmd);
}
/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_sess:	session to reference
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
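
/*
 * Example pairing (illustrative): a fabric needing the descriptor to survive
 * until a transport-level acknowledgement passes ack_kref=true, taking two
 * references that are dropped separately:
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	(response sent)
 *	target_put_sess_cmd(se_sess, se_cmd);	(ack received, frees cmd)
 *
 * In practice the first put usually happens indirectly via
 * transport_release_cmd() once ->check_release is set.  The final put runs
 * target_release_cmd_kref(), which either completes cmd_wait_comp during
 * session teardown or calls ->release_cmd() directly.
 */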
/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
 * @se_sess:	session to split
 */
void target_splice_sess_cmd_list(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	WARN_ON(!list_empty(&se_sess->sess_wait_list));
	INIT_LIST_HEAD(&se_sess->sess_wait_list);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	se_sess->sess_tearing_down = 1;

	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_splice_sess_cmd_list);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 * @wait_for_tasks:	Make extra transport_wait_for_tasks call
 */
void target_wait_for_sess_cmds(
	struct se_session *se_sess,
	int wait_for_tasks)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	bool rc = false;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		if (wait_for_tasks) {
			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));

			rc = transport_wait_for_tasks(se_cmd);

			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		if (!rc) {
			wait_for_completion(&se_cmd->cmd_wait_comp);
			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
				" fabric state: %d\n", se_cmd, se_cmd->t_state,
				se_cmd->se_tfo->get_cmd_state(se_cmd));
		}

		se_cmd->se_tfo->release_cmd(se_cmd);
	}
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
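
/*
 * Example shutdown sequence (illustrative): a fabric tearing down a session
 * splices the outstanding commands first, then waits for each one:
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	target_wait_for_sess_cmds(se_sess, 0);
 *	transport_deregister_session(se_sess);
 */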
/*	transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shut down.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_STOP) {
		cmd->transport_state &= ~CMD_T_LUN_STOP;

		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
			 cmd->se_tfo->get_task_tag(cmd));
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		transport_cmd_check_stop(cmd, false);
		return -EPERM;
	}
	cmd->transport_state |= CMD_T_LUN_FE_STOP;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	// XXX: audit task_flags checks.
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if ((cmd->transport_state & CMD_T_BUSY) &&
	    (cmd->transport_state & CMD_T_SENT)) {
		if (!target_stop_cmd(cmd, &flags))
			ret++;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
			" %d\n", cmd, ret);
	if (!ret) {
		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
		wait_for_completion(&cmd->transport_lun_stop_comp);
		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				cmd->se_tfo->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd);

	return 0;
}
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty(&lun->lun_cmd_list)) {
		cmd = list_first_entry(&lun->lun_cmd_list,
		       struct se_cmd, se_lun_node);
		list_del_init(&cmd->se_lun_node);

		spin_lock(&cmd->t_state_lock);
		pr_debug("SE_LUN[%d] - Setting cmd->transport"
			"_lun_stop for ITT: 0x%08x\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));
		cmd->transport_state |= CMD_T_LUN_STOP;
		spin_unlock(&cmd->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!cmd->se_lun) {
			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the storage engine still owns the struct se_cmd, determine
		 * and/or stop its context.
		 */
		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			cmd->se_lun->unpacked_lun,
			cmd->se_tfo->get_task_tag(cmd));

		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
			goto check_cond;
		}
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

		/*
		 * The storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this struct se_cmd to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
			pr_debug("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, cmd->se_tfo->get_task_tag(cmd));

			spin_unlock_irqrestore(&cmd->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, false);
			complete(&cmd->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}
static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}
/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait for
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The cmd->transport_lun_fe_stop_comp will be completed by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("wait_for_tasks: Stopping"
			" wait_for_completion(&cmd->transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&cmd->t_state_lock, flags);

		target_remove_from_state_list(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		pr_debug("wait_for_tasks: Stopped"
			" wait_for_completion(&cmd->transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_LUN_STOP;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion("
		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
		cmd->se_tfo->get_task_tag(cmd));

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
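
/*
 * Example (illustrative): fabric abort paths usually quiesce a command via
 * the wait_for_tasks argument of the generic release helper, i.e.
 *
 *	transport_generic_free_cmd(se_cmd, 1);
 *
 * which calls transport_wait_for_tasks() above before dropping references.
 */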
static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}
int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* NOT READY */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
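
/*
 * Example (illustrative): emulation code that detects a bad CDB fails the
 * command back to the initiator with
 *
 *	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 *	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 *	transport_send_check_condition_and_sense(cmd,
 *			cmd->scsi_sense_reason, 0);
 *
 * using from_transport=0 so the sense payload built above is generated,
 * rather than reusing sense data returned by a passthrough backend.
 */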
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (!send_status ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;

		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			cmd->t_task_cdb[0],
			cmd->se_tfo->get_task_tag(cmd));

		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		cmd->se_tfo->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			smp_mb__after_atomic_inc();
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
		cmd->se_tfo->get_task_tag(cmd));

	cmd->se_tfo->queue_status(cmd);
}
static int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
	return 0;
}
/*	transport_processing_thread():
 *
 *	Per-device processing kthread: dequeues commands from the device
 *	queue object and dispatches them according to cmd->t_state.
 */
static int transport_processing_thread(void *param)
{
	int ret;
	struct se_cmd *cmd;
	struct se_device *dev = param;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

get_cmd:
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			continue;

		switch (cmd->t_state) {
		case TRANSPORT_NEW_CMD:
			BUG();
			break;
		case TRANSPORT_NEW_CMD_MAP:
			if (!cmd->se_tfo->new_cmd_map) {
				pr_err("cmd->se_tfo->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = cmd->se_tfo->new_cmd_map(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				transport_generic_request_failure(cmd);
				break;
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_WP:
			transport_write_pending_qf(cmd);
			break;
		case TRANSPORT_COMPLETE_QF_OK:
			transport_complete_qf(cmd);
			break;
		default:
			pr_err("Unknown t_state: %d for ITT: 0x%08x "
				"i_state: %d on SE LUN: %u\n",
				cmd->t_state,
				cmd->se_tfo->get_task_tag(cmd),
				cmd->se_tfo->get_cmd_state(cmd),
				cmd->se_lun->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	WARN_ON(!list_empty(&dev->state_list));
	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
	dev->process_thread = NULL;
	return 0;
}