/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.c"
#include "vhost.h"
#include "tcm_vhost.h"
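
/*
 * Virtqueue layout follows the virtio-scsi specification: vq 0 is the
 * control queue, vq 1 is the event queue, and vq 2 and up are request
 * (I/O) queues.
 */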
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/*
 * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure whether the bug is on
 * the guest or the host side, but disabling it helps.
 * TODO: debug and remove the workaround.
 */
enum {
	VHOST_SCSI_FEATURES = (VHOST_FEATURES & ~(1ULL << VIRTIO_RING_F_EVENT_IDX)) |
			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128
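
/*
 * Per-instance state for an open vhost-scsi character device: the target
 * group table set up by VHOST_SCSI_SET_ENDPOINT, the vhost device and its
 * virtqueues, and the deferred completion/event work.
 */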
struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex protecting the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);
static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}
static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}
static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}
static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	return;
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
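
/*
 * Completion path: target core callbacks push finished commands onto a
 * lockless list and kick vs_completion_work; the response is copied back
 * to the guest from vhost worker context in vhost_scsi_complete_cmd_work().
 */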
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
	struct vhost_scsi *vs = tv_cmd->tvc_vhost;

	llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(tv_cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(tv_cmd);
	return 0;
}

static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return 0;
}
static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
	u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;

	return evt;
}
static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 1);

	if (tv_cmd->tvc_sgl_count) {
		u32 i;

		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));

		kfree(tv_cmd->tvc_sgl);
	}

	kfree(tv_cmd);
}
static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
	struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}
static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *tv_cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				     tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &tv_cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = se_cmd->residual_count;
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = se_cmd->scsi_sense_length;
		memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
		       v_rsp.sense_len);
		ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
			vq = tv_cmd->tvc_vq - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(tv_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq]);
}
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
	struct tcm_vhost_tpg *tv_tpg,
	struct virtio_scsi_cmd_req *v_req,
	u32 exp_data_len,
	int data_direction)
{
	struct tcm_vhost_cmd *tv_cmd;
	struct tcm_vhost_nexus *tv_nexus;

	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}

	tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
	if (!tv_cmd) {
		pr_err("Unable to allocate struct tcm_vhost_cmd\n");
		return ERR_PTR(-ENOMEM);
	}
	tv_cmd->tvc_tag = v_req->tag;
	tv_cmd->tvc_task_attr = v_req->task_attr;
	tv_cmd->tvc_exp_data_len = exp_data_len;
	tv_cmd->tvc_data_direction = data_direction;
	tv_cmd->tvc_nexus = tv_nexus;

	return tv_cmd;
}
/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
	unsigned int sgl_count, struct iovec *iov, int write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	struct page **pages;
	int ret, i;

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count)
		return -ENOBUFS;

	pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	kfree(pages);
	return ret;
}
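
/*
 * Build a scatterlist covering all data iovecs of a request; user pages
 * are pinned one iovec at a time via vhost_scsi_map_to_sgl() above.
 */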
static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
	struct iovec *iov, unsigned int niov, int write)
{
	int ret;
	unsigned int i;
	u32 sgl_count;
	struct scatterlist *sg;

	/*
	 * Find out how long sglist needs to be
	 */
	sgl_count = 0;
	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	/* TODO overflow checking */

	sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;
	pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
	       sg, sgl_count, !sg);
	sg_init_table(sg, sgl_count);

	tv_cmd->tvc_sgl = sg;
	tv_cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
		if (ret < 0) {
			for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
				put_page(sg_page(&tv_cmd->tvc_sgl[i]));
			kfree(tv_cmd->tvc_sgl);
			tv_cmd->tvc_sgl = NULL;
			tv_cmd->tvc_sgl_count = 0;
			return ret;
		}

		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}
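
/*
 * Runs from tcm_vhost_workqueue (process context) and dispatches the
 * descriptor into target core via target_submit_cmd_map_sgls().
 */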
static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *tv_cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
	int rc, sg_no_bidi = 0;

	if (tv_cmd->tvc_sgl_count) {
		sg_ptr = tv_cmd->tvc_sgl;
		/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
		if (se_cmd->se_cmd_flags & SCF_BIDI) {
			sg_bidi_ptr = NULL;
			sg_no_bidi = 0;
		}
#endif
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = tv_cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
			tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
			tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
			0, sg_ptr, tv_cmd->tvc_sgl_count,
			sg_bidi_ptr, sg_no_bidi);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}
static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
	struct vhost_virtqueue *vq, int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
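
/*
 * Main request-queue handler: pop descriptors, validate the layout of the
 * virtio-scsi request/response buffers, look up the target, and queue the
 * command for submission into target core.
 */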
static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
	struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct tcm_vhost_tpg *tv_tpg;
	struct tcm_vhost_cmd *tv_cmd;
	u32 exp_data_len, data_first, data_num, data_direction;
	unsigned out, in, i;
	int head, ret;
	u8 target;

	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 *
	 * TODO: Check that we are running from vhost_worker which acts
	 * as read-side critical section for vhost kind of RCU.
	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
	 */
	vs_tpg = rcu_dereference_check(vq->private_data, 1);
	if (!vs_tpg)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

		/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
				" bytes\n", vq->iov[0].iov_len);
			break;
		}
		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
				sizeof(v_req));
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}

		/* Extract the tpgt */
		target = v_req.lun[1];
		tv_tpg = ACCESS_ONCE(vs_tpg[target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tv_tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		exp_data_len = 0;
		for (i = 0; i < data_num; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;

		tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
					exp_data_len, data_direction);
		if (IS_ERR(tv_cmd)) {
			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
					PTR_ERR(tv_cmd));
			goto err_cmd;
		}
		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", tv_cmd, exp_data_len, data_direction);

		tv_cmd->tvc_vhost = vs;
		tv_cmd->tvc_vq = vq;
		tv_cmd->tvc_resp = vq->iov[out].iov_base;

		/*
		 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
		 * that will be used by tcm_vhost_new_cmd_map() and down into
		 * target_setup_cmd_from_cdb()
		 */
		memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for tcm_vhost
		 */
		/* TODO what if cdb was too small for varlen cdb header? */
		if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
					TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(tv_cmd->tvc_cdb),
				TCM_VHOST_MAX_CDB_SIZE);
			goto err_free;
		}
		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
					&vq->iov[data_first], data_num,
					data_direction == DMA_TO_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}

		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		tv_cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &tv_cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(tv_cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
	mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, u32 event, u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->dev.vqs[index].poll);
}

static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);
}
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 * The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int vhost_scsi_set_endpoint(
	struct vhost_scsi *vs,
	struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tv_tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index])) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tv_tpg->tv_tpg_mutex);
		if (!tv_tpg->tpg_nexus) {
			mutex_unlock(&tv_tpg->tv_tpg_mutex);
			continue;
		}
		if (tv_tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tv_tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tv_tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tv_tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			tv_tpg->tv_tpg_vhost_count++;
			tv_tpg->vhost_scsi = vs;
			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
			smp_mb__after_atomic_inc();
			match = true;
		}
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i];
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, vs_tpg);
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}
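
/*
 * Undo vhost_scsi_set_endpoint(): drop the TPG references for the WWPN in
 * *t and clear each virtqueue's private_data.
 */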
static int vhost_scsi_clear_endpoint(
	struct vhost_scsi *vs,
	struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tv_tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index])) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tv_tpg = vs->vs_tpg[target];
		if (!tv_tpg)
			continue;

		mutex_lock(&tv_tpg->tv_tpg_mutex);
		tv_tport = tv_tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tv_tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tv_tpg->tv_tpg_vhost_count--;
		tv_tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i];
			/* Flushing the vhost_work acts as synchronize_rcu */
			mutex_lock(&vq->mutex);
			rcu_assign_pointer(vq->private_data, NULL);
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tv_tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}
	vs->dev.acked_features = features;
	smp_wmb();
	vhost_scsi_flush(vs);
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *s;
	int r, i;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);

	s->vs_events_nr = 0;
	s->vs_events_missed = false;

	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
		s->vqs[i].handle_kick = vhost_scsi_handle_kick;
	r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
	if (r < 0) {
		kfree(s);
		return r;
	}

	f->private_data = s;
	return 0;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *s = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&s->dev.mutex);
	memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&s->dev.mutex);
	vhost_scsi_clear_endpoint(s, &t);
	vhost_dev_stop(&s->dev);
	vhost_dev_cleanup(&s->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(s);
	kfree(s);
	return 0;
}
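
/*
 * Ioctl dispatcher for the vhost-scsi character device; anything not
 * handled here falls through to the generic vhost_dev_ioctl() and
 * vhost_vring_ioctl() handlers.
 */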
static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_scsi_compat_ioctl,
#endif
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}
static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
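
/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event so the guest rescans (LUN
 * hotplug) or removes (hot-unplug) the affected LUN, provided the guest
 * negotiated VIRTIO_SCSI_F_HOTPLUG.
 */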
static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}
static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count++;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count--;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}
static struct se_node_acl *tcm_vhost_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}
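
/*
 * Create the I_T nexus (and its implicit se_session) for a TPG; reached
 * by writing an initiator WWN to the configfs 'nexus' attribute, see
 * tcm_vhost_tpg_store_nexus() below.
 */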
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	if (tv_tpg->tpg_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("tv_tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tv_tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tv_nexus->tvn_se_sess = transport_init_session();
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		transport_free_session(tv_nexus->tvn_se_sess);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tv_tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tv_tpg->tv_tpg_mutex);
	return 0;
}
static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tv_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
	&tcm_vhost_tpg_nexus.attr,
	NULL,
};
static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);

	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}
static ssize_t tcm_vhost_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};
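
/*
 * Fabric API callback table wired into target core by
 * tcm_vhost_register_configfs().
 */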
static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name		= tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
	.tpg_get_tag			= tcm_vhost_get_tag,
	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_vhost_check_true,
	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
	.release_cmd			= tcm_vhost_release_cmd,
	.shutdown_session		= tcm_vhost_shutdown_session,
	.close_session			= tcm_vhost_close_session,
	.sess_get_index			= tcm_vhost_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_vhost_write_pending,
	.write_pending_status		= tcm_vhost_write_pending_status,
	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
	.get_task_tag			= tcm_vhost_get_task_tag,
	.get_cmd_state			= tcm_vhost_get_cmd_state,
	.queue_data_in			= tcm_vhost_queue_data_in,
	.queue_status			= tcm_vhost_queue_status,
	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_vhost_make_tport,
	.fabric_drop_wwn		= tcm_vhost_drop_tport,
	.fabric_make_tpg		= tcm_vhost_make_tpg,
	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
	.fabric_post_link		= tcm_vhost_port_link,
	.fabric_pre_unlink		= tcm_vhost_port_unlink,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
};
static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}

static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}
static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}

static void tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);