drivers/vhost/tcm_vhost.c (at commit "vhost: fix error handling in RESET_OWNER ioctl")
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

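/*
 * Note that vhost.c is #included directly below rather than linked as a
 * separate object, so the generic vhost core is compiled into this module.
 */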
#include "vhost.c"
#include "vhost.h"
#include "tcm_vhost.h"

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/*
 * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure whether the bug is in
 * the kernel, but disabling it helps.
 * TODO: debug and remove the workaround.
 * Note: VIRTIO_RING_F_EVENT_IDX is a feature bit number, so it has to be
 * shifted into a mask before it can be cleared from the feature bits.
 */
enum {
        VHOST_SCSI_FEATURES = (VHOST_FEATURES &
                               ~(1ULL << VIRTIO_RING_F_EVENT_IDX)) |
                              (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any time, one counter tracks newly submitted commands while the
         * flush waits for the other one to drop to 0.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex protecting the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

static int iov_num_pages(struct iovec *iov)
{
        return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

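/*
 * Swap in a fresh inflight counter on every vq and, when old_inflight is
 * given, hand the previously active counters back to the caller so that
 * vhost_scsi_flush() can wait for them to drain to zero.
 */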
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* setup new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

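/*
 * Take a reference on the currently active inflight counter for a new
 * request; the matching tcm_vhost_put_inflight() is called when the
 * command is freed.
 */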
static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

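/*
 * TCM fabric configfs callbacks.  vhost-scsi runs in 'demo mode' with a
 * single I_T nexus per tpg, so many of these are fixed-value stubs.
 */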
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code,
        unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
        const char *buf,
        u32 *out_tid_len,
        char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
        struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        return;
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

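/*
 * Queue the completed command on the lockless vs_completion_list and kick
 * vs_completion_work; the response is written back to the vring from
 * vhost worker context, where the owner mm is available.
 */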
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
        struct vhost_scsi *vs = tv_cmd->tvc_vhost;

        llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(tv_cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(tv_cmd);
        return 0;
}

static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = event;
        evt->event.reason = reason;
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
        struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 1);

        if (tv_cmd->tvc_sgl_count) {
                u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));

                kfree(tv_cmd->tvc_sgl);
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);

        kfree(tv_cmd);
}

static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
        struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *tv_cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &tv_cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
                memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
                       v_rsp.sense_len);
                ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
                        q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(tv_cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
        struct vhost_virtqueue *vq,
        struct tcm_vhost_tpg *tv_tpg,
        struct virtio_scsi_cmd_req *v_req,
        u32 exp_data_len,
        int data_direction)
{
        struct tcm_vhost_cmd *tv_cmd;
        struct tcm_vhost_nexus *tv_nexus;

        tv_nexus = tv_tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }

        tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
        if (!tv_cmd) {
                pr_err("Unable to allocate struct tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
        tv_cmd->tvc_tag = v_req->tag;
        tv_cmd->tvc_task_attr = v_req->task_attr;
        tv_cmd->tvc_exp_data_len = exp_data_len;
        tv_cmd->tvc_data_direction = data_direction;
        tv_cmd->tvc_nexus = tv_nexus;
        tv_cmd->inflight = tcm_vhost_get_inflight(vq);

        return tv_cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
        unsigned int sgl_count, struct iovec *iov, int write)
{
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
        struct page **pages;
        int ret, i;

        pages_nr = iov_num_pages(iov);
        if (pages_nr > sgl_count)
                return -ENOBUFS;

        pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than wanted */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        kfree(pages);
        return ret;
}

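/*
 * Pin and map all data iovecs of a request into a single scatterlist
 * sized via iov_num_pages(); on failure, any pages already mapped are
 * released before the error is returned.
 */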
static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
        struct iovec *iov, unsigned int niov, int write)
{
        int ret;
        unsigned int i;
        u32 sgl_count;
        struct scatterlist *sg;

        /*
         * Find out how long the sglist needs to be
         */
        sgl_count = 0;
        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);

        /* TODO overflow checking */

        sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
        pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
               sg, sgl_count, !sg);
        sg_init_table(sg, sgl_count);

        tv_cmd->tvc_sgl = sg;
        tv_cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
                if (ret < 0) {
                        unsigned int j;
                        /*
                         * Only release pages in the entries populated so far;
                         * the remaining entries were zeroed by sg_init_table()
                         * and hold no page references.
                         */
                        for (j = 0; j < tv_cmd->tvc_sgl_count - sgl_count; j++)
                                put_page(sg_page(&tv_cmd->tvc_sgl[j]));
                        kfree(tv_cmd->tvc_sgl);
                        tv_cmd->tvc_sgl = NULL;
                        tv_cmd->tvc_sgl_count = 0;
                        return ret;
                }

                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *tv_cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
        int rc, sg_no_bidi = 0;

        if (tv_cmd->tvc_sgl_count) {
                sg_ptr = tv_cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
                        sg_bidi_ptr = NULL;
                        sg_no_bidi = 0;
                }
#endif
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = tv_cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
                        tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
                        tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
                        0, sg_ptr, tv_cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq, int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_cmd *tv_cmd;
        u32 exp_data_len, data_first, data_num, data_direction;
        unsigned out, in, i;
        int head, ret;
        u8 target;

        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
         *
         * TODO: Check that we are running from vhost_worker which acts
         * as read-side critical section for vhost kind of RCU.
         * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
         */
        vs_tpg = rcu_dereference_check(vq->private_data, 1);
        if (!vs_tpg)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

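                /*
                 * In virtio-scsi the request header and any data-out buffers
                 * occupy the "out" descriptors, while the response header and
                 * any data-in buffers occupy the "in" descriptors, so the
                 * descriptor counts alone determine the data direction.
                 */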
/* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
                                " bytes\n", vq->iov[0].iov_len);
                        break;
                }
                pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
                        " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
                ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
                                sizeof(v_req));
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }

                /* Extract the tpgt */
                target = v_req.lun[1];
                tv_tpg = ACCESS_ONCE(vs_tpg[target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tv_tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                exp_data_len = 0;
                for (i = 0; i < data_num; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;

                tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
                                        exp_data_len, data_direction);
                if (IS_ERR(tv_cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
                                        PTR_ERR(tv_cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", tv_cmd, exp_data_len, data_direction);

                tv_cmd->tvc_vhost = vs;
                tv_cmd->tvc_vq = vq;
                tv_cmd->tvc_resp = vq->iov[out].iov_base;

                /*
                 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
                 * that will be used by tcm_vhost_new_cmd_map() and down into
                 * target_setup_cmd_from_cdb()
                 */
                memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for tcm_vhost
                 */
                /* TODO what if cdb was too small for varlen cdb header? */
                if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
                                        TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(tv_cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
                        goto err_free;
                }
                tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
                                        &vq->iov[data_first], data_num,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }

                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                tv_cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &tv_cmd->work);
        }

        mutex_unlock(&vq->mutex);
        return;

err_free:
        vhost_scsi_free_cmd(tv_cmd);
err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

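/*
 * Allocate a virtio-scsi event and queue it on vs_event_list; the actual
 * copy into the event vring happens later in tcm_vhost_evt_work(), which
 * runs in vhost worker context.
 */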
static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
        struct se_lun *lun, u32 event, u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        tcm_vhost_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int vhost_scsi_set_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
{
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
        vs_tpg = kzalloc(len, GFP_KERNEL);
        if (!vs_tpg) {
                ret = -ENOMEM;
                goto out;
        }
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);

        list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
                mutex_lock(&tv_tpg->tv_tpg_mutex);
                if (!tv_tpg->tpg_nexus) {
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        continue;
                }
                if (tv_tpg->tv_tpg_vhost_count != 0) {
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        continue;
                }
                tv_tport = tv_tpg->tport;

                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
                                kfree(vs_tpg);
                                mutex_unlock(&tv_tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
                        tv_tpg->tv_tpg_vhost_count++;
                        tv_tpg->vhost_scsi = vs;
                        vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
                        smp_mb__after_atomic_inc();
                        match = true;
                }
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
        }

        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
                       sizeof(vs->vs_vhost_wwpn));
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
                        rcu_assign_pointer(vq->private_data, vs_tpg);
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
                ret = 0;
        } else {
                ret = -EEXIST;
        }

        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = vs_tpg;

out:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return ret;
}

static int vhost_scsi_clear_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
{
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
        u8 target;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto err_dev;
                }
        }

        if (!vs->vs_tpg) {
                ret = 0;
                goto err_dev;
        }

        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                target = i;
                tv_tpg = vs->vs_tpg[target];
                if (!tv_tpg)
                        continue;

                mutex_lock(&tv_tpg->tv_tpg_mutex);
                tv_tport = tv_tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
                        goto err_tpg;
                }

                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
                                tv_tport->tport_name, tv_tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
                        goto err_tpg;
                }
                tv_tpg->tv_tpg_vhost_count--;
                tv_tpg->vhost_scsi = NULL;
                vs->vs_tpg[target] = NULL;
                match = true;
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
                        rcu_assign_pointer(vq->private_data, NULL);
                        mutex_unlock(&vq->mutex);
                }
        }
        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return 0;

err_tpg:
        mutex_unlock(&tv_tpg->tv_tpg_mutex);
err_dev:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return ret;
}

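/*
 * Accept the feature bitmap negotiated with userspace.  The smp_wmb()
 * together with the flush below is intended to make sure vhost workers
 * see the updated acked_features before this ioctl returns.
 */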
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
        if (features & ~VHOST_SCSI_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vs->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vs->dev)) {
                mutex_unlock(&vs->dev.mutex);
                return -EFAULT;
        }
        vs->dev.acked_features = features;
        smp_wmb();
        vhost_scsi_flush(vs);
        mutex_unlock(&vs->dev.mutex);
        return 0;
}

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
        struct vhost_scsi *s;
        struct vhost_virtqueue **vqs;
        int r, i;

        s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                kfree(s);
                return -ENOMEM;
        }

        vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
        vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);

        s->vs_events_nr = 0;
        s->vs_events_missed = false;

        vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
        vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
        s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
        s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
                vqs[i] = &s->vqs[i].vq;
                s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
        r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);

        tcm_vhost_init_inflight(s, NULL);

        if (r < 0) {
                kfree(vqs);
                kfree(s);
                return r;
        }

        f->private_data = s;
        return 0;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
        struct vhost_scsi *s = f->private_data;
        struct vhost_scsi_target t;

        mutex_lock(&s->dev.mutex);
        memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
        mutex_unlock(&s->dev.mutex);
        vhost_scsi_clear_endpoint(s, &t);
        vhost_dev_stop(&s->dev);
        vhost_dev_cleanup(&s->dev, false);
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(s);
        kfree(s->dev.vqs);
        kfree(s);
        return 0;
}

static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
                                unsigned long arg)
{
        struct vhost_scsi *vs = f->private_data;
        struct vhost_scsi_target backend;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        u32 __user *eventsp = argp;
        u32 events_missed;
        u64 features;
        int r, abi_version = VHOST_SCSI_ABI_VERSION;
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

        switch (ioctl) {
        case VHOST_SCSI_SET_ENDPOINT:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                if (backend.reserved != 0)
                        return -EOPNOTSUPP;

                return vhost_scsi_set_endpoint(vs, &backend);
        case VHOST_SCSI_CLEAR_ENDPOINT:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                if (backend.reserved != 0)
                        return -EOPNOTSUPP;

                return vhost_scsi_clear_endpoint(vs, &backend);
        case VHOST_SCSI_GET_ABI_VERSION:
                if (copy_to_user(argp, &abi_version, sizeof abi_version))
                        return -EFAULT;
                return 0;
        case VHOST_SCSI_SET_EVENTS_MISSED:
                if (get_user(events_missed, eventsp))
                        return -EFAULT;
                mutex_lock(&vq->mutex);
                vs->vs_events_missed = events_missed;
                mutex_unlock(&vq->mutex);
                return 0;
        case VHOST_SCSI_GET_EVENTS_MISSED:
                mutex_lock(&vq->mutex);
                events_missed = vs->vs_events_missed;
                mutex_unlock(&vq->mutex);
                if (put_user(events_missed, eventsp))
                        return -EFAULT;
                return 0;
        case VHOST_GET_FEATURES:
                features = VHOST_SCSI_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                return vhost_scsi_set_features(vs, features);
        default:
                mutex_lock(&vs->dev.mutex);
                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
                /* TODO: flush backend after dev ioctl. */
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
                mutex_unlock(&vs->dev.mutex);
                return r;
        }
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
                                unsigned long arg)
{
        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_scsi_release,
        .unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_scsi_compat_ioctl,
#endif
        .open           = vhost_scsi_open,
        .llseek         = noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
        MISC_DYNAMIC_MINOR,
        "vhost-scsi",
        &vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
        return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
        return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return "SAS";
        case SCSI_PROTOCOL_FCP:
                return "FCP";
        case SCSI_PROTOCOL_ISCSI:
                return "iSCSI";
        default:
                break;
        }

        return "Unknown";
}

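/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event so the guest rescans (plug)
 * or removes (unplug) the LUN, but only if the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG.
 */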
static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
        struct se_lun *lun, bool plug)
{
        struct vhost_scsi *vs = tpg->vhost_scsi;
        struct vhost_virtqueue *vq;
        u32 reason;

        if (!vs)
                return;

        mutex_lock(&vs->dev.mutex);
        if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
                mutex_unlock(&vs->dev.mutex);
                return;
        }

        if (plug)
                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
        else
                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
        tcm_vhost_send_evt(vs, tpg, lun,
                        VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);
        mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
        tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
        tcm_vhost_do_plug(tpg, lun, false);
}

static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
        struct se_lun *lun)
{
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);

        mutex_lock(&tcm_vhost_mutex);

        mutex_lock(&tv_tpg->tv_tpg_mutex);
        tv_tpg->tv_tpg_port_count++;
        mutex_unlock(&tv_tpg->tv_tpg_mutex);

        tcm_vhost_hotplug(tv_tpg, lun);

        mutex_unlock(&tcm_vhost_mutex);

        return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
        struct se_lun *lun)
{
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);

        mutex_lock(&tcm_vhost_mutex);

        mutex_lock(&tv_tpg->tv_tpg_mutex);
        tv_tpg->tv_tpg_port_count--;
        mutex_unlock(&tv_tpg->tv_tpg_mutex);

        tcm_vhost_hotunplug(tv_tpg, lun);

        mutex_unlock(&tcm_vhost_mutex);
}
1503
1504 static struct se_node_acl *tcm_vhost_make_nodeacl(
1505         struct se_portal_group *se_tpg,
1506         struct config_group *group,
1507         const char *name)
1508 {
1509         struct se_node_acl *se_nacl, *se_nacl_new;
1510         struct tcm_vhost_nacl *nacl;
1511         u64 wwpn = 0;
1512         u32 nexus_depth;
1513
1514         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1515                 return ERR_PTR(-EINVAL); */
1516         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1517         if (!se_nacl_new)
1518                 return ERR_PTR(-ENOMEM);
1519
1520         nexus_depth = 1;
1521         /*
1522          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1523          * when converting a NodeACL from demo mode -> explicit
1524          */
1525         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1526                                 name, nexus_depth);
1527         if (IS_ERR(se_nacl)) {
1528                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1529                 return se_nacl;
1530         }
1531         /*
1532          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1533          */
1534         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1535         nacl->iport_wwpn = wwpn;
1536
1537         return se_nacl;
1538 }
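/*
 * Illustrative only (WWNs are example values): an explicit NodeACL is
 * created from configfs by mkdir under a TPG's acls/ group, e.g.:
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/acls/naa.60014055e5f55522
 *
 * Since this fabric enables demo mode (see tcm_vhost_ops below), ACLs
 * are normally generated implicitly instead.
 */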
1539
1540 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1541 {
1542         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1543                                 struct tcm_vhost_nacl, se_node_acl);
1544         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1545         kfree(nacl);
1546 }
1547
1548 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1549         const char *name)
1550 {
1551         struct se_portal_group *se_tpg;
1552         struct tcm_vhost_nexus *tv_nexus;
1553
1554         mutex_lock(&tv_tpg->tv_tpg_mutex);
1555         if (tv_tpg->tpg_nexus) {
1556                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1557                 pr_debug("tv_tpg->tpg_nexus already exists\n");
1558                 return -EEXIST;
1559         }
1560         se_tpg = &tv_tpg->se_tpg;
1561
1562         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1563         if (!tv_nexus) {
1564                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1565                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1566                 return -ENOMEM;
1567         }
1568         /*
1569          *  Initialize the struct se_session pointer
1570          */
1571         tv_nexus->tvn_se_sess = transport_init_session();
1572         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1573                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1574                 kfree(tv_nexus);
1575                 return -ENOMEM;
1576         }
1577         /*
1578          * Since we are running in 'demo mode' this call will generate a
1579          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1580          * the SCSI Initiator port name of the passed configfs group 'name'.
1581          */
1582         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1583                                 se_tpg, (unsigned char *)name);
1584         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1585                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1586                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1587                                 " for %s\n", name);
1588                 transport_free_session(tv_nexus->tvn_se_sess);
1589                 kfree(tv_nexus);
1590                 return -ENOMEM;
1591         }
1592         /*
1593          * Now register the TCM vhost virtual I_T Nexus as active with the
1594          * call to __transport_register_session()
1595          */
1596         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1597                         tv_nexus->tvn_se_sess, tv_nexus);
1598         tv_tpg->tpg_nexus = tv_nexus;
1599
1600         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1601         return 0;
1602 }
1603
1604 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1605 {
1606         struct se_session *se_sess;
1607         struct tcm_vhost_nexus *tv_nexus;
1608
1609         mutex_lock(&tpg->tv_tpg_mutex);
1610         tv_nexus = tpg->tpg_nexus;
1611         if (!tv_nexus) {
1612                 mutex_unlock(&tpg->tv_tpg_mutex);
1613                 return -ENODEV;
1614         }
1615
1616         se_sess = tv_nexus->tvn_se_sess;
1617         if (!se_sess) {
1618                 mutex_unlock(&tpg->tv_tpg_mutex);
1619                 return -ENODEV;
1620         }
1621
1622         if (tpg->tv_tpg_port_count != 0) {
1623                 mutex_unlock(&tpg->tv_tpg_mutex);
1624                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1625                         " active TPG port count: %d\n",
1626                         tpg->tv_tpg_port_count);
1627                 return -EBUSY;
1628         }
1629
1630         if (tpg->tv_tpg_vhost_count != 0) {
1631                 mutex_unlock(&tpg->tv_tpg_mutex);
1632                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1633                         " active TPG vhost count: %d\n",
1634                         tpg->tv_tpg_vhost_count);
1635                 return -EBUSY;
1636         }
1637
1638         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1639                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1640                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1641         /*
1642          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1643          */
1644         transport_deregister_session(tv_nexus->tvn_se_sess);
1645         tpg->tpg_nexus = NULL;
1646         mutex_unlock(&tpg->tv_tpg_mutex);
1647
1648         kfree(tv_nexus);
1649         return 0;
1650 }
1651
1652 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1653         char *page)
1654 {
1655         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1656                                 struct tcm_vhost_tpg, se_tpg);
1657         struct tcm_vhost_nexus *tv_nexus;
1658         ssize_t ret;
1659
1660         mutex_lock(&tv_tpg->tv_tpg_mutex);
1661         tv_nexus = tv_tpg->tpg_nexus;
1662         if (!tv_nexus) {
1663                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1664                 return -ENODEV;
1665         }
1666         ret = snprintf(page, PAGE_SIZE, "%s\n",
1667                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1668         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1669
1670         return ret;
1671 }
1672
1673 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1674         const char *page,
1675         size_t count)
1676 {
1677         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1678                                 struct tcm_vhost_tpg, se_tpg);
1679         struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1680         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1681         int ret;
1682         /*
1683          * Shutdown the active I_T nexus if 'NULL' is passed..
1684          */
1685         if (!strncmp(page, "NULL", 4)) {
1686                 ret = tcm_vhost_drop_nexus(tv_tpg);
1687                 return (!ret) ? count : ret;
1688         }
1689         /*
1690          * Otherwise make sure the passed virtual Initiator port WWN matches
1691          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1692          * tcm_vhost_make_nexus().
1693          */
1694         if (strlen(page) >= TCM_VHOST_NAMELEN) {
1695                 pr_err("Emulated NAA Sas Address: %s, exceeds"
1696                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
1697                 return -EINVAL;
1698         }
1699         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1700
1701         ptr = strstr(i_port, "naa.");
1702         if (ptr) {
1703                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1704                         pr_err("Passed SAS Initiator Port %s does not"
1705                                 " match target port protoid: %s\n", i_port,
1706                                 tcm_vhost_dump_proto_id(tport_wwn));
1707                         return -EINVAL;
1708                 }
1709                 port_ptr = &i_port[0];
1710                 goto check_newline;
1711         }
1712         ptr = strstr(i_port, "fc.");
1713         if (ptr) {
1714                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1715                         pr_err("Passed FCP Initiator Port %s does not"
1716                                 " match target port protoid: %s\n", i_port,
1717                                 tcm_vhost_dump_proto_id(tport_wwn));
1718                         return -EINVAL;
1719                 }
1720                 port_ptr = &i_port[3]; /* Skip over "fc." */
1721                 goto check_newline;
1722         }
1723         ptr = strstr(i_port, "iqn.");
1724         if (ptr) {
1725                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1726                         pr_err("Passed iSCSI Initiator Port %s does not"
1727                                 " match target port protoid: %s\n", i_port,
1728                                 tcm_vhost_dump_proto_id(tport_wwn));
1729                         return -EINVAL;
1730                 }
1731                 port_ptr = &i_port[0];
1732                 goto check_newline;
1733         }
1734         pr_err("Unable to locate prefix for emulated Initiator Port:"
1735                         " %s\n", i_port);
1736         return -EINVAL;
1737         /*
1738          * Clear any trailing newline for the NAA WWN
1739          */
1740 check_newline:
1741         if (i_port[strlen(i_port)-1] == '\n')
1742                 i_port[strlen(i_port)-1] = '\0';
1743
1744         ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1745         if (ret < 0)
1746                 return ret;
1747
1748         return count;
1749 }
1750
1751 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
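/*
 * The macro above generates the "nexus" configfs attribute backed by
 * the show/store handlers defined earlier. Illustrative usage (WWNs
 * are example values):
 *
 *   echo naa.60014055e5f55522 > /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *   cat /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *   echo NULL > /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * The initiator WWN prefix (naa./fc./iqn.) must match the tport's
 * protocol id, and writing "NULL" drops the active I_T nexus.
 */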
1752
1753 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1754         &tcm_vhost_tpg_nexus.attr,
1755         NULL,
1756 };
1757
1758 static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1759         struct config_group *group,
1760         const char *name)
1761 {
1762         struct tcm_vhost_tport *tport = container_of(wwn,
1763                         struct tcm_vhost_tport, tport_wwn);
1764
1765         struct tcm_vhost_tpg *tpg;
1766         unsigned long tpgt;
1767         int ret;
1768
1769         if (strstr(name, "tpgt_") != name)
1770                 return ERR_PTR(-EINVAL);
1771         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1772                 return ERR_PTR(-EINVAL);
1773
1774         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1775         if (!tpg) {
1776                 pr_err("Unable to allocate struct tcm_vhost_tpg");
1777                 return ERR_PTR(-ENOMEM);
1778         }
1779         mutex_init(&tpg->tv_tpg_mutex);
1780         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1781         tpg->tport = tport;
1782         tpg->tport_tpgt = tpgt;
1783
1784         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1785                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1786         if (ret < 0) {
1787                 kfree(tpg);
1788                 return ERR_PTR(ret);
1789         }
1790         mutex_lock(&tcm_vhost_mutex);
1791         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1792         mutex_unlock(&tcm_vhost_mutex);
1793
1794         return &tpg->se_tpg;
1795 }
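/*
 * Illustrative only: TPG directories must be named "tpgt_<n>", e.g.
 * (WWN is an example value):
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 */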
1796
1797 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1798 {
1799         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1800                                 struct tcm_vhost_tpg, se_tpg);
1801
1802         mutex_lock(&tcm_vhost_mutex);
1803         list_del(&tpg->tv_tpg_list);
1804         mutex_unlock(&tcm_vhost_mutex);
1805         /*
1806          * Release the virtual I_T Nexus for this vhost TPG
1807          */
1808         tcm_vhost_drop_nexus(tpg);
1809         /*
1810          * Deregister the se_tpg from TCM..
1811          */
1812         core_tpg_deregister(se_tpg);
1813         kfree(tpg);
1814 }
1815
1816 static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1817         struct config_group *group,
1818         const char *name)
1819 {
1820         struct tcm_vhost_tport *tport;
1821         char *ptr;
1822         u64 wwpn = 0;
1823         int off = 0;
1824
1825         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1826                 return ERR_PTR(-EINVAL); */
1827
1828         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1829         if (!tport) {
1830                 pr_err("Unable to allocate struct tcm_vhost_tport");
1831                 return ERR_PTR(-ENOMEM);
1832         }
1833         tport->tport_wwpn = wwpn;
1834         /*
1835          * Determine the emulated Protocol Identifier and Target Port Name
1836          * based on the incoming configfs directory name.
1837          */
1838         ptr = strstr(name, "naa.");
1839         if (ptr) {
1840                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1841                 goto check_len;
1842         }
1843         ptr = strstr(name, "fc.");
1844         if (ptr) {
1845                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1846                 off = 3; /* Skip over "fc." */
1847                 goto check_len;
1848         }
1849         ptr = strstr(name, "iqn.");
1850         if (ptr) {
1851                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1852                 goto check_len;
1853         }
1854
1855         pr_err("Unable to locate prefix for emulated Target Port:"
1856                         " %s\n", name);
1857         kfree(tport);
1858         return ERR_PTR(-EINVAL);
1859
1860 check_len:
1861         if (strlen(name) >= TCM_VHOST_NAMELEN) {
1862                 pr_err("Emulated %s Address: %s, exceeds"
1863                         " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
1864                         TCM_VHOST_NAMELEN);
1865                 kfree(tport);
1866                 return ERR_PTR(-EINVAL);
1867         }
1868         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1869
1870         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1871                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1872
1873         return &tport->tport_wwn;
1874 }
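/*
 * Illustrative only: the tport directory name selects the emulated
 * protocol by its prefix, e.g. (WWNs are example values):
 *
 *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e          (SAS)
 *   mkdir /sys/kernel/config/target/vhost/fc.20000000c9f9a6e1           (FCP)
 *   mkdir /sys/kernel/config/target/vhost/iqn.2012-07.org.example:tport (iSCSI)
 */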
1875
1876 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1877 {
1878         struct tcm_vhost_tport *tport = container_of(wwn,
1879                                 struct tcm_vhost_tport, tport_wwn);
1880
1881         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1882                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1883                 tport->tport_name);
1884
1885         kfree(tport);
1886 }
1887
1888 static ssize_t tcm_vhost_wwn_show_attr_version(
1889         struct target_fabric_configfs *tf,
1890         char *page)
1891 {
1892         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1893                 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1894                 utsname()->machine);
1895 }
1896
1897 TF_WWN_ATTR_RO(tcm_vhost, version);
1898
1899 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1900         &tcm_vhost_wwn_version.attr,
1901         NULL,
1902 };
1903
1904 static struct target_core_fabric_ops tcm_vhost_ops = {
1905         .get_fabric_name                = tcm_vhost_get_fabric_name,
1906         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
1907         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
1908         .tpg_get_tag                    = tcm_vhost_get_tag,
1909         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
1910         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
1911         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
1912         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
1913         .tpg_check_demo_mode            = tcm_vhost_check_true,
1914         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
1915         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
1916         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
1917         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
1918         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
1919         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
1920         .release_cmd                    = tcm_vhost_release_cmd,
1921         .shutdown_session               = tcm_vhost_shutdown_session,
1922         .close_session                  = tcm_vhost_close_session,
1923         .sess_get_index                 = tcm_vhost_sess_get_index,
1924         .sess_get_initiator_sid         = NULL,
1925         .write_pending                  = tcm_vhost_write_pending,
1926         .write_pending_status           = tcm_vhost_write_pending_status,
1927         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
1928         .get_task_tag                   = tcm_vhost_get_task_tag,
1929         .get_cmd_state                  = tcm_vhost_get_cmd_state,
1930         .queue_data_in                  = tcm_vhost_queue_data_in,
1931         .queue_status                   = tcm_vhost_queue_status,
1932         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
1933         /*
1934          * Setup callers for generic logic in target_core_fabric_configfs.c
1935          */
1936         .fabric_make_wwn                = tcm_vhost_make_tport,
1937         .fabric_drop_wwn                = tcm_vhost_drop_tport,
1938         .fabric_make_tpg                = tcm_vhost_make_tpg,
1939         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
1940         .fabric_post_link               = tcm_vhost_port_link,
1941         .fabric_pre_unlink              = tcm_vhost_port_unlink,
1942         .fabric_make_np                 = NULL,
1943         .fabric_drop_np                 = NULL,
1944         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
1945         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
1946 };
1947
1948 static int tcm_vhost_register_configfs(void)
1949 {
1950         struct target_fabric_configfs *fabric;
1951         int ret;
1952
1953         pr_debug("TCM_VHOST fabric module %s on %s/%s"
1954                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1955                 utsname()->machine);
1956         /*
1957          * Register the top level struct config_item_type with TCM core
1958          */
1959         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
1960         if (IS_ERR(fabric)) {
1961                 pr_err("target_fabric_configfs_init() failed\n");
1962                 return PTR_ERR(fabric);
1963         }
1964         /*
1965          * Setup fabric->tf_ops from our local tcm_vhost_ops
1966          */
1967         fabric->tf_ops = tcm_vhost_ops;
1968         /*
1969          * Setup default attribute lists for various fabric->tf_cit_tmpl
1970          */
1971         TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
1972         TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
1973         TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1974         TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1975         TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1976         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1977         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1978         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1979         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1980         /*
1981          * Register the fabric for use within TCM
1982          */
1983         ret = target_fabric_configfs_register(fabric);
1984         if (ret < 0) {
1985                 pr_err("target_fabric_configfs_register() failed"
1986                                 " for TCM_VHOST\n");
                     target_fabric_configfs_free(fabric);
1987                 return ret;
1988         }
1989         /*
1990          * Setup our local pointer to *fabric
1991          */
1992         tcm_vhost_fabric_configfs = fabric;
1993         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
1994         return 0;
1995 }
1996
1997 static void tcm_vhost_deregister_configfs(void)
1998 {
1999         if (!tcm_vhost_fabric_configfs)
2000                 return;
2001
2002         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2003         tcm_vhost_fabric_configfs = NULL;
2004         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2005 }
2006
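/*
 * Module bring-up order below: private workqueue first, then the
 * /dev/vhost-scsi misc device, then the configfs fabric registration;
 * tcm_vhost_exit() tears these down in the reverse order.
 */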
2007 static int __init tcm_vhost_init(void)
2008 {
2009         int ret = -ENOMEM;
2010         /*
2011          * Use our own dedicated workqueue for submitting I/O into
2012          * target core to avoid contention within system_wq.
2013          */
2014         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2015         if (!tcm_vhost_workqueue)
2016                 goto out;
2017
2018         ret = vhost_scsi_register();
2019         if (ret < 0)
2020                 goto out_destroy_workqueue;
2021
2022         ret = tcm_vhost_register_configfs();
2023         if (ret < 0)
2024                 goto out_vhost_scsi_deregister;
2025
2026         return 0;
2027
2028 out_vhost_scsi_deregister:
2029         vhost_scsi_deregister();
2030 out_destroy_workqueue:
2031         destroy_workqueue(tcm_vhost_workqueue);
2032 out:
2033         return ret;
2034 }
2035
2036 static void tcm_vhost_exit(void)
2037 {
2038         tcm_vhost_deregister_configfs();
2039         vhost_scsi_deregister();
2040         destroy_workqueue(tcm_vhost_workqueue);
2041 }
2042
2043 MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
2044 MODULE_LICENSE("GPL");
2045 module_init(tcm_vhost_init);
2046 module_exit(tcm_vhost_exit);