vhost/scsi: Convert to per-cpu ida_alloc + ida_free command map
drivers/vhost/scsi.c
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2012 RisingTide Systems LLC.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
52
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
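/*
 * TCM_VHOST_DEFAULT_TAGS presumably sizes the per-nexus session tag pool
 * (se_sess->sess_tag_pool) set up at nexus creation, which is not part of
 * this excerpt; tags are handed out via percpu_ida_alloc() in
 * vhost_scsi_get_tag() and returned via percpu_ida_free() in
 * tcm_vhost_release_cmd().
 */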
58 #define TCM_VHOST_DEFAULT_TAGS 256
59
60 struct vhost_scsi_inflight {
61         /* Wait for the flush operation to finish */
62         struct completion comp;
63         /* Refcount for the inflight reqs */
64         struct kref kref;
65 };
66
67 struct tcm_vhost_cmd {
68         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
69         int tvc_vq_desc;
70         /* virtio-scsi initiator task attribute */
71         int tvc_task_attr;
72         /* virtio-scsi initiator data direction */
73         enum dma_data_direction tvc_data_direction;
74         /* Expected data transfer length from virtio-scsi header */
75         u32 tvc_exp_data_len;
76         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
77         u64 tvc_tag;
78         /* The number of scatterlists associated with this cmd */
79         u32 tvc_sgl_count;
80         /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
81         u32 tvc_lun;
82         /* Pointer to the SGL formatted memory from virtio-scsi */
83         struct scatterlist *tvc_sgl;
84         /* Pointer to response */
85         struct virtio_scsi_cmd_resp __user *tvc_resp;
86         /* Pointer to vhost_scsi for our device */
87         struct vhost_scsi *tvc_vhost;
88         /* Pointer to vhost_virtqueue for the cmd */
89         struct vhost_virtqueue *tvc_vq;
90         /* Pointer to vhost nexus memory */
91         struct tcm_vhost_nexus *tvc_nexus;
92         /* The TCM I/O descriptor that is accessed via container_of() */
93         struct se_cmd tvc_se_cmd;
94         /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
95         struct work_struct work;
96         /* Copy of the incoming SCSI command descriptor block (CDB) */
97         unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
98         /* Sense buffer that will be mapped into outgoing status */
99         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
100         /* Completed commands list, serviced from vhost worker thread */
101         struct llist_node tvc_completion_list;
102         /* Used to track inflight cmd */
103         struct vhost_scsi_inflight *inflight;
104 };
105
106 struct tcm_vhost_nexus {
107         /* Pointer to TCM session for I_T Nexus */
108         struct se_session *tvn_se_sess;
109 };
110
111 struct tcm_vhost_nacl {
112         /* Binary World Wide unique Port Name for Vhost Initiator port */
113         u64 iport_wwpn;
114         /* ASCII formatted WWPN for SAS Initiator port */
115         char iport_name[TCM_VHOST_NAMELEN];
116         /* Returned by tcm_vhost_make_nodeacl() */
117         struct se_node_acl se_node_acl;
118 };
119
120 struct tcm_vhost_tpg {
121         /* Vhost port target portal group tag for TCM */
122         u16 tport_tpgt;
123         /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
124         int tv_tpg_port_count;
125         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
126         int tv_tpg_vhost_count;
127         /* list for tcm_vhost_list */
128         struct list_head tv_tpg_list;
129         /* Used to protect access for tpg_nexus */
130         struct mutex tv_tpg_mutex;
131         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
132         struct tcm_vhost_nexus *tpg_nexus;
133         /* Pointer back to tcm_vhost_tport */
134         struct tcm_vhost_tport *tport;
135         /* Returned by tcm_vhost_make_tpg() */
136         struct se_portal_group se_tpg;
137         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
138         struct vhost_scsi *vhost_scsi;
139 };
140
141 struct tcm_vhost_tport {
142         /* SCSI protocol the tport is providing */
143         u8 tport_proto_id;
144         /* Binary World Wide unique Port Name for Vhost Target port */
145         u64 tport_wwpn;
146         /* ASCII formatted WWPN for Vhost Target port */
147         char tport_name[TCM_VHOST_NAMELEN];
148         /* Returned by tcm_vhost_make_tport() */
149         struct se_wwn tport_wwn;
150 };
151
152 struct tcm_vhost_evt {
153         /* event to be sent to guest */
154         struct virtio_scsi_event event;
155         /* event list, serviced from vhost worker thread */
156         struct llist_node list;
157 };
158
159 enum {
160         VHOST_SCSI_VQ_CTL = 0,
161         VHOST_SCSI_VQ_EVT = 1,
162         VHOST_SCSI_VQ_IO = 2,
163 };
164
165 enum {
166         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
167 };
168
169 #define VHOST_SCSI_MAX_TARGET   256
170 #define VHOST_SCSI_MAX_VQ       128
171 #define VHOST_SCSI_MAX_EVENT    128
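/*
 * Of the VHOST_SCSI_MAX_VQ virtqueues, index 0 is the control queue,
 * index 1 the event queue, and indexes 2..VHOST_SCSI_MAX_VQ-1 are the
 * request (I/O) queues; see the kick handler setup in vhost_scsi_open().
 */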
172
173 struct vhost_scsi_virtqueue {
174         struct vhost_virtqueue vq;
175         /*
176          * Reference counting for inflight reqs, used for the flush operation.
177          * At any given time, one reference tracks newly submitted commands,
178          * while we wait for the other one to reach 0.
179          */
180         struct vhost_scsi_inflight inflights[2];
181         /*
182          * Indicate current inflight in use, protected by vq->mutex.
183          * Writers must also take dev mutex and flush under it.
184          */
185         int inflight_idx;
186 };
187
188 struct vhost_scsi {
189         /* Protected by vhost_scsi->dev.mutex */
190         struct tcm_vhost_tpg **vs_tpg;
191         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
192
193         struct vhost_dev dev;
194         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
195
196         struct vhost_work vs_completion_work; /* cmd completion work item */
197         struct llist_head vs_completion_list; /* cmd completion queue */
198
199         struct vhost_work vs_event_work; /* evt injection work item */
200         struct llist_head vs_event_list; /* evt injection queue */
201
202         bool vs_events_missed; /* any missed events, protected by vq->mutex */
203         int vs_events_nr; /* num of pending events, protected by vq->mutex */
204 };
205
206 /* Local pointer to allocated TCM configfs fabric module */
207 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
208
209 static struct workqueue_struct *tcm_vhost_workqueue;
210
211 /* Global mutex protecting the tcm_vhost TPG list for vhost IOCTL access */
212 static DEFINE_MUTEX(tcm_vhost_mutex);
213 static LIST_HEAD(tcm_vhost_list);
214
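/*
 * Number of pages an iovec spans: round the end up and the start down to
 * page boundaries, then count the pages in between.
 */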
215 static int iov_num_pages(struct iovec *iov)
216 {
217         return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
218                ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
219 }
220
221 static void tcm_vhost_done_inflight(struct kref *kref)
222 {
223         struct vhost_scsi_inflight *inflight;
224
225         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
226         complete(&inflight->comp);
227 }
228
229 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
230                                     struct vhost_scsi_inflight *old_inflight[])
231 {
232         struct vhost_scsi_inflight *new_inflight;
233         struct vhost_virtqueue *vq;
234         int idx, i;
235
236         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
237                 vq = &vs->vqs[i].vq;
238
239                 mutex_lock(&vq->mutex);
240
241                 /* store old inflight */
242                 idx = vs->vqs[i].inflight_idx;
243                 if (old_inflight)
244                         old_inflight[i] = &vs->vqs[i].inflights[idx];
245
246                 /* set up new inflight */
247                 vs->vqs[i].inflight_idx = idx ^ 1;
248                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
249                 kref_init(&new_inflight->kref);
250                 init_completion(&new_inflight->comp);
251
252                 mutex_unlock(&vq->mutex);
253         }
254 }
255
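/*
 * Grab a reference on the current inflight counter for a new request.
 * Called from vhost_scsi_get_tag() with vq->mutex held (inflight_idx is
 * documented as protected by vq->mutex); the reference is dropped via
 * tcm_vhost_put_inflight() when the command is released.
 */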
256 static struct vhost_scsi_inflight *
257 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
258 {
259         struct vhost_scsi_inflight *inflight;
260         struct vhost_scsi_virtqueue *svq;
261
262         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
263         inflight = &svq->inflights[svq->inflight_idx];
264         kref_get(&inflight->kref);
265
266         return inflight;
267 }
268
269 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
270 {
271         kref_put(&inflight->kref, tcm_vhost_done_inflight);
272 }
273
274 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
275 {
276         return 1;
277 }
278
279 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
280 {
281         return 0;
282 }
283
284 static char *tcm_vhost_get_fabric_name(void)
285 {
286         return "vhost";
287 }
288
289 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
290 {
291         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
292                                 struct tcm_vhost_tpg, se_tpg);
293         struct tcm_vhost_tport *tport = tpg->tport;
294
295         switch (tport->tport_proto_id) {
296         case SCSI_PROTOCOL_SAS:
297                 return sas_get_fabric_proto_ident(se_tpg);
298         case SCSI_PROTOCOL_FCP:
299                 return fc_get_fabric_proto_ident(se_tpg);
300         case SCSI_PROTOCOL_ISCSI:
301                 return iscsi_get_fabric_proto_ident(se_tpg);
302         default:
303                 pr_err("Unknown tport_proto_id: 0x%02x, using"
304                         " SAS emulation\n", tport->tport_proto_id);
305                 break;
306         }
307
308         return sas_get_fabric_proto_ident(se_tpg);
309 }
310
311 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
312 {
313         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
314                                 struct tcm_vhost_tpg, se_tpg);
315         struct tcm_vhost_tport *tport = tpg->tport;
316
317         return &tport->tport_name[0];
318 }
319
320 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
321 {
322         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
323                                 struct tcm_vhost_tpg, se_tpg);
324         return tpg->tport_tpgt;
325 }
326
327 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
328 {
329         return 1;
330 }
331
332 static u32
333 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
334                               struct se_node_acl *se_nacl,
335                               struct t10_pr_registration *pr_reg,
336                               int *format_code,
337                               unsigned char *buf)
338 {
339         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
340                                 struct tcm_vhost_tpg, se_tpg);
341         struct tcm_vhost_tport *tport = tpg->tport;
342
343         switch (tport->tport_proto_id) {
344         case SCSI_PROTOCOL_SAS:
345                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
346                                         format_code, buf);
347         case SCSI_PROTOCOL_FCP:
348                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
349                                         format_code, buf);
350         case SCSI_PROTOCOL_ISCSI:
351                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
352                                         format_code, buf);
353         default:
354                 pr_err("Unknown tport_proto_id: 0x%02x, using"
355                         " SAS emulation\n", tport->tport_proto_id);
356                 break;
357         }
358
359         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
360                         format_code, buf);
361 }
362
363 static u32
364 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
365                                   struct se_node_acl *se_nacl,
366                                   struct t10_pr_registration *pr_reg,
367                                   int *format_code)
368 {
369         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
370                                 struct tcm_vhost_tpg, se_tpg);
371         struct tcm_vhost_tport *tport = tpg->tport;
372
373         switch (tport->tport_proto_id) {
374         case SCSI_PROTOCOL_SAS:
375                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
376                                         format_code);
377         case SCSI_PROTOCOL_FCP:
378                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
379                                         format_code);
380         case SCSI_PROTOCOL_ISCSI:
381                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
382                                         format_code);
383         default:
384                 pr_err("Unknown tport_proto_id: 0x%02x, using"
385                         " SAS emulation\n", tport->tport_proto_id);
386                 break;
387         }
388
389         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
390                         format_code);
391 }
392
393 static char *
394 tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
395                                     const char *buf,
396                                     u32 *out_tid_len,
397                                     char **port_nexus_ptr)
398 {
399         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
400                                 struct tcm_vhost_tpg, se_tpg);
401         struct tcm_vhost_tport *tport = tpg->tport;
402
403         switch (tport->tport_proto_id) {
404         case SCSI_PROTOCOL_SAS:
405                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
406                                         port_nexus_ptr);
407         case SCSI_PROTOCOL_FCP:
408                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
409                                         port_nexus_ptr);
410         case SCSI_PROTOCOL_ISCSI:
411                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
412                                         port_nexus_ptr);
413         default:
414                 pr_err("Unknown tport_proto_id: 0x%02x, using"
415                         " SAS emulation\n", tport->tport_proto_id);
416                 break;
417         }
418
419         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
420                         port_nexus_ptr);
421 }
422
423 static struct se_node_acl *
424 tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
425 {
426         struct tcm_vhost_nacl *nacl;
427
428         nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
429         if (!nacl) {
430                 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
431                 return NULL;
432         }
433
434         return &nacl->se_node_acl;
435 }
436
437 static void
438 tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
439                              struct se_node_acl *se_nacl)
440 {
441         struct tcm_vhost_nacl *nacl = container_of(se_nacl,
442                         struct tcm_vhost_nacl, se_node_acl);
443         kfree(nacl);
444 }
445
446 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
447 {
448         return 1;
449 }
450
451 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
452 {
453         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
454                                 struct tcm_vhost_cmd, tvc_se_cmd);
455         struct se_session *se_sess = se_cmd->se_sess;
456
457         if (tv_cmd->tvc_sgl_count) {
458                 u32 i;
459                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
460                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
461
462                 kfree(tv_cmd->tvc_sgl);
463         }
464
465         tcm_vhost_put_inflight(tv_cmd->inflight);
466         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
467 }
468
469 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
470 {
471         return 0;
472 }
473
474 static void tcm_vhost_close_session(struct se_session *se_sess)
475 {
476         return;
477 }
478
479 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
480 {
481         return 0;
482 }
483
484 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
485 {
486         /* Go ahead and process the write immediately */
487         target_execute_cmd(se_cmd);
488         return 0;
489 }
490
491 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
492 {
493         return 0;
494 }
495
496 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
497 {
498         return;
499 }
500
501 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
502 {
503         return 0;
504 }
505
506 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
507 {
508         return 0;
509 }
510
511 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
512 {
513         struct vhost_scsi *vs = cmd->tvc_vhost;
514
515         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
516
517         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
518 }
519
520 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
521 {
522         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
523                                 struct tcm_vhost_cmd, tvc_se_cmd);
524         vhost_scsi_complete_cmd(cmd);
525         return 0;
526 }
527
528 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
529 {
530         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
531                                 struct tcm_vhost_cmd, tvc_se_cmd);
532         vhost_scsi_complete_cmd(cmd);
533         return 0;
534 }
535
536 static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
537 {
538         return;
539 }
540
541 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
542 {
543         vs->vs_events_nr--;
544         kfree(evt);
545 }
546
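/*
 * Allocate a hotplug/event descriptor; the caller is expected to hold
 * vq->mutex for the event queue, since vs_events_nr and vs_events_missed
 * are documented as protected by it. Returns NULL and sets
 * vs_events_missed when the VHOST_SCSI_MAX_EVENT limit is hit or the
 * allocation fails.
 */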
547 static struct tcm_vhost_evt *
548 tcm_vhost_allocate_evt(struct vhost_scsi *vs,
549                        u32 event, u32 reason)
550 {
551         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
552         struct tcm_vhost_evt *evt;
553
554         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
555                 vs->vs_events_missed = true;
556                 return NULL;
557         }
558
559         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
560         if (!evt) {
561                 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
562                 vs->vs_events_missed = true;
563                 return NULL;
564         }
565
566         evt->event.event = event;
567         evt->event.reason = reason;
568         vs->vs_events_nr++;
569
570         return evt;
571 }
572
573 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
574 {
575         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
576
577         /* TODO locking against target/backend threads? */
578         transport_generic_free_cmd(se_cmd, 0);
579
580 }
581
582 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
583 {
584         return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
585 }
586
587 static void
588 tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
589 {
590         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
591         struct virtio_scsi_event *event = &evt->event;
592         struct virtio_scsi_event __user *eventp;
593         unsigned out, in;
594         int head, ret;
595
596         if (!vq->private_data) {
597                 vs->vs_events_missed = true;
598                 return;
599         }
600
601 again:
602         vhost_disable_notify(&vs->dev, vq);
603         head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
604                         ARRAY_SIZE(vq->iov), &out, &in,
605                         NULL, NULL);
606         if (head < 0) {
607                 vs->vs_events_missed = true;
608                 return;
609         }
610         if (head == vq->num) {
611                 if (vhost_enable_notify(&vs->dev, vq))
612                         goto again;
613                 vs->vs_events_missed = true;
614                 return;
615         }
616
617         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
618                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
619                                 vq->iov[out].iov_len);
620                 vs->vs_events_missed = true;
621                 return;
622         }
623
624         if (vs->vs_events_missed) {
625                 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
626                 vs->vs_events_missed = false;
627         }
628
629         eventp = vq->iov[out].iov_base;
630         ret = __copy_to_user(eventp, event, sizeof(*event));
631         if (!ret)
632                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
633         else
634                 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
635 }
636
637 static void tcm_vhost_evt_work(struct vhost_work *work)
638 {
639         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
640                                         vs_event_work);
641         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
642         struct tcm_vhost_evt *evt;
643         struct llist_node *llnode;
644
645         mutex_lock(&vq->mutex);
646         llnode = llist_del_all(&vs->vs_event_list);
647         while (llnode) {
648                 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
649                 llnode = llist_next(llnode);
650                 tcm_vhost_do_evt_work(vs, evt);
651                 tcm_vhost_free_evt(vs, evt);
652         }
653         mutex_unlock(&vq->mutex);
654 }
655
656 /* Fill in status and signal that we are done processing this command
657  *
658  * This is scheduled in the vhost work queue so we are called with the owner
659  * process mm and can access the vring.
660  */
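/*
 * Completions are drained from the lockless vs_completion_list; the
 * 'signal' bitmap defers vhost_signal() so each virtqueue is kicked at
 * most once per work invocation.
 */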
661 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
662 {
663         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
664                                         vs_completion_work);
665         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
666         struct virtio_scsi_cmd_resp v_rsp;
667         struct tcm_vhost_cmd *cmd;
668         struct llist_node *llnode;
669         struct se_cmd *se_cmd;
670         int ret, vq;
671
672         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
673         llnode = llist_del_all(&vs->vs_completion_list);
674         while (llnode) {
675                 cmd = llist_entry(llnode, struct tcm_vhost_cmd,
676                                      tvc_completion_list);
677                 llnode = llist_next(llnode);
678                 se_cmd = &cmd->tvc_se_cmd;
679
680                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
681                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
682
683                 memset(&v_rsp, 0, sizeof(v_rsp));
684                 v_rsp.resid = se_cmd->residual_count;
685                 /* TODO is status_qualifier field needed? */
686                 v_rsp.status = se_cmd->scsi_status;
687                 v_rsp.sense_len = se_cmd->scsi_sense_length;
688                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
689                        v_rsp.sense_len);
690                 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
691                 if (likely(ret == 0)) {
692                         struct vhost_scsi_virtqueue *q;
693                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
694                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
695                         vq = q - vs->vqs;
696                         __set_bit(vq, signal);
697                 } else
698                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
699
700                 vhost_scsi_free_cmd(cmd);
701         }
702
703         vq = -1;
704         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
705                 < VHOST_SCSI_MAX_VQ)
706                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
707 }
708
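/*
 * Allocate a tcm_vhost_cmd for a new request: a tag is taken from the
 * per-cpu ida backed session tag pool and used to index into the
 * pre-allocated sess_cmd_map array; tcm_vhost_release_cmd() later returns
 * the tag via percpu_ida_free().
 */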
709 static struct tcm_vhost_cmd *
710 vhost_scsi_get_tag(struct vhost_virtqueue *vq,
711                         struct tcm_vhost_tpg *tpg,
712                         struct virtio_scsi_cmd_req *v_req,
713                         u32 exp_data_len,
714                         int data_direction)
715 {
716         struct tcm_vhost_cmd *cmd;
717         struct tcm_vhost_nexus *tv_nexus;
718         struct se_session *se_sess;
719         int tag;
720
721         tv_nexus = tpg->tpg_nexus;
722         if (!tv_nexus) {
723                 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
724                 return ERR_PTR(-EIO);
725         }
726         se_sess = tv_nexus->tvn_se_sess;
727
728         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
729         cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
730         memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
731
732         cmd->tvc_se_cmd.map_tag = tag;
733         cmd->tvc_tag = v_req->tag;
734         cmd->tvc_task_attr = v_req->task_attr;
735         cmd->tvc_exp_data_len = exp_data_len;
736         cmd->tvc_data_direction = data_direction;
737         cmd->tvc_nexus = tv_nexus;
738         cmd->inflight = tcm_vhost_get_inflight(vq);
739
740         return cmd;
741 }
742
743 /*
744  * Map a user memory range into a scatterlist
745  *
746  * Returns the number of scatterlist entries used or -errno on error.
747  */
748 static int
749 vhost_scsi_map_to_sgl(struct scatterlist *sgl,
750                       unsigned int sgl_count,
751                       struct iovec *iov,
752                       int write)
753 {
754         unsigned int npages = 0, pages_nr, offset, nbytes;
755         struct scatterlist *sg = sgl;
756         void __user *ptr = iov->iov_base;
757         size_t len = iov->iov_len;
758         struct page **pages;
759         int ret, i;
760
761         pages_nr = iov_num_pages(iov);
762         if (pages_nr > sgl_count)
763                 return -ENOBUFS;
764
765         pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
766         if (!pages)
767                 return -ENOMEM;
768
769         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
770         /* No pages were pinned */
771         if (ret < 0)
772                 goto out;
773         /* Fewer pages pinned than requested */
774         if (ret != pages_nr) {
775                 for (i = 0; i < ret; i++)
776                         put_page(pages[i]);
777                 ret = -EFAULT;
778                 goto out;
779         }
780
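        /* Split the pinned user buffer into one scatterlist entry per page. */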
781         while (len > 0) {
782                 offset = (uintptr_t)ptr & ~PAGE_MASK;
783                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
784                 sg_set_page(sg, pages[npages], nbytes, offset);
785                 ptr += nbytes;
786                 len -= nbytes;
787                 sg++;
788                 npages++;
789         }
790
791 out:
792         kfree(pages);
793         return ret;
794 }
795
796 static int
797 vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
798                           struct iovec *iov,
799                           unsigned int niov,
800                           int write)
801 {
802         int ret;
803         unsigned int i;
804         u32 sgl_count;
805         struct scatterlist *sg;
806
807         /*
808          * Find out how long sglist needs to be
809          */
810         sgl_count = 0;
811         for (i = 0; i < niov; i++)
812                 sgl_count += iov_num_pages(&iov[i]);
813
814         /* TODO overflow checking */
815
816         sg = kmalloc(sizeof(cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
817         if (!sg)
818                 return -ENOMEM;
819         pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
820                sg, sgl_count, !sg);
821         sg_init_table(sg, sgl_count);
822
823         cmd->tvc_sgl = sg;
824         cmd->tvc_sgl_count = sgl_count;
825
826         pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
827         for (i = 0; i < niov; i++) {
828                 ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
829                 if (ret < 0) {
830                         for (i = 0; i < cmd->tvc_sgl_count; i++)
831                                 put_page(sg_page(&cmd->tvc_sgl[i]));
832                         kfree(cmd->tvc_sgl);
833                         cmd->tvc_sgl = NULL;
834                         cmd->tvc_sgl_count = 0;
835                         return ret;
836                 }
837
838                 sg += ret;
839                 sgl_count -= ret;
840         }
841         return 0;
842 }
843
844 static void tcm_vhost_submission_work(struct work_struct *work)
845 {
846         struct tcm_vhost_cmd *cmd =
847                 container_of(work, struct tcm_vhost_cmd, work);
848         struct tcm_vhost_nexus *tv_nexus;
849         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
850         struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
851         int rc, sg_no_bidi = 0;
852
853         if (cmd->tvc_sgl_count) {
854                 sg_ptr = cmd->tvc_sgl;
855 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
856 #if 0
857                 if (se_cmd->se_cmd_flags & SCF_BIDI) {
858                         sg_bidi_ptr = NULL;
859                         sg_no_bidi = 0;
860                 }
861 #endif
862         } else {
863                 sg_ptr = NULL;
864         }
865         tv_nexus = cmd->tvc_nexus;
866
867         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
868                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
869                         cmd->tvc_lun, cmd->tvc_exp_data_len,
870                         cmd->tvc_task_attr, cmd->tvc_data_direction,
871                         TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
872                         sg_bidi_ptr, sg_no_bidi);
873         if (rc < 0) {
874                 transport_send_check_condition_and_sense(se_cmd,
875                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
876                 transport_generic_free_cmd(se_cmd, 0);
877         }
878 }
879
880 static void
881 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
882                            struct vhost_virtqueue *vq,
883                            int head, unsigned out)
884 {
885         struct virtio_scsi_cmd_resp __user *resp;
886         struct virtio_scsi_cmd_resp rsp;
887         int ret;
888
889         memset(&rsp, 0, sizeof(rsp));
890         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
891         resp = vq->iov[out].iov_base;
892         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
893         if (!ret)
894                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
895         else
896                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
897 }
898
899 static void
900 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
901 {
902         struct tcm_vhost_tpg **vs_tpg;
903         struct virtio_scsi_cmd_req v_req;
904         struct tcm_vhost_tpg *tpg;
905         struct tcm_vhost_cmd *cmd;
906         u32 exp_data_len, data_first, data_num, data_direction;
907         unsigned out, in, i;
908         int head, ret;
909         u8 target;
910
911         mutex_lock(&vq->mutex);
912         /*
913          * We can handle the vq only after the endpoint is setup by calling the
914          * VHOST_SCSI_SET_ENDPOINT ioctl.
915          */
916         vs_tpg = vq->private_data;
917         if (!vs_tpg)
918                 goto out;
919
920         vhost_disable_notify(&vs->dev, vq);
921
922         for (;;) {
923                 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
924                                         ARRAY_SIZE(vq->iov), &out, &in,
925                                         NULL, NULL);
926                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
927                                         head, out, in);
928                 /* On error, stop handling until the next kick. */
929                 if (unlikely(head < 0))
930                         break;
931                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
932                 if (head == vq->num) {
933                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
934                                 vhost_disable_notify(&vs->dev, vq);
935                                 continue;
936                         }
937                         break;
938                 }
939
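                /*
                 * Classify the descriptor layout: virtio-scsi places the
                 * request header in the first out buffer and the response in
                 * the first in buffer, so any additional out buffers carry
                 * data-out (write) payload and any additional in buffers
                 * carry data-in (read) payload.
                 */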
940 /* FIXME: BIDI operation */
941                 if (out == 1 && in == 1) {
942                         data_direction = DMA_NONE;
943                         data_first = 0;
944                         data_num = 0;
945                 } else if (out == 1 && in > 1) {
946                         data_direction = DMA_FROM_DEVICE;
947                         data_first = out + 1;
948                         data_num = in - 1;
949                 } else if (out > 1 && in == 1) {
950                         data_direction = DMA_TO_DEVICE;
951                         data_first = 1;
952                         data_num = out - 1;
953                 } else {
954                         vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
955                                         out, in);
956                         break;
957                 }
958
959                 /*
960                  * Check for a sane resp buffer so we can report errors to
961                  * the guest.
962                  */
963                 if (unlikely(vq->iov[out].iov_len !=
964                                         sizeof(struct virtio_scsi_cmd_resp))) {
965                         vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
966                                 " bytes\n", vq->iov[out].iov_len);
967                         break;
968                 }
969
970                 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
971                         vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
972                                 " bytes\n", vq->iov[0].iov_len);
973                         break;
974                 }
975                 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
976                         " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
977                 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
978                                 sizeof(v_req));
979                 if (unlikely(ret)) {
980                         vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
981                         break;
982                 }
983
984                 /* Extract the tpgt */
985                 target = v_req.lun[1];
986                 tpg = ACCESS_ONCE(vs_tpg[target]);
987
988                 /* Target does not exist, fail the request */
989                 if (unlikely(!tpg)) {
990                         vhost_scsi_send_bad_target(vs, vq, head, out);
991                         continue;
992                 }
993
994                 exp_data_len = 0;
995                 for (i = 0; i < data_num; i++)
996                         exp_data_len += vq->iov[data_first + i].iov_len;
997
998                 cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
999                                          exp_data_len, data_direction);
1000                 if (IS_ERR(cmd)) {
1001                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1002                                         PTR_ERR(cmd));
1003                         goto err_cmd;
1004                 }
1005                 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1006                         ": %d\n", cmd, exp_data_len, data_direction);
1007
1008                 cmd->tvc_vhost = vs;
1009                 cmd->tvc_vq = vq;
1010                 cmd->tvc_resp = vq->iov[out].iov_base;
1011
1012                 /*
1013                  * Copy the received CDB into cmd->tvc_cdb; it is passed by
1014                  * tcm_vhost_submission_work() down into
1015                  * target_setup_cmd_from_cdb()
1016                  */
1017                 memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
1018                 /*
1019                  * Check that the received CDB size does not exceed our
1020                  * hardcoded max for tcm_vhost
1021                  */
1022                 /* TODO what if cdb was too small for varlen cdb header? */
1023                 if (unlikely(scsi_command_size(cmd->tvc_cdb) >
1024                                         TCM_VHOST_MAX_CDB_SIZE)) {
1025                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1026                                 " exceeds TCM_VHOST_MAX_CDB_SIZE: %d\n",
1027                                 scsi_command_size(cmd->tvc_cdb),
1028                                 TCM_VHOST_MAX_CDB_SIZE);
1029                         goto err_free;
1030                 }
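                /*
                 * Decode the virtio-scsi 8-byte LUN field: byte 1 holds the
                 * target (extracted above), bytes 2-3 hold the LUN in the
                 * flat addressing format (mirroring the encoding used in
                 * tcm_vhost_send_evt()), so mask off the address-method bits.
                 */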
1031                 cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1032
1033                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1034                         cmd->tvc_cdb[0], cmd->tvc_lun);
1035
1036                 if (data_direction != DMA_NONE) {
1037                         ret = vhost_scsi_map_iov_to_sgl(cmd,
1038                                         &vq->iov[data_first], data_num,
1039                                         data_direction == DMA_TO_DEVICE);
1040                         if (unlikely(ret)) {
1041                                 vq_err(vq, "Failed to map iov to sgl\n");
1042                                 goto err_free;
1043                         }
1044                 }
1045
1046                 /*
1047                  * Save the descriptor from vhost_get_vq_desc() to be used to
1048                  * complete the virtio-scsi request in TCM callback context via
1049                  * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1050                  */
1051                 cmd->tvc_vq_desc = head;
1052                 /*
1053                  * Dispatch tv_cmd descriptor for cmwq execution in process
1054                  * context provided by tcm_vhost_workqueue.  This also ensures
1055                  * tv_cmd is executed on the same kworker CPU as this vhost
1056                  * thread to gain positive L2 cache locality effects.
1057                  */
1058                 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1059                 queue_work(tcm_vhost_workqueue, &cmd->work);
1060         }
1061
1062         mutex_unlock(&vq->mutex);
1063         return;
1064
1065 err_free:
1066         vhost_scsi_free_cmd(cmd);
1067 err_cmd:
1068         vhost_scsi_send_bad_target(vs, vq, head, out);
1069 out:
1070         mutex_unlock(&vq->mutex);
1071 }
1072
1073 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1074 {
1075         pr_debug("%s: The handling func for control queue.\n", __func__);
1076 }
1077
1078 static void
1079 tcm_vhost_send_evt(struct vhost_scsi *vs,
1080                    struct tcm_vhost_tpg *tpg,
1081                    struct se_lun *lun,
1082                    u32 event,
1083                    u32 reason)
1084 {
1085         struct tcm_vhost_evt *evt;
1086
1087         evt = tcm_vhost_allocate_evt(vs, event, reason);
1088         if (!evt)
1089                 return;
1090
1091         if (tpg && lun) {
1092                 /* TODO: share lun setup code with virtio-scsi.ko */
1093                 /*
1094                  * Note: evt->event is zeroed when we allocate it and
1095                  * lun[4-7] need to be zero according to virtio-scsi spec.
1096                  */
1097                 evt->event.lun[0] = 0x01;
1098                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1099                 if (lun->unpacked_lun >= 256)
1100                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
1101                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1102         }
1103
1104         llist_add(&evt->list, &vs->vs_event_list);
1105         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1106 }
1107
1108 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1109 {
1110         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1111                                                 poll.work);
1112         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1113
1114         mutex_lock(&vq->mutex);
1115         if (!vq->private_data)
1116                 goto out;
1117
1118         if (vs->vs_events_missed)
1119                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1120 out:
1121         mutex_unlock(&vq->mutex);
1122 }
1123
1124 static void vhost_scsi_handle_kick(struct vhost_work *work)
1125 {
1126         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1127                                                 poll.work);
1128         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1129
1130         vhost_scsi_handle_vq(vs, vq);
1131 }
1132
1133 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1134 {
1135         vhost_poll_flush(&vs->vqs[index].vq.poll);
1136 }
1137
1138 /* Callers must hold dev mutex */
1139 static void vhost_scsi_flush(struct vhost_scsi *vs)
1140 {
1141         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1142         int i;
1143
1144         /* Init new inflight and remember the old inflight */
1145         tcm_vhost_init_inflight(vs, old_inflight);
1146
1147         /*
1148          * The inflight->kref was initialized to 1. We decrement it here to
1149          * indicate the start of the flush operation so that it will reach 0
1150          * when all the reqs are finished.
1151          */
1152         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1153                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1154
1155         /* Flush both the vhost poll and vhost work */
1156         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1157                 vhost_scsi_flush_vq(vs, i);
1158         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1159         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1160
1161         /* Wait for all reqs issued before the flush to be finished */
1162         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1163                 wait_for_completion(&old_inflight[i]->comp);
1164 }
1165
1166 /*
1167  * Called from vhost_scsi_ioctl() context to walk the list of available
1168  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1169  *
1170  *  The lock nesting rule is:
1171  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1172  */
1173 static int
1174 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1175                         struct vhost_scsi_target *t)
1176 {
1177         struct tcm_vhost_tport *tv_tport;
1178         struct tcm_vhost_tpg *tpg;
1179         struct tcm_vhost_tpg **vs_tpg;
1180         struct vhost_virtqueue *vq;
1181         int index, ret, i, len;
1182         bool match = false;
1183
1184         mutex_lock(&tcm_vhost_mutex);
1185         mutex_lock(&vs->dev.mutex);
1186
1187         /* Verify that the ring has been set up correctly. */
1188         for (index = 0; index < vs->dev.nvqs; ++index) {
1190                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1191                         ret = -EFAULT;
1192                         goto out;
1193                 }
1194         }
1195
1196         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1197         vs_tpg = kzalloc(len, GFP_KERNEL);
1198         if (!vs_tpg) {
1199                 ret = -ENOMEM;
1200                 goto out;
1201         }
1202         if (vs->vs_tpg)
1203                 memcpy(vs_tpg, vs->vs_tpg, len);
1204
1205         list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1206                 mutex_lock(&tpg->tv_tpg_mutex);
1207                 if (!tpg->tpg_nexus) {
1208                         mutex_unlock(&tpg->tv_tpg_mutex);
1209                         continue;
1210                 }
1211                 if (tpg->tv_tpg_vhost_count != 0) {
1212                         mutex_unlock(&tpg->tv_tpg_mutex);
1213                         continue;
1214                 }
1215                 tv_tport = tpg->tport;
1216
1217                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1218                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1219                                 kfree(vs_tpg);
1220                                 mutex_unlock(&tpg->tv_tpg_mutex);
1221                                 ret = -EEXIST;
1222                                 goto out;
1223                         }
1224                         tpg->tv_tpg_vhost_count++;
1225                         tpg->vhost_scsi = vs;
1226                         vs_tpg[tpg->tport_tpgt] = tpg;
1227                         smp_mb__after_atomic_inc();
1228                         match = true;
1229                 }
1230                 mutex_unlock(&tpg->tv_tpg_mutex);
1231         }
1232
1233         if (match) {
1234                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1235                        sizeof(vs->vs_vhost_wwpn));
1236                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1237                         vq = &vs->vqs[i].vq;
1238                         mutex_lock(&vq->mutex);
1239                         vq->private_data = vs_tpg;
1240                         vhost_init_used(vq);
1241                         mutex_unlock(&vq->mutex);
1242                 }
1243                 ret = 0;
1244         } else {
1245                 ret = -EEXIST;
1246         }
1247
1248         /*
1249          * Act as synchronize_rcu to make sure access to
1250          * old vs->vs_tpg is finished.
1251          */
1252         vhost_scsi_flush(vs);
1253         kfree(vs->vs_tpg);
1254         vs->vs_tpg = vs_tpg;
1255
1256 out:
1257         mutex_unlock(&vs->dev.mutex);
1258         mutex_unlock(&tcm_vhost_mutex);
1259         return ret;
1260 }
1261
1262 static int
1263 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1264                           struct vhost_scsi_target *t)
1265 {
1266         struct tcm_vhost_tport *tv_tport;
1267         struct tcm_vhost_tpg *tpg;
1268         struct vhost_virtqueue *vq;
1269         bool match = false;
1270         int index, ret, i;
1271         u8 target;
1272
1273         mutex_lock(&tcm_vhost_mutex);
1274         mutex_lock(&vs->dev.mutex);
1275         /* Verify that the ring has been set up correctly. */
1276         for (index = 0; index < vs->dev.nvqs; ++index) {
1277                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1278                         ret = -EFAULT;
1279                         goto err_dev;
1280                 }
1281         }
1282
1283         if (!vs->vs_tpg) {
1284                 ret = 0;
1285                 goto err_dev;
1286         }
1287
1288         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1289                 target = i;
1290                 tpg = vs->vs_tpg[target];
1291                 if (!tpg)
1292                         continue;
1293
1294                 mutex_lock(&tpg->tv_tpg_mutex);
1295                 tv_tport = tpg->tport;
1296                 if (!tv_tport) {
1297                         ret = -ENODEV;
1298                         goto err_tpg;
1299                 }
1300
1301                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1302                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1303                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1304                                 tv_tport->tport_name, tpg->tport_tpgt,
1305                                 t->vhost_wwpn, t->vhost_tpgt);
1306                         ret = -EINVAL;
1307                         goto err_tpg;
1308                 }
1309                 tpg->tv_tpg_vhost_count--;
1310                 tpg->vhost_scsi = NULL;
1311                 vs->vs_tpg[target] = NULL;
1312                 match = true;
1313                 mutex_unlock(&tpg->tv_tpg_mutex);
1314         }
1315         if (match) {
1316                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1317                         vq = &vs->vqs[i].vq;
1318                         mutex_lock(&vq->mutex);
1319                         vq->private_data = NULL;
1320                         mutex_unlock(&vq->mutex);
1321                 }
1322         }
1323         /*
1324          * Act as synchronize_rcu to make sure access to
1325          * old vs->vs_tpg is finished.
1326          */
1327         vhost_scsi_flush(vs);
1328         kfree(vs->vs_tpg);
1329         vs->vs_tpg = NULL;
1330         WARN_ON(vs->vs_events_nr);
1331         mutex_unlock(&vs->dev.mutex);
1332         mutex_unlock(&tcm_vhost_mutex);
1333         return 0;
1334
1335 err_tpg:
1336         mutex_unlock(&tpg->tv_tpg_mutex);
1337 err_dev:
1338         mutex_unlock(&vs->dev.mutex);
1339         mutex_unlock(&tcm_vhost_mutex);
1340         return ret;
1341 }
1342
1343 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1344 {
1345         if (features & ~VHOST_SCSI_FEATURES)
1346                 return -EOPNOTSUPP;
1347
1348         mutex_lock(&vs->dev.mutex);
1349         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1350             !vhost_log_access_ok(&vs->dev)) {
1351                 mutex_unlock(&vs->dev.mutex);
1352                 return -EFAULT;
1353         }
1354         vs->dev.acked_features = features;
1355         smp_wmb();
1356         vhost_scsi_flush(vs);
1357         mutex_unlock(&vs->dev.mutex);
1358         return 0;
1359 }
1360
1361 static int vhost_scsi_open(struct inode *inode, struct file *f)
1362 {
1363         struct vhost_scsi *vs;
1364         struct vhost_virtqueue **vqs;
1365         int r, i;
1366
1367         vs = kzalloc(sizeof(*vs), GFP_KERNEL);
1368         if (!vs)
1369                 return -ENOMEM;
1370
1371         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1372         if (!vqs) {
1373                 kfree(vs);
1374                 return -ENOMEM;
1375         }
1376
1377         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1378         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1379
1380         vs->vs_events_nr = 0;
1381         vs->vs_events_missed = false;
1382
1383         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1384         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1385         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1386         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1387         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1388                 vqs[i] = &vs->vqs[i].vq;
1389                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1390         }
1391         r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1392
1393         tcm_vhost_init_inflight(vs, NULL);
1394
1395         if (r < 0) {
1396                 kfree(vqs);
1397                 kfree(vs);
1398                 return r;
1399         }
1400
1401         f->private_data = vs;
1402         return 0;
1403 }
1404
1405 static int vhost_scsi_release(struct inode *inode, struct file *f)
1406 {
1407         struct vhost_scsi *vs = f->private_data;
1408         struct vhost_scsi_target t;
1409
1410         mutex_lock(&vs->dev.mutex);
1411         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1412         mutex_unlock(&vs->dev.mutex);
1413         vhost_scsi_clear_endpoint(vs, &t);
1414         vhost_dev_stop(&vs->dev);
1415         vhost_dev_cleanup(&vs->dev, false);
1416         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1417         vhost_scsi_flush(vs);
1418         kfree(vs->dev.vqs);
1419         kfree(vs);
1420         return 0;
1421 }
1422
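/*
 * Rough userspace sketch of driving this ioctl interface (QEMU is the
 * canonical user; the WWPN value and local names below are illustrative):
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER);              // handled via vhost_dev_ioctl()
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	// ... VHOST_SET_VRING_* setup for each virtqueue ...
 *	struct vhost_scsi_target t = { 0 };
 *	strncpy(t.vhost_wwpn, "naa.600140554cf3a18e", sizeof(t.vhost_wwpn));
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */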
1423 static long
1424 vhost_scsi_ioctl(struct file *f,
1425                  unsigned int ioctl,
1426                  unsigned long arg)
1427 {
1428         struct vhost_scsi *vs = f->private_data;
1429         struct vhost_scsi_target backend;
1430         void __user *argp = (void __user *)arg;
1431         u64 __user *featurep = argp;
1432         u32 __user *eventsp = argp;
1433         u32 events_missed;
1434         u64 features;
1435         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1436         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1437
1438         switch (ioctl) {
1439         case VHOST_SCSI_SET_ENDPOINT:
1440                 if (copy_from_user(&backend, argp, sizeof backend))
1441                         return -EFAULT;
1442                 if (backend.reserved != 0)
1443                         return -EOPNOTSUPP;
1444
1445                 return vhost_scsi_set_endpoint(vs, &backend);
1446         case VHOST_SCSI_CLEAR_ENDPOINT:
1447                 if (copy_from_user(&backend, argp, sizeof backend))
1448                         return -EFAULT;
1449                 if (backend.reserved != 0)
1450                         return -EOPNOTSUPP;
1451
1452                 return vhost_scsi_clear_endpoint(vs, &backend);
1453         case VHOST_SCSI_GET_ABI_VERSION:
1454                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1455                         return -EFAULT;
1456                 return 0;
1457         case VHOST_SCSI_SET_EVENTS_MISSED:
1458                 if (get_user(events_missed, eventsp))
1459                         return -EFAULT;
1460                 mutex_lock(&vq->mutex);
1461                 vs->vs_events_missed = events_missed;
1462                 mutex_unlock(&vq->mutex);
1463                 return 0;
1464         case VHOST_SCSI_GET_EVENTS_MISSED:
1465                 mutex_lock(&vq->mutex);
1466                 events_missed = vs->vs_events_missed;
1467                 mutex_unlock(&vq->mutex);
1468                 if (put_user(events_missed, eventsp))
1469                         return -EFAULT;
1470                 return 0;
1471         case VHOST_GET_FEATURES:
1472                 features = VHOST_SCSI_FEATURES;
1473                 if (copy_to_user(featurep, &features, sizeof features))
1474                         return -EFAULT;
1475                 return 0;
1476         case VHOST_SET_FEATURES:
1477                 if (copy_from_user(&features, featurep, sizeof features))
1478                         return -EFAULT;
1479                 return vhost_scsi_set_features(vs, features);
1480         default:
1481                 mutex_lock(&vs->dev.mutex);
1482                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1483                 /* TODO: flush backend after dev ioctl. */
1484                 if (r == -ENOIOCTLCMD)
1485                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1486                 mutex_unlock(&vs->dev.mutex);
1487                 return r;
1488         }
1489 }
1490
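/*
 * Illustrative userspace usage of the ioctls above (not part of this
 * driver; the WWPN string is a placeholder, and the generic vhost vring
 * and memory-table ioctls handled via vhost_dev_ioctl() are omitted):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	int abi;
 *	struct vhost_scsi_target t;
 *
 *	ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &abi);
 *	memset(&t, 0, sizeof(t));
 *	strncpy(t.vhost_wwpn, "naa.600140512345678", sizeof(t.vhost_wwpn) - 1);
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t);		attach the target
 *	...
 *	ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &t);	detach it again
 */
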
1491 #ifdef CONFIG_COMPAT
1492 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1493                                 unsigned long arg)
1494 {
1495         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1496 }
1497 #endif
1498
1499 static const struct file_operations vhost_scsi_fops = {
1500         .owner          = THIS_MODULE,
1501         .release        = vhost_scsi_release,
1502         .unlocked_ioctl = vhost_scsi_ioctl,
1503 #ifdef CONFIG_COMPAT
1504         .compat_ioctl   = vhost_scsi_compat_ioctl,
1505 #endif
1506         .open           = vhost_scsi_open,
1507         .llseek         = noop_llseek,
1508 };
1509
1510 static struct miscdevice vhost_scsi_misc = {
1511         MISC_DYNAMIC_MINOR,
1512         "vhost-scsi",
1513         &vhost_scsi_fops,
1514 };
1515
1516 static int __init vhost_scsi_register(void)
1517 {
1518         return misc_register(&vhost_scsi_misc);
1519 }
1520
1521 static int vhost_scsi_deregister(void)
1522 {
1523         return misc_deregister(&vhost_scsi_misc);
1524 }
1525
1526 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1527 {
1528         switch (tport->tport_proto_id) {
1529         case SCSI_PROTOCOL_SAS:
1530                 return "SAS";
1531         case SCSI_PROTOCOL_FCP:
1532                 return "FCP";
1533         case SCSI_PROTOCOL_ISCSI:
1534                 return "iSCSI";
1535         default:
1536                 break;
1537         }
1538
1539         return "Unknown";
1540 }
1541
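/*
 * Push a VIRTIO_SCSI_T_TRANSPORT_RESET event onto the event virtqueue when
 * a LUN is linked or unlinked below, but only if the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG; without that feature, hot(un)plug stays invisible
 * to the initiator.
 */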
1542 static void
1543 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1544                   struct se_lun *lun, bool plug)
1545 {
1546
1547         struct vhost_scsi *vs = tpg->vhost_scsi;
1548         struct vhost_virtqueue *vq;
1549         u32 reason;
1550
1551         if (!vs)
1552                 return;
1553
1554         mutex_lock(&vs->dev.mutex);
1555         if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1556                 mutex_unlock(&vs->dev.mutex);
1557                 return;
1558         }
1559
1560         if (plug)
1561                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1562         else
1563                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1564
1565         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1566         mutex_lock(&vq->mutex);
1567         tcm_vhost_send_evt(vs, tpg, lun,
1568                         VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1569         mutex_unlock(&vq->mutex);
1570         mutex_unlock(&vs->dev.mutex);
1571 }
1572
1573 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1574 {
1575         tcm_vhost_do_plug(tpg, lun, true);
1576 }
1577
1578 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1579 {
1580         tcm_vhost_do_plug(tpg, lun, false);
1581 }
1582
1583 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1584                                struct se_lun *lun)
1585 {
1586         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1587                                 struct tcm_vhost_tpg, se_tpg);
1588
1589         mutex_lock(&tcm_vhost_mutex);
1590
1591         mutex_lock(&tpg->tv_tpg_mutex);
1592         tpg->tv_tpg_port_count++;
1593         mutex_unlock(&tpg->tv_tpg_mutex);
1594
1595         tcm_vhost_hotplug(tpg, lun);
1596
1597         mutex_unlock(&tcm_vhost_mutex);
1598
1599         return 0;
1600 }
1601
1602 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1603                                   struct se_lun *lun)
1604 {
1605         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1606                                 struct tcm_vhost_tpg, se_tpg);
1607
1608         mutex_lock(&tcm_vhost_mutex);
1609
1610         mutex_lock(&tpg->tv_tpg_mutex);
1611         tpg->tv_tpg_port_count--;
1612         mutex_unlock(&tpg->tv_tpg_mutex);
1613
1614         tcm_vhost_hotunplug(tpg, lun);
1615
1616         mutex_unlock(&tcm_vhost_mutex);
1617 }
1618
1619 static struct se_node_acl *
1620 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1621                        struct config_group *group,
1622                        const char *name)
1623 {
1624         struct se_node_acl *se_nacl, *se_nacl_new;
1625         struct tcm_vhost_nacl *nacl;
1626         u64 wwpn = 0;
1627         u32 nexus_depth;
1628
1629         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1630                 return ERR_PTR(-EINVAL); */
1631         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1632         if (!se_nacl_new)
1633                 return ERR_PTR(-ENOMEM);
1634
1635         nexus_depth = 1;
1636         /*
1637          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1638          * when converting a NodeACL from demo mode -> explict
1639          * when converting a NodeACL from demo mode -> explicit
1640         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1641                                 name, nexus_depth);
1642         if (IS_ERR(se_nacl)) {
1643                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1644                 return se_nacl;
1645         }
1646         /*
1647          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1648          */
1649         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1650         nacl->iport_wwpn = wwpn;
1651
1652         return se_nacl;
1653 }
1654
1655 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1656 {
1657         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1658                                 struct tcm_vhost_nacl, se_node_acl);
1659         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1660         kfree(nacl);
1661 }
1662
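/*
 * Create the I_T nexus for a TPG: allocate the nexus, initialize a TCM
 * session whose tag pool preallocates TCM_VHOST_DEFAULT_TAGS command
 * descriptors, and register the session as active with the target core.
 */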
1663 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1664                                 const char *name)
1665 {
1666         struct se_portal_group *se_tpg;
1667         struct tcm_vhost_nexus *tv_nexus;
1668
1669         mutex_lock(&tpg->tv_tpg_mutex);
1670         if (tpg->tpg_nexus) {
1671                 mutex_unlock(&tpg->tv_tpg_mutex);
1672                 pr_debug("tpg->tpg_nexus already exists\n");
1673                 return -EEXIST;
1674         }
1675         se_tpg = &tpg->se_tpg;
1676
1677         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1678         if (!tv_nexus) {
1679                 mutex_unlock(&tpg->tv_tpg_mutex);
1680                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1681                 return -ENOMEM;
1682         }
1683         /*
1684          *  Initialize the struct se_session pointer and set up the tag pool
1685          *  for struct tcm_vhost_cmd descriptors
1686          */
1687         tv_nexus->tvn_se_sess = transport_init_session_tags(
1688                                         TCM_VHOST_DEFAULT_TAGS,
1689                                         sizeof(struct tcm_vhost_cmd));
1690         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1691                 mutex_unlock(&tpg->tv_tpg_mutex);
1692                 kfree(tv_nexus);
1693                 return -ENOMEM;
1694         }
1695         /*
1696          * Since we are running in 'demo mode', this call will generate a
1697          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1698          * the SCSI Initiator port name of the passed configfs group 'name'.
1699          */
1700         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1701                                 se_tpg, (unsigned char *)name);
1702         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1703                 mutex_unlock(&tpg->tv_tpg_mutex);
1704                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1705                                 " for %s\n", name);
1706                 transport_free_session(tv_nexus->tvn_se_sess);
1707                 kfree(tv_nexus);
1708                 return -ENOMEM;
1709         }
1710         /*
1711          * Now register the TCM vhost virtual I_T Nexus as active with the
1712          * call to __transport_register_session()
1713          */
1714         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1715                         tv_nexus->tvn_se_sess, tv_nexus);
1716         tpg->tpg_nexus = tv_nexus;
1717
1718         mutex_unlock(&tpg->tv_tpg_mutex);
1719         return 0;
1720 }
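
/*
 * The tag pool initialized above backs the per-cpu command map for this
 * nexus.  A rough sketch of how the submission path is expected to carve
 * commands out of it (the exact field that remembers the tag on the
 * command is an assumption here; see the request handling earlier in this
 * file for the real code):
 *
 *	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
 *	cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
 *	...
 *	percpu_ida_free(&se_sess->sess_tag_pool, tag);
 */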
1721
1722 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1723 {
1724         struct se_session *se_sess;
1725         struct tcm_vhost_nexus *tv_nexus;
1726
1727         mutex_lock(&tpg->tv_tpg_mutex);
1728         tv_nexus = tpg->tpg_nexus;
1729         if (!tv_nexus) {
1730                 mutex_unlock(&tpg->tv_tpg_mutex);
1731                 return -ENODEV;
1732         }
1733
1734         se_sess = tv_nexus->tvn_se_sess;
1735         if (!se_sess) {
1736                 mutex_unlock(&tpg->tv_tpg_mutex);
1737                 return -ENODEV;
1738         }
1739
1740         if (tpg->tv_tpg_port_count != 0) {
1741                 mutex_unlock(&tpg->tv_tpg_mutex);
1742                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1743                         " active TPG port count: %d\n",
1744                         tpg->tv_tpg_port_count);
1745                 return -EBUSY;
1746         }
1747
1748         if (tpg->tv_tpg_vhost_count != 0) {
1749                 mutex_unlock(&tpg->tv_tpg_mutex);
1750                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1751                         " active TPG vhost count: %d\n",
1752                         tpg->tv_tpg_vhost_count);
1753                 return -EBUSY;
1754         }
1755
1756         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1757                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1758                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1759         /*
1760          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1761          */
1762         transport_deregister_session(tv_nexus->tvn_se_sess);
1763         tpg->tpg_nexus = NULL;
1764         mutex_unlock(&tpg->tv_tpg_mutex);
1765
1766         kfree(tv_nexus);
1767         return 0;
1768 }
1769
1770 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1771                                         char *page)
1772 {
1773         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1774                                 struct tcm_vhost_tpg, se_tpg);
1775         struct tcm_vhost_nexus *tv_nexus;
1776         ssize_t ret;
1777
1778         mutex_lock(&tpg->tv_tpg_mutex);
1779         tv_nexus = tpg->tpg_nexus;
1780         if (!tv_nexus) {
1781                 mutex_unlock(&tpg->tv_tpg_mutex);
1782                 return -ENODEV;
1783         }
1784         ret = snprintf(page, PAGE_SIZE, "%s\n",
1785                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1786         mutex_unlock(&tpg->tv_tpg_mutex);
1787
1788         return ret;
1789 }
1790
1791 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1792                                          const char *page,
1793                                          size_t count)
1794 {
1795         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1796                                 struct tcm_vhost_tpg, se_tpg);
1797         struct tcm_vhost_tport *tport_wwn = tpg->tport;
1798         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1799         int ret;
1800         /*
1801          * Shut down the active I_T nexus if 'NULL' is passed.
1802          */
1803         if (!strncmp(page, "NULL", 4)) {
1804                 ret = tcm_vhost_drop_nexus(tpg);
1805                 return (!ret) ? count : ret;
1806         }
1807         /*
1808          * Otherwise make sure the passed virtual Initiator port WWN matches
1809          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1810          * tcm_vhost_make_nexus().
1811          */
1812         if (strlen(page) >= TCM_VHOST_NAMELEN) {
1813                 pr_err("Emulated NAA SAS Address: %s, exceeds"
1814                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
1815                 return -EINVAL;
1816         }
1817         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1818
1819         ptr = strstr(i_port, "naa.");
1820         if (ptr) {
1821                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1822                         pr_err("Passed SAS Initiator Port %s does not"
1823                                 " match target port protoid: %s\n", i_port,
1824                                 tcm_vhost_dump_proto_id(tport_wwn));
1825                         return -EINVAL;
1826                 }
1827                 port_ptr = &i_port[0];
1828                 goto check_newline;
1829         }
1830         ptr = strstr(i_port, "fc.");
1831         if (ptr) {
1832                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1833                         pr_err("Passed FCP Initiator Port %s does not"
1834                                 " match target port protoid: %s\n", i_port,
1835                                 tcm_vhost_dump_proto_id(tport_wwn));
1836                         return -EINVAL;
1837                 }
1838                 port_ptr = &i_port[3]; /* Skip over "fc." */
1839                 goto check_newline;
1840         }
1841         ptr = strstr(i_port, "iqn.");
1842         if (ptr) {
1843                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1844                         pr_err("Passed iSCSI Initiator Port %s does not"
1845                                 " match target port protoid: %s\n", i_port,
1846                                 tcm_vhost_dump_proto_id(tport_wwn));
1847                         return -EINVAL;
1848                 }
1849                 port_ptr = &i_port[0];
1850                 goto check_newline;
1851         }
1852         pr_err("Unable to locate prefix for emulated Initiator Port:"
1853                         " %s\n", i_port);
1854         return -EINVAL;
1855         /*
1856          * Clear any trailing newline for the NAA WWN
1857          */
1858 check_newline:
1859         if (i_port[strlen(i_port)-1] == '\n')
1860                 i_port[strlen(i_port)-1] = '\0';
1861
1862         ret = tcm_vhost_make_nexus(tpg, port_ptr);
1863         if (ret < 0)
1864                 return ret;
1865
1866         return count;
1867 }
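
/*
 * Example configfs interaction for the "nexus" attribute below, assuming
 * configfs is mounted at /sys/kernel/config and using placeholder WWPNs:
 * writing "naa.60014051234567ff" to
 * /sys/kernel/config/target/vhost/naa.600140512345678/tpgt_1/nexus creates
 * the I_T nexus, and writing "NULL" drops it again.
 */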
1868
1869 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1870
1871 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1872         &tcm_vhost_tpg_nexus.attr,
1873         NULL,
1874 };
1875
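/*
 * configfs: creating a "tpgt_<n>" directory under a vhost WWN lands here;
 * the numeric suffix becomes the TPG tag, and the new se_tpg is registered
 * with the target core and added to the global tcm_vhost_list.
 */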
1876 static struct se_portal_group *
1877 tcm_vhost_make_tpg(struct se_wwn *wwn,
1878                    struct config_group *group,
1879                    const char *name)
1880 {
1881         struct tcm_vhost_tport *tport = container_of(wwn,
1882                         struct tcm_vhost_tport, tport_wwn);
1883
1884         struct tcm_vhost_tpg *tpg;
1885         unsigned long tpgt;
1886         int ret;
1887
1888         if (strstr(name, "tpgt_") != name)
1889                 return ERR_PTR(-EINVAL);
1890         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1891                 return ERR_PTR(-EINVAL);
1892
1893         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1894         if (!tpg) {
1895                 pr_err("Unable to allocate struct tcm_vhost_tpg");
1896                 return ERR_PTR(-ENOMEM);
1897         }
1898         mutex_init(&tpg->tv_tpg_mutex);
1899         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1900         tpg->tport = tport;
1901         tpg->tport_tpgt = tpgt;
1902
1903         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1904                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1905         if (ret < 0) {
1906                 kfree(tpg);
1907                 return NULL;
1908         }
1909         mutex_lock(&tcm_vhost_mutex);
1910         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1911         mutex_unlock(&tcm_vhost_mutex);
1912
1913         return &tpg->se_tpg;
1914 }
1915
1916 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1917 {
1918         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1919                                 struct tcm_vhost_tpg, se_tpg);
1920
1921         mutex_lock(&tcm_vhost_mutex);
1922         list_del(&tpg->tv_tpg_list);
1923         mutex_unlock(&tcm_vhost_mutex);
1924         /*
1925          * Release the virtual I_T Nexus for this vhost TPG
1926          */
1927         tcm_vhost_drop_nexus(tpg);
1928         /*
1929          * Deregister the se_tpg from TCM.
1930          */
1931         core_tpg_deregister(se_tpg);
1932         kfree(tpg);
1933 }
1934
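/*
 * configfs: creating a WWN directory under target/vhost/ lands here; the
 * name's prefix (naa., fc. or iqn.) selects the emulated protocol
 * identifier recorded in the new tport.
 */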
1935 static struct se_wwn *
1936 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1937                      struct config_group *group,
1938                      const char *name)
1939 {
1940         struct tcm_vhost_tport *tport;
1941         char *ptr;
1942         u64 wwpn = 0;
1943         int off = 0;
1944
1945         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1946                 return ERR_PTR(-EINVAL); */
1947
1948         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1949         if (!tport) {
1950                 pr_err("Unable to allocate struct tcm_vhost_tport");
1951                 return ERR_PTR(-ENOMEM);
1952         }
1953         tport->tport_wwpn = wwpn;
1954         /*
1955          * Determine the emulated Protocol Identifier and Target Port Name
1956          * based on the incoming configfs directory name.
1957          */
1958         ptr = strstr(name, "naa.");
1959         if (ptr) {
1960                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1961                 goto check_len;
1962         }
1963         ptr = strstr(name, "fc.");
1964         if (ptr) {
1965                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1966                 off = 3; /* Skip over "fc." */
1967                 goto check_len;
1968         }
1969         ptr = strstr(name, "iqn.");
1970         if (ptr) {
1971                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1972                 goto check_len;
1973         }
1974
1975         pr_err("Unable to locate prefix for emulated Target Port:"
1976                         " %s\n", name);
1977         kfree(tport);
1978         return ERR_PTR(-EINVAL);
1979
1980 check_len:
1981         if (strlen(name) >= TCM_VHOST_NAMELEN) {
1982                 pr_err("Emulated %s Address: %s, exceeds"
1983                         " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
1984                         TCM_VHOST_NAMELEN);
1985                 kfree(tport);
1986                 return ERR_PTR(-EINVAL);
1987         }
1988         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1989
1990         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1991                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1992
1993         return &tport->tport_wwn;
1994 }
1995
1996 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1997 {
1998         struct tcm_vhost_tport *tport = container_of(wwn,
1999                                 struct tcm_vhost_tport, tport_wwn);
2000
2001         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2002                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2003                 tport->tport_name);
2004
2005         kfree(tport);
2006 }
2007
2008 static ssize_t
2009 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2010                                 char *page)
2011 {
2012         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2013                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2014                 utsname()->machine);
2015 }
2016
2017 TF_WWN_ATTR_RO(tcm_vhost, version);
2018
2019 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2020         &tcm_vhost_wwn_version.attr,
2021         NULL,
2022 };
2023
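/*
 * Fabric callbacks handed to the target core.  Demo mode is enabled and
 * write protection is disabled, so initiator node ACLs are generated on
 * the fly for the name written to the nexus attribute above.
 */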
2024 static struct target_core_fabric_ops tcm_vhost_ops = {
2025         .get_fabric_name                = tcm_vhost_get_fabric_name,
2026         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2027         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2028         .tpg_get_tag                    = tcm_vhost_get_tag,
2029         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2030         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2031         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2032         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2033         .tpg_check_demo_mode            = tcm_vhost_check_true,
2034         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2035         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2036         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2037         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2038         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2039         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2040         .release_cmd                    = tcm_vhost_release_cmd,
2041         .check_stop_free                = vhost_scsi_check_stop_free,
2042         .shutdown_session               = tcm_vhost_shutdown_session,
2043         .close_session                  = tcm_vhost_close_session,
2044         .sess_get_index                 = tcm_vhost_sess_get_index,
2045         .sess_get_initiator_sid         = NULL,
2046         .write_pending                  = tcm_vhost_write_pending,
2047         .write_pending_status           = tcm_vhost_write_pending_status,
2048         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2049         .get_task_tag                   = tcm_vhost_get_task_tag,
2050         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2051         .queue_data_in                  = tcm_vhost_queue_data_in,
2052         .queue_status                   = tcm_vhost_queue_status,
2053         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2054         /*
2055          * Setup callers for generic logic in target_core_fabric_configfs.c
2056          */
2057         .fabric_make_wwn                = tcm_vhost_make_tport,
2058         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2059         .fabric_make_tpg                = tcm_vhost_make_tpg,
2060         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2061         .fabric_post_link               = tcm_vhost_port_link,
2062         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2063         .fabric_make_np                 = NULL,
2064         .fabric_drop_np                 = NULL,
2065         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2066         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2067 };
2068
2069 static int tcm_vhost_register_configfs(void)
2070 {
2071         struct target_fabric_configfs *fabric;
2072         int ret;
2073
2074         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2075                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2076                 utsname()->machine);
2077         /*
2078          * Register the top level struct config_item_type with TCM core
2079          */
2080         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2081         if (IS_ERR(fabric)) {
2082                 pr_err("target_fabric_configfs_init() failed\n");
2083                 return PTR_ERR(fabric);
2084         }
2085         /*
2086          * Setup fabric->tf_ops from our local tcm_vhost_ops
2087          */
2088         fabric->tf_ops = tcm_vhost_ops;
2089         /*
2090          * Setup default attribute lists for various fabric->tf_cit_tmpl
2091          */
2092         TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2093         TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2094         TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
2095         TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
2096         TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
2097         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2098         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2099         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2100         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2101         /*
2102          * Register the fabric for use within TCM
2103          */
2104         ret = target_fabric_configfs_register(fabric);
2105         if (ret < 0) {
2106                 pr_err("target_fabric_configfs_register() failed"
2107                                 " for TCM_VHOST\n");
2108                 return ret;
2109         }
2110         /*
2111          * Setup our local pointer to *fabric
2112          */
2113         tcm_vhost_fabric_configfs = fabric;
2114         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2115         return 0;
2116 }
2117
2118 static void tcm_vhost_deregister_configfs(void)
2119 {
2120         if (!tcm_vhost_fabric_configfs)
2121                 return;
2122
2123         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2124         tcm_vhost_fabric_configfs = NULL;
2125         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2126 }
2127
2128 static int __init tcm_vhost_init(void)
2129 {
2130         int ret = -ENOMEM;
2131         /*
2132          * Use our own dedicated workqueue for submitting I/O into
2133          * target core to avoid contention within system_wq.
2134          */
2135         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2136         if (!tcm_vhost_workqueue)
2137                 goto out;
2138
2139         ret = vhost_scsi_register();
2140         if (ret < 0)
2141                 goto out_destroy_workqueue;
2142
2143         ret = tcm_vhost_register_configfs();
2144         if (ret < 0)
2145                 goto out_vhost_scsi_deregister;
2146
2147         return 0;
2148
2149 out_vhost_scsi_deregister:
2150         vhost_scsi_deregister();
2151 out_destroy_workqueue:
2152         destroy_workqueue(tcm_vhost_workqueue);
2153 out:
2154         return ret;
2155 }
2156
2157 static void tcm_vhost_exit(void)
2158 {
2159         tcm_vhost_deregister_configfs();
2160         vhost_scsi_deregister();
2161         destroy_workqueue(tcm_vhost_workqueue);
2162 }
2163
2164 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2165 MODULE_ALIAS("tcm_vhost");
2166 MODULE_LICENSE("GPL");
2167 module_init(tcm_vhost_init);
2168 module_exit(tcm_vhost_exit);