/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.c"
#include "vhost.h"
#include "tcm_vhost.h"

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

/*
 * VIRTIO_RING_F_EVENT_IDX seems broken. It is not clear whether the bug
 * is on the guest or the host side, but masking the feature bit off
 * helps. Note the mask must clear the feature *bit*, not the bit index.
 * TODO: debug and remove the workaround.
 */
enum {
        VHOST_SCSI_FEATURES = (VHOST_FEATURES &
                               ~(1ULL << VIRTIO_RING_F_EVENT_IDX)) |
                              (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any time, one reference tracks new commands submitted, while we
         * wait for the other one to reach 0.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

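/*
 * Number of pages spanned by an iovec, counting partial pages at either
 * end of the buffer.
 */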
static int iov_num_pages(struct iovec *iov)
{
        return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

static void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

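/*
 * Flip each virtqueue over to a fresh inflight counter and, when asked,
 * hand the old counters back to the caller so it can wait for the
 * requests still charged against them to drain.
 */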
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* setup new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

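/*
 * TCM fabric configfs callbacks follow. vhost-scsi keeps no per-session
 * or per-ACL state of its own, so most of these are fixed-value stubs.
 */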
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code,
        unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
        const char *buf,
        u32 *out_tid_len,
        char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
        struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        return;
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

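/*
 * Queue a finished command on the completion llist and kick the
 * completion work item; called from the TCM completion callbacks below.
 */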
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
        struct vhost_scsi *vs = tv_cmd->tvc_vhost;

        llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(tv_cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(tv_cmd);
        return 0;
}

static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

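/*
 * Allocate an event descriptor. vs_events_nr is protected by the event
 * vq mutex, so callers must hold it. If allocation fails or the
 * VHOST_SCSI_MAX_EVENT cap is reached, the miss is recorded in
 * vs_events_missed instead.
 */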
static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = event;
        evt->event.reason = reason;
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
        struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 1);

        if (tv_cmd->tvc_sgl_count) {
                u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));

                kfree(tv_cmd->tvc_sgl);
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);

        kfree(tv_cmd);
}

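/*
 * Deliver a single event into a guest-provided buffer on the event vq.
 * Called with the event vq mutex held; any failure to deliver is
 * recorded in vs_events_missed so the guest can be told to rescan.
 */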
static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
        struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *tv_cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &tv_cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
                memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
                       v_rsp.sense_len);
                ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
                        q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(tv_cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

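/*
 * Allocate and fill a command descriptor from a parsed virtio-scsi
 * request. Runs in vhost worker context, hence GFP_ATOMIC, and takes a
 * reference on the current inflight counter for flush accounting.
 */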
static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
        struct vhost_virtqueue *vq,
        struct tcm_vhost_tpg *tv_tpg,
        struct virtio_scsi_cmd_req *v_req,
        u32 exp_data_len,
        int data_direction)
{
        struct tcm_vhost_cmd *tv_cmd;
        struct tcm_vhost_nexus *tv_nexus;

        tv_nexus = tv_tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }

        tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
        if (!tv_cmd) {
                pr_err("Unable to allocate struct tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
        tv_cmd->tvc_tag = v_req->tag;
        tv_cmd->tvc_task_attr = v_req->task_attr;
        tv_cmd->tvc_exp_data_len = exp_data_len;
        tv_cmd->tvc_data_direction = data_direction;
        tv_cmd->tvc_nexus = tv_nexus;
        tv_cmd->inflight = tcm_vhost_get_inflight(vq);

        return tv_cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
        unsigned int sgl_count, struct iovec *iov, int write)
{
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
        struct page **pages;
        int ret, i;

        pages_nr = iov_num_pages(iov);
        if (pages_nr > sgl_count)
                return -ENOBUFS;

        pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than wanted */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        kfree(pages);
        return ret;
}

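/*
 * Build one scatterlist covering all data iovecs of a request: size the
 * table in a first pass over the iovecs, then pin the user pages and
 * fill the entries. On failure every page pinned so far is released.
 */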
static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
        struct iovec *iov, unsigned int niov, int write)
{
        int ret;
        unsigned int i;
        u32 sgl_count;
        struct scatterlist *sg;

        /*
         * Find out how long the sglist needs to be
         */
        sgl_count = 0;
        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);

        /* TODO overflow checking */

        sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
        pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
        sg_init_table(sg, sgl_count);

        tv_cmd->tvc_sgl = sg;
        tv_cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
                if (ret < 0) {
                        /*
                         * Only release the pages mapped so far; later
                         * entries were zeroed by sg_init_table() and
                         * hold no page reference.
                         */
                        unsigned int mapped = sg - tv_cmd->tvc_sgl;

                        for (i = 0; i < mapped; i++)
                                put_page(sg_page(&tv_cmd->tvc_sgl[i]));
                        kfree(tv_cmd->tvc_sgl);
                        tv_cmd->tvc_sgl = NULL;
                        tv_cmd->tvc_sgl_count = 0;
                        return ret;
                }

                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

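/*
 * Dispatch a queued command into the TCM core. Runs in process context
 * on tcm_vhost_workqueue; completion arrives later via
 * tcm_vhost_queue_data_in() or tcm_vhost_queue_status().
 */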
static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *tv_cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
        int rc, sg_no_bidi = 0;

        if (tv_cmd->tvc_sgl_count) {
                sg_ptr = tv_cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
                        sg_bidi_ptr = NULL;
                        sg_no_bidi = 0;
                }
#endif
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = tv_cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
                        tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
                        tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
                        0, sg_ptr, tv_cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq, int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

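/*
 * Drain and process one I/O virtqueue. Each request carries the
 * virtio_scsi_cmd_req in its first readable iovec and expects the
 * virtio_scsi_cmd_resp in its first writable one; any further iovecs on
 * the readable or writable side are the data payload, which determines
 * the DMA direction (see the buffer layout checks below).
 */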
static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_cmd *tv_cmd;
        u32 exp_data_len, data_first, data_num, data_direction;
        unsigned out, in, i;
        int head, ret;
        u8 target;

        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
         *
         * TODO: Check that we are running from vhost_worker which acts
         * as read-side critical section for vhost kind of RCU.
         * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
         */
        vs_tpg = rcu_dereference_check(vq->private_data, 1);
        if (!vs_tpg)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

/* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
                                " bytes\n", vq->iov[0].iov_len);
                        break;
                }
                pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
                        " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
                ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
                                sizeof(v_req));
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }

                /* Extract the tpgt */
                target = v_req.lun[1];
                tv_tpg = ACCESS_ONCE(vs_tpg[target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tv_tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                exp_data_len = 0;
                for (i = 0; i < data_num; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;

                tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
                                        exp_data_len, data_direction);
                if (IS_ERR(tv_cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
                                        PTR_ERR(tv_cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", tv_cmd, exp_data_len, data_direction);

                tv_cmd->tvc_vhost = vs;
                tv_cmd->tvc_vq = vq;
                tv_cmd->tvc_resp = vq->iov[out].iov_base;

                /*
                 * Copy the received CDB descriptor into tv_cmd->tvc_cdb
                 * that will be used by tcm_vhost_new_cmd_map() and down into
                 * target_setup_cmd_from_cdb()
                 */
                memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for tcm_vhost
                 */
                /* TODO what if cdb was too small for varlen cdb header? */
                if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
                                        TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds TCM_VHOST_MAX_CDB_SIZE: %d\n",
                                scsi_command_size(tv_cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
                        goto err_free;
                }
                tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
                                        &vq->iov[data_first], data_num,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }

                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                tv_cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &tv_cmd->work);
        }

        mutex_unlock(&vq->mutex);
        return;

err_free:
        vhost_scsi_free_cmd(tv_cmd);
err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

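/*
 * Queue a hotplug/reset event for the guest. The 4-byte LUN field uses
 * the virtio-scsi single-level format: 0x01 in byte 0, the target in
 * byte 1, and the LUN in bytes 2-3 with 0x40 flagging LUNs >= 256.
 */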
static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
        struct se_lun *lun, u32 event, u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to the virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        tcm_vhost_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int vhost_scsi_set_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
{
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
        vs_tpg = kzalloc(len, GFP_KERNEL);
        if (!vs_tpg) {
                ret = -ENOMEM;
                goto out;
        }
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);

        list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
                mutex_lock(&tv_tpg->tv_tpg_mutex);
                if (!tv_tpg->tpg_nexus) {
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        continue;
                }
                if (tv_tpg->tv_tpg_vhost_count != 0) {
                        mutex_unlock(&tv_tpg->tv_tpg_mutex);
                        continue;
                }
                tv_tport = tv_tpg->tport;

                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
                                kfree(vs_tpg);
                                mutex_unlock(&tv_tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
                        tv_tpg->tv_tpg_vhost_count++;
                        tv_tpg->vhost_scsi = vs;
                        vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
                        smp_mb__after_atomic_inc();
                        match = true;
                }
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
        }

        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
                       sizeof(vs->vs_vhost_wwpn));
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
                        rcu_assign_pointer(vq->private_data, vs_tpg);
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
                ret = 0;
        } else {
                ret = -EEXIST;
        }

        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = vs_tpg;

out:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return ret;
}

static int vhost_scsi_clear_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
{
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
        u8 target;

        mutex_lock(&tcm_vhost_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto err_dev;
                }
        }

        if (!vs->vs_tpg) {
                ret = 0;
                goto err_dev;
        }

        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                target = i;
                tv_tpg = vs->vs_tpg[target];
                if (!tv_tpg)
                        continue;

                mutex_lock(&tv_tpg->tv_tpg_mutex);
                tv_tport = tv_tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
                        goto err_tpg;
                }

                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
                                " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
                                tv_tport->tport_name, tv_tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
                        goto err_tpg;
                }
                tv_tpg->tv_tpg_vhost_count--;
                tv_tpg->vhost_scsi = NULL;
                vs->vs_tpg[target] = NULL;
                match = true;
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        /* Flushing the vhost_work acts as synchronize_rcu */
                        mutex_lock(&vq->mutex);
                        rcu_assign_pointer(vq->private_data, NULL);
                        mutex_unlock(&vq->mutex);
                }
        }
        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return 0;

err_tpg:
        mutex_unlock(&tv_tpg->tv_tpg_mutex);
err_dev:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&tcm_vhost_mutex);
        return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
        if (features & ~VHOST_SCSI_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vs->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vs->dev)) {
                mutex_unlock(&vs->dev.mutex);
                return -EFAULT;
        }
        vs->dev.acked_features = features;
        smp_wmb();
        vhost_scsi_flush(vs);
        mutex_unlock(&vs->dev.mutex);
        return 0;
}

static int vhost_scsi_open(struct inode *inode, struct file *f)
{
        struct vhost_scsi *s;
        struct vhost_virtqueue **vqs;
        int r, i;

        s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                kfree(s);
                return -ENOMEM;
        }

        vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
        vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);

        s->vs_events_nr = 0;
        s->vs_events_missed = false;

        vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
        vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
        s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
        s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
        for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
                vqs[i] = &s->vqs[i].vq;
                s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
        r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);

        tcm_vhost_init_inflight(s, NULL);

        if (r < 0) {
                kfree(vqs);
                kfree(s);
                return r;
        }

        f->private_data = s;
        return 0;
}

static int vhost_scsi_release(struct inode *inode, struct file *f)
{
        struct vhost_scsi *s = f->private_data;
        struct vhost_scsi_target t;

        mutex_lock(&s->dev.mutex);
        memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
        mutex_unlock(&s->dev.mutex);
        vhost_scsi_clear_endpoint(s, &t);
        vhost_dev_stop(&s->dev);
        vhost_dev_cleanup(&s->dev, false);
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(s);
        kfree(s->dev.vqs);
        kfree(s);
        return 0;
}

static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
                                unsigned long arg)
{
        struct vhost_scsi *vs = f->private_data;
        struct vhost_scsi_target backend;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        u32 __user *eventsp = argp;
        u32 events_missed;
        u64 features;
        int r, abi_version = VHOST_SCSI_ABI_VERSION;
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

        switch (ioctl) {
        case VHOST_SCSI_SET_ENDPOINT:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                if (backend.reserved != 0)
                        return -EOPNOTSUPP;

                return vhost_scsi_set_endpoint(vs, &backend);
        case VHOST_SCSI_CLEAR_ENDPOINT:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                if (backend.reserved != 0)
                        return -EOPNOTSUPP;

                return vhost_scsi_clear_endpoint(vs, &backend);
        case VHOST_SCSI_GET_ABI_VERSION:
                if (copy_to_user(argp, &abi_version, sizeof abi_version))
                        return -EFAULT;
                return 0;
        case VHOST_SCSI_SET_EVENTS_MISSED:
                if (get_user(events_missed, eventsp))
                        return -EFAULT;
                mutex_lock(&vq->mutex);
                vs->vs_events_missed = events_missed;
                mutex_unlock(&vq->mutex);
                return 0;
        case VHOST_SCSI_GET_EVENTS_MISSED:
                mutex_lock(&vq->mutex);
                events_missed = vs->vs_events_missed;
                mutex_unlock(&vq->mutex);
                if (put_user(events_missed, eventsp))
                        return -EFAULT;
                return 0;
        case VHOST_GET_FEATURES:
                features = VHOST_SCSI_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                return vhost_scsi_set_features(vs, features);
        default:
                mutex_lock(&vs->dev.mutex);
                r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
                /* TODO: flush backend after dev ioctl. */
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
                mutex_unlock(&vs->dev.mutex);
                return r;
        }
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
                                unsigned long arg)
{
        return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_scsi_release,
        .unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_scsi_compat_ioctl,
#endif
        .open           = vhost_scsi_open,
        .llseek         = noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
        MISC_DYNAMIC_MINOR,
        "vhost-scsi",
        &vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
        return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
        return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return "SAS";
        case SCSI_PROTOCOL_FCP:
                return "FCP";
        case SCSI_PROTOCOL_ISCSI:
                return "iSCSI";
        default:
                break;
        }

        return "Unknown";
}

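/*
 * Send a transport-reset event when a LUN is (un)plugged. Only done if
 * the guest negotiated VIRTIO_SCSI_F_HOTPLUG; otherwise it would not be
 * watching the event queue for hotplug notifications.
 */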
static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
        struct se_lun *lun, bool plug)
{
        struct vhost_scsi *vs = tpg->vhost_scsi;
        struct vhost_virtqueue *vq;
        u32 reason;

        if (!vs)
                return;

        mutex_lock(&vs->dev.mutex);
        if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
                mutex_unlock(&vs->dev.mutex);
                return;
        }

        if (plug)
                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
        else
                reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
        tcm_vhost_send_evt(vs, tpg, lun,
                        VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);
        mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
        tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
        tcm_vhost_do_plug(tpg, lun, false);
}

static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
        struct se_lun *lun)
{
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);

        mutex_lock(&tcm_vhost_mutex);

        mutex_lock(&tv_tpg->tv_tpg_mutex);
        tv_tpg->tv_tpg_port_count++;
        mutex_unlock(&tv_tpg->tv_tpg_mutex);

        tcm_vhost_hotplug(tv_tpg, lun);

        mutex_unlock(&tcm_vhost_mutex);

        return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
        struct se_lun *lun)
{
        struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);

        mutex_lock(&tcm_vhost_mutex);

        mutex_lock(&tv_tpg->tv_tpg_mutex);
        tv_tpg->tv_tpg_port_count--;
        mutex_unlock(&tv_tpg->tv_tpg_mutex);

        tcm_vhost_hotunplug(tv_tpg, lun);

        mutex_unlock(&tcm_vhost_mutex);
}
1504
1505 static struct se_node_acl *tcm_vhost_make_nodeacl(
1506         struct se_portal_group *se_tpg,
1507         struct config_group *group,
1508         const char *name)
1509 {
1510         struct se_node_acl *se_nacl, *se_nacl_new;
1511         struct tcm_vhost_nacl *nacl;
1512         u64 wwpn = 0;
1513         u32 nexus_depth;
1514
1515         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1516                 return ERR_PTR(-EINVAL); */
1517         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1518         if (!se_nacl_new)
1519                 return ERR_PTR(-ENOMEM);
1520
1521         nexus_depth = 1;
1522         /*
1523          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1524          * when converting a NodeACL from demo mode -> explicit
1525          */
1526         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1527                                 name, nexus_depth);
1528         if (IS_ERR(se_nacl)) {
1529                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1530                 return se_nacl;
1531         }
1532         /*
1533          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1534          */
1535         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1536         nacl->iport_wwpn = wwpn;
1537
1538         return se_nacl;
1539 }
1540
1541 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1542 {
1543         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1544                                 struct tcm_vhost_nacl, se_node_acl);
1545         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1546         kfree(nacl);
1547 }
1548
1549 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1550         const char *name)
1551 {
1552         struct se_portal_group *se_tpg;
1553         struct tcm_vhost_nexus *tv_nexus;
1554
1555         mutex_lock(&tv_tpg->tv_tpg_mutex);
1556         if (tv_tpg->tpg_nexus) {
1557                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1558                 pr_debug("tv_tpg->tpg_nexus already exists\n");
1559                 return -EEXIST;
1560         }
1561         se_tpg = &tv_tpg->se_tpg;
1562
1563         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1564         if (!tv_nexus) {
1565                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1566                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1567                 return -ENOMEM;
1568         }
1569         /*
1570          *  Initialize the struct se_session pointer
1571          */
1572         tv_nexus->tvn_se_sess = transport_init_session();
1573         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1574                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1575                 kfree(tv_nexus);
1576                 return -ENOMEM;
1577         }
1578         /*
1579          * Since we are running in 'demo mode' this call will generate a
1580          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1581          * the SCSI Initiator port name of the passed configfs group 'name'.
1582          */
1583         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1584                                 se_tpg, (unsigned char *)name);
1585         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1586                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1587                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1588                                 " for %s\n", name);
1589                 transport_free_session(tv_nexus->tvn_se_sess);
1590                 kfree(tv_nexus);
1591                 return -ENOMEM;
1592         }
1593         /*
1594          * Now register the TCM vhost virtual I_T Nexus as active with the
1595          * call to __transport_register_session()
1596          */
1597         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1598                         tv_nexus->tvn_se_sess, tv_nexus);
1599         tv_tpg->tpg_nexus = tv_nexus;
1600
1601         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1602         return 0;
1603 }
1604
1605 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1606 {
1607         struct se_session *se_sess;
1608         struct tcm_vhost_nexus *tv_nexus;
1609
1610         mutex_lock(&tpg->tv_tpg_mutex);
1611         tv_nexus = tpg->tpg_nexus;
1612         if (!tv_nexus) {
1613                 mutex_unlock(&tpg->tv_tpg_mutex);
1614                 return -ENODEV;
1615         }
1616
1617         se_sess = tv_nexus->tvn_se_sess;
1618         if (!se_sess) {
1619                 mutex_unlock(&tpg->tv_tpg_mutex);
1620                 return -ENODEV;
1621         }
1622
1623         if (tpg->tv_tpg_port_count != 0) {
1624                 mutex_unlock(&tpg->tv_tpg_mutex);
1625                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1626                         " active TPG port count: %d\n",
1627                         tpg->tv_tpg_port_count);
1628                 return -EBUSY;
1629         }
1630
1631         if (tpg->tv_tpg_vhost_count != 0) {
1632                 mutex_unlock(&tpg->tv_tpg_mutex);
1633                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1634                         " active TPG vhost count: %d\n",
1635                         tpg->tv_tpg_vhost_count);
1636                 return -EBUSY;
1637         }
1638
1639         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1640                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1641                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1642         /*
1643          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1644          */
1645         transport_deregister_session(tv_nexus->tvn_se_sess);
1646         tpg->tpg_nexus = NULL;
1647         mutex_unlock(&tpg->tv_tpg_mutex);
1648
1649         kfree(tv_nexus);
1650         return 0;
1651 }
1652
1653 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1654         char *page)
1655 {
1656         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1657                                 struct tcm_vhost_tpg, se_tpg);
1658         struct tcm_vhost_nexus *tv_nexus;
1659         ssize_t ret;
1660
1661         mutex_lock(&tv_tpg->tv_tpg_mutex);
1662         tv_nexus = tv_tpg->tpg_nexus;
1663         if (!tv_nexus) {
1664                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1665                 return -ENODEV;
1666         }
1667         ret = snprintf(page, PAGE_SIZE, "%s\n",
1668                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1669         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1670
1671         return ret;
1672 }
1673
1674 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1675         const char *page,
1676         size_t count)
1677 {
1678         struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1679                                 struct tcm_vhost_tpg, se_tpg);
1680         struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1681         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1682         int ret;
1683         /*
1684          * Shut down the active I_T nexus if 'NULL' is passed.
1685          */
1686         if (!strncmp(page, "NULL", 4)) {
1687                 ret = tcm_vhost_drop_nexus(tv_tpg);
1688                 return (!ret) ? count : ret;
1689         }
1690         /*
1691          * Otherwise make sure the passed virtual Initiator port WWN matches
1692          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1693          * tcm_vhost_make_nexus().
1694          */
1695         if (strlen(page) >= TCM_VHOST_NAMELEN) {
1696                 pr_err("Emulated NAA SAS Address: %s, exceeds"
1697                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
1698                 return -EINVAL;
1699         }
1700         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1701
1702         ptr = strstr(i_port, "naa.");
1703         if (ptr) {
1704                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1705                         pr_err("Passed SAS Initiator Port %s does not"
1706                                 " match target port protoid: %s\n", i_port,
1707                                 tcm_vhost_dump_proto_id(tport_wwn));
1708                         return -EINVAL;
1709                 }
1710                 port_ptr = &i_port[0];
1711                 goto check_newline;
1712         }
1713         ptr = strstr(i_port, "fc.");
1714         if (ptr) {
1715                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1716                         pr_err("Passed FCP Initiator Port %s does not"
1717                                 " match target port protoid: %s\n", i_port,
1718                                 tcm_vhost_dump_proto_id(tport_wwn));
1719                         return -EINVAL;
1720                 }
1721                 port_ptr = &i_port[3]; /* Skip over "fc." */
1722                 goto check_newline;
1723         }
1724         ptr = strstr(i_port, "iqn.");
1725         if (ptr) {
1726                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1727                         pr_err("Passed iSCSI Initiator Port %s does not"
1728                                 " match target port protoid: %s\n", i_port,
1729                                 tcm_vhost_dump_proto_id(tport_wwn));
1730                         return -EINVAL;
1731                 }
1732                 port_ptr = &i_port[0];
1733                 goto check_newline;
1734         }
1735         pr_err("Unable to locate prefix for emulated Initiator Port:"
1736                         " %s\n", i_port);
1737         return -EINVAL;
1738         /*
1739          * Clear any trailing newline for the NAA WWN
1740          */
1741 check_newline:
1742         if (i_port[strlen(i_port)-1] == '\n')
1743                 i_port[strlen(i_port)-1] = '\0';
1744
1745         ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1746         if (ret < 0)
1747                 return ret;
1748
1749         return count;
1750 }
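     /*
      * Example configfs usage driving the show/store handlers above
      * (WWPN values are illustrative):
      *
      *   echo naa.60014055cf3a18e0 > \
      *     /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
      *   cat /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
      *   echo NULL > \
      *     /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
      *
      * where the final write drops the active I_T nexus again.
      */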
1751
1752 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1753
1754 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1755         &tcm_vhost_tpg_nexus.attr,
1756         NULL,
1757 };
1758
1759 static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1760         struct config_group *group,
1761         const char *name)
1762 {
1763         struct tcm_vhost_tport *tport = container_of(wwn,
1764                         struct tcm_vhost_tport, tport_wwn);
1765
1766         struct tcm_vhost_tpg *tpg;
1767         unsigned long tpgt;
1768         int ret;
1769
1770         if (strstr(name, "tpgt_") != name)
1771                 return ERR_PTR(-EINVAL);
1772         if (kstrtoul(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
1773                 return ERR_PTR(-EINVAL);
1774
1775         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1776         if (!tpg) {
1777                 pr_err("Unable to allocate struct tcm_vhost_tpg");
1778                 return ERR_PTR(-ENOMEM);
1779         }
1780         mutex_init(&tpg->tv_tpg_mutex);
1781         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1782         tpg->tport = tport;
1783         tpg->tport_tpgt = tpgt;
1784
1785         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1786                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1787         if (ret < 0) {
1788                 kfree(tpg);
1789                 return NULL;
1790         }
1791         mutex_lock(&tcm_vhost_mutex);
1792         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1793         mutex_unlock(&tcm_vhost_mutex);
1794
1795         return &tpg->se_tpg;
1796 }
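     /*
      * Reached via configfs when a TPG directory is created under a
      * vhost WWN, e.g. (illustrative path):
      *
      *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
      */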
1797
1798 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1799 {
1800         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1801                                 struct tcm_vhost_tpg, se_tpg);
1802
1803         mutex_lock(&tcm_vhost_mutex);
1804         list_del(&tpg->tv_tpg_list);
1805         mutex_unlock(&tcm_vhost_mutex);
1806         /*
1807          * Release the virtual I_T Nexus for this vhost TPG
1808          */
1809         tcm_vhost_drop_nexus(tpg);
1810         /*
1811          * Deregister the se_tpg from TCM.
1812          */
1813         core_tpg_deregister(se_tpg);
1814         kfree(tpg);
1815 }
1816
1817 static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1818         struct config_group *group,
1819         const char *name)
1820 {
1821         struct tcm_vhost_tport *tport;
1822         char *ptr;
1823         u64 wwpn = 0;
1824         int off = 0;
1825
1826         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1827                 return ERR_PTR(-EINVAL); */
1828
1829         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1830         if (!tport) {
1831                 pr_err("Unable to allocate struct tcm_vhost_tport");
1832                 return ERR_PTR(-ENOMEM);
1833         }
1834         tport->tport_wwpn = wwpn;
1835         /*
1836          * Determine the emulated Protocol Identifier and Target Port Name
1837          * based on the incoming configfs directory name.
1838          */
1839         ptr = strstr(name, "naa.");
1840         if (ptr) {
1841                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1842                 goto check_len;
1843         }
1844         ptr = strstr(name, "fc.");
1845         if (ptr) {
1846                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1847                 off = 3; /* Skip over "fc." */
1848                 goto check_len;
1849         }
1850         ptr = strstr(name, "iqn.");
1851         if (ptr) {
1852                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1853                 goto check_len;
1854         }
1855
1856         pr_err("Unable to locate prefix for emulated Target Port:"
1857                         " %s\n", name);
1858         kfree(tport);
1859         return ERR_PTR(-EINVAL);
1860
1861 check_len:
1862         if (strlen(name) >= TCM_VHOST_NAMELEN) {
1863                 pr_err("Emulated %s Address: %s, exceeds"
1864                         " max: %d\n", tcm_vhost_dump_proto_id(tport), name,
1865                         TCM_VHOST_NAMELEN);
1866                 kfree(tport);
1867                 return ERR_PTR(-EINVAL);
1868         }
1869         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1870
1871         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1872                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1873
1874         return &tport->tport_wwn;
1875 }
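     /*
      * Correspondingly, creating the WWN directory itself lands here;
      * the name prefix selects the emulated protocol (illustrative):
      *
      *   mkdir /sys/kernel/config/target/vhost/naa.600140554cf3a18e  (SAS)
      *   mkdir /sys/kernel/config/target/vhost/fc.20000000c9a00001   (FCP)
      *   mkdir /sys/kernel/config/target/vhost/iqn.2013-05.org.test  (iSCSI)
      */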
1876
1877 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1878 {
1879         struct tcm_vhost_tport *tport = container_of(wwn,
1880                                 struct tcm_vhost_tport, tport_wwn);
1881
1882         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1883                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1884                 tport->tport_name);
1885
1886         kfree(tport);
1887 }
1888
1889 static ssize_t tcm_vhost_wwn_show_attr_version(
1890         struct target_fabric_configfs *tf,
1891         char *page)
1892 {
1893         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1894                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1895                 utsname()->machine);
1896 }
1897
1898 TF_WWN_ATTR_RO(tcm_vhost, version);
1899
1900 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1901         &tcm_vhost_wwn_version.attr,
1902         NULL,
1903 };
1904
1905 static struct target_core_fabric_ops tcm_vhost_ops = {
1906         .get_fabric_name                = tcm_vhost_get_fabric_name,
1907         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
1908         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
1909         .tpg_get_tag                    = tcm_vhost_get_tag,
1910         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
1911         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
1912         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
1913         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
1914         .tpg_check_demo_mode            = tcm_vhost_check_true,
1915         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
1916         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
1917         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
1918         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
1919         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
1920         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
1921         .release_cmd                    = tcm_vhost_release_cmd,
1922         .shutdown_session               = tcm_vhost_shutdown_session,
1923         .close_session                  = tcm_vhost_close_session,
1924         .sess_get_index                 = tcm_vhost_sess_get_index,
1925         .sess_get_initiator_sid         = NULL,
1926         .write_pending                  = tcm_vhost_write_pending,
1927         .write_pending_status           = tcm_vhost_write_pending_status,
1928         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
1929         .get_task_tag                   = tcm_vhost_get_task_tag,
1930         .get_cmd_state                  = tcm_vhost_get_cmd_state,
1931         .queue_data_in                  = tcm_vhost_queue_data_in,
1932         .queue_status                   = tcm_vhost_queue_status,
1933         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
1934         /*
1935          * Setup callers for generic logic in target_core_fabric_configfs.c
1936          */
1937         .fabric_make_wwn                = tcm_vhost_make_tport,
1938         .fabric_drop_wwn                = tcm_vhost_drop_tport,
1939         .fabric_make_tpg                = tcm_vhost_make_tpg,
1940         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
1941         .fabric_post_link               = tcm_vhost_port_link,
1942         .fabric_pre_unlink              = tcm_vhost_port_unlink,
1943         .fabric_make_np                 = NULL,
1944         .fabric_drop_np                 = NULL,
1945         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
1946         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
1947 };
1948
1949 static int tcm_vhost_register_configfs(void)
1950 {
1951         struct target_fabric_configfs *fabric;
1952         int ret;
1953
1954         pr_debug("TCM_VHOST fabric module %s on %s/%s"
1955                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1956                 utsname()->machine);
1957         /*
1958          * Register the top level struct config_item_type with TCM core
1959          */
1960         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
1961         if (IS_ERR(fabric)) {
1962                 pr_err("target_fabric_configfs_init() failed\n");
1963                 return PTR_ERR(fabric);
1964         }
1965         /*
1966          * Setup fabric->tf_ops from our local tcm_vhost_ops
1967          */
1968         fabric->tf_ops = tcm_vhost_ops;
1969         /*
1970          * Setup default attribute lists for various fabric->tf_cit_tmpl
1971          */
1972         TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
1973         TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
1974         TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1975         TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1976         TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1977         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1978         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1979         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1980         TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1981         /*
1982          * Register the fabric for use within TCM
1983          */
1984         ret = target_fabric_configfs_register(fabric);
1985         if (ret < 0) {
1986                 pr_err("target_fabric_configfs_register() failed"
1987                                 " for TCM_VHOST\n");
1988                 return ret;
1989         }
1990         /*
1991          * Setup our local pointer to *fabric
1992          */
1993         tcm_vhost_fabric_configfs = fabric;
1994         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
1995         return 0;
1996 }
1997
1998 static void tcm_vhost_deregister_configfs(void)
1999 {
2000         if (!tcm_vhost_fabric_configfs)
2001                 return;
2002
2003         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2004         tcm_vhost_fabric_configfs = NULL;
2005         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2006 }
2007
2008 static int __init tcm_vhost_init(void)
2009 {
2010         int ret = -ENOMEM;
2011         /*
2012          * Use our own dedicated workqueue for submitting I/O into
2013          * target core to avoid contention within system_wq.
2014          */
2015         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2016         if (!tcm_vhost_workqueue)
2017                 goto out;
2018
2019         ret = vhost_scsi_register();
2020         if (ret < 0)
2021                 goto out_destroy_workqueue;
2022
2023         ret = tcm_vhost_register_configfs();
2024         if (ret < 0)
2025                 goto out_vhost_scsi_deregister;
2026
2027         return 0;
2028
2029 out_vhost_scsi_deregister:
2030         vhost_scsi_deregister();
2031 out_destroy_workqueue:
2032         destroy_workqueue(tcm_vhost_workqueue);
2033 out:
2034         return ret;
2035 }
2036
2037 static void tcm_vhost_exit(void)
2038 {
2039         tcm_vhost_deregister_configfs();
2040         vhost_scsi_deregister();
2041         destroy_workqueue(tcm_vhost_workqueue);
2042 }
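     /*
      * Module load ("modprobe tcm_vhost") therefore registers both the
      * /dev/vhost-scsi misc device and the "vhost" configfs fabric;
      * unload tears them down in the reverse order.
      */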
2043
2044 MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
2045 MODULE_LICENSE("GPL");
2046 module_init(tcm_vhost_init);
2047 module_exit(tcm_vhost_exit);