/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN          8
#define ISER_MAX_RX_CQ_LEN      (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN      (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);

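/*
 * QP async event handler: IB_EVENT_COMM_EST is forwarded to the RDMA CM
 * via rdma_notify() so connection establishment can make progress;
 * everything else is only logged.
 */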
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = (struct isert_conn *)context;

        pr_err("isert_qp_event_callback event: %d\n", e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
                break;
        default:
                break;
        }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
        int ret;

        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
                pr_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
        pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
        pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

        return 0;
}

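/*
 * Create the connection QP: pick the least-loaded completion vector
 * (tracked via cq_active_qps[] under device_list_mutex), then size the
 * send/recv queues from the ISERT_QP_MAX_* limits.
 */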
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
        int ret, index, min_index = 0;

        mutex_lock(&device_list_mutex);
        for (index = 0; index < device->cqs_used; index++)
                if (device->cq_active_qps[index] <
                    device->cq_active_qps[min_index])
                        min_index = index;
        device->cq_active_qps[min_index]++;
        pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
        mutex_unlock(&device_list_mutex);

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = device->dev_tx_cq[min_index];
        attr.recv_cq = device->dev_rx_cq[min_index];
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READ..
         */
        attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
        isert_conn->max_sge = attr.cap.max_send_sge;

        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;

        pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                 cma_id->device);
        pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
                 isert_conn->conn_pd->device);

        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp() failed: %d\n", ret);
                return ret;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

        return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
        pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

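/*
 * Allocate the per-connection ring of RX descriptors and DMA-map each
 * one for DMA_FROM_DEVICE; on a mapping failure, unmap everything that
 * was mapped so far and free the array.
 */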
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!isert_conn->conn_rx_descs)
                goto fail;

        rx_desc = isert_conn->conn_rx_descs;

        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = isert_conn->conn_mr->lkey;
        }

        isert_conn->conn_rx_desc_head = 0;
        return 0;

dma_map_fail:
        rx_desc = isert_conn->conn_rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
fail:
        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->conn_rx_descs)
                return;

        rx_desc = isert_conn->conn_rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

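/*
 * One-time per-device setup: query device attributes, select fastreg
 * vs. dma_mr RDMA handlers based on IB_DEVICE_MEM_MGT_EXTENSIONS, and
 * create one RX/TX CQ pair per completion vector in use (capped by
 * ISERT_MAX_CQ and the number of online CPUs).
 */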
static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr;
        int ret = 0, i, j;

        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;

        /* assign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                device->use_fastreg = 1;
                device->reg_rdma_mem = isert_reg_rdma;
                device->unreg_rdma_mem = isert_unreg_rdma;
        } else {
                device->use_fastreg = 0;
                device->reg_rdma_mem = isert_map_rdma;
                device->unreg_rdma_mem = isert_unmap_cmd;
        }

        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
        pr_debug("Using %d CQs, device %s supports %d vectors, "
                 "fast registration: %d\n",
                 device->cqs_used, device->ib_device->name,
                 device->ib_device->num_comp_vectors, device->use_fastreg);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
                pr_err("Unable to allocate device->cq_desc\n");
                return -ENOMEM;
        }
        cq_desc = device->cq_desc;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;

                INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
                        goto out_cq;
                }

                INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
                        goto out_cq;
                }

                ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;

                ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }

        return 0;

out_cq:
        /* Destroy everything created so far, including any CQ from the
         * partially completed iteration i; the NULL checks skip the one
         * that failed to create. */
        for (j = 0; j <= i; j++) {
                cq_desc = &device->cq_desc[j];

                if (device->dev_rx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_rx_work);
                        ib_destroy_cq(device->dev_rx_cq[j]);
                }
                if (device->dev_tx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_tx_work);
                        ib_destroy_cq(device->dev_tx_cq[j]);
                }
        }
        kfree(device->cq_desc);

        return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        struct isert_cq_desc *cq_desc;
        int i;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc = &device->cq_desc[i];

                cancel_work_sync(&cq_desc->cq_rx_work);
                cancel_work_sync(&cq_desc->cq_tx_work);
                ib_destroy_cq(device->dev_rx_cq[i]);
                ib_destroy_cq(device->dev_tx_cq[i]);
                device->dev_rx_cq[i] = NULL;
                device->dev_tx_cq[i] = NULL;
        }

        kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

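/*
 * Look up (or create) the isert_device for this CM id's IB device.
 * Devices are cached on device_list keyed by node_guid and refcounted,
 * so multiple connections on the same HCA share one set of CQs.
 */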
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        mutex_unlock(&device_list_mutex);

        return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc, *tmp;
        int i = 0;

        if (list_empty(&isert_conn->conn_fr_pool))
                return;

        pr_debug("Freeing conn %p fastreg pool\n", isert_conn);

        list_for_each_entry_safe(fr_desc, tmp,
                                 &isert_conn->conn_fr_pool, list) {
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
                kfree(fr_desc);
                ++i;
        }

        if (i < isert_conn->conn_fr_pool_size)
                pr_warn("Pool still has %d regions registered\n",
                        isert_conn->conn_fr_pool_size - i);
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
                     struct fast_reg_descriptor *fr_desc)
{
        fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                         ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_frpl)) {
                pr_err("Failed to allocate data frpl err=%ld\n",
                       PTR_ERR(fr_desc->data_frpl));
                return PTR_ERR(fr_desc->data_frpl);
        }

        fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_mr)) {
                pr_err("Failed to allocate data frmr err=%ld\n",
                       PTR_ERR(fr_desc->data_mr));
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                return PTR_ERR(fr_desc->data_mr);
        }
        pr_debug("Create fr_desc %p page_list %p\n",
                 fr_desc, fr_desc->data_frpl->page_list);

        fr_desc->valid = true;

        return 0;
}

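/*
 * Pre-allocate one fast registration descriptor per outstanding command
 * (ISCSI_DEF_XMIT_CMDS_MAX), so the RDMA fast path does not have to
 * allocate an MR or page list at I/O time.
 */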
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
        int i, ret;

        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
        isert_conn->conn_fr_pool_size = 0;
        for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        pr_err("Failed to allocate fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }

                ret = isert_create_fr_desc(device->ib_device,
                                           isert_conn->conn_pd, fr_desc);
                if (ret) {
                        pr_err("Failed to create fastreg descriptor err=%d\n",
                               ret);
                        kfree(fr_desc);
                        goto err;
                }

                list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                isert_conn->conn_fr_pool_size++;
        }

        pr_debug("Created conn %p fastreg pool size=%d\n",
                 isert_conn, isert_conn->conn_fr_pool_size);

        return 0;

err:
        isert_conn_free_fastreg_pool(isert_conn);
        return ret;
}

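/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate and initialize the
 * isert_conn, DMA-map the login request/response buffers, take a
 * reference on the shared isert_device, set up the PD/MR and QP, then
 * queue the connection on the np accept list and wake the login thread.
 */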
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct iscsi_np *np = cma_id->context;
        struct isert_np *isert_np = np->np_context;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;

        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);

        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
                pr_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
        init_waitqueue_head(&isert_conn->conn_wait);
        init_waitqueue_head(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        mutex_init(&isert_conn->conn_comp_mutex);
        spin_lock_init(&isert_conn->conn_lock);

        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
        pr_debug("Using responder_resources: %u initiator_depth: %u\n",
                 isert_conn->responder_resources, isert_conn->initiator_depth);

        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
                pr_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }

        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;
        pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                 isert_conn->login_buf, isert_conn->login_req_buf,
                 isert_conn->login_rsp_buf);

        isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                                (void *)isert_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                       ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                                        (void *)isert_conn->login_rsp_buf,
                                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                       ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
        }

        device = isert_device_find_by_ib_dev(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out_rsp_dma_map;
        }

        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
                ret = PTR_ERR(isert_conn->conn_pd);
                pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_pd;
        }

        isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
                                           IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(isert_conn->conn_mr)) {
                ret = PTR_ERR(isert_conn->conn_mr);
                pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_mr;
        }

        if (device->use_fastreg) {
                ret = isert_conn_create_fastreg_pool(isert_conn);
                if (ret) {
                        pr_err("Conn: %p failed to create fastreg pool\n",
                               isert_conn);
                        goto out_fastreg;
                }
        }

        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;

        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);

        pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
        wake_up(&isert_np->np_accept_wq);
        return 0;

out_conn_dev:
        if (device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);
out_fastreg:
        ib_dereg_mr(isert_conn->conn_mr);
out_mr:
        ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
        isert_device_try_release(device);
out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
        kfree(isert_conn->login_buf);
out:
        kfree(isert_conn);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_device *device = isert_conn->conn_device;
        int cq_index;

        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

        if (device && device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);

        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
                isert_conn->conn_device->cq_active_qps[cq_index]--;

                rdma_destroy_qp(isert_conn->conn_cm_id);
        }

        isert_free_rx_descriptors(isert_conn);
        rdma_destroy_id(isert_conn->conn_cm_id);

        ib_dereg_mr(isert_conn->conn_mr);
        ib_dealloc_pd(isert_conn->conn_pd);

        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
                ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN,
                                    DMA_FROM_DEVICE);
                kfree(isert_conn->login_buf);
        }
        kfree(isert_conn);

        if (device)
                isert_device_try_release(device);

        pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);

        pr_debug("Calling isert_connect_release for final kref %s/%d\n",
                 current->comm, current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

static void
isert_disconnect_work(struct work_struct *work)
{
        struct isert_conn *isert_conn = container_of(work,
                                struct isert_conn, conn_logout_work);

        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
        mutex_lock(&isert_conn->conn_mutex);
        isert_conn->state = ISER_CONN_DOWN;

        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
                mutex_unlock(&isert_conn->conn_mutex);
                goto wake_up;
        }
        if (!isert_conn->conn_cm_id) {
                mutex_unlock(&isert_conn->conn_mutex);
                isert_put_conn(isert_conn);
                return;
        }
        if (!isert_conn->logout_posted) {
                pr_debug("Calling rdma_disconnect for !logout_posted from"
                         " isert_disconnect_work\n");
                rdma_disconnect(isert_conn->conn_cm_id);
                mutex_unlock(&isert_conn->conn_mutex);
                iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                goto wake_up;
        }
        mutex_unlock(&isert_conn->conn_mutex);

wake_up:
        wake_up(&isert_conn->conn_wait);
        isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        int ret = 0;

        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
                isert_disconnected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_ADDR_CHANGE:
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
                pr_err("Unknown RDMA CMA event: %d\n", event->event);
                break;
        }

        if (ret != 0) {
                pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
                       event->event, ret);
                dump_stack();
        }

        return ret;
}

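/*
 * Post up to 'count' receive work requests as a single chained list,
 * advancing conn_rx_desc_head around the descriptor ring (the ring size
 * ISERT_QP_MAX_RECV_DTOS must be a power of two for the mask to work).
 */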
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ret;
        unsigned int rx_head = isert_conn->conn_rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc         = &isert_conn->conn_rx_descs[rx_head];
                rx_wr->wr_id    = (unsigned long)rx_desc;
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
                rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        isert_conn->post_recv_buf_count += count;
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                                &rx_wr_failed);
        if (ret) {
                pr_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
                pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_send_wr send_wr, *send_wr_failed;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        send_wr.next    = NULL;
        send_wr.wr_id   = (unsigned long)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode  = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        atomic_inc(&isert_conn->post_send_buf_count);

        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
        if (ret) {
                pr_err("ib_post_send() failed, ret: %d\n", ret);
                atomic_dec(&isert_conn->post_send_buf_count);
        }

        return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;
        tx_desc->isert_cmd = isert_cmd;

        if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
                tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
                pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                pr_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr  = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

        pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
                 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
                 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

        return 0;
}

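/*
 * Fill in a SEND work request for a response PDU. When coalescing is
 * enabled, only every ISERT_COMP_BATCH_COUNT-th send is signaled; the
 * unsignaled descriptors are collected on conn_comp_llist and attached
 * to the next signaled descriptor as comp_llnode_batch.
 */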
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                   struct ib_send_wr *send_wr, bool coalesce)
{
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
        /*
         * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
         * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
         */
        mutex_lock(&isert_conn->conn_comp_mutex);
        if (coalesce &&
            ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
                llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
                mutex_unlock(&isert_conn->conn_comp_mutex);
                return;
        }
        isert_conn->conn_comp_batch = 0;
        tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
        mutex_unlock(&isert_conn->conn_comp_mutex);

        send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_fail;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_req_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_mr->lkey;

        pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
                sge.addr, sge.length, sge.lkey);

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
                pr_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        }

        pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
        return ret;
}

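/*
 * Build and post a login response: copy the iSCSI header into the
 * dedicated login TX descriptor, attach the response payload as a
 * second SGE, and once login completes switch over to the full RX
 * descriptor ring before sending the final response.
 */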
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr    = isert_conn->login_rsp_dma;
                tx_dsg->length  = length;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        isert_conn->state = ISER_CONN_UP;
                        goto post_send;
                }

                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

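/*
 * Copy a received login request PDU into conn->conn_login. The first
 * request seeds the iscsi_login fields (ISID, CID, TSIH, CmdSN, etc.)
 * and completes conn_login_comp so the login thread can proceed;
 * subsequent requests are handed to the login workqueue.
 */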
static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                   struct isert_conn *isert_conn)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        if (!login) {
                pr_err("conn->conn_login is NULL\n");
                dump_stack();
                return;
        }

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)&rx_desc->iscsi_header;
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage =
                        (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
                         >> 2;
                login->version_min      = login_req->min_version;
                login->version_max      = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn           = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag    = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid              = be16_to_cpu(login_req->cid);
                login->tsih             = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);

        if (login->first_request) {
                complete(&isert_conn->conn_login_comp);
                return;
        }
        schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_cmd *isert_cmd;
        struct iscsi_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
                pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->conn = isert_conn;
        isert_cmd->iscsi_cmd = cmd;

        return cmd;
}

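/*
 * Handle a SCSI command PDU: set up the command via the iscsi-target
 * helpers, then copy any immediate data straight from the RX descriptor
 * into the command's scatterlist before sequencing the command.
 */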
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        struct scatterlist *sg;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;

        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                return 0;
        } else if (rc > 0) {
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

        pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
                 sg, sg_nents, &rx_desc->data[0], imm_data_len);

        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);

        return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                pr_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
                 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                pr_err("Received unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
                 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);

        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
        int rc;

        rc = iscsit_setup_nop_out(conn, cmd, hdr);
        if (rc < 0)
                return rc;
        /*
         * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
         */

        return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
{
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        unsigned char *text_in;

        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;

        text_in = kzalloc(payload_length, GFP_KERNEL);
        if (!text_in) {
                pr_err("Unable to allocate text_in of payload_length: %u\n",
                       payload_length);
                return -ENOMEM;
        }
        cmd->text_in_ptr = text_in;

        memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

        return iscsit_process_text_cmd(conn, cmd, hdr);
}

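/*
 * Demultiplex a received iSCSI PDU by opcode. For SCSI commands the
 * remote read/write STags and VAs extracted from the iSER header by
 * the caller are stashed in the isert_cmd for the later RDMA transfer.
 */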
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_session *sess = conn->sess;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        /* Discovery sessions may only carry Text and Logout requests. */
        if (sess->sess_ops->SessionType &&
           (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
                pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
                       " ignoring\n", opcode);
                return 0;
        }

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
                                           rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                if (ret > 0)
                        wait_for_completion_timeout(&conn->conn_logout_comp,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
        case ISCSI_OP_TEXT:
                cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
                                            rx_desc, (struct iscsi_text *)hdr);
                break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
        struct iser_hdr *iser_hdr = &rx_desc->iser_header;
        uint64_t read_va = 0, write_va = 0;
        uint32_t read_stag = 0, write_stag = 0;
        int rc;

        switch (iser_hdr->flags & 0xF0) {
        case ISCSI_CTRL:
                if (iser_hdr->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_hdr->read_stag);
                        read_va = be64_to_cpu(iser_hdr->read_va);
                        pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
                                 read_stag, (unsigned long long)read_va);
                }
                if (iser_hdr->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_hdr->write_stag);
                        write_va = be64_to_cpu(iser_hdr->write_va);
                        pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
                                 write_stag, (unsigned long long)write_va);
                }

                pr_debug("ISER ISCSI_CTRL PDU\n");
                break;
        case ISER_HELLO:
                pr_err("iSER Hello message\n");
                break;
        default:
                pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
                break;
        }

        rc = isert_rx_opcode(isert_conn, rx_desc,
                             read_stag, read_va, write_stag, write_va);
}

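/*
 * RX completion path: sync the buffer for the CPU, dispatch either the
 * login path or the normal PDU path, then replenish the receive queue
 * in ISERT_MIN_POSTED_RX batches once enough buffers have drained.
 */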
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                    unsigned long xfer_len)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_hdr *hdr;
        u64 rx_dma;
        int rx_buflen, outstanding;

        if ((char *)desc == isert_conn->login_req_buf) {
                rx_dma = isert_conn->login_req_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
                pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        } else {
                rx_dma = desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
                pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        }

        ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));

        if ((char *)desc == isert_conn->login_req_buf)
                isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
                                   isert_conn);
        else
                isert_rx_do_work(desc, isert_conn);

        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);

        isert_conn->post_recv_buf_count--;
        pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
                 isert_conn->post_recv_buf_count);

        if ((char *)desc == isert_conn->login_req_buf)
                return;

        outstanding = isert_conn->post_recv_buf_count;
        if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
                int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
                                ISERT_MIN_POSTED_RX);
                err = isert_post_recv(isert_conn, count);
                if (err) {
                        pr_err("isert_post_recv() count: %d failed, %d\n",
                               count, err);
                }
        }
}

1394 static void
1395 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1396 {
1397         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1398         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1399
1400         pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1401         if (wr->sge) {
1402                 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1403                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1404                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1405                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1406                 wr->sge = NULL;
1407         }
1408
1409         if (wr->send_wr) {
1410                 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1411                 kfree(wr->send_wr);
1412                 wr->send_wr = NULL;
1413         }
1414
1415         if (wr->ib_sge) {
1416                 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1417                 kfree(wr->ib_sge);
1418                 wr->ib_sge = NULL;
1419         }
1420 }
1421
1422 static void
1423 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1424 {
1425         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1426         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1427         LIST_HEAD(unmap_list);
1428
1429         pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
1430
1431         if (wr->fr_desc) {
1432                 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
1433                          isert_cmd, wr->fr_desc);
1434                 spin_lock_bh(&isert_conn->conn_lock);
1435                 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
1436                 spin_unlock_bh(&isert_conn->conn_lock);
1437                 wr->fr_desc = NULL;
1438         }
1439
1440         if (wr->sge) {
1441                 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
1442                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1443                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1444                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1445                 wr->sge = NULL;
1446         }
1447
1448         wr->ib_sge = NULL;
1449         wr->send_wr = NULL;
1450 }
1451
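     /*
      * isert_put_cmd() - Final release of an iscsi_cmd: remove it from
      * the per-connection command list and free it through the target
      * core or iscsit, depending on the iSCSI opcode.
      */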
1452 static void
1453 isert_put_cmd(struct isert_cmd *isert_cmd)
1454 {
1455         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1456         struct isert_conn *isert_conn = isert_cmd->conn;
1457         struct iscsi_conn *conn = isert_conn->conn;
1458         struct isert_device *device = isert_conn->conn_device;
1459
1460         pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1461
1462         switch (cmd->iscsi_opcode) {
1463         case ISCSI_OP_SCSI_CMD:
1464                 spin_lock_bh(&conn->cmd_lock);
1465                 if (!list_empty(&cmd->i_conn_node))
1466                         list_del(&cmd->i_conn_node);
1467                 spin_unlock_bh(&conn->cmd_lock);
1468
1469                 if (cmd->data_direction == DMA_TO_DEVICE)
1470                         iscsit_stop_dataout_timer(cmd);
1471
1472                 device->unreg_rdma_mem(isert_cmd, isert_conn);
1473                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1474                 break;
1475         case ISCSI_OP_SCSI_TMFUNC:
1476                 spin_lock_bh(&conn->cmd_lock);
1477                 if (!list_empty(&cmd->i_conn_node))
1478                         list_del(&cmd->i_conn_node);
1479                 spin_unlock_bh(&conn->cmd_lock);
1480
1481                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1482                 break;
1483         case ISCSI_OP_REJECT:
1484         case ISCSI_OP_NOOP_OUT:
1485         case ISCSI_OP_TEXT:
1486                 spin_lock_bh(&conn->cmd_lock);
1487                 if (!list_empty(&cmd->i_conn_node))
1488                         list_del(&cmd->i_conn_node);
1489                 spin_unlock_bh(&conn->cmd_lock);
1490
1491                 /*
1492                  * Handle special case for REJECT when iscsi_add_reject*() has
1493                  * overwritten the original iscsi_opcode assignment, and the
1494                  * associated cmd->se_cmd needs to be released.
1495                  */
1496                 if (cmd->se_cmd.se_tfo != NULL) {
1497                         pr_debug("Calling transport_generic_free_cmd from"
1498                                  " isert_put_cmd for 0x%02x\n",
1499                                  cmd->iscsi_opcode);
1500                         transport_generic_free_cmd(&cmd->se_cmd, 0);
1501                         break;
1502                 }
1503                 /*
1504                  * Fall-through
1505                  */
1506         default:
1507                 iscsit_release_cmd(cmd);
1508                 break;
1509         }
1510 }
1511
1512 static void
1513 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1514 {
1515         if (tx_desc->dma_addr != 0) {
1516                 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1517                 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1518                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
1519                 tx_desc->dma_addr = 0;
1520         }
1521 }
1522
1523 static void
1524 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1525                      struct ib_device *ib_dev)
1526 {
1527         if (isert_cmd->pdu_buf_dma != 0) {
1528                 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1529                 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1530                                     isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1531                 isert_cmd->pdu_buf_dma = 0;
1532         }
1533
1534         isert_unmap_tx_desc(tx_desc, ib_dev);
1535         isert_put_cmd(isert_cmd);
1536 }
1537
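     /*
      * isert_completion_rdma_read() - RDMA_READ completion means all
      * Data-Out has been pulled from the initiator, so release the RDMA
      * resources, mark the final Data-Out received and hand the command
      * to the target core for execution.
      */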
1538 static void
1539 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1540                            struct isert_cmd *isert_cmd)
1541 {
1542         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1543         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1544         struct se_cmd *se_cmd = &cmd->se_cmd;
1545         struct isert_conn *isert_conn = isert_cmd->conn;
1546         struct isert_device *device = isert_conn->conn_device;
1547
1548         iscsit_stop_dataout_timer(cmd);
1549         device->unreg_rdma_mem(isert_cmd, isert_conn);
1550         cmd->write_data_done = wr->cur_rdma_length;
1551
1552         pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1553         spin_lock_bh(&cmd->istate_lock);
1554         cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1555         cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1556         spin_unlock_bh(&cmd->istate_lock);
1557
1558         target_execute_cmd(se_cmd);
1559 }
1560
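     /*
      * isert_do_control_comp() - Workqueue-deferred send completion for
      * control PDU responses (task management, reject, logout and text),
      * whose iscsit post handlers may sleep, so they are deferred to a
      * separate work item instead of running inline in the CQ drain loop.
      */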
1561 static void
1562 isert_do_control_comp(struct work_struct *work)
1563 {
1564         struct isert_cmd *isert_cmd = container_of(work,
1565                         struct isert_cmd, comp_work);
1566         struct isert_conn *isert_conn = isert_cmd->conn;
1567         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1568         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1569
1570         switch (cmd->i_state) {
1571         case ISTATE_SEND_TASKMGTRSP:
1572                 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1573
1574                 atomic_dec(&isert_conn->post_send_buf_count);
1575                 iscsit_tmr_post_handler(cmd, cmd->conn);
1576
1577                 cmd->i_state = ISTATE_SENT_STATUS;
1578                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1579                 break;
1580         case ISTATE_SEND_REJECT:
1581                 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1582                 atomic_dec(&isert_conn->post_send_buf_count);
1583
1584                 cmd->i_state = ISTATE_SENT_STATUS;
1585                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1586                 break;
1587         case ISTATE_SEND_LOGOUTRSP:
1588                 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1589                 /*
1590                  * The atomic_dec(&isert_conn->post_send_buf_count) for this
1591                  * response is deferred to isert_free_conn() via logout_posted.
1592                  */
1593                 isert_conn->logout_posted = true;
1594                 iscsit_logout_post_handler(cmd, cmd->conn);
1595                 break;
1596         case ISTATE_SEND_TEXTRSP:
1597                 atomic_dec(&isert_conn->post_send_buf_count);
1598                 cmd->i_state = ISTATE_SENT_STATUS;
1599                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1600                 break;
1601         default:
1602                 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1603                 dump_stack();
1604                 break;
1605         }
1606 }
1607
1608 static void
1609 isert_response_completion(struct iser_tx_desc *tx_desc,
1610                           struct isert_cmd *isert_cmd,
1611                           struct isert_conn *isert_conn,
1612                           struct ib_device *ib_dev)
1613 {
1614         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1615
1616         if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1617             cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1618             cmd->i_state == ISTATE_SEND_REJECT ||
1619             cmd->i_state == ISTATE_SEND_TEXTRSP) {
1620                 isert_unmap_tx_desc(tx_desc, ib_dev);
1621
1622                 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1623                 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1624                 return;
1625         }
1626         atomic_dec(&isert_conn->post_send_buf_count);
1627
1628         cmd->i_state = ISTATE_SENT_STATUS;
1629         isert_completion_put(tx_desc, isert_cmd, ib_dev);
1630 }
1631
1632 static void
1633 __isert_send_completion(struct iser_tx_desc *tx_desc,
1634                         struct isert_conn *isert_conn)
1635 {
1636         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1637         struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1638         struct isert_rdma_wr *wr;
1639
1640         if (!isert_cmd) {
1641                 atomic_dec(&isert_conn->post_send_buf_count);
1642                 isert_unmap_tx_desc(tx_desc, ib_dev);
1643                 return;
1644         }
1645         wr = &isert_cmd->rdma_wr;
1646
1647         switch (wr->iser_ib_op) {
1648         case ISER_IB_RECV:
1649                 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1650                 dump_stack();
1651                 break;
1652         case ISER_IB_SEND:
1653                 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1654                 isert_response_completion(tx_desc, isert_cmd,
1655                                           isert_conn, ib_dev);
1656                 break;
1657         case ISER_IB_RDMA_WRITE:
1658                 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1659                 dump_stack();
1660                 break;
1661         case ISER_IB_RDMA_READ:
1662                 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1663
1664                 atomic_dec(&isert_conn->post_send_buf_count);
1665                 isert_completion_rdma_read(tx_desc, isert_cmd);
1666                 break;
1667         default:
1668                 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1669                 dump_stack();
1670                 break;
1671         }
1672 }
1673
1674 static void
1675 isert_send_completion(struct iser_tx_desc *tx_desc,
1676                       struct isert_conn *isert_conn)
1677 {
1678         struct llist_node *llnode = tx_desc->comp_llnode_batch;
1679         struct iser_tx_desc *t;
1680         /*
1681          * Drain coalesced completion llist starting from comp_llnode_batch
1682          * setup in isert_init_send_wr(), and then complete trailing tx_desc.
1683          */
1684         while (llnode) {
1685                 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1686                 llnode = llist_next(llnode);
1687                 __isert_send_completion(t, isert_conn);
1688         }
1689         __isert_send_completion(tx_desc, isert_conn);
1690 }
1691
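     /*
      * isert_cq_comp_err() - Handle a failed/flushed work completion:
      * release any associated TX descriptor, and once both the RX and TX
      * posted-buffer counts reach zero, move the connection toward
      * ISER_CONN_TERMINATING and wake the waiter in isert_free_conn().
      */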
1692 static void
1693 isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1694 {
1695         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1696
1697         if (tx_desc) {
1698                 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1699
1700                 if (!isert_cmd)
1701                         isert_unmap_tx_desc(tx_desc, ib_dev);
1702                 else
1703                         isert_completion_put(tx_desc, isert_cmd, ib_dev);
1704         }
1705
1706         if (isert_conn->post_recv_buf_count == 0 &&
1707             atomic_read(&isert_conn->post_send_buf_count) == 0) {
1708                 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1709                 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1710
1711                 mutex_lock(&isert_conn->conn_mutex);
1712                 if (isert_conn->state != ISER_CONN_DOWN)
1713                         isert_conn->state = ISER_CONN_TERMINATING;
1714                 mutex_unlock(&isert_conn->conn_mutex);
1715
1716                 wake_up(&isert_conn->conn_wait_comp_err);
1717         }
1718 }
1719
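     /*
      * isert_cq_tx_work() - Drain the TX completion queue one work
      * completion at a time, dispatching successes to the send completion
      * path and failures to isert_cq_comp_err(), then re-arm the CQ
      * notification.
      */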
1720 static void
1721 isert_cq_tx_work(struct work_struct *work)
1722 {
1723         struct isert_cq_desc *cq_desc = container_of(work,
1724                                 struct isert_cq_desc, cq_tx_work);
1725         struct isert_device *device = cq_desc->device;
1726         int cq_index = cq_desc->cq_index;
1727         struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1728         struct isert_conn *isert_conn;
1729         struct iser_tx_desc *tx_desc;
1730         struct ib_wc wc;
1731
1732         while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1733                 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1734                 isert_conn = wc.qp->qp_context;
1735
1736                 if (wc.status == IB_WC_SUCCESS) {
1737                         isert_send_completion(tx_desc, isert_conn);
1738                 } else {
1739                         pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1740                         pr_debug("TX wc.status: 0x%08x\n", wc.status);
1741                         pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1742                         atomic_dec(&isert_conn->post_send_buf_count);
1743                         isert_cq_comp_err(tx_desc, isert_conn);
1744                 }
1745         }
1746
1747         ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1748 }
1749
1750 static void
1751 isert_cq_tx_callback(struct ib_cq *cq, void *context)
1752 {
1753         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1754
1755         queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1756 }
1757
1758 static void
1759 isert_cq_rx_work(struct work_struct *work)
1760 {
1761         struct isert_cq_desc *cq_desc = container_of(work,
1762                         struct isert_cq_desc, cq_rx_work);
1763         struct isert_device *device = cq_desc->device;
1764         int cq_index = cq_desc->cq_index;
1765         struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1766         struct isert_conn *isert_conn;
1767         struct iser_rx_desc *rx_desc;
1768         struct ib_wc wc;
1769         unsigned long xfer_len;
1770
1771         while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1772                 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1773                 isert_conn = wc.qp->qp_context;
1774
1775                 if (wc.status == IB_WC_SUCCESS) {
1776                         xfer_len = (unsigned long)wc.byte_len;
1777                         isert_rx_completion(rx_desc, isert_conn, xfer_len);
1778                 } else {
1779                         pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1780                         if (wc.status != IB_WC_WR_FLUSH_ERR) {
1781                                 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1782                                 pr_debug("RX wc.vendor_err: 0x%08x\n",
1783                                          wc.vendor_err);
1784                         }
1785                         isert_conn->post_recv_buf_count--;
1786                         isert_cq_comp_err(NULL, isert_conn);
1787                 }
1788         }
1789
1790         ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1791 }
1792
1793 static void
1794 isert_cq_rx_callback(struct ib_cq *cq, void *context)
1795 {
1796         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1797
1798         queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1799 }
1800
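     /*
      * isert_post_response() - Post a response PDU send work request,
      * keeping post_send_buf_count balanced if ib_post_send() fails.
      */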
1801 static int
1802 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1803 {
1804         struct ib_send_wr *wr_failed;
1805         int ret;
1806
1807         atomic_inc(&isert_conn->post_send_buf_count);
1808
1809         ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1810                            &wr_failed);
1811         if (ret) {
1812                 pr_err("ib_post_send failed with %d\n", ret);
1813                 atomic_dec(&isert_conn->post_send_buf_count);
1814         }
1815
1816         return ret;
1817 }
1818
1819 static int
1820 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1821 {
1822         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1823         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1824         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1825         struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1826                                 &isert_cmd->tx_desc.iscsi_header;
1827
1828         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1829         iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1830         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1831         /*
1832          * Attach SENSE DATA payload to iSCSI Response PDU
1833          */
1834         if (cmd->se_cmd.sense_buffer &&
1835             ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1836             (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1837                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1838                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1839                 u32 padding, pdu_len;
1840
1841                 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1842                                    cmd->sense_buffer);
1843                 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1844
1845                 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1846                 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1847                 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1848
1849                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1850                                 (void *)cmd->sense_buffer, pdu_len,
1851                                 DMA_TO_DEVICE);
1852
1853                 isert_cmd->pdu_buf_len = pdu_len;
1854                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1855                 tx_dsg->length  = pdu_len;
1856                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1857                 isert_cmd->tx_desc.num_sge = 2;
1858         }
1859
1860         isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
1861
1862         pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1863
1864         return isert_post_response(isert_conn, isert_cmd);
1865 }
1866
1867 static int
1868 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1869                 bool nopout_response)
1870 {
1871         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1872         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1873         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1874
1875         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1876         iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1877                                &isert_cmd->tx_desc.iscsi_header,
1878                                nopout_response);
1879         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1880         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1881
1882         pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1883
1884         return isert_post_response(isert_conn, isert_cmd);
1885 }
1886
1887 static int
1888 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1889 {
1890         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1891         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1892         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1893
1894         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1895         iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1896                                 &isert_cmd->tx_desc.iscsi_header);
1897         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1898         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1899
1900         pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1901
1902         return isert_post_response(isert_conn, isert_cmd);
1903 }
1904
1905 static int
1906 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1907 {
1908         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1909         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1910         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1911
1912         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1913         iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1914                                   &isert_cmd->tx_desc.iscsi_header);
1915         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1916         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1917
1918         pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1919
1920         return isert_post_response(isert_conn, isert_cmd);
1921 }
1922
1923 static int
1924 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1925 {
1926         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1927         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1928         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1929         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1930         struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1931         struct iscsi_reject *hdr =
1932                 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1933
1934         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1935         iscsit_build_reject(cmd, conn, hdr);
1936         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1937
1938         hton24(hdr->dlength, ISCSI_HDR_LEN);
1939         isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1940                         (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1941                         DMA_TO_DEVICE);
1942         isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1943         tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1944         tx_dsg->length  = ISCSI_HDR_LEN;
1945         tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1946         isert_cmd->tx_desc.num_sge = 2;
1947
1948         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1949
1950         pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1951
1952         return isert_post_response(isert_conn, isert_cmd);
1953 }
1954
1955 static int
1956 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1957 {
1958         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1959         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1960         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1961         struct iscsi_text_rsp *hdr =
1962                 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1963         u32 txt_rsp_len;
1964         int rc;
1965
1966         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1967         rc = iscsit_build_text_rsp(cmd, conn, hdr);
1968         if (rc < 0)
1969                 return rc;
1970
1971         txt_rsp_len = rc;
1972         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1973
1974         if (txt_rsp_len) {
1975                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1976                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1977                 void *txt_rsp_buf = cmd->buf_ptr;
1978
1979                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1980                                 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1981
1982                 isert_cmd->pdu_buf_len = txt_rsp_len;
1983                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1984                 tx_dsg->length  = txt_rsp_len;
1985                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1986                 isert_cmd->tx_desc.num_sge = 2;
1987         }
1988         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1989
1990         pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1991
1992         return isert_post_response(isert_conn, isert_cmd);
1993 }
1994
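     /*
      * isert_build_rdma_wr() - Fill one RDMA work request with up to
      * max_sge ib_sge entries built from the command's scatterlist,
      * starting at the given byte offset.  Returns the number of
      * scatterlist entries consumed.
      */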
1995 static int
1996 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1997                     struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1998                     u32 data_left, u32 offset)
1999 {
2000         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2001         struct scatterlist *sg_start, *tmp_sg;
2002         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2003         u32 sg_off, page_off;
2004         int i = 0, sg_nents;
2005
2006         sg_off = offset / PAGE_SIZE;
2007         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2008         sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2009         page_off = offset % PAGE_SIZE;
2010
2011         send_wr->sg_list = ib_sge;
2012         send_wr->num_sge = sg_nents;
2013         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2014         /*
2015          * Map each TCM scatterlist entry into an ib_sge DMA address/length.
2016          */
2017         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2018                 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2019                          (unsigned long long)tmp_sg->dma_address,
2020                          tmp_sg->length, page_off);
2021
2022                 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2023                 ib_sge->length = min_t(u32, data_left,
2024                                 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2025                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2026
2027                 pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2028                          ib_sge->addr, ib_sge->length, ib_sge->lkey);
2029                 page_off = 0;
2030                 data_left -= ib_sge->length;
2031                 ib_sge++;
2032                 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2033         }
2034
2035         pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2036                  send_wr->sg_list, send_wr->num_sge);
2037
2038         return sg_nents;
2039 }
2040
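     /*
      * isert_map_rdma() - reg_rdma_mem backend for devices without fast
      * registration support: DMA-map the scatterlist under the local DMA
      * MR and split the transfer into a chain of RDMA WRITE/READ work
      * requests of at most max_sge segments each.
      */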
2041 static int
2042 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2043                struct isert_rdma_wr *wr)
2044 {
2045         struct se_cmd *se_cmd = &cmd->se_cmd;
2046         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2047         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2048         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2049         struct ib_send_wr *send_wr;
2050         struct ib_sge *ib_sge;
2051         struct scatterlist *sg_start;
2052         u32 sg_off = 0, sg_nents;
2053         u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2054         int ret = 0, count, i, ib_sge_cnt;
2055
2056         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2057                 data_left = se_cmd->data_length;
2058         } else {
2059                 sg_off = cmd->write_data_done / PAGE_SIZE;
2060                 data_left = se_cmd->data_length - cmd->write_data_done;
2061                 offset = cmd->write_data_done;
2062                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2063         }
2064
2065         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2066         sg_nents = se_cmd->t_data_nents - sg_off;
2067
2068         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2069                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2070                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2071         if (unlikely(!count)) {
2072                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2073                 return -EINVAL;
2074         }
2075         wr->sge = sg_start;
2076         wr->num_sge = sg_nents;
2077         wr->cur_rdma_length = data_left;
2078         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2079                  isert_cmd, count, sg_start, sg_nents, data_left);
2080
2081         ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2082         if (!ib_sge) {
2083                 pr_warn("Unable to allocate ib_sge\n");
2084                 ret = -ENOMEM;
2085                 goto unmap_sg;
2086         }
2087         wr->ib_sge = ib_sge;
2088
2089         wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
2090         wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2091                                 GFP_KERNEL);
2092         if (!wr->send_wr) {
2093                 pr_err("Unable to allocate wr->send_wr\n");
2094                 ret = -ENOMEM;
2095                 goto unmap_sg;
2096         }
2097
2098         wr->isert_cmd = isert_cmd;
2099         rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2100
2101         for (i = 0; i < wr->send_wr_num; i++) {
2102                 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2103                 data_len = min(data_left, rdma_write_max);
2104
2105                 send_wr->send_flags = 0;
2106                 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2107                         send_wr->opcode = IB_WR_RDMA_WRITE;
2108                         send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2109                         send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2110                         if (i + 1 == wr->send_wr_num)
2111                                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2112                         else
2113                                 send_wr->next = &wr->send_wr[i + 1];
2114                 } else {
2115                         send_wr->opcode = IB_WR_RDMA_READ;
2116                         send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2117                         send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2118                         if (i + 1 == wr->send_wr_num)
2119                                 send_wr->send_flags = IB_SEND_SIGNALED;
2120                         else
2121                                 send_wr->next = &wr->send_wr[i + 1];
2122                 }
2123
2124                 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2125                                         send_wr, data_len, offset);
2126                 ib_sge += ib_sge_cnt;
2127
2128                 offset += data_len;
2129                 va_offset += data_len;
2130                 data_left -= data_len;
2131         }
2132
2133         return 0;
2134 unmap_sg:
2135         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2136                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2137                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
             /*
              * Clear wr state so a later isert_unmap_cmd() cannot unmap the
              * scatterlist or free the ib_sge array a second time.
              */
             wr->sge = NULL;
             kfree(wr->ib_sge);
             wr->ib_sge = NULL;
2138         return ret;
2139 }
2140
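     /*
      * isert_map_fr_pagelist() - Translate a DMA-mapped scatterlist into
      * the page list consumed by a fast registration work request,
      * merging contiguous entries into page-aligned chunks.  Returns the
      * number of pages written to fr_pl.
      */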
2141 static int
2142 isert_map_fr_pagelist(struct ib_device *ib_dev,
2143                       struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2144 {
2145         u64 start_addr, end_addr, page, chunk_start = 0;
2146         struct scatterlist *tmp_sg;
2147         int i = 0, new_chunk, last_ent, n_pages;
2148
2149         n_pages = 0;
2150         new_chunk = 1;
2151         last_ent = sg_nents - 1;
2152         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2153                 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2154                 if (new_chunk)
2155                         chunk_start = start_addr;
2156                 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2157
2158                 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2159                          i, (unsigned long long)tmp_sg->dma_address,
2160                          tmp_sg->length);
2161
2162                 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2163                         new_chunk = 0;
2164                         continue;
2165                 }
2166                 new_chunk = 1;
2167
2168                 page = chunk_start & PAGE_MASK;
2169                 do {
2170                         fr_pl[n_pages++] = page;
2171                         pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2172                                  n_pages - 1, page);
2173                         page += PAGE_SIZE;
2174                 } while (page < end_addr);
2175         }
2176
2177         return n_pages;
2178 }
2179
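     /*
      * isert_fast_reg_mr() - Register the command's pages through a
      * fast_reg_descriptor: post an IB_WR_LOCAL_INV to retire the stale
      * rkey when needed, then an IB_WR_FAST_REG_MR, and fill the single
      * ib_sge describing the freshly registered region.
      */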
2180 static int
2181 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2182                   struct isert_conn *isert_conn, struct scatterlist *sg_start,
2183                   struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
2184                   unsigned int data_len)
2185 {
2186         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2187         struct ib_send_wr fr_wr, inv_wr;
2188         struct ib_send_wr *bad_wr, *wr = NULL;
2189         int ret, pagelist_len;
2190         u32 page_off;
2191         u8 key;
2192
2193         sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
2194         page_off = offset % PAGE_SIZE;
2195
2196         pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
2197                  fr_desc, sg_nents, offset);
2198
2199         pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2200                                              &fr_desc->data_frpl->page_list[0]);
2201
2202         if (!fr_desc->valid) {
2203                 memset(&inv_wr, 0, sizeof(inv_wr));
2204                 inv_wr.opcode = IB_WR_LOCAL_INV;
2205                 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2206                 wr = &inv_wr;
2207                 /* Bump the key */
2208                 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
2209                 ib_update_fast_reg_key(fr_desc->data_mr, ++key);
2210         }
2211
2212         /* Prepare FASTREG WR */
2213         memset(&fr_wr, 0, sizeof(fr_wr));
2214         fr_wr.opcode = IB_WR_FAST_REG_MR;
2215         fr_wr.wr.fast_reg.iova_start =
2216                 fr_desc->data_frpl->page_list[0] + page_off;
2217         fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2218         fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2219         fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2220         fr_wr.wr.fast_reg.length = data_len;
2221         fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
2222         fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2223
2224         if (!wr)
2225                 wr = &fr_wr;
2226         else
2227                 wr->next = &fr_wr;
2228
2229         ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2230         if (ret) {
2231                 pr_err("fast registration failed, ret:%d\n", ret);
2232                 return ret;
2233         }
2234         fr_desc->valid = false;
2235
2236         ib_sge->lkey = fr_desc->data_mr->lkey;
2237         ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2238         ib_sge->length = data_len;
2239
2240         pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2241                  ib_sge->addr, ib_sge->length, ib_sge->lkey);
2242
2243         return ret;
2244 }
2245
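     /*
      * isert_reg_rdma() - reg_rdma_mem backend for devices with fast
      * registration support: map the scatterlist and describe the whole
      * transfer with a single work request, either via the local DMA MR
      * for a single-entry mapping or via a pooled fast registration
      * descriptor.
      */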
2246 static int
2247 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2248                struct isert_rdma_wr *wr)
2249 {
2250         struct se_cmd *se_cmd = &cmd->se_cmd;
2251         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2252         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2253         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2254         struct ib_send_wr *send_wr;
2255         struct ib_sge *ib_sge;
2256         struct scatterlist *sg_start;
2257         struct fast_reg_descriptor *fr_desc;
2258         u32 sg_off = 0, sg_nents;
2259         u32 offset = 0, data_len, data_left, rdma_write_max;
2260         int ret = 0, count;
2261         unsigned long flags;
2262
2263         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2264                 data_left = se_cmd->data_length;
2265         } else {
2266                 offset = cmd->write_data_done;
2267                 sg_off = offset / PAGE_SIZE;
2268                 data_left = se_cmd->data_length - cmd->write_data_done;
2269                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2270         }
2271
2272         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2273         sg_nents = se_cmd->t_data_nents - sg_off;
2274
2275         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2276                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2277                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2278         if (unlikely(!count)) {
2279                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2280                 return -EINVAL;
2281         }
2282         wr->sge = sg_start;
2283         wr->num_sge = sg_nents;
2284         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2285                  isert_cmd, count, sg_start, sg_nents, data_left);
2286
2287         memset(&wr->s_ib_sge, 0, sizeof(wr->s_ib_sge));
2288         ib_sge = &wr->s_ib_sge;
2289         wr->ib_sge = ib_sge;
2290
2291         wr->send_wr_num = 1;
2292         memset(&wr->s_send_wr, 0, sizeof(wr->s_send_wr));
2293         wr->send_wr = &wr->s_send_wr;
2294
2295         wr->isert_cmd = isert_cmd;
2296         rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
2297
2298         send_wr = &isert_cmd->rdma_wr.s_send_wr;
2299         send_wr->sg_list = ib_sge;
2300         send_wr->num_sge = 1;
2301         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2302         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2303                 send_wr->opcode = IB_WR_RDMA_WRITE;
2304                 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2305                 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2306                 send_wr->send_flags = 0;
2307                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2308         } else {
2309                 send_wr->opcode = IB_WR_RDMA_READ;
2310                 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2311                 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2312                 send_wr->send_flags = IB_SEND_SIGNALED;
2313         }
2314
2315         data_len = min(data_left, rdma_write_max);
2316         wr->cur_rdma_length = data_len;
2317
2318         /* If there is a single DMA entry, the local DMA MR is sufficient. */
2319         if (count == 1) {
2320                 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2321                 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2322                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2323                 wr->fr_desc = NULL;
2324         } else {
2325                 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2326                 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2327                                            struct fast_reg_descriptor, list);
2328                 list_del(&fr_desc->list);
2329                 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2330                 wr->fr_desc = fr_desc;
2331
2332                 ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
2333                                         ib_sge, sg_nents, offset, data_len);
2334                 if (ret) {
                             /* Re-adding to conn_fr_pool must be locked like the removal above. */
                             spin_lock_irqsave(&isert_conn->conn_lock, flags);
2335                         list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                             spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2336                         goto unmap_sg;
2337                 }
2338         }
2339
2340         return 0;
2341
2342 unmap_sg:
2343         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2344                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2345                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
             /*
              * Clear wr state so a later isert_unreg_rdma() cannot unmap the
              * scatterlist again or return fr_desc to the pool twice.
              */
             wr->sge = NULL;
             wr->fr_desc = NULL;
2346         return ret;
2347 }
2348
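     /*
      * isert_put_datain() - Queue Data-In for an iSER Data READ: post
      * the RDMA_WRITE chain that pushes the payload to the initiator,
      * with the SCSI response send WR chained after the final write.
      */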
2349 static int
2350 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2351 {
2352         struct se_cmd *se_cmd = &cmd->se_cmd;
2353         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2354         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2355         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2356         struct isert_device *device = isert_conn->conn_device;
2357         struct ib_send_wr *wr_failed;
2358         int rc;
2359
2360         pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2361                  isert_cmd, se_cmd->data_length);
2362         wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2363         rc = device->reg_rdma_mem(conn, cmd, wr);
2364         if (rc) {
2365                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2366                 return rc;
2367         }
2368
2369         /*
2370          * Build isert_cmd->tx_desc for the iSCSI response PDU and attach
              * it to the tail of the RDMA_WRITE work request chain.
2371          */
2372         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2373         iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2374                              &isert_cmd->tx_desc.iscsi_header);
2375         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2376         isert_init_send_wr(isert_conn, isert_cmd,
2377                            &isert_cmd->tx_desc.send_wr, true);
2378
2379         atomic_inc(&isert_conn->post_send_buf_count);
2380
2381         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2382         if (rc) {
2383                 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2384                 atomic_dec(&isert_conn->post_send_buf_count);
2385         }
2386         pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2387                  isert_cmd);
2388
2389         return 1;
2390 }
2391
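     /*
      * isert_get_dataout() - Post RDMA_READ work requests to pull
      * outstanding Data-Out payload from the initiator for an iSER Data
      * WRITE.
      */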
2392 static int
2393 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2394 {
2395         struct se_cmd *se_cmd = &cmd->se_cmd;
2396         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2397         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2398         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2399         struct isert_device *device = isert_conn->conn_device;
2400         struct ib_send_wr *wr_failed;
2401         int rc;
2402
2403         pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2404                  isert_cmd, se_cmd->data_length, cmd->write_data_done);
2405         wr->iser_ib_op = ISER_IB_RDMA_READ;
2406         rc = device->reg_rdma_mem(conn, cmd, wr);
2407         if (rc) {
2408                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2409                 return rc;
2410         }
2411
2412         atomic_inc(&isert_conn->post_send_buf_count);
2413
2414         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2415         if (rc) {
2416                 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2417                 atomic_dec(&isert_conn->post_send_buf_count);
2418         }
2419         pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2420                  isert_cmd);
2421
2422         return 0;
2423 }
2424
2425 static int
2426 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2427 {
2428         int ret;
2429
2430         switch (state) {
2431         case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2432                 ret = isert_put_nopin(cmd, conn, false);
2433                 break;
2434         default:
2435                 pr_err("Unknown immediate state: 0x%02x\n", state);
2436                 ret = -EINVAL;
2437                 break;
2438         }
2439
2440         return ret;
2441 }
2442
2443 static int
2444 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2445 {
2446         int ret;
2447
2448         switch (state) {
2449         case ISTATE_SEND_LOGOUTRSP:
2450                 ret = isert_put_logout_rsp(cmd, conn);
2451                 if (!ret) {
2452                         pr_debug("Returning iSER Logout -EAGAIN\n");
2453                         ret = -EAGAIN;
2454                 }
2455                 break;
2456         case ISTATE_SEND_NOPIN:
2457                 ret = isert_put_nopin(cmd, conn, true);
2458                 break;
2459         case ISTATE_SEND_TASKMGTRSP:
2460                 ret = isert_put_tm_rsp(cmd, conn);
2461                 break;
2462         case ISTATE_SEND_REJECT:
2463                 ret = isert_put_reject(cmd, conn);
2464                 break;
2465         case ISTATE_SEND_TEXTRSP:
2466                 ret = isert_put_text_rsp(cmd, conn);
2467                 break;
2468         case ISTATE_SEND_STATUS:
2469                 /*
2470                  * Special case for sending a non-GOOD SCSI status from the TX
2471                  * thread context when a failure occurs before se_cmd execution.
2472                  */
2473                 ret = isert_put_response(conn, cmd);
2474                 break;
2475         default:
2476                 pr_err("Unknown response state: 0x%02x\n", state);
2477                 ret = -EINVAL;
2478                 break;
2479         }
2480
2481         return ret;
2482 }
2483
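     /*
      * isert_setup_np() - Allocate the isert network portal, then
      * create, bind and listen on the RDMA CM ID used to accept new
      * connections.
      */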
2484 static int
2485 isert_setup_np(struct iscsi_np *np,
2486                struct __kernel_sockaddr_storage *ksockaddr)
2487 {
2488         struct isert_np *isert_np;
2489         struct rdma_cm_id *isert_lid;
2490         struct sockaddr *sa;
2491         int ret;
2492
2493         isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2494         if (!isert_np) {
2495                 pr_err("Unable to allocate struct isert_np\n");
2496                 return -ENOMEM;
2497         }
2498         init_waitqueue_head(&isert_np->np_accept_wq);
2499         mutex_init(&isert_np->np_accept_mutex);
2500         INIT_LIST_HEAD(&isert_np->np_accept_list);
2501         init_completion(&isert_np->np_login_comp);
2502
2503         sa = (struct sockaddr *)ksockaddr;
2504         pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
2505         /*
2506          * Set up np->np_sockaddr from the sockaddr passed in from
2507          * the iscsi_target_configfs.c code.
2508          */
2509         memcpy(&np->np_sockaddr, ksockaddr,
2510                sizeof(struct __kernel_sockaddr_storage));
2511
2512         isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
2513                                 IB_QPT_RC);
2514         if (IS_ERR(isert_lid)) {
2515                 pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
2516                        PTR_ERR(isert_lid));
2517                 ret = PTR_ERR(isert_lid);
2518                 goto out;
2519         }
2520
2521         ret = rdma_bind_addr(isert_lid, sa);
2522         if (ret) {
2523                 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2524                 goto out_lid;
2525         }
2526
2527         ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2528         if (ret) {
2529                 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2530                 goto out_lid;
2531         }
2532
2533         isert_np->np_cm_id = isert_lid;
2534         np->np_context = isert_np;
2535         pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2536
2537         return 0;
2538
2539 out_lid:
2540         rdma_destroy_id(isert_lid);
2541 out:
2542         kfree(isert_np);
2543         return ret;
2544 }
2545
2546 static int
2547 isert_check_accept_queue(struct isert_np *isert_np)
2548 {
2549         int empty;
2550
2551         mutex_lock(&isert_np->np_accept_mutex);
2552         empty = list_empty(&isert_np->np_accept_list);
2553         mutex_unlock(&isert_np->np_accept_mutex);
2554
2555         return empty;
2556 }
2557
2558 static int
2559 isert_rdma_accept(struct isert_conn *isert_conn)
2560 {
2561         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2562         struct rdma_conn_param cp;
2563         int ret;
2564
2565         memset(&cp, 0, sizeof(struct rdma_conn_param));
2566         cp.responder_resources = isert_conn->responder_resources;
2567         cp.initiator_depth = isert_conn->initiator_depth;
2568         cp.retry_count = 7;
2569         cp.rnr_retry_count = 7;
2570
2571         pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2572
2573         ret = rdma_accept(cm_id, &cp);
2574         if (ret) {
2575                 pr_err("rdma_accept() failed with: %d\n", ret);
2576                 return ret;
2577         }
2578
2579         pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2580
2581         return 0;
2582 }
2583
2584 static int
2585 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2586 {
2587         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2588         int ret;
2589
2590         pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2591         /*
2592          * For login requests after the first PDU, isert_rx_login_req() will
2593          * kick schedule_delayed_work(&conn->login_work) as the packet is
2594          * received, which turns this callback from iscsi_target_do_login_rx()
2595          * into a NOP.
2596          */
2597         if (!login->first_request)
2598                 return 0;
2599
2600         ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2601         if (ret)
2602                 return ret;
2603
2604         pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2605         return 0;
2606 }
2607
2608 static void
2609 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2610                     struct isert_conn *isert_conn)
2611 {
2612         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2613         struct rdma_route *cm_route = &cm_id->route;
2614         struct sockaddr_in *sock_in;
2615         struct sockaddr_in6 *sock_in6;
2616
2617         conn->login_family = np->np_sockaddr.ss_family;
2618
2619         if (np->np_sockaddr.ss_family == AF_INET6) {
2620                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2621                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2622                          &sock_in6->sin6_addr.in6_u);
2623                 conn->login_port = ntohs(sock_in6->sin6_port);
2624
2625                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2626                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2627                          &sock_in6->sin6_addr.in6_u);
2628                 conn->local_port = ntohs(sock_in6->sin6_port);
2629         } else {
2630                 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2631                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
2632                          &sock_in->sin_addr.s_addr);
2633                 conn->login_port = ntohs(sock_in->sin_port);
2634
2635                 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2636                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
2637                          &sock_in->sin_addr.s_addr);
2638                 conn->local_port = ntohs(sock_in->sin_port);
2639         }
2640 }
2641
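     /*
      * isert_accept_np() - Wait for a pending connection on the accept
      * list, post the initial login receive buffer, and complete the
      * RDMA CM accept before exposing address info to iscsit.
      */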
2642 static int
2643 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2644 {
2645         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2646         struct isert_conn *isert_conn;
2647         int max_accept = 0, ret;
2648
2649 accept_wait:
2650         ret = wait_event_interruptible(isert_np->np_accept_wq,
2651                         !isert_check_accept_queue(isert_np) ||
2652                         np->np_thread_state == ISCSI_NP_THREAD_RESET);
2653         if (max_accept > 5)
2654                 return -ENODEV;
2655
2656         spin_lock_bh(&np->np_thread_lock);
2657         if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2658                 spin_unlock_bh(&np->np_thread_lock);
2659                 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2660                 return -ENODEV;
2661         }
2662         spin_unlock_bh(&np->np_thread_lock);
2663
2664         mutex_lock(&isert_np->np_accept_mutex);
2665         if (list_empty(&isert_np->np_accept_list)) {
2666                 mutex_unlock(&isert_np->np_accept_mutex);
2667                 max_accept++;
2668                 goto accept_wait;
2669         }
2670         isert_conn = list_first_entry(&isert_np->np_accept_list,
2671                         struct isert_conn, conn_accept_node);
2672         list_del_init(&isert_conn->conn_accept_node);
2673         mutex_unlock(&isert_np->np_accept_mutex);
2674
2675         conn->context = isert_conn;
2676         isert_conn->conn = conn;
2677         max_accept = 0;
2678
2679         ret = isert_rdma_post_recvl(isert_conn);
2680         if (ret)
2681                 return ret;
2682
2683         ret = isert_rdma_accept(isert_conn);
2684         if (ret)
2685                 return ret;
2686
2687         isert_set_conn_info(np, conn, isert_conn);
2688
2689         pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2690         return 0;
2691 }
2692
2693 static void
2694 isert_free_np(struct iscsi_np *np)
2695 {
2696         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2697
2698         rdma_destroy_id(isert_np->np_cm_id);
2699
2700         np->np_context = NULL;
2701         kfree(isert_np);
2702 }
2703
2704 static int isert_check_state(struct isert_conn *isert_conn, int state)
2705 {
2706         int ret;
2707
2708         mutex_lock(&isert_conn->conn_mutex);
2709         ret = (isert_conn->state == state);
2710         mutex_unlock(&isert_conn->conn_mutex);
2711
2712         return ret;
2713 }
2714
2715 static void isert_free_conn(struct iscsi_conn *conn)
2716 {
2717         struct isert_conn *isert_conn = conn->context;
2718
2719         pr_debug("isert_free_conn: Starting\n");
2720         /*
2721          * Drop the post_send_buf_count reference that the logout response
2722          * path deferred in isert_do_control_comp().
2723          */
2724         mutex_lock(&isert_conn->conn_mutex);
2725         if (isert_conn->logout_posted)
2726                 atomic_dec(&isert_conn->post_send_buf_count);
2727
2728         if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
2729                 pr_debug("Calling rdma_disconnect from isert_free_conn\n");
2730                 rdma_disconnect(isert_conn->conn_cm_id);
2731         }
2732         /*
2733          * Only wait for conn_wait_comp_err if the isert_conn made it
2734          * into full feature phase.
2735          */
2736         if (isert_conn->state == ISER_CONN_UP) {
2737                 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2738                          isert_conn->state);
2739                 mutex_unlock(&isert_conn->conn_mutex);
2740
2741                 wait_event(isert_conn->conn_wait_comp_err,
2742                           (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
2743
2744                 wait_event(isert_conn->conn_wait,
2745                           (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2746
2747                 isert_put_conn(isert_conn);
2748                 return;
2749         }
2750         if (isert_conn->state == ISER_CONN_INIT) {
2751                 mutex_unlock(&isert_conn->conn_mutex);
2752                 isert_put_conn(isert_conn);
2753                 return;
2754         }
2755         pr_debug("isert_free_conn: wait_event conn_wait %d\n",
2756                  isert_conn->state);
2757         mutex_unlock(&isert_conn->conn_mutex);
2758
2759         wait_event(isert_conn->conn_wait,
2760                   (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2761
2762         isert_put_conn(isert_conn);
2763 }
2764
2765 static struct iscsit_transport iser_target_transport = {
2766         .name                   = "IB/iSER",
2767         .transport_type         = ISCSI_INFINIBAND,
2768         .priv_size              = sizeof(struct isert_cmd),
2769         .owner                  = THIS_MODULE,
2770         .iscsit_setup_np        = isert_setup_np,
2771         .iscsit_accept_np       = isert_accept_np,
2772         .iscsit_free_np         = isert_free_np,
2773         .iscsit_free_conn       = isert_free_conn,
2774         .iscsit_get_login_rx    = isert_get_login_rx,
2775         .iscsit_put_login_tx    = isert_put_login_tx,
2776         .iscsit_immediate_queue = isert_immediate_queue,
2777         .iscsit_response_queue  = isert_response_queue,
2778         .iscsit_get_dataout     = isert_get_dataout,
2779         .iscsit_queue_data_in   = isert_put_datain,
2780         .iscsit_queue_status    = isert_put_response,
2781 };
2782
2783 static int __init isert_init(void)
2784 {
2785         int ret;
2786
2787         isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2788         if (!isert_rx_wq) {
2789                 pr_err("Unable to allocate isert_rx_wq\n");
2790                 return -ENOMEM;
2791         }
2792
2793         isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2794         if (!isert_comp_wq) {
2795                 pr_err("Unable to allocate isert_comp_wq\n");
2796                 ret = -ENOMEM;
2797                 goto destroy_rx_wq;
2798         }
2799
2800         iscsit_register_transport(&iser_target_transport);
2801         pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2802         return 0;
2803
2804 destroy_rx_wq:
2805         destroy_workqueue(isert_rx_wq);
2806         return ret;
2807 }
2808
2809 static void __exit isert_exit(void)
2810 {
2811         destroy_workqueue(isert_comp_wq);
2812         destroy_workqueue(isert_rx_wq);
2813         iscsit_unregister_transport(&iser_target_transport);
2814         pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2815 }
2816
2817 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2818 MODULE_VERSION("0.1");
2819 MODULE_AUTHOR("nab@Linux-iSCSI.org");
2820 MODULE_LICENSE("GPL");
2821
2822 module_init(isert_init);
2823 module_exit(isert_exit);