/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

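/*
 * A single RX/TX CQ pair may be shared by multiple queue pairs, so the
 * CQ depths below are sized for the worst case assumed here: the per-QP
 * descriptor limits (ISERT_QP_MAX_RECV_DTOS / ISERT_QP_MAX_REQ_DTOS,
 * defined in ib_isert.h) multiplied by ISERT_MAX_CONN connections per CQ.
 */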
#define ISERT_MAX_CONN          8
#define ISER_MAX_RX_CQ_LEN      (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN      (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
static void
isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                    struct isert_rdma_wr *wr);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
        struct isert_conn *isert_conn = (struct isert_conn *)context;

        pr_err("isert_qp_event_callback event: %d\n", e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
                pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
                break;
        default:
                break;
        }
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
        int ret;

        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
                pr_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
        pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
        pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

        return 0;
}

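/*
 * Attach the new connection's QP to the least-loaded completion vector:
 * under device_list_mutex, pick the CQ index with the fewest active QPs
 * (cq_active_qps) and bump its count, then create an RC QP on that
 * RX/TX CQ pair.  max_send_sge is reduced by two, per the FIXME below,
 * as a work-around for RDMA_READ.
 */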
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
        int ret, index, min_index = 0;

        mutex_lock(&device_list_mutex);
        for (index = 0; index < device->cqs_used; index++)
                if (device->cq_active_qps[index] <
                    device->cq_active_qps[min_index])
                        min_index = index;
        device->cq_active_qps[min_index]++;
        pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
        mutex_unlock(&device_list_mutex);

        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
        attr.send_cq = device->dev_tx_cq[min_index];
        attr.recv_cq = device->dev_rx_cq[min_index];
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READ..
         */
        attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
        isert_conn->max_sge = attr.cap.max_send_sge;

        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;

        pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                 cma_id->device);
        pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
                 isert_conn->conn_pd->device);

        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp() failed: %d\n", ret);
                return ret;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

        return 0;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
        pr_debug("isert_cq_event_callback event: %d\n", e->event);
}

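/*
 * Allocate and DMA-map the fixed ring of ISERT_QP_MAX_RECV_DTOS receive
 * descriptors.  Each descriptor is mapped for ISER_RX_PAYLOAD_SIZE with
 * DMA_FROM_DEVICE and carries a single SGE using the device DMA MR lkey;
 * conn_rx_desc_head tracks the next ring slot to repost.  On a mapping
 * failure, the descriptors mapped so far are unwound.
 */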
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        u64 dma_addr;
        int i, j;

        isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!isert_conn->conn_rx_descs)
                goto fail;

        rx_desc = isert_conn->conn_rx_descs;

        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
                dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ib_dev, dma_addr))
                        goto dma_map_fail;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey = isert_conn->conn_mr->lkey;
        }

        isert_conn->conn_rx_desc_head = 0;
        return 0;

dma_map_fail:
        rx_desc = isert_conn->conn_rx_descs;
        for (j = 0; j < i; j++, rx_desc++) {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
fail:
        return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_rx_desc *rx_desc;
        int i;

        if (!isert_conn->conn_rx_descs)
                return;

        rx_desc = isert_conn->conn_rx_descs;
        for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
                ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        }

        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);

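/*
 * Set up the per-ib_device resources shared by all connections on this
 * HCA: select the FRWR (fast registration) or plain dma_map RDMA path
 * based on IB_DEVICE_MEM_MGT_EXTENSIONS, create one RX/TX CQ pair per
 * completion vector (capped by num_online_cpus() and ISERT_MAX_CQ), and
 * allocate the PD plus a local-write DMA MR.
 */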
static int
isert_create_device_ib_res(struct isert_device *device)
{
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr;
        int ret = 0, i, j;

        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;

        /* assign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                device->use_frwr = 1;
                device->reg_rdma_mem = isert_reg_rdma_frwr;
                device->unreg_rdma_mem = isert_unreg_rdma_frwr;
        } else {
                device->use_frwr = 0;
                device->reg_rdma_mem = isert_map_rdma;
                device->unreg_rdma_mem = isert_unmap_cmd;
        }

        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
        pr_debug("Using %d CQs, device %s supports %d vectors, FRWR support: %d\n",
                 device->cqs_used, device->ib_device->name,
                 device->ib_device->num_comp_vectors, device->use_frwr);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
                pr_err("Unable to allocate device->cq_desc\n");
                return -ENOMEM;
        }
        cq_desc = device->cq_desc;

        device->dev_pd = ib_alloc_pd(ib_dev);
        if (IS_ERR(device->dev_pd)) {
                ret = PTR_ERR(device->dev_pd);
                pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
                goto out_cq_desc;
        }

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;

                INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
                        goto out_cq;
                }

                INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
                        goto out_cq;
                }

                ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;

                ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }

        device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(device->dev_mr)) {
                ret = PTR_ERR(device->dev_mr);
                pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
                goto out_cq;
        }

        return 0;

out_cq:
        for (j = 0; j < i; j++) {
                cq_desc = &device->cq_desc[j];

                if (device->dev_rx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_rx_work);
                        ib_destroy_cq(device->dev_rx_cq[j]);
                }
                if (device->dev_tx_cq[j]) {
                        cancel_work_sync(&cq_desc->cq_tx_work);
                        ib_destroy_cq(device->dev_tx_cq[j]);
                }
        }
        ib_dealloc_pd(device->dev_pd);

out_cq_desc:
        kfree(device->cq_desc);

        return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
        struct isert_cq_desc *cq_desc;
        int i;

        for (i = 0; i < device->cqs_used; i++) {
                cq_desc = &device->cq_desc[i];

                cancel_work_sync(&cq_desc->cq_rx_work);
                cancel_work_sync(&cq_desc->cq_tx_work);
                ib_destroy_cq(device->dev_rx_cq[i]);
                ib_destroy_cq(device->dev_tx_cq[i]);
                device->dev_rx_cq[i] = NULL;
                device->dev_tx_cq[i] = NULL;
        }

        ib_dereg_mr(device->dev_mr);
        ib_dealloc_pd(device->dev_pd);
        kfree(device->cq_desc);
}

static void
isert_device_try_release(struct isert_device *device)
{
        mutex_lock(&device_list_mutex);
        device->refcount--;
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
                kfree(device);
        }
        mutex_unlock(&device_list_mutex);
}

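/*
 * isert_device entries are cached on the global device_list, keyed by
 * the underlying ib_device node_guid and refcounted under
 * device_list_mutex, so connections arriving on the same HCA share one
 * set of PD/MR/CQ resources.
 */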
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
        struct isert_device *device;
        int ret;

        mutex_lock(&device_list_mutex);
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
        }

        device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
        if (!device) {
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&device->dev_node);

        device->ib_device = cma_id->device;
        ret = isert_create_device_ib_res(device);
        if (ret) {
                kfree(device);
                mutex_unlock(&device_list_mutex);
                return ERR_PTR(ret);
        }

        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
        mutex_unlock(&device_list_mutex);

        return device;
}

static void
isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc, *tmp;
        int i = 0;

        if (list_empty(&isert_conn->conn_frwr_pool))
                return;

        pr_debug("Freeing conn %p frwr pool\n", isert_conn);

        list_for_each_entry_safe(fr_desc, tmp,
                                 &isert_conn->conn_frwr_pool, list) {
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
                kfree(fr_desc);
                ++i;
        }

        if (i < isert_conn->conn_frwr_pool_size)
                pr_warn("Pool still has %d regions registered\n",
                        isert_conn->conn_frwr_pool_size - i);
}

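/*
 * Pre-allocate one fast-registration descriptor per possible outstanding
 * command (ISCSI_DEF_XMIT_CMDS_MAX).  Each descriptor pairs a fast_reg
 * page list with a fast_reg MR sized for ISCSI_ISER_SG_TABLESIZE pages;
 * descriptors are handed out on the RDMA registration path and returned
 * to conn_frwr_pool in isert_unreg_rdma_frwr().
 */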
static int
isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
{
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
        int i, ret;

        INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
        isert_conn->conn_frwr_pool_size = 0;
        for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        pr_err("Failed to allocate fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }

                fr_desc->data_frpl =
                        ib_alloc_fast_reg_page_list(device->ib_device,
                                                    ISCSI_ISER_SG_TABLESIZE);
                if (IS_ERR(fr_desc->data_frpl)) {
                        pr_err("Failed to allocate fr_pg_list err=%ld\n",
                               PTR_ERR(fr_desc->data_frpl));
                        ret = PTR_ERR(fr_desc->data_frpl);
                        kfree(fr_desc);
                        goto err;
                }

                fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
                                        ISCSI_ISER_SG_TABLESIZE);
                if (IS_ERR(fr_desc->data_mr)) {
                        pr_err("Failed to allocate frmr err=%ld\n",
                               PTR_ERR(fr_desc->data_mr));
                        ret = PTR_ERR(fr_desc->data_mr);
                        ib_free_fast_reg_page_list(fr_desc->data_frpl);
                        kfree(fr_desc);
                        goto err;
                }
                pr_debug("Create fr_desc %p page_list %p\n",
                         fr_desc, fr_desc->data_frpl->page_list);

                fr_desc->valid = true;
                list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
                isert_conn->conn_frwr_pool_size++;
        }

        pr_debug("Created conn %p frwr pool size=%d\n",
                 isert_conn, isert_conn->conn_frwr_pool_size);

        return 0;

err:
        isert_conn_free_frwr_pool(isert_conn);
        return ret;
}

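/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate the isert_conn, map
 * the login request/response buffer pair (request DMA_FROM_DEVICE,
 * response DMA_TO_DEVICE), look up or create the shared isert_device,
 * optionally build the FRWR pool, create the QP, then queue the new
 * connection on np_accept_list and wake any waiter on np_accept_wq.
 */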
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        struct iscsi_np *np = cma_id->context;
        struct isert_np *isert_np = np->np_context;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;

        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);

        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
                pr_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
        init_waitqueue_head(&isert_conn->conn_wait);
        init_waitqueue_head(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        mutex_init(&isert_conn->conn_comp_mutex);
        spin_lock_init(&isert_conn->conn_lock);

        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
        pr_debug("Using responder_resources: %u initiator_depth: %u\n",
                 isert_conn->responder_resources, isert_conn->initiator_depth);

        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
                pr_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }

        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;
        pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                 isert_conn->login_buf, isert_conn->login_req_buf,
                 isert_conn->login_rsp_buf);

        isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
                                (void *)isert_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                       ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
        }

        isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
                                        (void *)isert_conn->login_rsp_buf,
                                        ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
                pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                       ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
        }

        device = isert_device_find_by_ib_dev(cma_id);
        if (IS_ERR(device)) {
                ret = PTR_ERR(device);
                goto out_rsp_dma_map;
        }

        isert_conn->conn_device = device;
        isert_conn->conn_pd = device->dev_pd;
        isert_conn->conn_mr = device->dev_mr;

        if (device->use_frwr) {
                ret = isert_conn_create_frwr_pool(isert_conn);
                if (ret) {
                        pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
                        goto out_frwr;
                }
        }

        ret = isert_conn_setup_qp(isert_conn, cma_id);
        if (ret)
                goto out_conn_dev;

        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);

        pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
        wake_up(&isert_np->np_accept_wq);
        return 0;

out_conn_dev:
        if (device->use_frwr)
                isert_conn_free_frwr_pool(isert_conn);
out_frwr:
        isert_device_try_release(device);
out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                            ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                            ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
        kfree(isert_conn->login_buf);
out:
        kfree(isert_conn);
        return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_device *device = isert_conn->conn_device;
        int cq_index;

        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

        if (device && device->use_frwr)
                isert_conn_free_frwr_pool(isert_conn);

        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
                isert_conn->conn_device->cq_active_qps[cq_index]--;

                rdma_destroy_qp(isert_conn->conn_cm_id);
        }

        isert_free_rx_descriptors(isert_conn);
        rdma_destroy_id(isert_conn->conn_cm_id);

        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
                ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN,
                                    DMA_FROM_DEVICE);
                kfree(isert_conn->login_buf);
        }
        kfree(isert_conn);

        if (device)
                isert_device_try_release(device);

        pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
        return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);

        pr_debug("Calling isert_connect_release for final kref %s/%d\n",
                 current->comm, current->pid);

        isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

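/*
 * Connection teardown is kref driven: conn_kref starts at 1 from
 * kref_init() and is bumped once more in isert_connect_request(), so
 * isert_connect_release() only runs once both references have been
 * dropped via isert_put_conn() -- one here in the disconnect path, the
 * other presumably from the fabric's connection-free path outside this
 * section.
 */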
static void
isert_disconnect_work(struct work_struct *work)
{
        struct isert_conn *isert_conn = container_of(work,
                                struct isert_conn, conn_logout_work);

        pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
        mutex_lock(&isert_conn->conn_mutex);
        isert_conn->state = ISER_CONN_DOWN;

        if (isert_conn->post_recv_buf_count == 0 &&
            atomic_read(&isert_conn->post_send_buf_count) == 0) {
                pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
                mutex_unlock(&isert_conn->conn_mutex);
                goto wake_up;
        }
        if (!isert_conn->conn_cm_id) {
                mutex_unlock(&isert_conn->conn_mutex);
                isert_put_conn(isert_conn);
                return;
        }
        if (!isert_conn->logout_posted) {
                pr_debug("Calling rdma_disconnect for !logout_posted from"
                         " isert_disconnect_work\n");
                rdma_disconnect(isert_conn->conn_cm_id);
                mutex_unlock(&isert_conn->conn_mutex);
                iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
                goto wake_up;
        }
        mutex_unlock(&isert_conn->conn_mutex);

wake_up:
        wake_up(&isert_conn->conn_wait);
        isert_put_conn(isert_conn);
}

static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
        int ret = 0;

        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
                isert_disconnected_handler(cma_id);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_ADDR_CHANGE:
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
                pr_err("Unknown RDMA CMA event: %d\n", event->event);
                break;
        }

        if (ret != 0) {
                pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
                       event->event, ret);
                dump_stack();
        }

        return ret;
}

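/*
 * Repost up to 'count' receive descriptors, chained into a single
 * ib_post_recv() call.  conn_rx_desc_head walks the descriptor ring and
 * wraps with a power-of-two mask, so ISERT_QP_MAX_RECV_DTOS must be a
 * power of two; post_recv_buf_count is bumped optimistically and rolled
 * back on failure.
 */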
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
        struct ib_recv_wr *rx_wr, *rx_wr_failed;
        int i, ret;
        unsigned int rx_head = isert_conn->conn_rx_desc_head;
        struct iser_rx_desc *rx_desc;

        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc         = &isert_conn->conn_rx_descs[rx_head];
                rx_wr->wr_id    = (unsigned long)rx_desc;
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
                rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
        }

        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */

        isert_conn->post_recv_buf_count += count;
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                                &rx_wr_failed);
        if (ret) {
                pr_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
                pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct ib_send_wr send_wr, *send_wr_failed;
        int ret;

        ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);

        send_wr.next    = NULL;
        send_wr.wr_id   = (unsigned long)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode  = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        atomic_inc(&isert_conn->post_send_buf_count);

        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
        if (ret) {
                pr_err("ib_post_send() failed, ret: %d\n", ret);
                atomic_dec(&isert_conn->post_send_buf_count);
        }

        return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
                       struct isert_cmd *isert_cmd,
                       struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

        ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
                                   ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;
        tx_desc->isert_cmd = isert_cmd;

        if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
                tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
                pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
                   struct iser_tx_desc *tx_desc)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        u64 dma_addr;

        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
                pr_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }

        tx_desc->dma_addr = dma_addr;
        tx_desc->tx_sg[0].addr  = tx_desc->dma_addr;
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

        pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
                 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
                 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

        return 0;
}

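/*
 * TX completion interrupt coalescing: when 'coalesce' is set, only every
 * ISERT_COMP_BATCH_COUNT-th send WR is posted with IB_SEND_SIGNALED.
 * Unsignaled descriptors are parked on conn_comp_llist, and the whole
 * batch is transferred (llist_del_all) onto the descriptor that finally
 * carries the signaled completion, so one completion can retire the
 * entire batch.
 */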
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                   struct ib_send_wr *send_wr, bool coalesce)
{
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
        /*
         * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
         * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
         */
        mutex_lock(&isert_conn->conn_comp_mutex);
        if (coalesce &&
            ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
                llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
                mutex_unlock(&isert_conn->conn_comp_mutex);
                return;
        }
        isert_conn->conn_comp_batch = 0;
        tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
        mutex_unlock(&isert_conn->conn_comp_mutex);

        send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
        struct ib_recv_wr rx_wr, *rx_wr_fail;
        struct ib_sge sge;
        int ret;

        memset(&sge, 0, sizeof(struct ib_sge));
        sge.addr = isert_conn->login_req_dma;
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_mr->lkey;

        pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
                sge.addr, sge.length, sge.lkey);

        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
        rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;

        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
                pr_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        }

        pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
        return ret;
}

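/*
 * Queue a login response: the PDU header travels in the TX descriptor
 * and any text payload is copied into the pre-mapped login_rsp_buf as a
 * second SGE.  Once login completes, the full RX descriptor ring is
 * allocated and ISERT_MIN_POSTED_RX buffers are posted before moving to
 * ISER_CONN_UP; until then only the single login receive buffer is
 * reposted.
 */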
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                tx_dsg->addr    = isert_conn->login_rsp_dma;
                tx_dsg->length  = length;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        isert_conn->state = ISER_CONN_UP;
                        goto post_send;
                }

                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}

static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                   struct isert_conn *isert_conn)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;

        if (!login) {
                pr_err("conn->conn_login is NULL\n");
                dump_stack();
                return;
        }

        if (login->first_request) {
                struct iscsi_login_req *login_req =
                        (struct iscsi_login_req *)&rx_desc->iscsi_header;
                /*
                 * Setup the initial iscsi_login values from the leading
                 * login request PDU.
                 */
                login->leading_connection = (!login_req->tsih) ? 1 : 0;
                login->current_stage =
                        (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
                         >> 2;
                login->version_min      = login_req->min_version;
                login->version_max      = login_req->max_version;
                memcpy(login->isid, login_req->isid, 6);
                login->cmd_sn           = be32_to_cpu(login_req->cmdsn);
                login->init_task_tag    = login_req->itt;
                login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
                login->cid              = be16_to_cpu(login_req->cid);
                login->tsih             = be16_to_cpu(login_req->tsih);
        }

        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
        pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);

        if (login->first_request) {
                complete(&isert_conn->conn_login_comp);
                return;
        }
        schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_cmd *isert_cmd;
        struct iscsi_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, gfp);
        if (!cmd) {
                pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
        isert_cmd->conn = isert_conn;
        isert_cmd->iscsi_cmd = cmd;

        return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
                      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        struct scatterlist *sg;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;

        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                return 0;
        } else if (rc > 0) {
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

        pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
                 sg, sg_nents, &rx_desc->data[0], imm_data_len);

        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);

        return 0;
}

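/*
 * Unsolicited Data-Out PDUs are copied straight from the RX descriptor
 * into the command's scatterlist at the write_data_done offset.  Only
 * page-aligned offsets are handled (see the FIXMEs below); a non-aligned
 * offset, or solicited data arriving here, is treated as a protocol
 * error.
 */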
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                pr_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
                 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                pr_err("Received unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
                 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);

        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                     unsigned char *buf)
{
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
        int rc;

        rc = iscsit_setup_nop_out(conn, cmd, hdr);
        if (rc < 0)
                return rc;
        /*
         * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
         */

        return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
                      struct iscsi_text *hdr)
{
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
        unsigned char *text_in;

        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;

        text_in = kzalloc(payload_length, GFP_KERNEL);
        if (!text_in) {
                pr_err("Unable to allocate text_in of payload_length: %u\n",
                       payload_length);
                return -ENOMEM;
        }
        cmd->text_in_ptr = text_in;

        memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

        return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_session *sess = conn->sess;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        if (sess->sess_ops->SessionType &&
           (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
                pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
                       " ignoring\n", opcode);
                return 0;
        }

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
                                           rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                if (ret > 0)
                        wait_for_completion_timeout(&conn->conn_logout_comp,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
        case ISCSI_OP_TEXT:
                cmd = isert_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
                                            rx_desc, (struct iscsi_text *)hdr);
                break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}

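/*
 * Parse the iSER header that precedes each iSCSI PDU: the ISER_RSV and
 * ISER_WSV flags advertise the initiator's remote read/write stag and
 * VA, which are stashed in the isert_cmd to drive the later RDMA_WRITE/
 * RDMA_READ of data-in and data-out payloads.
 */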
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
        struct iser_hdr *iser_hdr = &rx_desc->iser_header;
        uint64_t read_va = 0, write_va = 0;
        uint32_t read_stag = 0, write_stag = 0;
        int rc;

        switch (iser_hdr->flags & 0xF0) {
        case ISCSI_CTRL:
                if (iser_hdr->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_hdr->read_stag);
                        read_va = be64_to_cpu(iser_hdr->read_va);
                        pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
                                 read_stag, (unsigned long long)read_va);
                }
                if (iser_hdr->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_hdr->write_stag);
                        write_va = be64_to_cpu(iser_hdr->write_va);
                        pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
                                 write_stag, (unsigned long long)write_va);
                }

                pr_debug("ISER ISCSI_CTRL PDU\n");
                break;
        case ISER_HELLO:
                pr_err("iSER Hello message\n");
                break;
        default:
                pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
                break;
        }

        rc = isert_rx_opcode(isert_conn, rx_desc,
                             read_stag, read_va, write_stag, write_va);
}

static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                    unsigned long xfer_len)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_hdr *hdr;
        u64 rx_dma;
        int rx_buflen, outstanding;

        if ((char *)desc == isert_conn->login_req_buf) {
                rx_dma = isert_conn->login_req_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
                pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        } else {
                rx_dma = desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
                pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        }

        ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));

        if ((char *)desc == isert_conn->login_req_buf)
                isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
                                   isert_conn);
        else
                isert_rx_do_work(desc, isert_conn);

        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);

        isert_conn->post_recv_buf_count--;
        pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
                 isert_conn->post_recv_buf_count);

        if ((char *)desc == isert_conn->login_req_buf)
                return;

        outstanding = isert_conn->post_recv_buf_count;
        if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
                int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
                                ISERT_MIN_POSTED_RX);
                err = isert_post_recv(isert_conn, count);
                if (err) {
                        pr_err("isert_post_recv() count: %d failed, %d\n",
                               count, err);
                }
        }
}

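/*
 * Command unmap paths, one per registration strategy (these back
 * device->unreg_rdma_mem): the dma_map variant unmaps the scatterlist
 * and frees the per-command send WR and SGE arrays, while the FRWR
 * variant instead returns the fast-registration descriptor to
 * conn_frwr_pool under conn_lock.
 */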
1374 static void
1375 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1376 {
1377         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1378         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1379
1380         pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1381         if (wr->sge) {
1382                 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1383                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1384                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1385                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1386                 wr->sge = NULL;
1387         }
1388
1389         if (wr->send_wr) {
1390                 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1391                 kfree(wr->send_wr);
1392                 wr->send_wr = NULL;
1393         }
1394
1395         if (wr->ib_sge) {
1396                 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1397                 kfree(wr->ib_sge);
1398                 wr->ib_sge = NULL;
1399         }
1400 }
1401
1402 static void
1403 isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1404 {
1405         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1406         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1407         LIST_HEAD(unmap_list);
1408
1409         pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
1410
1411         if (wr->fr_desc) {
1412                 pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
1413                          isert_cmd, wr->fr_desc);
1414                 spin_lock_bh(&isert_conn->conn_lock);
1415                 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
1416                 spin_unlock_bh(&isert_conn->conn_lock);
1417                 wr->fr_desc = NULL;
1418         }
1419
1420         if (wr->sge) {
1421                 pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
1422                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1423                                 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1424                                 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1425                 wr->sge = NULL;
1426         }
1427
1428         wr->ib_sge = NULL;
1429         wr->send_wr = NULL;
1430 }
1431
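/*
 * Final put of an isert_cmd: unlink it from the connection's command
 * list and release it according to its iSCSI opcode, unregistering
 * RDMA resources for SCSI commands and falling back to
 * iscsit_release_cmd() when no se_cmd fabric ops are attached.
 */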
1432 static void
1433 isert_put_cmd(struct isert_cmd *isert_cmd)
1434 {
1435         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1436         struct isert_conn *isert_conn = isert_cmd->conn;
1437         struct iscsi_conn *conn = isert_conn->conn;
1438         struct isert_device *device = isert_conn->conn_device;
1439
1440         pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1441
1442         switch (cmd->iscsi_opcode) {
1443         case ISCSI_OP_SCSI_CMD:
1444                 spin_lock_bh(&conn->cmd_lock);
1445                 if (!list_empty(&cmd->i_conn_node))
1446                         list_del(&cmd->i_conn_node);
1447                 spin_unlock_bh(&conn->cmd_lock);
1448
1449                 if (cmd->data_direction == DMA_TO_DEVICE)
1450                         iscsit_stop_dataout_timer(cmd);
1451
1452                 device->unreg_rdma_mem(isert_cmd, isert_conn);
1453                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1454                 break;
1455         case ISCSI_OP_SCSI_TMFUNC:
1456                 spin_lock_bh(&conn->cmd_lock);
1457                 if (!list_empty(&cmd->i_conn_node))
1458                         list_del(&cmd->i_conn_node);
1459                 spin_unlock_bh(&conn->cmd_lock);
1460
1461                 transport_generic_free_cmd(&cmd->se_cmd, 0);
1462                 break;
1463         case ISCSI_OP_REJECT:
1464         case ISCSI_OP_NOOP_OUT:
1465         case ISCSI_OP_TEXT:
1466                 spin_lock_bh(&conn->cmd_lock);
1467                 if (!list_empty(&cmd->i_conn_node))
1468                         list_del(&cmd->i_conn_node);
1469                 spin_unlock_bh(&conn->cmd_lock);
1470
1471                 /*
1472                  * Handle special case for REJECT when iscsi_add_reject*() has
1473                  * overwritten the original iscsi_opcode assignment, and the
1474                  * associated cmd->se_cmd needs to be released.
1475                  */
1476                 if (cmd->se_cmd.se_tfo != NULL) {
1477                         pr_debug("Calling transport_generic_free_cmd from"
1478                                  " isert_put_cmd for 0x%02x\n",
1479                                  cmd->iscsi_opcode);
1480                         transport_generic_free_cmd(&cmd->se_cmd, 0);
1481                         break;
1482                 }
1483                 /*
1484                  * Fall-through
1485                  */
1486         default:
1487                 iscsit_release_cmd(cmd);
1488                 break;
1489         }
1490 }
1491
1492 static void
1493 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1494 {
1495         if (tx_desc->dma_addr != 0) {
1496                 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1497                 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1498                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
1499                 tx_desc->dma_addr = 0;
1500         }
1501 }
1502
1503 static void
1504 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1505                      struct ib_device *ib_dev)
1506 {
1507         if (isert_cmd->pdu_buf_dma != 0) {
1508                 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1509                 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1510                                     isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1511                 isert_cmd->pdu_buf_dma = 0;
1512         }
1513
1514         isert_unmap_tx_desc(tx_desc, ib_dev);
1515         isert_put_cmd(isert_cmd);
1516 }
1517
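/*
 * Completion of the RDMA_READ that pulled WRITE payload from the
 * initiator: stop the Data-Out timer, unregister the RDMA resources,
 * mark the final Data-Out as received and hand the se_cmd to the
 * backend via target_execute_cmd().
 */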
1518 static void
1519 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1520                            struct isert_cmd *isert_cmd)
1521 {
1522         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1523         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1524         struct se_cmd *se_cmd = &cmd->se_cmd;
1525         struct isert_conn *isert_conn = isert_cmd->conn;
1526         struct isert_device *device = isert_conn->conn_device;
1527
1528         iscsit_stop_dataout_timer(cmd);
1529         device->unreg_rdma_mem(isert_cmd, isert_conn);
1530         cmd->write_data_done = wr->cur_rdma_length;
1531
1532         pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1533         spin_lock_bh(&cmd->istate_lock);
1534         cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1535         cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1536         spin_unlock_bh(&cmd->istate_lock);
1537
1538         target_execute_cmd(se_cmd);
1539 }
1540
1541 static void
1542 isert_do_control_comp(struct work_struct *work)
1543 {
1544         struct isert_cmd *isert_cmd = container_of(work,
1545                         struct isert_cmd, comp_work);
1546         struct isert_conn *isert_conn = isert_cmd->conn;
1547         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1548         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1549
1550         switch (cmd->i_state) {
1551         case ISTATE_SEND_TASKMGTRSP:
1552                 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1553
1554                 atomic_dec(&isert_conn->post_send_buf_count);
1555                 iscsit_tmr_post_handler(cmd, cmd->conn);
1556
1557                 cmd->i_state = ISTATE_SENT_STATUS;
1558                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1559                 break;
1560         case ISTATE_SEND_REJECT:
1561                 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1562                 atomic_dec(&isert_conn->post_send_buf_count);
1563
1564                 cmd->i_state = ISTATE_SENT_STATUS;
1565                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1566                 break;
1567         case ISTATE_SEND_LOGOUTRSP:
1568                 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1569                 /*
1570                  * Call atomic_dec(&isert_conn->post_send_buf_count)
1571                  * from isert_free_conn()
1572                  */
1573                 isert_conn->logout_posted = true;
1574                 iscsit_logout_post_handler(cmd, cmd->conn);
1575                 break;
1576         case ISTATE_SEND_TEXTRSP:
1577                 atomic_dec(&isert_conn->post_send_buf_count);
1578                 cmd->i_state = ISTATE_SENT_STATUS;
1579                 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1580                 break;
1581         default:
1582                 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1583                 dump_stack();
1584                 break;
1585         }
1586 }
1587
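/*
 * TX completion for a response PDU.  Control responses (task mgmt,
 * logout, reject and text) are deferred to isert_comp_wq via
 * isert_do_control_comp(); all other states complete inline.
 */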
1588 static void
1589 isert_response_completion(struct iser_tx_desc *tx_desc,
1590                           struct isert_cmd *isert_cmd,
1591                           struct isert_conn *isert_conn,
1592                           struct ib_device *ib_dev)
1593 {
1594         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1595
1596         if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1597             cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1598             cmd->i_state == ISTATE_SEND_REJECT ||
1599             cmd->i_state == ISTATE_SEND_TEXTRSP) {
1600                 isert_unmap_tx_desc(tx_desc, ib_dev);
1601
1602                 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1603                 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1604                 return;
1605         }
1606         atomic_dec(&isert_conn->post_send_buf_count);
1607
1608         cmd->i_state = ISTATE_SENT_STATUS;
1609         isert_completion_put(tx_desc, isert_cmd, ib_dev);
1610 }
1611
1612 static void
1613 __isert_send_completion(struct iser_tx_desc *tx_desc,
1614                         struct isert_conn *isert_conn)
1615 {
1616         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1617         struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1618         struct isert_rdma_wr *wr;
1619
1620         if (!isert_cmd) {
1621                 atomic_dec(&isert_conn->post_send_buf_count);
1622                 isert_unmap_tx_desc(tx_desc, ib_dev);
1623                 return;
1624         }
1625         wr = &isert_cmd->rdma_wr;
1626
1627         switch (wr->iser_ib_op) {
1628         case ISER_IB_RECV:
1629                 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1630                 dump_stack();
1631                 break;
1632         case ISER_IB_SEND:
1633                 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1634                 isert_response_completion(tx_desc, isert_cmd,
1635                                           isert_conn, ib_dev);
1636                 break;
1637         case ISER_IB_RDMA_WRITE:
1638                 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1639                 dump_stack();
1640                 break;
1641         case ISER_IB_RDMA_READ:
1642                 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1643
1644                 atomic_dec(&isert_conn->post_send_buf_count);
1645                 isert_completion_rdma_read(tx_desc, isert_cmd);
1646                 break;
1647         default:
1648                 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1649                 dump_stack();
1650                 break;
1651         }
1652 }
1653
1654 static void
1655 isert_send_completion(struct iser_tx_desc *tx_desc,
1656                       struct isert_conn *isert_conn)
1657 {
1658         struct llist_node *llnode = tx_desc->comp_llnode_batch;
1659         struct iser_tx_desc *t;
1660         /*
1661          * Drain coalesced completion llist starting from comp_llnode_batch
1662          * set up in isert_init_send_wr(), then complete the trailing tx_desc.
1663          */
1664         while (llnode) {
1665                 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1666                 llnode = llist_next(llnode);
1667                 __isert_send_completion(t, isert_conn);
1668         }
1669         __isert_send_completion(tx_desc, isert_conn);
1670 }
1671
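/*
 * Error/flush completion handling: clean up any TX descriptor still
 * attached, and once both the RX and TX in-flight counts reach zero
 * move the connection to ISER_CONN_TERMINATING and wake the waiter
 * in isert_free_conn().
 */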
1672 static void
1673 isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1674 {
1675         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1676
1677         if (tx_desc) {
1678                 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1679
1680                 if (!isert_cmd)
1681                         isert_unmap_tx_desc(tx_desc, ib_dev);
1682                 else
1683                         isert_completion_put(tx_desc, isert_cmd, ib_dev);
1684         }
1685
1686         if (isert_conn->post_recv_buf_count == 0 &&
1687             atomic_read(&isert_conn->post_send_buf_count) == 0) {
1688                 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1689                 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1690
1691                 mutex_lock(&isert_conn->conn_mutex);
1692                 if (isert_conn->state != ISER_CONN_DOWN)
1693                         isert_conn->state = ISER_CONN_TERMINATING;
1694                 mutex_unlock(&isert_conn->conn_mutex);
1695
1696                 wake_up(&isert_conn->conn_wait_comp_err);
1697         }
1698 }
1699
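/*
 * Workqueue context for draining the TX completion queue: successful
 * completions go through isert_send_completion(), failures drop the
 * in-flight send count and take the error path.  The CQ is re-armed
 * once empty.
 */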
1700 static void
1701 isert_cq_tx_work(struct work_struct *work)
1702 {
1703         struct isert_cq_desc *cq_desc = container_of(work,
1704                                 struct isert_cq_desc, cq_tx_work);
1705         struct isert_device *device = cq_desc->device;
1706         int cq_index = cq_desc->cq_index;
1707         struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1708         struct isert_conn *isert_conn;
1709         struct iser_tx_desc *tx_desc;
1710         struct ib_wc wc;
1711
1712         while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1713                 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1714                 isert_conn = wc.qp->qp_context;
1715
1716                 if (wc.status == IB_WC_SUCCESS) {
1717                         isert_send_completion(tx_desc, isert_conn);
1718                 } else {
1719                         pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1720                         pr_debug("TX wc.status: 0x%08x\n", wc.status);
1721                         pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1722                         atomic_dec(&isert_conn->post_send_buf_count);
1723                         isert_cq_comp_err(tx_desc, isert_conn);
1724                 }
1725         }
1726
1727         ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1728 }
1729
1730 static void
1731 isert_cq_tx_callback(struct ib_cq *cq, void *context)
1732 {
1733         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1734
1735         INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1736         queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1737 }
1738
1739 static void
1740 isert_cq_rx_work(struct work_struct *work)
1741 {
1742         struct isert_cq_desc *cq_desc = container_of(work,
1743                         struct isert_cq_desc, cq_rx_work);
1744         struct isert_device *device = cq_desc->device;
1745         int cq_index = cq_desc->cq_index;
1746         struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1747         struct isert_conn *isert_conn;
1748         struct iser_rx_desc *rx_desc;
1749         struct ib_wc wc;
1750         unsigned long xfer_len;
1751
1752         while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1753                 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1754                 isert_conn = wc.qp->qp_context;
1755
1756                 if (wc.status == IB_WC_SUCCESS) {
1757                         xfer_len = (unsigned long)wc.byte_len;
1758                         isert_rx_completion(rx_desc, isert_conn, xfer_len);
1759                 } else {
1760                         pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1761                         if (wc.status != IB_WC_WR_FLUSH_ERR) {
1762                                 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1763                                 pr_debug("RX wc.vendor_err: 0x%08x\n",
1764                                          wc.vendor_err);
1765                         }
1766                         isert_conn->post_recv_buf_count--;
1767                         isert_cq_comp_err(NULL, isert_conn);
1768                 }
1769         }
1770
1771         ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1772 }
1773
1774 static void
1775 isert_cq_rx_callback(struct ib_cq *cq, void *context)
1776 {
1777         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1778
1779         INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1780         queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1781 }
1782
1783 static int
1784 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1785 {
1786         struct ib_send_wr *wr_failed;
1787         int ret;
1788
1789         atomic_inc(&isert_conn->post_send_buf_count);
1790
1791         ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1792                            &wr_failed);
1793         if (ret) {
1794                 pr_err("ib_post_send failed with %d\n", ret);
1795                 atomic_dec(&isert_conn->post_send_buf_count);
1796                 return ret;
1797         }
1798         return ret;
1799 }
1800
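/*
 * Queue a SCSI response PDU.  When sense data is present it is
 * prefixed with its big-endian length, padded to a 4-byte boundary
 * and attached as a second SGE before the IB_WR_SEND is posted.
 */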
1801 static int
1802 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1803 {
1804         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1805         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1806         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1807         struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1808                                 &isert_cmd->tx_desc.iscsi_header;
1809
1810         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1811         iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1812         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1813         /*
1814          * Attach SENSE DATA payload to iSCSI Response PDU
1815          */
1816         if (cmd->se_cmd.sense_buffer &&
1817             ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1818             (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1819                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1820                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1821                 u32 padding, pdu_len;
1822
1823                 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1824                                    cmd->sense_buffer);
1825                 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1826
1827                 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1828                 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1829                 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1830
1831                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1832                                 (void *)cmd->sense_buffer, pdu_len,
1833                                 DMA_TO_DEVICE);
1834
1835                 isert_cmd->pdu_buf_len = pdu_len;
1836                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1837                 tx_dsg->length  = pdu_len;
1838                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1839                 isert_cmd->tx_desc.num_sge = 2;
1840         }
1841
1842         isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
1843
1844         pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1845
1846         return isert_post_response(isert_conn, isert_cmd);
1847 }
1848
1849 static int
1850 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1851                 bool nopout_response)
1852 {
1853         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1854         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1855         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1856
1857         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1858         iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1859                                &isert_cmd->tx_desc.iscsi_header,
1860                                nopout_response);
1861         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1862         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1863
1864         pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1865
1866         return isert_post_response(isert_conn, isert_cmd);
1867 }
1868
1869 static int
1870 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1871 {
1872         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1873         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1874         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1875
1876         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1877         iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1878                                 &isert_cmd->tx_desc.iscsi_header);
1879         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1880         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1881
1882         pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1883
1884         return isert_post_response(isert_conn, isert_cmd);
1885 }
1886
1887 static int
1888 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1889 {
1890         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1891         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1892         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1893
1894         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1895         iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1896                                   &isert_cmd->tx_desc.iscsi_header);
1897         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1898         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1899
1900         pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1901
1902         return isert_post_response(isert_conn, isert_cmd);
1903 }
1904
1905 static int
1906 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1907 {
1908         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1909         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1910         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1911         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1912         struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1913         struct iscsi_reject *hdr =
1914                 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1915
1916         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1917         iscsit_build_reject(cmd, conn, hdr);
1918         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1919
1920         hton24(hdr->dlength, ISCSI_HDR_LEN);
1921         isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1922                         (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1923                         DMA_TO_DEVICE);
1924         isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1925         tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1926         tx_dsg->length  = ISCSI_HDR_LEN;
1927         tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1928         isert_cmd->tx_desc.num_sge = 2;
1929
1930         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1931
1932         pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1933
1934         return isert_post_response(isert_conn, isert_cmd);
1935 }
1936
1937 static int
1938 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1939 {
1940         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1941         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1942         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1943         struct iscsi_text_rsp *hdr =
1944                 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1945         u32 txt_rsp_len;
1946         int rc;
1947
1948         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1949         rc = iscsit_build_text_rsp(cmd, conn, hdr);
1950         if (rc < 0)
1951                 return rc;
1952
1953         txt_rsp_len = rc;
1954         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1955
1956         if (txt_rsp_len) {
1957                 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1958                 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1959                 void *txt_rsp_buf = cmd->buf_ptr;
1960
1961                 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1962                                 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1963
1964                 isert_cmd->pdu_buf_len = txt_rsp_len;
1965                 tx_dsg->addr    = isert_cmd->pdu_buf_dma;
1966                 tx_dsg->length  = txt_rsp_len;
1967                 tx_dsg->lkey    = isert_conn->conn_mr->lkey;
1968                 isert_cmd->tx_desc.num_sge = 2;
1969         }
1970         isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
1971
1972         pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1973
1974         return isert_post_response(isert_conn, isert_cmd);
1975 }
1976
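/*
 * Fill a single RDMA work request with up to max_sge entries from the
 * command's scatterlist, starting at the given byte offset.  Returns
 * the number of scatterlist entries consumed so the caller can
 * advance its ib_sge cursor.
 */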
1977 static int
1978 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1979                     struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1980                     u32 data_left, u32 offset)
1981 {
1982         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1983         struct scatterlist *sg_start, *tmp_sg;
1984         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1985         u32 sg_off, page_off;
1986         int i = 0, sg_nents;
1987
1988         sg_off = offset / PAGE_SIZE;
1989         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1990         sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
1991         page_off = offset % PAGE_SIZE;
1992
1993         send_wr->sg_list = ib_sge;
1994         send_wr->num_sge = sg_nents;
1995         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
1996         /*
1997          * Map each TCM scatterlist entry into an ib_sge dma_addr/length pair.
1998          */
1999         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2000                 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
2001                          (unsigned long long)tmp_sg->dma_address,
2002                          tmp_sg->length, page_off);
2003
2004                 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2005                 ib_sge->length = min_t(u32, data_left,
2006                                 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2007                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2008
2009                 pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2010                          ib_sge->addr, ib_sge->length, ib_sge->lkey);
2011                 page_off = 0;
2012                 data_left -= ib_sge->length;
2013                 ib_sge++;
2014                 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
2015         }
2016
2017         pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2018                  send_wr->sg_list, send_wr->num_sge);
2019
2020         return sg_nents;
2021 }
2022
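/*
 * Plain DMA registration path: map the command's scatterlist, build
 * one ib_sge per mapped entry, and split the transfer across as many
 * RDMA work requests as the max_sge limit requires.  For RDMA_WRITE
 * the last work request chains to the SCSI response send_wr.
 */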
2023 static int
2024 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2025                struct isert_rdma_wr *wr)
2026 {
2027         struct se_cmd *se_cmd = &cmd->se_cmd;
2028         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2029         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2030         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2031         struct ib_send_wr *send_wr;
2032         struct ib_sge *ib_sge;
2033         struct scatterlist *sg_start;
2034         u32 sg_off = 0, sg_nents;
2035         u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
2036         int ret = 0, count, i, ib_sge_cnt;
2037
2038         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2039                 data_left = se_cmd->data_length;
2040         } else {
2041                 sg_off = cmd->write_data_done / PAGE_SIZE;
2042                 data_left = se_cmd->data_length - cmd->write_data_done;
2043                 offset = cmd->write_data_done;
2044                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2045         }
2046
2047         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2048         sg_nents = se_cmd->t_data_nents - sg_off;
2049
2050         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2051                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2052                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2053         if (unlikely(!count)) {
2054                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2055                 return -EINVAL;
2056         }
2057         wr->sge = sg_start;
2058         wr->num_sge = sg_nents;
2059         wr->cur_rdma_length = data_left;
2060         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2061                  isert_cmd, count, sg_start, sg_nents, data_left);
2062
2063         ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2064         if (!ib_sge) {
2065                 pr_warn("Unable to allocate ib_sge\n");
2066                 ret = -ENOMEM;
2067                 goto unmap_sg;
2068         }
2069         wr->ib_sge = ib_sge;
2070
2071         wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
2072         wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2073                                 GFP_KERNEL);
2074         if (!wr->send_wr) {
2075                 pr_warn("Unable to allocate wr->send_wr\n");
2076                 ret = -ENOMEM;
2077                 goto unmap_sg;
2078         }
2079
2080         wr->isert_cmd = isert_cmd;
2081         rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2082
2083         for (i = 0; i < wr->send_wr_num; i++) {
2084                 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2085                 data_len = min(data_left, rdma_write_max);
2086
2087                 send_wr->send_flags = 0;
2088                 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2089                         send_wr->opcode = IB_WR_RDMA_WRITE;
2090                         send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2091                         send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2092                         if (i + 1 == wr->send_wr_num)
2093                                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2094                         else
2095                                 send_wr->next = &wr->send_wr[i + 1];
2096                 } else {
2097                         send_wr->opcode = IB_WR_RDMA_READ;
2098                         send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2099                         send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2100                         if (i + 1 == wr->send_wr_num)
2101                                 send_wr->send_flags = IB_SEND_SIGNALED;
2102                         else
2103                                 send_wr->next = &wr->send_wr[i + 1];
2104                 }
2105
2106                 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2107                                         send_wr, data_len, offset);
2108                 ib_sge += ib_sge_cnt;
2109
2110                 offset += data_len;
2111                 va_offset += data_len;
2112                 data_left -= data_len;
2113         }
2114
2115         return 0;
2116 unmap_sg:
2117         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2118                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2119                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
2120         return ret;
2121 }
2122
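/*
 * Translate a DMA-mapped scatterlist into the page array used by a
 * fast registration work request, emitting one PAGE_SIZE-aligned
 * address per page of each coalesced chunk.  Returns the number of
 * pages written to fr_pl.
 */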
2123 static int
2124 isert_map_fr_pagelist(struct ib_device *ib_dev,
2125                       struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2126 {
2127         u64 start_addr, end_addr, page, chunk_start = 0;
2128         struct scatterlist *tmp_sg;
2129         int i = 0, new_chunk, last_ent, n_pages;
2130
2131         n_pages = 0;
2132         new_chunk = 1;
2133         last_ent = sg_nents - 1;
2134         for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2135                 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2136                 if (new_chunk)
2137                         chunk_start = start_addr;
2138                 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2139
2140                 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2141                          i, (unsigned long long)tmp_sg->dma_address,
2142                          tmp_sg->length);
2143
2144                 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2145                         new_chunk = 0;
2146                         continue;
2147                 }
2148                 new_chunk = 1;
2149
2150                 page = chunk_start & PAGE_MASK;
2151                 do {
2152                         fr_pl[n_pages++] = page;
2153                         pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2154                                  n_pages - 1, page);
2155                         page += PAGE_SIZE;
2156                 } while (page < end_addr);
2157         }
2158
2159         return n_pages;
2160 }
2161
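/*
 * Register the command's pages through a fast registration MR.  If
 * the descriptor is not marked valid, an IB_WR_LOCAL_INV that bumps
 * the rkey is chained ahead of the IB_WR_FAST_REG_MR.  On success the
 * single ib_sge is pointed at the newly registered region.
 */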
2162 static int
2163 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
2164                   struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
2165                   struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
2166 {
2167         struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2168         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2169         struct scatterlist *sg_start;
2170         u32 sg_off, page_off;
2171         struct ib_send_wr fr_wr, inv_wr;
2172         struct ib_send_wr *bad_wr, *wr = NULL;
2173         u8 key;
2174         int ret, sg_nents, pagelist_len;
2175
2176         sg_off = offset / PAGE_SIZE;
2177         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2178         sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
2179                          ISCSI_ISER_SG_TABLESIZE);
2180         page_off = offset % PAGE_SIZE;
2181
2182         pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
2183                  isert_cmd, fr_desc, sg_nents, sg_off, offset);
2184
2185         pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
2186                                              &fr_desc->data_frpl->page_list[0]);
2187
2188         if (!fr_desc->valid) {
2189                 memset(&inv_wr, 0, sizeof(inv_wr));
2190                 inv_wr.opcode = IB_WR_LOCAL_INV;
2191                 inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
2192                 wr = &inv_wr;
2193                 /* Bump the key */
2194                 key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
2195                 ib_update_fast_reg_key(fr_desc->data_mr, ++key);
2196         }
2197
2198         /* Prepare FASTREG WR */
2199         memset(&fr_wr, 0, sizeof(fr_wr));
2200         fr_wr.opcode = IB_WR_FAST_REG_MR;
2201         fr_wr.wr.fast_reg.iova_start =
2202                 fr_desc->data_frpl->page_list[0] + page_off;
2203         fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
2204         fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2205         fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2206         fr_wr.wr.fast_reg.length = data_len;
2207         fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
2208         fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2209
2210         if (!wr)
2211                 wr = &fr_wr;
2212         else
2213                 wr->next = &fr_wr;
2214
2215         ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2216         if (ret) {
2217                 pr_err("fast registration failed, ret:%d\n", ret);
2218                 return ret;
2219         }
2220         fr_desc->valid = false;
2221
2222         ib_sge->lkey = fr_desc->data_mr->lkey;
2223         ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
2224         ib_sge->length = data_len;
2225
2226         pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
2227                  ib_sge->addr, ib_sge->length, ib_sge->lkey);
2228
2229         return ret;
2230 }
2231
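/*
 * FRWR registration path: map the scatterlist and describe it with
 * the single ib_sge/send_wr pair embedded in isert_rdma_wr.  A
 * one-entry mapping uses the conn_mr lkey directly; otherwise a
 * descriptor is pulled from conn_frwr_pool and programmed via
 * isert_fast_reg_mr().
 */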
2232 static int
2233 isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2234                     struct isert_rdma_wr *wr)
2235 {
2236         struct se_cmd *se_cmd = &cmd->se_cmd;
2237         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2238         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2239         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2240         struct ib_send_wr *send_wr;
2241         struct ib_sge *ib_sge;
2242         struct scatterlist *sg_start;
2243         struct fast_reg_descriptor *fr_desc;
2244         u32 sg_off = 0, sg_nents;
2245         u32 offset = 0, data_len, data_left, rdma_write_max;
2246         int ret = 0, count;
2247         unsigned long flags;
2248
2249         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2250                 data_left = se_cmd->data_length;
2251         } else {
2252                 sg_off = cmd->write_data_done / PAGE_SIZE;
2253                 data_left = se_cmd->data_length - cmd->write_data_done;
2254                 offset = cmd->write_data_done;
2255                 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2256         }
2257
2258         sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2259         sg_nents = se_cmd->t_data_nents - sg_off;
2260
2261         count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2262                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2263                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
2264         if (unlikely(!count)) {
2265                 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2266                 return -EINVAL;
2267         }
2268         wr->sge = sg_start;
2269         wr->num_sge = sg_nents;
2270         pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2271                  isert_cmd, count, sg_start, sg_nents, data_left);
2272
2273         memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
2274         ib_sge = &wr->s_ib_sge;
2275         wr->ib_sge = ib_sge;
2276
2277         wr->send_wr_num = 1;
2278         memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2279         wr->send_wr = &wr->s_send_wr;
2280
2281         wr->isert_cmd = isert_cmd;
2282         rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
2283
2284         send_wr = &isert_cmd->rdma_wr.s_send_wr;
2285         send_wr->sg_list = ib_sge;
2286         send_wr->num_sge = 1;
2287         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
2288         if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2289                 send_wr->opcode = IB_WR_RDMA_WRITE;
2290                 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2291                 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2292                 send_wr->send_flags = 0;
2293                 send_wr->next = &isert_cmd->tx_desc.send_wr;
2294         } else {
2295                 send_wr->opcode = IB_WR_RDMA_READ;
2296                 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2297                 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2298                 send_wr->send_flags = IB_SEND_SIGNALED;
2299         }
2300
2301         data_len = min(data_left, rdma_write_max);
2302         wr->cur_rdma_length = data_len;
2303
2304         /* if there is a single dma entry, dma mr is sufficient */
2305         if (count == 1) {
2306                 ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
2307                 ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
2308                 ib_sge->lkey = isert_conn->conn_mr->lkey;
2309                 wr->fr_desc = NULL;
2310         } else {
2311                 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2312                 fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
2313                                            struct fast_reg_descriptor, list);
2314                 list_del(&fr_desc->list);
2315                 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2316                 wr->fr_desc = fr_desc;
2317
2318                 ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
2319                                   ib_sge, offset, data_len);
2320                 if (ret) {
2321                         list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
2322                         goto unmap_sg;
2323                 }
2324         }
2325
2326         return 0;
2327
2328 unmap_sg:
2329         ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2330                         (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2331                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
2332         return ret;
2333 }
2334
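/*
 * Queue Data-In for an iSER Data READ: register RDMA_WRITE work
 * requests for the payload, chain the SCSI response PDU behind them
 * and post the whole chain with a single ib_post_send().
 */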
2335 static int
2336 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2337 {
2338         struct se_cmd *se_cmd = &cmd->se_cmd;
2339         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2340         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2341         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2342         struct isert_device *device = isert_conn->conn_device;
2343         struct ib_send_wr *wr_failed;
2344         int rc;
2345
2346         pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
2347                  isert_cmd, se_cmd->data_length);
2348         wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2349         rc = device->reg_rdma_mem(conn, cmd, wr);
2350         if (rc) {
2351                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2352                 return rc;
2353         }
2354
2355         /*
2356          * Build isert_cmd->tx_desc for the iSCSI response PDU and chain it after the RDMA_WRITE work requests.
2357          */
2358         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2359         iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2360                              &isert_cmd->tx_desc.iscsi_header);
2361         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2362         isert_init_send_wr(isert_conn, isert_cmd,
2363                            &isert_cmd->tx_desc.send_wr, true);
2364
2365         atomic_inc(&isert_conn->post_send_buf_count);
2366
2367         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2368         if (rc) {
2369                 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2370                 atomic_dec(&isert_conn->post_send_buf_count);
2371         }
2372         pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
2373                  isert_cmd);
2374
2375         return 1;
2376 }
2377
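/*
 * Request Data-Out for an iSER Data WRITE: register and post
 * RDMA_READ work requests that pull the payload from the initiator;
 * completion is handled in isert_completion_rdma_read().
 */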
2378 static int
2379 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2380 {
2381         struct se_cmd *se_cmd = &cmd->se_cmd;
2382         struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2383         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2384         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2385         struct isert_device *device = isert_conn->conn_device;
2386         struct ib_send_wr *wr_failed;
2387         int rc;
2388
2389         pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2390                  isert_cmd, se_cmd->data_length, cmd->write_data_done);
2391         wr->iser_ib_op = ISER_IB_RDMA_READ;
2392         rc = device->reg_rdma_mem(conn, cmd, wr);
2393         if (rc) {
2394                 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2395                 return rc;
2396         }
2397
2398         atomic_inc(&isert_conn->post_send_buf_count);
2399
2400         rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2401         if (rc) {
2402                 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2403                 atomic_dec(&isert_conn->post_send_buf_count);
2404         }
2405         pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2406                  isert_cmd);
2407
2408         return 0;
2409 }
2410
2411 static int
2412 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2413 {
2414         int ret;
2415
2416         switch (state) {
2417         case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2418                 ret = isert_put_nopin(cmd, conn, false);
2419                 break;
2420         default:
2421                 pr_err("Unknown immediate state: 0x%02x\n", state);
2422                 ret = -EINVAL;
2423                 break;
2424         }
2425
2426         return ret;
2427 }
2428
2429 static int
2430 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2431 {
2432         int ret;
2433
2434         switch (state) {
2435         case ISTATE_SEND_LOGOUTRSP:
2436                 ret = isert_put_logout_rsp(cmd, conn);
2437                 if (!ret) {
2438                         pr_debug("Returning iSER Logout -EAGAIN\n");
2439                         ret = -EAGAIN;
2440                 }
2441                 break;
2442         case ISTATE_SEND_NOPIN:
2443                 ret = isert_put_nopin(cmd, conn, true);
2444                 break;
2445         case ISTATE_SEND_TASKMGTRSP:
2446                 ret = isert_put_tm_rsp(cmd, conn);
2447                 break;
2448         case ISTATE_SEND_REJECT:
2449                 ret = isert_put_reject(cmd, conn);
2450                 break;
2451         case ISTATE_SEND_TEXTRSP:
2452                 ret = isert_put_text_rsp(cmd, conn);
2453                 break;
2454         case ISTATE_SEND_STATUS:
2455                 /*
2456                  * Special case for sending non-GOOD SCSI status from TX thread
2457                  * context during pre se_cmd execution failure.
2458                  */
2459                 ret = isert_put_response(conn, cmd);
2460                 break;
2461         default:
2462                 pr_err("Unknown response state: 0x%02x\n", state);
2463                 ret = -EINVAL;
2464                 break;
2465         }
2466
2467         return ret;
2468 }
2469
2470 static int
2471 isert_setup_np(struct iscsi_np *np,
2472                struct __kernel_sockaddr_storage *ksockaddr)
2473 {
2474         struct isert_np *isert_np;
2475         struct rdma_cm_id *isert_lid;
2476         struct sockaddr *sa;
2477         int ret;
2478
2479         isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2480         if (!isert_np) {
2481                 pr_err("Unable to allocate struct isert_np\n");
2482                 return -ENOMEM;
2483         }
2484         init_waitqueue_head(&isert_np->np_accept_wq);
2485         mutex_init(&isert_np->np_accept_mutex);
2486         INIT_LIST_HEAD(&isert_np->np_accept_list);
2487         init_completion(&isert_np->np_login_comp);
2488
2489         sa = (struct sockaddr *)ksockaddr;
2490         pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
2491         /*
2492          * Set up np->np_sockaddr from the sockaddr passed in from
2493          * the iscsi_target_configfs.c code.
2494          */
2495         memcpy(&np->np_sockaddr, ksockaddr,
2496                sizeof(struct __kernel_sockaddr_storage));
2497
2498         isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
2499                                 IB_QPT_RC);
2500         if (IS_ERR(isert_lid)) {
2501                 pr_err("rdma_create_id() for isert_cma_handler failed: %ld\n",
2502                        PTR_ERR(isert_lid));
2503                 ret = PTR_ERR(isert_lid);
2504                 goto out;
2505         }
2506
2507         ret = rdma_bind_addr(isert_lid, sa);
2508         if (ret) {
2509                 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
2510                 goto out_lid;
2511         }
2512
2513         ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
2514         if (ret) {
2515                 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
2516                 goto out_lid;
2517         }
2518
2519         isert_np->np_cm_id = isert_lid;
2520         np->np_context = isert_np;
2521         pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
2522
2523         return 0;
2524
2525 out_lid:
2526         rdma_destroy_id(isert_lid);
2527 out:
2528         kfree(isert_np);
2529         return ret;
2530 }
2531
2532 static int
2533 isert_check_accept_queue(struct isert_np *isert_np)
2534 {
2535         int empty;
2536
2537         mutex_lock(&isert_np->np_accept_mutex);
2538         empty = list_empty(&isert_np->np_accept_list);
2539         mutex_unlock(&isert_np->np_accept_mutex);
2540
2541         return empty;
2542 }
2543
2544 static int
2545 isert_rdma_accept(struct isert_conn *isert_conn)
2546 {
2547         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2548         struct rdma_conn_param cp;
2549         int ret;
2550
2551         memset(&cp, 0, sizeof(struct rdma_conn_param));
2552         cp.responder_resources = isert_conn->responder_resources;
2553         cp.initiator_depth = isert_conn->initiator_depth;
2554         cp.retry_count = 7;
2555         cp.rnr_retry_count = 7;
2556
2557         pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2558
2559         ret = rdma_accept(cm_id, &cp);
2560         if (ret) {
2561                 pr_err("rdma_accept() failed with: %d\n", ret);
2562                 return ret;
2563         }
2564
2565         pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2566
2567         return 0;
2568 }
2569
2570 static int
2571 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2572 {
2573         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2574         int ret;
2575
2576         pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2577         /*
2578          * For login requests after the first PDU, isert_rx_login_req() will
2579          * kick schedule_delayed_work(&conn->login_work) as the packet is
2580          * received, which turns this callback from iscsi_target_do_login_rx()
2581          * into a NOP.
2582          */
2583         if (!login->first_request)
2584                 return 0;
2585
2586         ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2587         if (ret)
2588                 return ret;
2589
2590         pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2591         return 0;
2592 }
2593
2594 static void
2595 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2596                     struct isert_conn *isert_conn)
2597 {
2598         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2599         struct rdma_route *cm_route = &cm_id->route;
2600         struct sockaddr_in *sock_in;
2601         struct sockaddr_in6 *sock_in6;
2602
2603         conn->login_family = np->np_sockaddr.ss_family;
2604
2605         if (np->np_sockaddr.ss_family == AF_INET6) {
2606                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2607                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2608                          &sock_in6->sin6_addr.in6_u);
2609                 conn->login_port = ntohs(sock_in6->sin6_port);
2610
2611                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2612                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2613                          &sock_in6->sin6_addr.in6_u);
2614                 conn->local_port = ntohs(sock_in6->sin6_port);
2615         } else {
2616                 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2617                 sprintf(conn->login_ip, "%pI4",
2618                         &sock_in->sin_addr.s_addr);
2619                 conn->login_port = ntohs(sock_in->sin_port);
2620
2621                 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2622                 sprintf(conn->local_ip, "%pI4",
2623                         &sock_in->sin_addr.s_addr);
2624                 conn->local_port = ntohs(sock_in->sin_port);
2625         }
2626 }
2627
2628 static int
2629 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2630 {
2631         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2632         struct isert_conn *isert_conn;
2633         int max_accept = 0, ret;
2634
2635 accept_wait:
2636         ret = wait_event_interruptible(isert_np->np_accept_wq,
2637                         !isert_check_accept_queue(isert_np) ||
2638                         np->np_thread_state == ISCSI_NP_THREAD_RESET);
2639         if (max_accept > 5)
2640                 return -ENODEV;
2641
2642         spin_lock_bh(&np->np_thread_lock);
2643         if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
2644                 spin_unlock_bh(&np->np_thread_lock);
2645                 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
2646                 return -ENODEV;
2647         }
2648         spin_unlock_bh(&np->np_thread_lock);
2649
2650         mutex_lock(&isert_np->np_accept_mutex);
2651         if (list_empty(&isert_np->np_accept_list)) {
2652                 mutex_unlock(&isert_np->np_accept_mutex);
2653                 max_accept++;
2654                 goto accept_wait;
2655         }
2656         isert_conn = list_first_entry(&isert_np->np_accept_list,
2657                         struct isert_conn, conn_accept_node);
2658         list_del_init(&isert_conn->conn_accept_node);
2659         mutex_unlock(&isert_np->np_accept_mutex);
2660
2661         conn->context = isert_conn;
2662         isert_conn->conn = conn;
2663         max_accept = 0;
2664
2665         ret = isert_rdma_post_recvl(isert_conn);
2666         if (ret)
2667                 return ret;
2668
2669         ret = isert_rdma_accept(isert_conn);
2670         if (ret)
2671                 return ret;
2672
2673         isert_set_conn_info(np, conn, isert_conn);
2674
2675         pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2676         return 0;
2677 }
2678
2679 static void
2680 isert_free_np(struct iscsi_np *np)
2681 {
2682         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2683
2684         rdma_destroy_id(isert_np->np_cm_id);
2685
2686         np->np_context = NULL;
2687         kfree(isert_np);
2688 }
2689
2690 static int isert_check_state(struct isert_conn *isert_conn, int state)
2691 {
2692         int ret;
2693
2694         mutex_lock(&isert_conn->conn_mutex);
2695         ret = (isert_conn->state == state);
2696         mutex_unlock(&isert_conn->conn_mutex);
2697
2698         return ret;
2699 }
2700
2701 static void isert_free_conn(struct iscsi_conn *conn)
2702 {
2703         struct isert_conn *isert_conn = conn->context;
2704
2705         pr_debug("isert_free_conn: Starting\n");
2706         /*
2707          * Decrement post_send_buf_count for special case when called
2708          * from isert_do_control_comp() -> iscsit_logout_post_handler()
2709          */
2710         mutex_lock(&isert_conn->conn_mutex);
2711         if (isert_conn->logout_posted)
2712                 atomic_dec(&isert_conn->post_send_buf_count);
2713
2714         if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
2715                 pr_debug("Calling rdma_disconnect from isert_free_conn\n");
2716                 rdma_disconnect(isert_conn->conn_cm_id);
2717         }
2718         /*
2719          * Only wait for conn_wait_comp_err if the isert_conn made it
2720          * into full feature phase.
2721          */
2722         if (isert_conn->state == ISER_CONN_UP) {
2723                 pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
2724                          isert_conn->state);
2725                 mutex_unlock(&isert_conn->conn_mutex);
2726
2727                 wait_event(isert_conn->conn_wait_comp_err,
2728                           (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
2729
2730                 wait_event(isert_conn->conn_wait,
2731                           (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2732
2733                 isert_put_conn(isert_conn);
2734                 return;
2735         }
2736         if (isert_conn->state == ISER_CONN_INIT) {
2737                 mutex_unlock(&isert_conn->conn_mutex);
2738                 isert_put_conn(isert_conn);
2739                 return;
2740         }
2741         pr_debug("isert_free_conn: wait_event conn_wait %d\n",
2742                  isert_conn->state);
2743         mutex_unlock(&isert_conn->conn_mutex);
2744
2745         wait_event(isert_conn->conn_wait,
2746                   (isert_check_state(isert_conn, ISER_CONN_DOWN)));
2747
2748         isert_put_conn(isert_conn);
2749 }
2750
2751 static struct iscsit_transport iser_target_transport = {
2752         .name                   = "IB/iSER",
2753         .transport_type         = ISCSI_INFINIBAND,
2754         .priv_size              = sizeof(struct isert_cmd),
2755         .owner                  = THIS_MODULE,
2756         .iscsit_setup_np        = isert_setup_np,
2757         .iscsit_accept_np       = isert_accept_np,
2758         .iscsit_free_np         = isert_free_np,
2759         .iscsit_free_conn       = isert_free_conn,
2760         .iscsit_get_login_rx    = isert_get_login_rx,
2761         .iscsit_put_login_tx    = isert_put_login_tx,
2762         .iscsit_immediate_queue = isert_immediate_queue,
2763         .iscsit_response_queue  = isert_response_queue,
2764         .iscsit_get_dataout     = isert_get_dataout,
2765         .iscsit_queue_data_in   = isert_put_datain,
2766         .iscsit_queue_status    = isert_put_response,
2767 };
2768
2769 static int __init isert_init(void)
2770 {
2771         int ret;
2772
2773         isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2774         if (!isert_rx_wq) {
2775                 pr_err("Unable to allocate isert_rx_wq\n");
2776                 return -ENOMEM;
2777         }
2778
2779         isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2780         if (!isert_comp_wq) {
2781                 pr_err("Unable to allocate isert_comp_wq\n");
2782                 ret = -ENOMEM;
2783                 goto destroy_rx_wq;
2784         }
2785
2786         iscsit_register_transport(&iser_target_transport);
2787         pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2788         return 0;
2789
2790 destroy_rx_wq:
2791         destroy_workqueue(isert_rx_wq);
2792         return ret;
2793 }
2794
2795 static void __exit isert_exit(void)
2796 {
2797         destroy_workqueue(isert_comp_wq);
2798         destroy_workqueue(isert_rx_wq);
2799         iscsit_unregister_transport(&iser_target_transport);
2800         pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
2801 }
2802
2803 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2804 MODULE_VERSION("0.1");
2805 MODULE_AUTHOR("nab@Linux-iSCSI.org");
2806 MODULE_LICENSE("GPL");
2807
2808 module_init(isert_init);
2809 module_exit(isert_exit);