1 /*******************************************************************************
2 * This file contains iSCSI extensions for RDMA (iSER) Verbs
4 * (c) Copyright 2013 Datera, Inc.
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
31 #include "isert_proto.h"
32 #include "ib_isert.h"
34 #define ISERT_MAX_CONN 8
35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
36 #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
38 static DEFINE_MUTEX(device_list_mutex);
39 static LIST_HEAD(device_list);
40 static struct workqueue_struct *isert_rx_wq;
41 static struct workqueue_struct *isert_comp_wq;
44 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
46 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
47 struct isert_rdma_wr *wr);
49 isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
51 isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
52 struct isert_rdma_wr *wr);
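/*
 * QP asynchronous event handler.  Forwards IB_EVENT_COMM_EST back to the
 * RDMA CM so connection establishment can proceed, and warns when the
 * last WQE has been reached on a QP that is being torn down.
 */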
55 isert_qp_event_callback(struct ib_event *e, void *context)
57 struct isert_conn *isert_conn = (struct isert_conn *)context;
59 pr_err("isert_qp_event_callback event: %d\n", e->event);
61 case IB_EVENT_COMM_EST:
62 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
64 case IB_EVENT_QP_LAST_WQE_REACHED:
65 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
73 isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
77 ret = ib_query_device(ib_dev, devattr);
79 pr_err("ib_query_device() failed: %d\n", ret);
82 pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
83 pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
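/*
 * Create the RC queue pair for a new connection.  The least loaded RX/TX
 * CQ pair is chosen under device_list_mutex, the ib_qp_init_attr is
 * filled in (max_send_sge reduced by two, see the FIXME below) and the
 * QP is created via rdma_create_qp() on the connection's PD.
 */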
89 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
91 struct isert_device *device = isert_conn->conn_device;
92 struct ib_qp_init_attr attr;
93 int ret, index, min_index = 0;
95 mutex_lock(&device_list_mutex);
96 for (index = 0; index < device->cqs_used; index++)
97 if (device->cq_active_qps[index] <
98 device->cq_active_qps[min_index])
100 device->cq_active_qps[min_index]++;
101 pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
102 mutex_unlock(&device_list_mutex);
104 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
105 attr.event_handler = isert_qp_event_callback;
106 attr.qp_context = isert_conn;
107 attr.send_cq = device->dev_tx_cq[min_index];
108 attr.recv_cq = device->dev_rx_cq[min_index];
109 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
110 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
111 /*
112 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
113 * work-around for RDMA_READ..
114 */
115 attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
116 isert_conn->max_sge = attr.cap.max_send_sge;
118 attr.cap.max_recv_sge = 1;
119 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
120 attr.qp_type = IB_QPT_RC;
122 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
124 pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
125 isert_conn->conn_pd->device);
127 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
129 pr_err("rdma_create_qp() failed: %d\n", ret);
132 isert_conn->conn_qp = cma_id->qp;
133 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
139 isert_cq_event_callback(struct ib_event *e, void *context)
141 pr_debug("isert_cq_event_callback event: %d\n", e->event);
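/*
 * Allocate and DMA-map ISERT_QP_MAX_RECV_DTOS receive descriptors and
 * set up the ib_sge for each.  On a mapping failure all descriptors
 * mapped so far are unmapped again and the array is freed.
 */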
145 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
147 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
148 struct iser_rx_desc *rx_desc;
149 struct ib_sge *rx_sg;
153 isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
154 sizeof(struct iser_rx_desc), GFP_KERNEL);
155 if (!isert_conn->conn_rx_descs)
158 rx_desc = isert_conn->conn_rx_descs;
160 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
161 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
162 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
163 if (ib_dma_mapping_error(ib_dev, dma_addr))
166 rx_desc->dma_addr = dma_addr;
168 rx_sg = &rx_desc->rx_sg;
169 rx_sg->addr = rx_desc->dma_addr;
170 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
171 rx_sg->lkey = isert_conn->conn_mr->lkey;
174 isert_conn->conn_rx_desc_head = 0;
178 rx_desc = isert_conn->conn_rx_descs;
179 for (j = 0; j < i; j++, rx_desc++) {
180 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
181 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
183 kfree(isert_conn->conn_rx_descs);
184 isert_conn->conn_rx_descs = NULL;
190 isert_free_rx_descriptors(struct isert_conn *isert_conn)
192 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
193 struct iser_rx_desc *rx_desc;
196 if (!isert_conn->conn_rx_descs)
199 rx_desc = isert_conn->conn_rx_descs;
200 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
201 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
202 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
205 kfree(isert_conn->conn_rx_descs);
206 isert_conn->conn_rx_descs = NULL;
209 static void isert_cq_tx_callback(struct ib_cq *, void *);
210 static void isert_cq_rx_callback(struct ib_cq *, void *);
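/*
 * Set up the per-IB-device resources shared by all connections: query
 * device attributes, pick fast registration (FRWR) or plain DMA mapping
 * for RDMA, allocate one RX/TX CQ pair per used completion vector, and
 * allocate the PD and the local DMA MR.  The error path releases
 * whatever was partially created.
 */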
213 isert_create_device_ib_res(struct isert_device *device)
215 struct ib_device *ib_dev = device->ib_device;
216 struct isert_cq_desc *cq_desc;
217 struct ib_device_attr *dev_attr;
220 dev_attr = &device->dev_attr;
221 ret = isert_query_device(ib_dev, dev_attr);
225 /* assign function handlers */
226 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
227 device->use_frwr = 1;
228 device->reg_rdma_mem = isert_reg_rdma_frwr;
229 device->unreg_rdma_mem = isert_unreg_rdma_frwr;
231 device->use_frwr = 0;
232 device->reg_rdma_mem = isert_map_rdma;
233 device->unreg_rdma_mem = isert_unmap_cmd;
236 device->cqs_used = min_t(int, num_online_cpus(),
237 device->ib_device->num_comp_vectors);
238 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
239 pr_debug("Using %d CQs, device %s supports %d vectors, FRWR support: %d\n",
240 device->cqs_used, device->ib_device->name,
241 device->ib_device->num_comp_vectors, device->use_frwr);
242 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
243 device->cqs_used, GFP_KERNEL);
244 if (!device->cq_desc) {
245 pr_err("Unable to allocate device->cq_desc\n");
248 cq_desc = device->cq_desc;
250 device->dev_pd = ib_alloc_pd(ib_dev);
251 if (IS_ERR(device->dev_pd)) {
252 ret = PTR_ERR(device->dev_pd);
253 pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
257 for (i = 0; i < device->cqs_used; i++) {
258 cq_desc[i].device = device;
259 cq_desc[i].cq_index = i;
261 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
262 isert_cq_rx_callback,
263 isert_cq_event_callback,
265 ISER_MAX_RX_CQ_LEN, i);
266 if (IS_ERR(device->dev_rx_cq[i]))
269 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
270 isert_cq_tx_callback,
271 isert_cq_event_callback,
273 ISER_MAX_TX_CQ_LEN, i);
274 if (IS_ERR(device->dev_tx_cq[i]))
277 if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
280 if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
284 device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
285 if (IS_ERR(device->dev_mr)) {
286 ret = PTR_ERR(device->dev_mr);
287 pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
294 for (j = 0; j < i; j++) {
295 cq_desc = &device->cq_desc[j];
297 if (device->dev_rx_cq[j]) {
298 cancel_work_sync(&cq_desc->cq_rx_work);
299 ib_destroy_cq(device->dev_rx_cq[j]);
301 if (device->dev_tx_cq[j]) {
302 cancel_work_sync(&cq_desc->cq_tx_work);
303 ib_destroy_cq(device->dev_tx_cq[j]);
306 ib_dealloc_pd(device->dev_pd);
309 kfree(device->cq_desc);
315 isert_free_device_ib_res(struct isert_device *device)
317 struct isert_cq_desc *cq_desc;
320 for (i = 0; i < device->cqs_used; i++) {
321 cq_desc = &device->cq_desc[i];
323 cancel_work_sync(&cq_desc->cq_rx_work);
324 cancel_work_sync(&cq_desc->cq_tx_work);
325 ib_destroy_cq(device->dev_rx_cq[i]);
326 ib_destroy_cq(device->dev_tx_cq[i]);
327 device->dev_rx_cq[i] = NULL;
328 device->dev_tx_cq[i] = NULL;
331 ib_dereg_mr(device->dev_mr);
332 ib_dealloc_pd(device->dev_pd);
333 kfree(device->cq_desc);
337 isert_device_try_release(struct isert_device *device)
339 mutex_lock(&device_list_mutex);
341 if (!device->refcount) {
342 isert_free_device_ib_res(device);
343 list_del(&device->dev_node);
346 mutex_unlock(&device_list_mutex);
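/*
 * Look up an existing isert_device for cma_id->device by node GUID under
 * device_list_mutex, or allocate a new one, create its IB resources and
 * add it to device_list.
 */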
349 static struct isert_device *
350 isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
352 struct isert_device *device;
355 mutex_lock(&device_list_mutex);
356 list_for_each_entry(device, &device_list, dev_node) {
357 if (device->ib_device->node_guid == cma_id->device->node_guid) {
359 mutex_unlock(&device_list_mutex);
364 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
366 mutex_unlock(&device_list_mutex);
367 return ERR_PTR(-ENOMEM);
370 INIT_LIST_HEAD(&device->dev_node);
372 device->ib_device = cma_id->device;
373 ret = isert_create_device_ib_res(device);
376 mutex_unlock(&device_list_mutex);
381 list_add_tail(&device->dev_node, &device_list);
382 mutex_unlock(&device_list_mutex);
388 isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
390 struct fast_reg_descriptor *fr_desc, *tmp;
393 if (list_empty(&isert_conn->conn_frwr_pool))
396 pr_debug("Freeing conn %p frwr pool\n", isert_conn);
398 list_for_each_entry_safe(fr_desc, tmp,
399 &isert_conn->conn_frwr_pool, list) {
400 list_del(&fr_desc->list);
401 ib_free_fast_reg_page_list(fr_desc->data_frpl);
402 ib_dereg_mr(fr_desc->data_mr);
407 if (i < isert_conn->conn_frwr_pool_size)
408 pr_warn("Pool still has %d regions registered\n",
409 isert_conn->conn_frwr_pool_size - i);
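/*
 * Pre-allocate a pool of fast registration descriptors (page list plus
 * FRMR), one per ISCSI_DEF_XMIT_CMDS_MAX command.  On failure the
 * partially built pool is torn down via isert_conn_free_frwr_pool().
 */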
413 isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
415 struct fast_reg_descriptor *fr_desc;
416 struct isert_device *device = isert_conn->conn_device;
419 INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
420 isert_conn->conn_frwr_pool_size = 0;
421 for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
422 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
424 pr_err("Failed to allocate fast_reg descriptor\n");
430 ib_alloc_fast_reg_page_list(device->ib_device,
431 ISCSI_ISER_SG_TABLESIZE);
432 if (IS_ERR(fr_desc->data_frpl)) {
433 pr_err("Failed to allocate fr_pg_list err=%ld\n",
434 PTR_ERR(fr_desc->data_frpl));
435 ret = PTR_ERR(fr_desc->data_frpl);
439 fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
440 ISCSI_ISER_SG_TABLESIZE);
441 if (IS_ERR(fr_desc->data_mr)) {
442 pr_err("Failed to allocate frmr err=%ld\n",
443 PTR_ERR(fr_desc->data_mr));
444 ret = PTR_ERR(fr_desc->data_mr);
445 ib_free_fast_reg_page_list(fr_desc->data_frpl);
448 pr_debug("Create fr_desc %p page_list %p\n",
449 fr_desc, fr_desc->data_frpl->page_list);
451 fr_desc->valid = true;
452 list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
453 isert_conn->conn_frwr_pool_size++;
456 pr_debug("Creating conn %p frwr pool size=%d\n",
457 isert_conn, isert_conn->conn_frwr_pool_size);
462 isert_conn_free_frwr_pool(isert_conn);
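/*
 * RDMA CM connect request handler: allocate the isert_conn, DMA-map the
 * login request/response buffers, bind the connection to a shared
 * isert_device (creating the FRWR pool when supported), create the QP
 * and queue the connection on the np accept list, waking any waiter on
 * np_accept_wq.
 */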
467 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
469 struct iscsi_np *np = cma_id->context;
470 struct isert_np *isert_np = np->np_context;
471 struct isert_conn *isert_conn;
472 struct isert_device *device;
473 struct ib_device *ib_dev = cma_id->device;
476 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
477 cma_id, cma_id->context);
479 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
481 pr_err("Unable to allocate isert_conn\n");
484 isert_conn->state = ISER_CONN_INIT;
485 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
486 init_completion(&isert_conn->conn_login_comp);
487 init_waitqueue_head(&isert_conn->conn_wait);
488 init_waitqueue_head(&isert_conn->conn_wait_comp_err);
489 kref_init(&isert_conn->conn_kref);
490 kref_get(&isert_conn->conn_kref);
491 mutex_init(&isert_conn->conn_mutex);
492 spin_lock_init(&isert_conn->conn_lock);
494 cma_id->context = isert_conn;
495 isert_conn->conn_cm_id = cma_id;
496 isert_conn->responder_resources = event->param.conn.responder_resources;
497 isert_conn->initiator_depth = event->param.conn.initiator_depth;
498 pr_debug("Using responder_resources: %u initiator_depth: %u\n",
499 isert_conn->responder_resources, isert_conn->initiator_depth);
501 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
502 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
503 if (!isert_conn->login_buf) {
504 pr_err("Unable to allocate isert_conn->login_buf\n");
509 isert_conn->login_req_buf = isert_conn->login_buf;
510 isert_conn->login_rsp_buf = isert_conn->login_buf +
511 ISCSI_DEF_MAX_RECV_SEG_LEN;
512 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
513 isert_conn->login_buf, isert_conn->login_req_buf,
514 isert_conn->login_rsp_buf);
516 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
517 (void *)isert_conn->login_req_buf,
518 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
520 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
522 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
524 isert_conn->login_req_dma = 0;
528 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
529 (void *)isert_conn->login_rsp_buf,
530 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
532 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
534 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
536 isert_conn->login_rsp_dma = 0;
537 goto out_req_dma_map;
540 device = isert_device_find_by_ib_dev(cma_id);
541 if (IS_ERR(device)) {
542 ret = PTR_ERR(device);
543 goto out_rsp_dma_map;
546 isert_conn->conn_device = device;
547 isert_conn->conn_pd = device->dev_pd;
548 isert_conn->conn_mr = device->dev_mr;
550 if (device->use_frwr) {
551 ret = isert_conn_create_frwr_pool(isert_conn);
553 pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
558 ret = isert_conn_setup_qp(isert_conn, cma_id);
562 mutex_lock(&isert_np->np_accept_mutex);
563 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
564 mutex_unlock(&isert_np->np_accept_mutex);
566 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
567 wake_up(&isert_np->np_accept_wq);
571 if (device->use_frwr)
572 isert_conn_free_frwr_pool(isert_conn);
574 isert_device_try_release(device);
576 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
577 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
579 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
580 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
582 kfree(isert_conn->login_buf);
589 isert_connect_release(struct isert_conn *isert_conn)
591 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
592 struct isert_device *device = isert_conn->conn_device;
595 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
597 if (device && device->use_frwr)
598 isert_conn_free_frwr_pool(isert_conn);
600 if (isert_conn->conn_qp) {
601 cq_index = ((struct isert_cq_desc *)
602 isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
603 pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
604 isert_conn->conn_device->cq_active_qps[cq_index]--;
606 rdma_destroy_qp(isert_conn->conn_cm_id);
609 isert_free_rx_descriptors(isert_conn);
610 rdma_destroy_id(isert_conn->conn_cm_id);
612 if (isert_conn->login_buf) {
613 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
614 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
615 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
616 ISCSI_DEF_MAX_RECV_SEG_LEN,
618 kfree(isert_conn->login_buf);
623 isert_device_try_release(device);
625 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
629 isert_connected_handler(struct rdma_cm_id *cma_id)
635 isert_release_conn_kref(struct kref *kref)
637 struct isert_conn *isert_conn = container_of(kref,
638 struct isert_conn, conn_kref);
640 pr_debug("Calling isert_connect_release for final kref %s/%d\n",
641 current->comm, current->pid);
643 isert_connect_release(isert_conn);
647 isert_put_conn(struct isert_conn *isert_conn)
649 kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
653 isert_disconnect_work(struct work_struct *work)
655 struct isert_conn *isert_conn = container_of(work,
656 struct isert_conn, conn_logout_work);
658 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
659 mutex_lock(&isert_conn->conn_mutex);
660 isert_conn->state = ISER_CONN_DOWN;
662 if (isert_conn->post_recv_buf_count == 0 &&
663 atomic_read(&isert_conn->post_send_buf_count) == 0) {
664 pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
665 mutex_unlock(&isert_conn->conn_mutex);
668 if (!isert_conn->conn_cm_id) {
669 mutex_unlock(&isert_conn->conn_mutex);
670 isert_put_conn(isert_conn);
673 if (!isert_conn->logout_posted) {
674 pr_debug("Calling rdma_disconnect for !logout_posted from"
675 " isert_disconnect_work\n");
676 rdma_disconnect(isert_conn->conn_cm_id);
677 mutex_unlock(&isert_conn->conn_mutex);
678 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
681 mutex_unlock(&isert_conn->conn_mutex);
684 wake_up(&isert_conn->conn_wait);
685 isert_put_conn(isert_conn);
689 isert_disconnected_handler(struct rdma_cm_id *cma_id)
691 struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
693 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
694 schedule_work(&isert_conn->conn_logout_work);
698 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
702 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
703 event->event, event->status, cma_id->context, cma_id);
705 switch (event->event) {
706 case RDMA_CM_EVENT_CONNECT_REQUEST:
707 pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
708 ret = isert_connect_request(cma_id, event);
710 case RDMA_CM_EVENT_ESTABLISHED:
711 pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
712 isert_connected_handler(cma_id);
714 case RDMA_CM_EVENT_DISCONNECTED:
715 pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
716 isert_disconnected_handler(cma_id);
718 case RDMA_CM_EVENT_DEVICE_REMOVAL:
719 case RDMA_CM_EVENT_ADDR_CHANGE:
721 case RDMA_CM_EVENT_CONNECT_ERROR:
723 pr_err("Unknown RDMA CMA event: %d\n", event->event);
728 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
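/*
 * Post 'count' receive work requests starting at conn_rx_desc_head,
 * chained into a single ib_post_recv() call.  The head index wraps
 * modulo ISERT_QP_MAX_RECV_DTOS and is only advanced on success.
 */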
737 isert_post_recv(struct isert_conn *isert_conn, u32 count)
739 struct ib_recv_wr *rx_wr, *rx_wr_failed;
741 unsigned int rx_head = isert_conn->conn_rx_desc_head;
742 struct iser_rx_desc *rx_desc;
744 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
745 rx_desc = &isert_conn->conn_rx_descs[rx_head];
746 rx_wr->wr_id = (unsigned long)rx_desc;
747 rx_wr->sg_list = &rx_desc->rx_sg;
749 rx_wr->next = rx_wr + 1;
750 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
754 rx_wr->next = NULL; /* mark end of work requests list */
756 isert_conn->post_recv_buf_count += count;
757 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
760 pr_err("ib_post_recv() failed with ret: %d\n", ret);
761 isert_conn->post_recv_buf_count -= count;
763 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
764 isert_conn->conn_rx_desc_head = rx_head;
770 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
772 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
773 struct ib_send_wr send_wr, *send_wr_failed;
776 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
777 ISER_HEADERS_LEN, DMA_TO_DEVICE);
780 send_wr.wr_id = (unsigned long)tx_desc;
781 send_wr.sg_list = tx_desc->tx_sg;
782 send_wr.num_sge = tx_desc->num_sge;
783 send_wr.opcode = IB_WR_SEND;
784 send_wr.send_flags = IB_SEND_SIGNALED;
786 atomic_inc(&isert_conn->post_send_buf_count);
788 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
790 pr_err("ib_post_send() failed, ret: %d\n", ret);
791 atomic_dec(&isert_conn->post_send_buf_count);
798 isert_create_send_desc(struct isert_conn *isert_conn,
799 struct isert_cmd *isert_cmd,
800 struct iser_tx_desc *tx_desc)
802 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
804 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
805 ISER_HEADERS_LEN, DMA_TO_DEVICE);
807 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
808 tx_desc->iser_header.flags = ISER_VER;
810 tx_desc->num_sge = 1;
811 tx_desc->isert_cmd = isert_cmd;
813 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
814 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
815 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
820 isert_init_tx_hdrs(struct isert_conn *isert_conn,
821 struct iser_tx_desc *tx_desc)
823 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
826 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
827 ISER_HEADERS_LEN, DMA_TO_DEVICE);
828 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
829 pr_err("ib_dma_mapping_error() failed\n");
833 tx_desc->dma_addr = dma_addr;
834 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
835 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
836 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
838 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
839 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
840 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
846 isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
848 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
849 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
850 send_wr->opcode = IB_WR_SEND;
851 send_wr->send_flags = IB_SEND_SIGNALED;
852 send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
853 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
857 isert_rdma_post_recvl(struct isert_conn *isert_conn)
859 struct ib_recv_wr rx_wr, *rx_wr_fail;
863 memset(&sge, 0, sizeof(struct ib_sge));
864 sge.addr = isert_conn->login_req_dma;
865 sge.length = ISER_RX_LOGIN_SIZE;
866 sge.lkey = isert_conn->conn_mr->lkey;
868 pr_debug("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
869 sge.addr, sge.length, sge.lkey);
871 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
872 rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
873 rx_wr.sg_list = &sge;
876 isert_conn->post_recv_buf_count++;
877 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
879 pr_err("ib_post_recv() failed: %d\n", ret);
880 isert_conn->post_recv_buf_count--;
883 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
888 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
891 struct isert_conn *isert_conn = conn->context;
892 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
893 struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
896 isert_create_send_desc(isert_conn, NULL, tx_desc);
898 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
899 sizeof(struct iscsi_hdr));
901 isert_init_tx_hdrs(isert_conn, tx_desc);
904 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
906 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
907 length, DMA_TO_DEVICE);
909 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
911 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
912 length, DMA_TO_DEVICE);
914 tx_dsg->addr = isert_conn->login_rsp_dma;
915 tx_dsg->length = length;
916 tx_dsg->lkey = isert_conn->conn_mr->lkey;
917 tx_desc->num_sge = 2;
919 if (!login->login_failed) {
920 if (login->login_complete) {
921 ret = isert_alloc_rx_descriptors(isert_conn);
925 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
929 isert_conn->state = ISER_CONN_UP;
933 ret = isert_rdma_post_recvl(isert_conn);
938 ret = isert_post_send(isert_conn, tx_desc);
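/*
 * Copy a received login PDU into the iscsi_login request buffer.  For
 * the first login request the negotiation state (leading connection,
 * stage, versions, ISID, CmdSN, ITT, ExpStatSN, CID, TSIH) is seeded
 * from the PDU header and conn_login_comp is completed; otherwise the
 * login work is scheduled to process the payload.
 */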
946 isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
947 struct isert_conn *isert_conn)
949 struct iscsi_conn *conn = isert_conn->conn;
950 struct iscsi_login *login = conn->conn_login;
954 pr_err("conn->conn_login is NULL\n");
959 if (login->first_request) {
960 struct iscsi_login_req *login_req =
961 (struct iscsi_login_req *)&rx_desc->iscsi_header;
962 /*
963 * Setup the initial iscsi_login values from the leading
964 * login request PDU.
965 */
966 login->leading_connection = (!login_req->tsih) ? 1 : 0;
967 login->current_stage =
968 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
970 login->version_min = login_req->min_version;
971 login->version_max = login_req->max_version;
972 memcpy(login->isid, login_req->isid, 6);
973 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
974 login->init_task_tag = login_req->itt;
975 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
976 login->cid = be16_to_cpu(login_req->cid);
977 login->tsih = be16_to_cpu(login_req->tsih);
980 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
982 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
983 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
984 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
985 memcpy(login->req_buf, &rx_desc->data[0], size);
987 if (login->first_request) {
988 complete(&isert_conn->conn_login_comp);
991 schedule_delayed_work(&conn->login_work, 0);
994 static struct iscsi_cmd
995 *isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
997 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
998 struct isert_cmd *isert_cmd;
999 struct iscsi_cmd *cmd;
1001 cmd = iscsit_allocate_cmd(conn, gfp);
1003 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1006 isert_cmd = iscsit_priv_cmd(cmd);
1007 isert_cmd->conn = isert_conn;
1008 isert_cmd->iscsi_cmd = cmd;
1014 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1015 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1016 struct iser_rx_desc *rx_desc, unsigned char *buf)
1018 struct iscsi_conn *conn = isert_conn->conn;
1019 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1020 struct scatterlist *sg;
1021 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1022 bool dump_payload = false;
1024 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1028 imm_data = cmd->immediate_data;
1029 imm_data_len = cmd->first_burst_len;
1030 unsol_data = cmd->unsolicited_data;
1032 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1035 } else if (rc > 0) {
1036 dump_payload = true;
1043 sg = &cmd->se_cmd.t_data_sg[0];
1044 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1046 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1047 sg, sg_nents, &rx_desc->data[0], imm_data_len);
1049 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1051 cmd->write_data_done += imm_data_len;
1053 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1054 spin_lock_bh(&cmd->istate_lock);
1055 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1056 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1057 spin_unlock_bh(&cmd->istate_lock);
1061 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1063 if (!rc && dump_payload == false && unsol_data)
1064 iscsit_set_unsoliticed_dataout(cmd);
1070 isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1071 struct iser_rx_desc *rx_desc, unsigned char *buf)
1073 struct scatterlist *sg_start;
1074 struct iscsi_conn *conn = isert_conn->conn;
1075 struct iscsi_cmd *cmd = NULL;
1076 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1077 u32 unsol_data_len = ntoh24(hdr->dlength);
1078 int rc, sg_nents, sg_off, page_off;
1080 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1085 /*
1086 * FIXME: Unexpected unsolicited_data out
1087 */
1088 if (!cmd->unsolicited_data) {
1089 pr_err("Received unexpected solicited data payload\n");
1094 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
1095 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
1097 sg_off = cmd->write_data_done / PAGE_SIZE;
1098 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1099 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1100 page_off = cmd->write_data_done % PAGE_SIZE;
1101 /*
1102 * FIXME: Non page-aligned unsolicited_data out
1103 */
1105 pr_err("Received unexpected non-page aligned data payload\n");
1109 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
1110 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
1112 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1115 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1123 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1124 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1127 struct iscsi_conn *conn = isert_conn->conn;
1128 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1131 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1134 /*
1135 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1136 */
1138 return iscsit_process_nop_out(conn, cmd, hdr);
1142 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1143 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1144 struct iscsi_text *hdr)
1146 struct iscsi_conn *conn = isert_conn->conn;
1147 u32 payload_length = ntoh24(hdr->dlength);
1149 unsigned char *text_in;
1151 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1155 text_in = kzalloc(payload_length, GFP_KERNEL);
1157 pr_err("Unable to allocate text_in of payload_length: %u\n",
1161 cmd->text_in_ptr = text_in;
1163 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1165 return iscsit_process_text_cmd(conn, cmd, hdr);
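/*
 * Dispatch a received iSCSI PDU by opcode: allocate an iscsi_cmd where
 * one is needed, record the iSER read/write STags and VAs in the
 * per-command isert_cmd, and hand the PDU to the matching handler.
 * Discovery sessions only accept TEXT and LOGOUT opcodes.
 */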
1169 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1170 uint32_t read_stag, uint64_t read_va,
1171 uint32_t write_stag, uint64_t write_va)
1173 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1174 struct iscsi_conn *conn = isert_conn->conn;
1175 struct iscsi_session *sess = conn->sess;
1176 struct iscsi_cmd *cmd;
1177 struct isert_cmd *isert_cmd;
1179 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1181 if (sess->sess_ops->SessionType &&
1182 (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
1183 pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1184 " ignoring\n", opcode);
1189 case ISCSI_OP_SCSI_CMD:
1190 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1194 isert_cmd = iscsit_priv_cmd(cmd);
1195 isert_cmd->read_stag = read_stag;
1196 isert_cmd->read_va = read_va;
1197 isert_cmd->write_stag = write_stag;
1198 isert_cmd->write_va = write_va;
1200 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1201 rx_desc, (unsigned char *)hdr);
1203 case ISCSI_OP_NOOP_OUT:
1204 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1208 isert_cmd = iscsit_priv_cmd(cmd);
1209 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1210 rx_desc, (unsigned char *)hdr);
1212 case ISCSI_OP_SCSI_DATA_OUT:
1213 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1214 (unsigned char *)hdr);
1216 case ISCSI_OP_SCSI_TMFUNC:
1217 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1221 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1222 (unsigned char *)hdr);
1224 case ISCSI_OP_LOGOUT:
1225 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1229 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1231 wait_for_completion_timeout(&conn->conn_logout_comp,
1232 SECONDS_FOR_LOGOUT_COMP *
1236 cmd = isert_allocate_cmd(conn, GFP_KERNEL);
1240 isert_cmd = iscsit_priv_cmd(cmd);
1241 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1242 rx_desc, (struct iscsi_text *)hdr);
1245 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1254 isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1256 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1257 uint64_t read_va = 0, write_va = 0;
1258 uint32_t read_stag = 0, write_stag = 0;
1261 switch (iser_hdr->flags & 0xF0) {
1263 if (iser_hdr->flags & ISER_RSV) {
1264 read_stag = be32_to_cpu(iser_hdr->read_stag);
1265 read_va = be64_to_cpu(iser_hdr->read_va);
1266 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1267 read_stag, (unsigned long long)read_va);
1269 if (iser_hdr->flags & ISER_WSV) {
1270 write_stag = be32_to_cpu(iser_hdr->write_stag);
1271 write_va = be64_to_cpu(iser_hdr->write_va);
1272 pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
1273 write_stag, (unsigned long long)write_va);
1276 pr_debug("ISER ISCSI_CTRL PDU\n");
1279 pr_err("iSER Hello message\n");
1282 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1286 rc = isert_rx_opcode(isert_conn, rx_desc,
1287 read_stag, read_va, write_stag, write_va);
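/*
 * Receive completion handler.  Distinguishes the dedicated login buffer
 * from the normal RX descriptor ring, syncs the buffer for the CPU,
 * passes the PDU to isert_rx_login_req() or isert_rx_do_work(), and
 * reposts a batch of RX buffers once there is room for at least
 * ISERT_MIN_POSTED_RX more.
 */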
1291 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1292 unsigned long xfer_len)
1294 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1295 struct iscsi_hdr *hdr;
1297 int rx_buflen, outstanding;
1299 if ((char *)desc == isert_conn->login_req_buf) {
1300 rx_dma = isert_conn->login_req_dma;
1301 rx_buflen = ISER_RX_LOGIN_SIZE;
1302 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1305 rx_dma = desc->dma_addr;
1306 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1307 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1311 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1313 hdr = &desc->iscsi_header;
1314 pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1315 hdr->opcode, hdr->itt, hdr->flags,
1316 (int)(xfer_len - ISER_HEADERS_LEN));
1318 if ((char *)desc == isert_conn->login_req_buf)
1319 isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
1322 isert_rx_do_work(desc, isert_conn);
1324 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1327 isert_conn->post_recv_buf_count--;
1328 pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1329 isert_conn->post_recv_buf_count);
1331 if ((char *)desc == isert_conn->login_req_buf)
1334 outstanding = isert_conn->post_recv_buf_count;
1335 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1336 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1337 ISERT_MIN_POSTED_RX);
1338 err = isert_post_recv(isert_conn, count);
1340 pr_err("isert_post_recv() count: %d failed, %d\n",
1347 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1349 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1350 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1352 pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
1354 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
1355 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1356 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1357 DMA_TO_DEVICE : DMA_FROM_DEVICE);
1362 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
1368 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
1375 isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1377 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1378 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1379 LIST_HEAD(unmap_list);
1381 pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
1384 pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
1385 isert_cmd, wr->fr_desc);
1386 spin_lock_bh(&isert_conn->conn_lock);
1387 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
1388 spin_unlock_bh(&isert_conn->conn_lock);
1393 pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
1394 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
1395 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
1396 DMA_TO_DEVICE : DMA_FROM_DEVICE);
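/*
 * Final release of a command: remove it from the connection's command
 * list, stop any DataOut timer, release RDMA registrations through the
 * device's unreg_rdma_mem callback where applicable, and free the
 * command via transport_generic_free_cmd() or iscsit_release_cmd().
 */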
1405 isert_put_cmd(struct isert_cmd *isert_cmd)
1407 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1408 struct isert_conn *isert_conn = isert_cmd->conn;
1409 struct iscsi_conn *conn = isert_conn->conn;
1410 struct isert_device *device = isert_conn->conn_device;
1412 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
1414 switch (cmd->iscsi_opcode) {
1415 case ISCSI_OP_SCSI_CMD:
1416 spin_lock_bh(&conn->cmd_lock);
1417 if (!list_empty(&cmd->i_conn_node))
1418 list_del(&cmd->i_conn_node);
1419 spin_unlock_bh(&conn->cmd_lock);
1421 if (cmd->data_direction == DMA_TO_DEVICE)
1422 iscsit_stop_dataout_timer(cmd);
1424 device->unreg_rdma_mem(isert_cmd, isert_conn);
1425 transport_generic_free_cmd(&cmd->se_cmd, 0);
1427 case ISCSI_OP_SCSI_TMFUNC:
1428 spin_lock_bh(&conn->cmd_lock);
1429 if (!list_empty(&cmd->i_conn_node))
1430 list_del(&cmd->i_conn_node);
1431 spin_unlock_bh(&conn->cmd_lock);
1433 transport_generic_free_cmd(&cmd->se_cmd, 0);
1435 case ISCSI_OP_REJECT:
1436 case ISCSI_OP_NOOP_OUT:
1438 spin_lock_bh(&conn->cmd_lock);
1439 if (!list_empty(&cmd->i_conn_node))
1440 list_del(&cmd->i_conn_node);
1441 spin_unlock_bh(&conn->cmd_lock);
1443 /*
1444 * Handle special case for REJECT when iscsi_add_reject*() has
1445 * overwritten the original iscsi_opcode assignment, and the
1446 * associated cmd->se_cmd needs to be released.
1447 */
1448 if (cmd->se_cmd.se_tfo != NULL) {
1449 pr_debug("Calling transport_generic_free_cmd from"
1450 " isert_put_cmd for 0x%02x\n",
1452 transport_generic_free_cmd(&cmd->se_cmd, 0);
1459 iscsit_release_cmd(cmd);
1465 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1467 if (tx_desc->dma_addr != 0) {
1468 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1469 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1470 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1471 tx_desc->dma_addr = 0;
1476 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1477 struct ib_device *ib_dev)
1479 if (isert_cmd->pdu_buf_dma != 0) {
1480 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
1481 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1482 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1483 isert_cmd->pdu_buf_dma = 0;
1486 isert_unmap_tx_desc(tx_desc, ib_dev);
1487 isert_put_cmd(isert_cmd);
1491 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1492 struct isert_cmd *isert_cmd)
1494 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1495 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1496 struct se_cmd *se_cmd = &cmd->se_cmd;
1497 struct isert_conn *isert_conn = isert_cmd->conn;
1498 struct isert_device *device = isert_conn->conn_device;
1500 iscsit_stop_dataout_timer(cmd);
1501 device->unreg_rdma_mem(isert_cmd, isert_conn);
1502 cmd->write_data_done = wr->cur_rdma_length;
1504 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1505 spin_lock_bh(&cmd->istate_lock);
1506 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1507 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1508 spin_unlock_bh(&cmd->istate_lock);
1510 target_execute_cmd(se_cmd);
1514 isert_do_control_comp(struct work_struct *work)
1516 struct isert_cmd *isert_cmd = container_of(work,
1517 struct isert_cmd, comp_work);
1518 struct isert_conn *isert_conn = isert_cmd->conn;
1519 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1520 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1522 switch (cmd->i_state) {
1523 case ISTATE_SEND_TASKMGTRSP:
1524 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1526 atomic_dec(&isert_conn->post_send_buf_count);
1527 iscsit_tmr_post_handler(cmd, cmd->conn);
1529 cmd->i_state = ISTATE_SENT_STATUS;
1530 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1532 case ISTATE_SEND_REJECT:
1533 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1534 atomic_dec(&isert_conn->post_send_buf_count);
1536 cmd->i_state = ISTATE_SENT_STATUS;
1537 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1539 case ISTATE_SEND_LOGOUTRSP:
1540 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1541 /*
1542 * Call atomic_dec(&isert_conn->post_send_buf_count)
1543 * from isert_free_conn()
1544 */
1545 isert_conn->logout_posted = true;
1546 iscsit_logout_post_handler(cmd, cmd->conn);
1548 case ISTATE_SEND_TEXTRSP:
1549 atomic_dec(&isert_conn->post_send_buf_count);
1550 cmd->i_state = ISTATE_SENT_STATUS;
1551 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
1554 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
1561 isert_response_completion(struct iser_tx_desc *tx_desc,
1562 struct isert_cmd *isert_cmd,
1563 struct isert_conn *isert_conn,
1564 struct ib_device *ib_dev)
1566 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1568 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1569 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1570 cmd->i_state == ISTATE_SEND_REJECT ||
1571 cmd->i_state == ISTATE_SEND_TEXTRSP) {
1572 isert_unmap_tx_desc(tx_desc, ib_dev);
1574 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1575 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1578 atomic_dec(&isert_conn->post_send_buf_count);
1580 cmd->i_state = ISTATE_SENT_STATUS;
1581 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1585 isert_send_completion(struct iser_tx_desc *tx_desc,
1586 struct isert_conn *isert_conn)
1588 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1589 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1590 struct isert_rdma_wr *wr;
1593 atomic_dec(&isert_conn->post_send_buf_count);
1594 isert_unmap_tx_desc(tx_desc, ib_dev);
1597 wr = &isert_cmd->rdma_wr;
1599 switch (wr->iser_ib_op) {
1601 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1605 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1606 isert_response_completion(tx_desc, isert_cmd,
1607 isert_conn, ib_dev);
1609 case ISER_IB_RDMA_WRITE:
1610 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1613 case ISER_IB_RDMA_READ:
1614 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1616 atomic_dec(&isert_conn->post_send_buf_count);
1617 isert_completion_rdma_read(tx_desc, isert_cmd);
1620 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
1627 isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1629 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1632 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1635 isert_unmap_tx_desc(tx_desc, ib_dev);
1637 isert_completion_put(tx_desc, isert_cmd, ib_dev);
1640 if (isert_conn->post_recv_buf_count == 0 &&
1641 atomic_read(&isert_conn->post_send_buf_count) == 0) {
1642 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1643 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1645 mutex_lock(&isert_conn->conn_mutex);
1646 if (isert_conn->state != ISER_CONN_DOWN)
1647 isert_conn->state = ISER_CONN_TERMINATING;
1648 mutex_unlock(&isert_conn->conn_mutex);
1650 wake_up(&isert_conn->conn_wait_comp_err);
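/*
 * TX completion work: drain the send CQ one WC at a time, handing
 * successful completions to isert_send_completion() and routing
 * failures through isert_cq_comp_err(), then re-arm the CQ.
 */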
1655 isert_cq_tx_work(struct work_struct *work)
1657 struct isert_cq_desc *cq_desc = container_of(work,
1658 struct isert_cq_desc, cq_tx_work);
1659 struct isert_device *device = cq_desc->device;
1660 int cq_index = cq_desc->cq_index;
1661 struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1662 struct isert_conn *isert_conn;
1663 struct iser_tx_desc *tx_desc;
1666 while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1667 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1668 isert_conn = wc.qp->qp_context;
1670 if (wc.status == IB_WC_SUCCESS) {
1671 isert_send_completion(tx_desc, isert_conn);
1673 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1674 pr_debug("TX wc.status: 0x%08x\n", wc.status);
1675 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
1676 atomic_dec(&isert_conn->post_send_buf_count);
1677 isert_cq_comp_err(tx_desc, isert_conn);
1681 ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1685 isert_cq_tx_callback(struct ib_cq *cq, void *context)
1687 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1689 INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1690 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1694 isert_cq_rx_work(struct work_struct *work)
1696 struct isert_cq_desc *cq_desc = container_of(work,
1697 struct isert_cq_desc, cq_rx_work);
1698 struct isert_device *device = cq_desc->device;
1699 int cq_index = cq_desc->cq_index;
1700 struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1701 struct isert_conn *isert_conn;
1702 struct iser_rx_desc *rx_desc;
1704 unsigned long xfer_len;
1706 while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1707 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1708 isert_conn = wc.qp->qp_context;
1710 if (wc.status == IB_WC_SUCCESS) {
1711 xfer_len = (unsigned long)wc.byte_len;
1712 isert_rx_completion(rx_desc, isert_conn, xfer_len);
1714 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1715 if (wc.status != IB_WC_WR_FLUSH_ERR) {
1716 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1717 pr_debug("RX wc.vendor_err: 0x%08x\n",
1720 isert_conn->post_recv_buf_count--;
1721 isert_cq_comp_err(NULL, isert_conn);
1725 ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1729 isert_cq_rx_callback(struct ib_cq *cq, void *context)
1731 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1733 INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1734 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1738 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1740 struct ib_send_wr *wr_failed;
1743 atomic_inc(&isert_conn->post_send_buf_count);
1745 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1748 pr_err("ib_post_send failed with %d\n", ret);
1749 atomic_dec(&isert_conn->post_send_buf_count);
1756 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1758 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1759 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1760 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1761 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1762 &isert_cmd->tx_desc.iscsi_header;
1764 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1765 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1766 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1767 /*
1768 * Attach SENSE DATA payload to iSCSI Response PDU
1769 */
1770 if (cmd->se_cmd.sense_buffer &&
1771 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1772 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1773 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1774 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1775 u32 padding, pdu_len;
1777 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1779 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1781 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1782 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1783 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1785 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1786 (void *)cmd->sense_buffer, pdu_len,
1789 isert_cmd->pdu_buf_len = pdu_len;
1790 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1791 tx_dsg->length = pdu_len;
1792 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1793 isert_cmd->tx_desc.num_sge = 2;
1796 isert_init_send_wr(isert_cmd, send_wr);
1798 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1800 return isert_post_response(isert_conn, isert_cmd);
1804 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1805 bool nopout_response)
1807 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1808 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1809 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1811 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1812 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1813 &isert_cmd->tx_desc.iscsi_header,
1815 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1816 isert_init_send_wr(isert_cmd, send_wr);
1818 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1820 return isert_post_response(isert_conn, isert_cmd);
1824 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1826 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1827 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1828 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1830 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1831 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1832 &isert_cmd->tx_desc.iscsi_header);
1833 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1834 isert_init_send_wr(isert_cmd, send_wr);
1836 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1838 return isert_post_response(isert_conn, isert_cmd);
1842 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1844 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1845 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1846 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1848 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1849 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1850 &isert_cmd->tx_desc.iscsi_header);
1851 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1852 isert_init_send_wr(isert_cmd, send_wr);
1854 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1856 return isert_post_response(isert_conn, isert_cmd);
1860 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1862 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1863 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1864 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1865 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1866 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1867 struct iscsi_reject *hdr =
1868 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1870 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1871 iscsit_build_reject(cmd, conn, hdr);
1872 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1874 hton24(hdr->dlength, ISCSI_HDR_LEN);
1875 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1876 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1878 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1879 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1880 tx_dsg->length = ISCSI_HDR_LEN;
1881 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1882 isert_cmd->tx_desc.num_sge = 2;
1884 isert_init_send_wr(isert_cmd, send_wr);
1886 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1888 return isert_post_response(isert_conn, isert_cmd);
1892 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1894 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1895 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1896 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1897 struct iscsi_text_rsp *hdr =
1898 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1902 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1903 rc = iscsit_build_text_rsp(cmd, conn, hdr);
1908 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1911 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1912 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1913 void *txt_rsp_buf = cmd->buf_ptr;
1915 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1916 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
1918 isert_cmd->pdu_buf_len = txt_rsp_len;
1919 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1920 tx_dsg->length = txt_rsp_len;
1921 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1922 isert_cmd->tx_desc.num_sge = 2;
1924 isert_init_send_wr(isert_cmd, send_wr);
1926 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1928 return isert_post_response(isert_conn, isert_cmd);
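/*
 * Fill one RDMA work request's sg_list from the TCM scatterlist
 * starting at 'offset': each ib_sge is clamped to the remaining
 * data_left, and the number of SG entries consumed is returned so the
 * caller can advance the shared ib_sge array.
 */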
1932 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1933 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
1934 u32 data_left, u32 offset)
1936 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1937 struct scatterlist *sg_start, *tmp_sg;
1938 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1939 u32 sg_off, page_off;
1940 int i = 0, sg_nents;
1942 sg_off = offset / PAGE_SIZE;
1943 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1944 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
1945 page_off = offset % PAGE_SIZE;
1947 send_wr->sg_list = ib_sge;
1948 send_wr->num_sge = sg_nents;
1949 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
1950 /*
1951 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
1952 */
1953 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
1954 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
1955 (unsigned long long)tmp_sg->dma_address,
1956 tmp_sg->length, page_off);
1958 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
1959 ib_sge->length = min_t(u32, data_left,
1960 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
1961 ib_sge->lkey = isert_conn->conn_mr->lkey;
1963 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
1964 ib_sge->addr, ib_sge->length, ib_sge->lkey);
1966 data_left -= ib_sge->length;
1968 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
1971 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
1972 send_wr->sg_list, send_wr->num_sge);
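/*
 * Non-FRWR RDMA setup: DMA-map the command scatterlist, allocate one
 * ib_sge per SG entry plus enough RDMA READ/WRITE work requests (at
 * most max_sge entries each), and chain the final RDMA WRITE to the
 * response send WR.
 */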
1978 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1979 struct isert_rdma_wr *wr)
1981 struct se_cmd *se_cmd = &cmd->se_cmd;
1982 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1983 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1984 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1985 struct ib_send_wr *send_wr;
1986 struct ib_sge *ib_sge;
1987 struct scatterlist *sg_start;
1988 u32 sg_off = 0, sg_nents;
1989 u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
1990 int ret = 0, count, i, ib_sge_cnt;
1992 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
1993 data_left = se_cmd->data_length;
1994 iscsit_increment_maxcmdsn(cmd, conn->sess);
1995 cmd->stat_sn = conn->stat_sn++;
1997 sg_off = cmd->write_data_done / PAGE_SIZE;
1998 data_left = se_cmd->data_length - cmd->write_data_done;
1999 offset = cmd->write_data_done;
2000 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2003 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2004 sg_nents = se_cmd->t_data_nents - sg_off;
2006 count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
2007 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2008 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2009 if (unlikely(!count)) {
2010 pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
2014 wr->num_sge = sg_nents;
2015 wr->cur_rdma_length = data_left;
2016 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
2017 isert_cmd, count, sg_start, sg_nents, data_left);
2019 ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
2021 pr_warn("Unable to allocate ib_sge\n");
2025 wr->ib_sge = ib_sge;
2027 wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
2028 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2031 pr_err("Unable to allocate wr->send_wr\n");
2036 wr->isert_cmd = isert_cmd;
2037 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2039 for (i = 0; i < wr->send_wr_num; i++) {
2040 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2041 data_len = min(data_left, rdma_write_max);
2043 send_wr->send_flags = 0;
2044 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2045 send_wr->opcode = IB_WR_RDMA_WRITE;
2046 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2047 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2048 if (i + 1 == wr->send_wr_num)
2049 send_wr->next = &isert_cmd->tx_desc.send_wr;
2051 send_wr->next = &wr->send_wr[i + 1];
2053 send_wr->opcode = IB_WR_RDMA_READ;
2054 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2055 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2056 if (i + 1 == wr->send_wr_num)
2057 send_wr->send_flags = IB_SEND_SIGNALED;
2059 send_wr->next = &wr->send_wr[i + 1];
2062 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2063 send_wr, data_len, offset);
2064 ib_sge += ib_sge_cnt;
2067 va_offset += data_len;
2068 data_left -= data_len;
2073 ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
2074 (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
2075 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2080 isert_map_fr_pagelist(struct ib_device *ib_dev,
2081 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2083 u64 start_addr, end_addr, page, chunk_start = 0;
2084 struct scatterlist *tmp_sg;
2085 int i = 0, new_chunk, last_ent, n_pages;
2089 last_ent = sg_nents - 1;
2090 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2091 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2093 chunk_start = start_addr;
2094 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2096 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
2097 i, (unsigned long long)tmp_sg->dma_address,
2100 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2106 page = chunk_start & PAGE_MASK;
2108 fr_pl[n_pages++] = page;
2109 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
2112 } while (page < end_addr);
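/*
 * FRWR registration for one RDMA transfer: build the fast registration
 * page list, chain an IB_WR_LOCAL_INV in front and bump the rkey when
 * fr_desc->valid is false, post IB_WR_FAST_REG_MR, and fill the
 * caller's ib_sge with the newly registered lkey, address and length.
 */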
static int
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, page_off;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret, sg_nents, pagelist_len;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
			 ISCSI_ISER_SG_TABLESIZE);
	page_off = offset % PAGE_SIZE;

	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
		 isert_cmd, fr_desc, sg_nents, sg_off, offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
					     &fr_desc->data_frpl->page_list[0]);

	if (!fr_desc->valid) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start =
		fr_desc->data_frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = data_len;
	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->valid = false;

	ib_sge->lkey = fr_desc->data_mr->lkey;
	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
	ib_sge->length = data_len;

	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
		 ib_sge->addr, ib_sge->length, ib_sge->lkey);

	return ret;
}
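/*
 * FRWR flavour of the reg_rdma_mem callback: map the command scatterlist,
 * build a single RDMA_WRITE or RDMA_READ work request, and register the
 * payload through a fast_reg_descriptor taken from conn_frwr_pool.
 */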
static int
isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		    struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	struct fast_reg_descriptor *fr_desc;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int ret = 0, count;
	unsigned long flags;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
		iscsit_increment_maxcmdsn(cmd, conn->sess);
		cmd->stat_sn = conn->stat_sn++;
	} else {
		sg_off = cmd->write_data_done / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		offset = cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
	ib_sge = &wr->s_ib_sge;
	wr->ib_sge = ib_sge;

	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = 0;
		send_wr->next = &isert_cmd->tx_desc.send_wr;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	data_len = min(data_left, rdma_write_max);
	wr->cur_rdma_length = data_len;

	spin_lock_irqsave(&isert_conn->conn_lock, flags);
	fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
				   struct fast_reg_descriptor, list);
	list_del(&fr_desc->list);
	spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	wr->fr_desc = fr_desc;

	ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
				ib_sge, offset, data_len);
	if (ret) {
		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
		goto unmap_sg;
	}

	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}
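/*
 * queue_data_in callback: post the RDMA_WRITE work request(s) carrying the
 * SCSI Data-In payload, chained to the iSCSI response PDU built in tx_desc.
 */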
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
		 isert_cmd);

	return 1;
}
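/*
 * get_dataout callback: post RDMA_READ work request(s) to pull the
 * remaining WRITE payload from the initiator.
 */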
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for iSER Data WRITE\n",
		 isert_cmd);

	return 0;
}
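/*
 * Dispatch a command on the immediate queue to the matching iSER TX path.
 */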
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
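/*
 * Dispatch a command on the response queue (logout, NOPIN, TMR, reject,
 * text or SCSI status) to the matching iSER TX path.
 */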
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
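/*
 * iscsit_setup_np callback: allocate the per-portal isert_np, create the
 * RDMA CM listener ID, then bind and listen on the configured sockaddr.
 */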
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}
static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}
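/*
 * Accept the pending RDMA CM connection request using the negotiated
 * responder_resources and initiator_depth values.
 */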
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
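/*
 * iscsit_get_login_rx callback: for the first login PDU, wait for the
 * posted login receive to complete before iscsi_target processes it.
 */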
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}
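/*
 * iscsit_accept_np callback: wait for a queued isert_conn on the accept
 * list, post the login receive buffer and complete rdma_accept().
 */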
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
		   !isert_check_accept_queue(isert_np) ||
		   np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}
static int isert_check_state(struct isert_conn *isert_conn, int state)
{
	int ret;

	mutex_lock(&isert_conn->conn_mutex);
	ret = (isert_conn->state == state);
	mutex_unlock(&isert_conn->conn_mutex);

	return ret;
}
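/*
 * iscsit_free_conn callback: disconnect the RDMA CM ID if needed and wait
 * for the connection to reach ISER_CONN_DOWN before dropping the final
 * reference with isert_put_conn().
 */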
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_free_conn: Starting\n");
	/*
	 * Decrement post_send_buf_count for special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_UP) {
		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
			 isert_conn->state);
		mutex_unlock(&isert_conn->conn_mutex);

		wait_event(isert_conn->conn_wait_comp_err,
			   (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));

		wait_event(isert_conn->conn_wait,
			   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

		isert_put_conn(isert_conn);
		return;
	}
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
		 isert_conn->state);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_event(isert_conn->conn_wait,
		   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

	isert_put_conn(isert_conn);
}
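/*
 * iscsit_transport ops wiring the iSER verbs callbacks into the generic
 * iSCSI target core.
 */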
static struct iscsit_transport iser_target_transport = {
	.name = "IB/iSER",
	.transport_type = ISCSI_INFINIBAND,
	.priv_size = sizeof(struct isert_cmd),
	.owner = THIS_MODULE,
	.iscsit_setup_np = isert_setup_np,
	.iscsit_accept_np = isert_accept_np,
	.iscsit_free_np = isert_free_np,
	.iscsit_free_conn = isert_free_conn,
	.iscsit_get_login_rx = isert_get_login_rx,
	.iscsit_put_login_tx = isert_put_login_tx,
	.iscsit_immediate_queue = isert_immediate_queue,
	.iscsit_response_queue = isert_response_queue,
	.iscsit_get_dataout = isert_get_dataout,
	.iscsit_queue_data_in = isert_put_datain,
	.iscsit_queue_status = isert_put_response,
};
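/*
 * Module init: allocate the RX and completion workqueues and register the
 * iSER transport with the iSCSI target core.
 */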
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}
static void __exit isert_exit(void)
{
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);