/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  iser_task->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task,
				 unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
		iser_err("Total data length: %ld, less than EDTL: "
			 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_IN].data_len, edtl,
			 task->itt, iser_task->iser_conn);
		return -EINVAL;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
	hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, regd_buf->reg.rkey,
		 (unsigned long long)regd_buf->reg.va);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Total data size is stored in
 *  task->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_regd_buf *regd_buf;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Total data length: %ld, less than EDTL: %d, "
			 "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
			 iser_task->data[ISER_DIR_OUT].data_len,
			 edtl, task->itt, task->conn);
		return -EINVAL;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
		hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, regd_buf->reg.rkey,
			 (unsigned long long)regd_buf->reg.va, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr   = regd_buf->reg.va;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey   = regd_buf->reg.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *ib_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = ib_conn->device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct iser_device *device = ib_conn->device;

	ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!ib_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = ib_conn->rx_descs;

	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	ib_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = ib_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
	ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct iser_device *device = ib_conn->device;

	if (ib_conn->login_buf) {
		ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
			ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
		kfree(ib_conn->login_buf);
	}

	if (!ib_conn->rx_descs)
		return;

	rx_desc = ib_conn->rx_descs;
	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(ib_conn->rx_descs);
}

/**
 * iser_conn_set_full_featured_mode - (iSER API)
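 *
 * Called once the iSCSI login phase has completed: allocates the
 * connection's rx descriptor ring and posts the initial batch of
 * receive buffers.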
 */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;

	iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);

	/* Check that there are no posted recv or send buffers left - */
	/* they must be consumed during the login phase */
	BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
	BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);

	if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
		return -ENOMEM;

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
		return -ENOMEM;

	return 0;
}
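
/* Fail fast when the QP's send queue is already full; the caller backs
 * off and the transmit is retried once a send completion frees queue
 * space (see the resume-tx logic in iser_snd_completion). */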
static int
iser_check_xmit(struct iscsi_conn *conn, void *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;

	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
	    ISER_QP_MAX_REQ_DTOS) {
		iser_dbg("%ld can't xmit task %p\n", jiffies, task);
		return -ENOBUFS;
	}
	return 0;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf;
	struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;

	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
		return -EPERM;
	}
	if (iser_check_xmit(conn, task))
		return -ENOBUFS;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn->ib_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		data_buf = &iser_task->data[ISER_DIR_IN];
	else
		data_buf = &iser_task->data[ISER_DIR_OUT];

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->buf  = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}

	data_buf->data_len = scsi_bufflen(sc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task, edtl);
		if (err)
			goto send_command_error;
	}
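
	/* For a write, imm_sz bytes travel in the command PDU itself as
	 * immediate data, while unsol_sz covers immediate plus unsolicited
	 * Data-Out that needs no R2T from the target. */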
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_regd_buf *regd_buf;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err = 0;
	struct ib_sge *tx_dsg;

	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
		return -EPERM;
	}

	if (iser_check_xmit(conn, task))
		return -ENOBUFS;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset   = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_NOIO);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

	regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr   = regd_buf->reg.va + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey   = regd_buf->reg.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(iser_conn->ib_conn, tx_desc);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iscsi_iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
		return -EPERM;
	}

	if (iser_check_xmit(conn, task))
		return -ENOBUFS;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(iser_conn->ib_conn, mdesc);

	device = iser_conn->ib_conn->device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}
		memcpy(iser_conn->ib_conn->login_buf, task->data,
		       task->data_count);
		tx_dsg->addr   = iser_conn->ib_conn->login_dma;
		tx_dsg->length = data_seg_len;
		tx_dsg->lkey   = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		err = iser_post_recvl(iser_conn->ib_conn);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(iser_conn->ib_conn, mdesc);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct iser_conn *ib_conn)
{
	struct iscsi_iser_conn *conn = ib_conn->iser_conn;
	struct iscsi_task *task;
	struct iscsi_iser_task *iser_task;
	struct iscsi_hdr *hdr;
	unsigned char opcode;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between the login PDU and all others */
	if ((char *)rx_desc == ib_conn->login_buf) {
		rx_dma = ib_conn->login_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
			hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
		spin_lock(&conn->iscsi_conn->session->lock);
		task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
		if (task)
			__iscsi_get_task(task);
		spin_unlock(&conn->iscsi_conn->session->lock);

		if (!task)
			iser_err("itt can't be matched to task!!! "
				 "conn %p opcode %d itt %d\n",
				 conn->iscsi_conn, opcode, hdr->itt);
		else {
			iser_task = task->dd_data;
			iser_dbg("itt %d task %p\n", hdr->itt, task);
			iser_task->status = ISER_TASK_STATUS_COMPLETED;
			iser_task_rdma_finalize(iser_task);
			iscsi_put_task(task);
		}
	}
	iscsi_iser_recv(conn->iscsi_conn, hdr,
		rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
			rx_buflen, DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the  *
	 * task eliminates the need to worry about tasks completed in         *
	 * parallel to the execution of iser_conn_term. So the code that      *
	 * waits for the posted rx bufs refcount to reach zero handles        *
	 * everything                                                         */
	conn->ib_conn->post_recv_buf_count--;

	if (rx_dma == ib_conn->login_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
		count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
			    ISER_MIN_POSTED_RX);
		err = iser_post_recvm(ib_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			 struct iser_conn *ib_conn)
{
	struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
	struct iscsi_conn *conn = iser_conn->iscsi_conn;
	struct iscsi_task *task;
	int resume_tx = 0;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
	}

	if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
	    ISER_QP_MAX_REQ_DTOS)
		resume_tx = 1;

	atomic_dec(&ib_conn->post_send_buf_count);

	if (resume_tx) {
		iser_dbg("%ld resuming tx\n", jiffies);
		iscsi_conn_queue_work(conn);
	}

	if (tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
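		/* libiscsi allocates dd_data immediately after each
		 * struct iscsi_task, so stepping back by
		 * sizeof(struct iscsi_task) recovers the owning task. */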
		task = (void *)((long)(void *)tx_desc -
				sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}
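
/* Reset the per-task iSER state before a command is issued. */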
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
	       sizeof(struct iser_regd_buf));
	memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
	       sizeof(struct iser_regd_buf));
}
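
/* Undo what iser_prepare_{read,write}_cmd set up: copy back and free
 * any unaligned-sg bounce buffer, unregister FMR-registered memory,
 * and DMA-unmap the task data. */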
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int is_rdma_aligned = 1;
	struct iser_regd_buf *regd;

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
		is_rdma_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
	}
	if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
		is_rdma_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		regd = &iser_task->rdma_regd[ISER_DIR_IN];
		if (regd->reg.is_fmr)
			iser_unreg_mem(&regd->reg);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
		if (regd->reg.is_fmr)
			iser_unreg_mem(&regd->reg);
	}

	/* if the data was unaligned, it was already unmapped and then copied */
	if (is_rdma_aligned)
		iser_dma_unmap_task_data(iser_task);
}