1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic FCoE Offload Driver
4 * Copyright (c) 2016-2018 Cavium Inc.
6 #include <linux/spinlock.h>
7 #include <linux/vmalloc.h>
9 #include <scsi/scsi_tcq.h>
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12 unsigned int timer_msec)
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
15 msecs_to_jiffies(timer_msec));
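/*
 * Delayed-work handler invoked when an ABTS, ELS or sequence-cleanup
 * request armed via qedf_cmd_timer_set() does not complete in time;
 * each cmd_type below gets its own recovery path.
 */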
18 static void qedf_cmd_timeout(struct work_struct *work)
21 struct qedf_ioreq *io_req =
22 container_of(work, struct qedf_ioreq, timeout_work.work);
23 struct qedf_ctx *qedf;
24 struct qedf_rport *fcport;
26 fcport = io_req->fcport;
27 if (io_req->fcport == NULL) {
28 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
34 switch (io_req->cmd_type) {
37 QEDF_INFO(NULL, QEDF_LOG_IO,
38 "qedf is NULL for ABTS xid=0x%x.\n",
43 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
45 /* Cleanup timed out ABTS */
46 qedf_initiate_cleanup(io_req, true);
47 complete(&io_req->abts_done);
50 * Need to call kref_put for reference taken when initiate_abts
51 * was called since abts_compl won't be called now that we've
52 * cleaned up the task.
54 kref_put(&io_req->refcount, qedf_release_cmd);
56 /* Clear the in-abort bit now that we're done with the command */
57 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
60 * Now that the original I/O and the ABTS are complete see
61 * if we need to reconnect to the target.
63 qedf_restart_rport(fcport);
67 QEDF_INFO(NULL, QEDF_LOG_IO,
68 "qedf is NULL for ELS xid=0x%x.\n",
72 /* ELS request no longer outstanding since it timed out */
73 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
75 kref_get(&io_req->refcount);
77 * Don't attempt to clean up an ELS timeout as any subsequent
78 * ABTS or cleanup requests just hang. For now just free
79 * the resources of the original I/O and the RRQ
81 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
83 qedf_initiate_cleanup(io_req, true);
84 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
85 /* Call callback function to complete command */
86 if (io_req->cb_func && io_req->cb_arg) {
87 io_req->cb_func(io_req->cb_arg);
88 io_req->cb_arg = NULL;
90 kref_put(&io_req->refcount, qedf_release_cmd);
92 case QEDF_SEQ_CLEANUP:
93 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
94 "xid=0x%x.\n", io_req->xid);
95 qedf_initiate_cleanup(io_req, true);
96 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
97 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
100 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
101 "Hit default case, xid=0x%x.\n", io_req->xid);
106 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
108 struct io_bdt *bdt_info;
109 struct qedf_ctx *qedf = cmgr->qedf;
112 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
115 struct qedf_ioreq *io_req;
117 num_ios = max_xid - min_xid + 1;
119 /* Free fcoe_bdt_ctx structures */
120 if (!cmgr->io_bdt_pool) {
121 QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
125 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
126 for (i = 0; i < num_ios; i++) {
127 bdt_info = cmgr->io_bdt_pool[i];
128 if (bdt_info->bd_tbl) {
129 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
130 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
131 bdt_info->bd_tbl = NULL;
135 /* Destroy io_bdt pool */
136 for (i = 0; i < num_ios; i++) {
137 kfree(cmgr->io_bdt_pool[i]);
138 cmgr->io_bdt_pool[i] = NULL;
141 kfree(cmgr->io_bdt_pool);
142 cmgr->io_bdt_pool = NULL;
146 for (i = 0; i < num_ios; i++) {
147 io_req = &cmgr->cmds[i];
148 kfree(io_req->sgl_task_params);
149 kfree(io_req->task_params);
150 /* Make sure we free per command sense buffer */
151 if (io_req->sense_buffer)
152 dma_free_coherent(&qedf->pdev->dev,
153 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
154 io_req->sense_buffer_dma);
155 cancel_delayed_work_sync(&io_req->rrq_work);
158 /* Free command manager itself */
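/*
 * Delayed work scheduled from the ABTS completion path once R_A_TOV has
 * elapsed; it marks the command RRQ-active and sends the RRQ ELS so the
 * aborted exchange can eventually be reused.
 */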
162 static void qedf_handle_rrq(struct work_struct *work)
164 struct qedf_ioreq *io_req =
165 container_of(work, struct qedf_ioreq, rrq_work.work);
167 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
168 qedf_send_rrq(io_req);
172 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
174 struct qedf_cmd_mgr *cmgr;
175 struct io_bdt *bdt_info;
176 struct qedf_ioreq *io_req;
181 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
183 /* Make sure num_queues is already set before calling this function */
184 if (!qedf->num_queues) {
185 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
189 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
190 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
191 "max_xid 0x%x.\n", min_xid, max_xid);
195 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
196 "0x%x.\n", min_xid, max_xid);
198 num_ios = max_xid - min_xid + 1;
200 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
202 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
207 spin_lock_init(&cmgr->lock);
210 * Initialize I/O request fields.
214 for (i = 0; i < num_ios; i++) {
215 io_req = &cmgr->cmds[i];
216 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
220 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
222 /* Allocate DMA memory to hold sense buffer */
223 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
224 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
226 if (!io_req->sense_buffer) {
227 QEDF_ERR(&qedf->dbg_ctx,
228 "Failed to alloc sense buffer.\n");
232 /* Allocate task parameters to pass to f/w init functions */
233 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
235 if (!io_req->task_params) {
236 QEDF_ERR(&(qedf->dbg_ctx),
237 "Failed to allocate task_params for xid=0x%x\n",
243 * Allocate scatter/gather list info to pass to f/w init
246 io_req->sgl_task_params = kzalloc(
247 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
248 if (!io_req->sgl_task_params) {
249 QEDF_ERR(&(qedf->dbg_ctx),
250 "Failed to allocate sgl_task_params for xid=0x%x\n",
256 /* Allocate pool of io_bdts - one for each qedf_ioreq */
257 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
260 if (!cmgr->io_bdt_pool) {
261 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
265 for (i = 0; i < num_ios; i++) {
266 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
268 if (!cmgr->io_bdt_pool[i]) {
269 QEDF_WARN(&(qedf->dbg_ctx),
270 "Failed to alloc io_bdt_pool[%d].\n", i);
275 for (i = 0; i < num_ios; i++) {
276 bdt_info = cmgr->io_bdt_pool[i];
277 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
278 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
279 &bdt_info->bd_tbl_dma, GFP_KERNEL);
280 if (!bdt_info->bd_tbl) {
281 QEDF_WARN(&(qedf->dbg_ctx),
282 "Failed to alloc bdt_tbl[%d].\n", i);
286 atomic_set(&cmgr->free_list_cnt, num_ios);
287 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
288 "cmgr->free_list_cnt=%d.\n",
289 atomic_read(&cmgr->free_list_cnt));
294 qedf_cmd_mgr_free(cmgr);
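/*
 * Allocate a free io_req from the circular command array.  Returns NULL
 * if the fcport has no free SQEs, the per-connection or global task
 * limits are hit, or no previously freed slot can be found.
 */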
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
300 struct qedf_ctx *qedf = fcport->qedf;
301 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
302 struct qedf_ioreq *io_req = NULL;
303 struct io_bdt *bd_tbl;
309 free_sqes = atomic_read(&fcport->free_sqes);
312 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
313 "Returning NULL, free_sqes=%d.\n ",
318 /* Limit the number of outstanding R/W tasks */
319 if ((atomic_read(&fcport->num_active_ios) >=
320 NUM_RW_TASKS_PER_CONNECTION)) {
321 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
322 "Returning NULL, num_active_ios=%d.\n",
323 atomic_read(&fcport->num_active_ios));
327 /* Limit global TIDs for certain tasks */
328 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
329 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
330 "Returning NULL, free_list_cnt=%d.\n",
331 atomic_read(&cmd_mgr->free_list_cnt));
335 spin_lock_irqsave(&cmd_mgr->lock, flags);
336 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
337 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
339 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
342 /* Check to make sure command was previously freed */
347 if (i == FCOE_PARAMS_NUM_TASKS) {
348 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
352 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
353 QEDF_ERR(&qedf->dbg_ctx,
354 "io_req found to be dirty ox_id = 0x%x.\n",
357 /* Clear any flags now that we've reallocated the xid */
360 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
362 atomic_inc(&fcport->num_active_ios);
363 atomic_dec(&fcport->free_sqes);
365 atomic_dec(&cmd_mgr->free_list_cnt);
367 io_req->cmd_mgr = cmd_mgr;
368 io_req->fcport = fcport;
370 /* Clear any stale sc_cmd back pointer */
371 io_req->sc_cmd = NULL;
374 /* Hold the io_req against deletion */
375 kref_init(&io_req->refcount); /* ID: 001 */
376 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
378 /* Bind io_bdt for this io_req */
379 /* Have a static link between io_req and io_bdt_pool */
380 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
381 if (bd_tbl == NULL) {
382 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
383 kref_put(&io_req->refcount, qedf_release_cmd);
386 bd_tbl->io_req = io_req;
387 io_req->cmd_type = cmd_type;
388 io_req->tm_flags = 0;
390 /* Reset sequence offset data */
391 io_req->rx_buf_off = 0;
392 io_req->tx_buf_off = 0;
393 io_req->rx_id = 0xffff; /* No RX_ID assigned yet */
398 /* Record failure for stats and return NULL to caller */
399 qedf->alloc_failures++;
403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
405 struct qedf_mp_req *mp_req = &(io_req->mp_req);
406 struct qedf_ctx *qedf = io_req->fcport->qedf;
407 uint64_t sz = sizeof(struct scsi_sge);
410 if (mp_req->mp_req_bd) {
411 dma_free_coherent(&qedf->pdev->dev, sz,
412 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
413 mp_req->mp_req_bd = NULL;
415 if (mp_req->mp_resp_bd) {
416 dma_free_coherent(&qedf->pdev->dev, sz,
417 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
418 mp_req->mp_resp_bd = NULL;
420 if (mp_req->req_buf) {
421 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
422 mp_req->req_buf, mp_req->req_buf_dma);
423 mp_req->req_buf = NULL;
425 if (mp_req->resp_buf) {
426 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
427 mp_req->resp_buf, mp_req->resp_buf_dma);
428 mp_req->resp_buf = NULL;
432 void qedf_release_cmd(struct kref *ref)
434 struct qedf_ioreq *io_req =
435 container_of(ref, struct qedf_ioreq, refcount);
436 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
437 struct qedf_rport *fcport = io_req->fcport;
440 if (io_req->cmd_type == QEDF_SCSI_CMD) {
441 QEDF_WARN(&fcport->qedf->dbg_ctx,
442 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
443 io_req, io_req->xid);
444 WARN_ON(io_req->sc_cmd);
447 if (io_req->cmd_type == QEDF_ELS ||
448 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
449 qedf_free_mp_resc(io_req);
451 atomic_inc(&cmd_mgr->free_list_cnt);
452 atomic_dec(&fcport->num_active_ios);
453 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
454 if (atomic_read(&fcport->num_active_ios) < 0) {
455 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
459 /* Increment task retry identifier now that the request is released */
460 io_req->task_retry_identifier++;
461 io_req->fcport = NULL;
463 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
465 spin_lock_irqsave(&cmd_mgr->lock, flags);
466 io_req->fcport = NULL;
468 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
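/*
 * DMA-map the SCSI scatterlist and copy each element into the firmware
 * BD table.  Reads and I/Os with 8 or fewer elements use the fast SGE
 * path; non-read I/Os with more elements fall back to the slow path when
 * an intermediate element is smaller than a page.
 */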
471 static int qedf_map_sg(struct qedf_ioreq *io_req)
473 struct scsi_cmnd *sc = io_req->sc_cmd;
474 struct Scsi_Host *host = sc->device->host;
475 struct fc_lport *lport = shost_priv(host);
476 struct qedf_ctx *qedf = lport_priv(lport);
477 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
478 struct scatterlist *sg;
486 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
487 scsi_sg_count(sc), sc->sc_data_direction);
488 sg = scsi_sglist(sc);
490 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
492 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
493 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
495 scsi_for_each_sg(sc, sg, sg_count, i) {
496 sg_len = (u32)sg_dma_len(sg);
497 addr = (u64)sg_dma_address(sg);
500 * Intermediate s/g element, so check whether it is smaller than
501 * a page. Only required for writes and only if the number of
502 * scatter/gather elements is more than 8.
504 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
505 (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
506 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
508 bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
509 bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
510 bd[bd_count].sge_len = cpu_to_le32(sg_len);
513 byte_count += sg_len;
516 /* If neither FAST nor SLOW was set above, default to FAST */
517 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
518 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
520 if (byte_count != scsi_bufflen(sc))
521 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
522 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
523 scsi_bufflen(sc), io_req->xid);
528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
530 struct scsi_cmnd *sc = io_req->sc_cmd;
531 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
534 if (scsi_sg_count(sc)) {
535 bd_count = qedf_map_sg(io_req);
540 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
543 io_req->bd_tbl->bd_valid = bd_count;
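/*
 * Build the 32-byte FCP_CMND IU: LUN, task attribute and TM flags, data
 * direction bits, the CDB (omitted for task management commands) and the
 * FCP data length.
 */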
548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
549 struct fcp_cmnd *fcp_cmnd)
551 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
553 /* fcp_cmnd is 32 bytes */
554 memset(fcp_cmnd, 0, FCP_CMND_LEN);
556 /* 8 bytes: SCSI LUN info */
557 int_to_scsilun(sc_cmd->device->lun,
558 (struct scsi_lun *)&fcp_cmnd->fc_lun);
560 /* 4 bytes: flag info */
561 fcp_cmnd->fc_pri_ta = 0;
562 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
563 fcp_cmnd->fc_flags = io_req->io_req_flags;
564 fcp_cmnd->fc_cmdref = 0;
566 /* Populate data direction */
567 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
568 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
570 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
571 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
572 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
573 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
576 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
578 /* 16 bytes: CDB information */
579 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
580 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
582 /* 4 bytes: FCP data length */
583 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
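/*
 * Fill the firmware task context for a read/write or task management
 * command: task and SGL parameters, sense buffer address and a
 * big-endian copy of the FCP_CMND IU, then hand it to
 * init_initiator_rw_fcoe_task().
 */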
586 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
587 struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
588 struct fcoe_wqe *sqe)
590 enum fcoe_task_type task_type;
591 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
592 struct io_bdt *bd_tbl = io_req->bd_tbl;
596 struct qedf_ctx *qedf = fcport->qedf;
597 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
598 struct regpair sense_data_buffer_phys_addr;
603 /* Note init_initiator_rw_fcoe_task memsets the task context */
604 io_req->task = task_ctx;
605 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
606 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
607 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
609 /* Set task type based on the DMA direction of the command */
610 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
611 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
613 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
614 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
615 tx_io_size = io_req->data_xfer_len;
617 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
618 rx_io_size = io_req->data_xfer_len;
622 /* Setup the fields for fcoe_task_params */
623 io_req->task_params->context = task_ctx;
624 io_req->task_params->sqe = sqe;
625 io_req->task_params->task_type = task_type;
626 io_req->task_params->tx_io_size = tx_io_size;
627 io_req->task_params->rx_io_size = rx_io_size;
628 io_req->task_params->conn_cid = fcport->fw_cid;
629 io_req->task_params->itid = io_req->xid;
630 io_req->task_params->cq_rss_number = cq_idx;
631 io_req->task_params->is_tape_device = fcport->dev_type;
633 /* Fill in information for scatter/gather list */
634 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
635 bd_count = bd_tbl->bd_valid;
636 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
637 io_req->sgl_task_params->sgl_phys_addr.lo =
638 U64_LO(bd_tbl->bd_tbl_dma);
639 io_req->sgl_task_params->sgl_phys_addr.hi =
640 U64_HI(bd_tbl->bd_tbl_dma);
641 io_req->sgl_task_params->num_sges = bd_count;
642 io_req->sgl_task_params->total_buffer_size =
643 scsi_bufflen(io_req->sc_cmd);
644 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
645 io_req->sgl_task_params->small_mid_sge = 1;
647 io_req->sgl_task_params->small_mid_sge = 0;
650 /* Fill in physical address of sense buffer */
651 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
652 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
654 /* fill FCP_CMND IU */
655 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
657 /* Swap fcp_cmnd since FC is big endian */
658 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
659 for (i = 0; i < cnt; i++) {
660 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
662 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
664 init_initiator_rw_fcoe_task(io_req->task_params,
665 io_req->sgl_task_params,
666 sense_data_buffer_phys_addr,
667 io_req->task_retry_identifier, fcp_cmnd);
669 /* Increment SGL type counters */
670 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
671 qedf->slow_sge_ios++;
673 qedf->fast_sge_ios++;
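/*
 * Initialize a middle-path (ELS or task management) task context: one
 * single-entry SGL per direction, FC header fields taken from mp_req,
 * and completions steered to CQ 0.
 */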
676 void qedf_init_mp_task(struct qedf_ioreq *io_req,
677 struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
679 struct qedf_mp_req *mp_req = &(io_req->mp_req);
680 struct qedf_rport *fcport = io_req->fcport;
681 struct qedf_ctx *qedf = io_req->fcport->qedf;
682 struct fc_frame_header *fc_hdr;
683 struct fcoe_tx_mid_path_params task_fc_hdr;
684 struct scsi_sgl_task_params tx_sgl_task_params;
685 struct scsi_sgl_task_params rx_sgl_task_params;
687 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
688 "Initializing MP task for cmd_type=%d\n",
691 qedf->control_requests++;
693 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
694 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
695 memset(task_ctx, 0, sizeof(struct fcoe_task_context));
696 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
698 /* Setup the task from io_req for easy reference */
699 io_req->task = task_ctx;
701 /* Setup the fields for fcoe_task_params */
702 io_req->task_params->context = task_ctx;
703 io_req->task_params->sqe = sqe;
704 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
705 io_req->task_params->tx_io_size = io_req->data_xfer_len;
706 /* rx_io_size tells the f/w how large a response buffer we have */
707 io_req->task_params->rx_io_size = PAGE_SIZE;
708 io_req->task_params->conn_cid = fcport->fw_cid;
709 io_req->task_params->itid = io_req->xid;
710 /* Return middle path commands on CQ 0 */
711 io_req->task_params->cq_rss_number = 0;
712 io_req->task_params->is_tape_device = fcport->dev_type;
714 fc_hdr = &(mp_req->req_fc_hdr);
715 /* Set OX_ID and RX_ID based on driver task id */
716 fc_hdr->fh_ox_id = io_req->xid;
717 fc_hdr->fh_rx_id = htons(0xffff);
719 /* Set up FC header information */
720 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
721 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
722 task_fc_hdr.type = fc_hdr->fh_type;
723 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
724 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
725 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
726 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
728 /* Set up s/g list parameters for request buffer */
729 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
730 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
731 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
732 tx_sgl_task_params.num_sges = 1;
733 /* Request buffer is a single page-sized SG element; total size is the transfer length */
734 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
735 tx_sgl_task_params.small_mid_sge = 0;
737 /* Set up s/g list parameters for response buffer */
738 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
739 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
740 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
741 rx_sgl_task_params.num_sges = 1;
742 /* Response buffer is a single page-sized SG element */
743 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
744 rx_sgl_task_params.small_mid_sge = 0;
748 * Last arg is 0 since previous code did not request the FC header
749 * information.
751 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
754 &rx_sgl_task_params, 0);
757 /* Presumed that fcport->rport_lock is held */
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
760 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
763 rval = fcport->sq_prod_idx;
765 /* Adjust ring index */
766 fcport->sq_prod_idx++;
767 fcport->fw_sq_prod_idx++;
768 if (fcport->sq_prod_idx == total_sqe)
769 fcport->sq_prod_idx = 0;
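/*
 * Notify the firmware of newly posted SQEs by writing the doorbell with
 * the current producer index.  Callers take fcport->rport_lock around
 * qedf_get_sqe_idx(), the SQE setup and this doorbell write so the
 * producer index and hardware stay in sync.
 */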
774 void qedf_ring_doorbell(struct qedf_rport *fcport)
776 struct fcoe_db_data dbell = { 0 };
780 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
781 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
782 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
783 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
785 dbell.sq_prod = fcport->fw_sq_prod_idx;
786 /* wmb makes sure that the BD data is updated before updating the
787 * producer; otherwise the FW may read stale data from the BDs.
791 writel(*(u32 *)&dbell, fcport->p_doorbell);
793 * Fence required to flush the write-combined buffer, since another
794 * CPU may write to the same doorbell address and data may be lost
795 * due to the relaxed-ordering nature of the write-combined BAR.
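/*
 * Append one entry to the circular I/O trace buffer (guarded by the
 * qedf_io_tracing flag): opcode, LBA bytes, transfer length, result and
 * the CPUs that handled submission and completion.
 */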
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
803 struct qedf_ctx *qedf = fcport->qedf;
804 struct qedf_io_log *io_log;
805 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
809 spin_lock_irqsave(&qedf->io_trace_lock, flags);
811 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
812 io_log->direction = direction;
813 io_log->task_id = io_req->xid;
814 io_log->port_id = fcport->rdata->ids.port_id;
815 io_log->lun = sc_cmd->device->lun;
816 io_log->op = op = sc_cmd->cmnd[0];
817 io_log->lba[0] = sc_cmd->cmnd[2];
818 io_log->lba[1] = sc_cmd->cmnd[3];
819 io_log->lba[2] = sc_cmd->cmnd[4];
820 io_log->lba[3] = sc_cmd->cmnd[5];
821 io_log->bufflen = scsi_bufflen(sc_cmd);
822 io_log->sg_count = scsi_sg_count(sc_cmd);
823 io_log->result = sc_cmd->result;
824 io_log->jiffies = jiffies;
825 io_log->refcount = kref_read(&io_req->refcount);
827 if (direction == QEDF_IO_TRACE_REQ) {
828 /* For requests we only care about the submission CPU */
829 io_log->req_cpu = io_req->cpu;
832 } else if (direction == QEDF_IO_TRACE_RSP) {
833 io_log->req_cpu = io_req->cpu;
834 io_log->int_cpu = io_req->int_cpu;
835 io_log->rsp_cpu = smp_processor_id();
838 io_log->sge_type = io_req->sge_type;
840 qedf->io_trace_idx++;
841 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
842 qedf->io_trace_idx = 0;
844 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
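/*
 * Post a SCSI command to the firmware: build the BD list, take the next
 * SQE, initialize the task context and ring the doorbell.  Called from
 * qedf_queuecommand() with fcport->rport_lock held.
 */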
847 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
849 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
850 struct Scsi_Host *host = sc_cmd->device->host;
851 struct fc_lport *lport = shost_priv(host);
852 struct qedf_ctx *qedf = lport_priv(lport);
853 struct fcoe_task_context *task_ctx;
855 struct fcoe_wqe *sqe;
858 /* Initialize the rest of the io_req fields */
859 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
860 sc_cmd->SCp.ptr = (char *)io_req;
861 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
863 /* Record which cpu this request is associated with */
864 io_req->cpu = smp_processor_id();
866 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
867 io_req->io_req_flags = QEDF_READ;
868 qedf->input_requests++;
869 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
870 io_req->io_req_flags = QEDF_WRITE;
871 qedf->output_requests++;
873 io_req->io_req_flags = 0;
874 qedf->control_requests++;
879 /* Build buffer descriptor list for firmware from sg list */
880 if (qedf_build_bd_list_from_sg(io_req)) {
881 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
882 /* Release cmd will release io_req, but sc_cmd is assigned */
883 io_req->sc_cmd = NULL;
884 kref_put(&io_req->refcount, qedf_release_cmd);
888 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
889 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
890 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
891 /* Release cmd will release io_req, but sc_cmd is assigned */
892 io_req->sc_cmd = NULL;
893 kref_put(&io_req->refcount, qedf_release_cmd);
897 /* Record LUN number for later use if we need it */
898 io_req->lun = (int)sc_cmd->device->lun;
900 /* Obtain free SQE */
901 sqe_idx = qedf_get_sqe_idx(fcport);
902 sqe = &fcport->sq[sqe_idx];
903 memset(sqe, 0, sizeof(struct fcoe_wqe));
905 /* Get the task context */
906 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
908 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
910 /* Release cmd will release io_req, but sc_cmd is assigned */
911 io_req->sc_cmd = NULL;
912 kref_put(&io_req->refcount, qedf_release_cmd);
916 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
919 qedf_ring_doorbell(fcport);
921 /* Set that command is with the firmware now */
922 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
924 if (qedf_io_tracing && io_req->sc_cmd)
925 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
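/*
 * .queuecommand entry point: fail or flow-off the command for unload,
 * link, drain and session-state conditions, honour any active retry
 * delay, then allocate an io_req and post it under fcport->rport_lock.
 */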
931 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
933 struct fc_lport *lport = shost_priv(host);
934 struct qedf_ctx *qedf = lport_priv(lport);
935 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
936 struct fc_rport_libfc_priv *rp = rport->dd_data;
937 struct qedf_rport *fcport;
938 struct qedf_ioreq *io_req;
941 unsigned long flags = 0;
944 num_sgs = scsi_sg_count(sc_cmd);
945 if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
946 QEDF_ERR(&qedf->dbg_ctx,
947 "Number of SG elements %d exceeds what hardware limitation of %d.\n",
948 num_sgs, QEDF_MAX_BDS_PER_CMD);
949 sc_cmd->result = DID_ERROR << 16;
954 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
955 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
956 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
957 "Returning DNC as unloading or stop io, flags 0x%lx.\n",
959 sc_cmd->result = DID_NO_CONNECT << 16;
964 if (!qedf->pdev->msix_enabled) {
965 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
966 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
968 sc_cmd->result = DID_NO_CONNECT << 16;
973 rval = fc_remote_port_chkready(rport);
975 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
976 "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
977 rval, rport->port_id);
978 sc_cmd->result = rval;
983 /* Retry command if we are doing a qed drain operation */
984 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
985 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
986 rc = SCSI_MLQUEUE_HOST_BUSY;
990 if (lport->state != LPORT_ST_READY ||
991 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
992 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
993 rc = SCSI_MLQUEUE_HOST_BUSY;
997 /* rport and fcport are allocated together, so fcport should be non-NULL */
998 fcport = (struct qedf_rport *)&rp[1];
1000 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1001 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1003 * Session is not offloaded yet. Let SCSI-ml retry
1006 rc = SCSI_MLQUEUE_TARGET_BUSY;
1010 atomic_inc(&fcport->ios_to_queue);
1012 if (fcport->retry_delay_timestamp) {
1013 /* Take fcport->rport_lock for resetting the delay_timestamp */
1014 spin_lock_irqsave(&fcport->rport_lock, flags);
1015 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1016 fcport->retry_delay_timestamp = 0;
1018 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1019 /* If retry_delay timer is active, flow off the ML */
1020 rc = SCSI_MLQUEUE_TARGET_BUSY;
1021 atomic_dec(&fcport->ios_to_queue);
1024 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1027 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1029 rc = SCSI_MLQUEUE_HOST_BUSY;
1030 atomic_dec(&fcport->ios_to_queue);
1034 io_req->sc_cmd = sc_cmd;
1036 /* Take fcport->rport_lock for posting to fcport send queue */
1037 spin_lock_irqsave(&fcport->rport_lock, flags);
1038 if (qedf_post_io_req(fcport, io_req)) {
1039 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1040 /* Return SQE to pool */
1041 atomic_inc(&fcport->free_sqes);
1042 rc = SCSI_MLQUEUE_HOST_BUSY;
1044 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1045 atomic_dec(&fcport->ios_to_queue);
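/*
 * Decode the FCP_RSP IU from the completion: residual count, SCSI
 * status, optional FCP response code (task management) and sense data,
 * which is copied into the midlayer sense buffer.
 */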
1051 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1052 struct fcoe_cqe_rsp_info *fcp_rsp)
1054 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1055 struct qedf_ctx *qedf = io_req->fcport->qedf;
1056 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1057 int fcp_sns_len = 0;
1058 int fcp_rsp_len = 0;
1059 uint8_t *rsp_info, *sense_data;
1061 io_req->fcp_status = FC_GOOD;
1062 io_req->fcp_resid = 0;
1063 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1064 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1065 io_req->fcp_resid = fcp_rsp->fcp_resid;
1067 io_req->scsi_comp_flags = rsp_flags;
1068 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1069 fcp_rsp->scsi_status_code;
1072 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1073 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1076 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1077 fcp_sns_len = fcp_rsp->fcp_sns_len;
1079 io_req->fcp_rsp_len = fcp_rsp_len;
1080 io_req->fcp_sns_len = fcp_sns_len;
1081 rsp_info = sense_data = io_req->sense_buffer;
1083 /* fetch fcp_rsp_code */
1084 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1085 /* Only for task management function */
1086 io_req->fcp_rsp_code = rsp_info[3];
1087 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1088 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1089 /* Adjust sense-data location. */
1090 sense_data += fcp_rsp_len;
1093 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1094 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1095 "Truncating sense buffer\n");
1096 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1099 /* The sense buffer can be NULL for TMF commands */
1100 if (sc_cmd->sense_buffer) {
1101 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1103 memcpy(sc_cmd->sense_buffer, sense_data,
1108 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1110 struct scsi_cmnd *sc = io_req->sc_cmd;
1112 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1113 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1114 scsi_sg_count(sc), sc->sc_data_direction);
1115 io_req->bd_tbl->bd_valid = 0;
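/*
 * Normal I/O completion path driven from the CQ: validate the command,
 * parse the FCP response, handle firmware-detected underruns and the
 * TASK SET FULL / BUSY retry-delay hints, then return the command to the
 * SCSI midlayer.
 */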
1119 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1120 struct qedf_ioreq *io_req)
1122 struct scsi_cmnd *sc_cmd;
1123 struct fcoe_cqe_rsp_info *fcp_rsp;
1124 struct qedf_rport *fcport;
1126 u16 scope, qualifier = 0;
1127 u8 fw_residual_flag = 0;
1128 unsigned long flags = 0;
1136 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1137 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1138 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1139 QEDF_ERR(&qedf->dbg_ctx,
1140 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1145 sc_cmd = io_req->sc_cmd;
1146 fcp_rsp = &cqe->cqe_info.rsp_info;
1149 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1153 if (!sc_cmd->SCp.ptr) {
1154 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1155 "another context.\n");
1159 if (!sc_cmd->device) {
1160 QEDF_ERR(&qedf->dbg_ctx,
1161 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1165 if (!scsi_cmd_to_rq(sc_cmd)->q) {
1166 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1167 "is not valid, sc_cmd=%p.\n", sc_cmd);
1171 fcport = io_req->fcport;
1174 * When flush is active, let the cmds be completed from the cleanup
1177 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1178 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1179 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1180 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1181 "Dropping good completion xid=0x%x as fcport is flushing",
1186 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1188 qedf_unmap_sg_list(qedf, io_req);
1190 /* Check for FCP transport error */
1191 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1192 QEDF_ERR(&(qedf->dbg_ctx),
1193 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1194 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1195 io_req->fcp_rsp_code);
1196 sc_cmd->result = DID_BUS_BUSY << 16;
1200 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1201 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1202 if (fw_residual_flag) {
1203 QEDF_ERR(&qedf->dbg_ctx,
1204 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1205 io_req->xid, fcp_rsp->rsp_flags.flags,
1207 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1208 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1210 if (io_req->cdb_status == 0)
1211 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1213 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1216 * Set resid to the whole buffer length so we won't try to reuse
1217 * any previously read data.
1219 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1223 switch (io_req->fcp_status) {
1225 if (io_req->cdb_status == 0) {
1226 /* Good I/O completion */
1227 sc_cmd->result = DID_OK << 16;
1229 refcount = kref_read(&io_req->refcount);
1230 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1231 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1232 "lba=%02x%02x%02x%02x cdb_status=%d "
1233 "fcp_resid=0x%x refcount=%d.\n",
1234 qedf->lport->host->host_no, sc_cmd->device->id,
1235 sc_cmd->device->lun, io_req->xid,
1236 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1237 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1238 io_req->cdb_status, io_req->fcp_resid,
1240 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1242 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1243 io_req->cdb_status == SAM_STAT_BUSY) {
1245 * Check whether we need to set retry_delay at
1246 * all based on retry_delay module parameter
1247 * and the status qualifier.
1251 scope = fcp_rsp->retry_delay_timer & 0xC000;
1253 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
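/*
 * The retry delay timer field carries a 2-bit scope in bits 15:14 and a
 * 14-bit qualifier in bits 13:0; the qualifier is applied below in units
 * of 100 ms (qualifier * HZ / 10).
 */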
1255 if (qedf_retry_delay)
1258 if (io_req->cdb_status ==
1259 SAM_STAT_TASK_SET_FULL)
1260 qedf->task_set_fulls++;
1265 if (io_req->fcp_resid)
1266 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1268 if (chk_scope == 1) {
1269 if ((scope == 1 || scope == 2) &&
1270 (qualifier > 0 && qualifier <= 0x3FEF)) {
1271 /* Check we don't go over the max */
1272 if (qualifier > QEDF_RETRY_DELAY_MAX) {
1273 qualifier = QEDF_RETRY_DELAY_MAX;
1274 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1276 (fcp_rsp->retry_delay_timer &
1279 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1280 "Scope = %d and qualifier = %d",
1282 /* Take fcport->rport_lock to
1283 * update the retry_delay_timestamp
1285 spin_lock_irqsave(&fcport->rport_lock, flags);
1286 fcport->retry_delay_timestamp =
1287 jiffies + (qualifier * HZ / 10);
1288 spin_unlock_irqrestore(&fcport->rport_lock,
1292 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1293 "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1299 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1300 io_req->fcp_status);
1305 if (qedf_io_tracing)
1306 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1309 * We wait till the end of the function to clear the
1310 * outstanding bit in case we need to send an abort
1312 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1314 io_req->sc_cmd = NULL;
1315 sc_cmd->SCp.ptr = NULL;
1317 kref_put(&io_req->refcount, qedf_release_cmd);
1320 /* Return a SCSI command in some other context besides a normal completion */
1321 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1324 struct scsi_cmnd *sc_cmd;
1328 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1332 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1333 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1334 "io_req:%p scsi_done handling already done\n",
1340 * We will be done with this command after this call so clear the
1343 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1345 sc_cmd = io_req->sc_cmd;
1348 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1352 if (!virt_addr_valid(sc_cmd)) {
1353 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1357 if (!sc_cmd->SCp.ptr) {
1358 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1359 "another context.\n");
1363 if (!sc_cmd->device) {
1364 QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1369 if (!virt_addr_valid(sc_cmd->device)) {
1370 QEDF_ERR(&qedf->dbg_ctx,
1371 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1375 if (!sc_cmd->sense_buffer) {
1376 QEDF_ERR(&qedf->dbg_ctx,
1377 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1382 if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1383 QEDF_ERR(&qedf->dbg_ctx,
1384 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1389 qedf_unmap_sg_list(qedf, io_req);
1391 sc_cmd->result = result << 16;
1392 refcount = kref_read(&io_req->refcount);
1393 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1394 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1395 "allowed=%d retries=%d refcount=%d.\n",
1396 qedf->lport->host->host_no, sc_cmd->device->id,
1397 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1398 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1399 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1403 * Set resid to the whole buffer length so we won't try to reuse any
1404 * previously read data.
1406 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1408 if (qedf_io_tracing)
1409 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1411 io_req->sc_cmd = NULL;
1412 sc_cmd->SCp.ptr = NULL;
1414 kref_put(&io_req->refcount, qedf_release_cmd);
1419 * Clear the io_req->sc_cmd backpointer so we don't try to process
1422 io_req->sc_cmd = NULL;
1423 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
1427 * Handle warning type CQE completions. This is mainly used for REC timer
1430 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1431 struct qedf_ioreq *io_req)
1434 struct qedf_rport *fcport = io_req->fcport;
1435 u64 err_warn_bit_map;
1439 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1440 "cqe is NULL for io_req %p xid=0x%x\n",
1441 io_req, io_req->xid);
1445 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1446 "xid=0x%x\n", io_req->xid);
1447 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1448 "err_warn_bitmap=%08x:%08x\n",
1449 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1450 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1451 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1452 "rx_buff_off=%08x, rx_id=%04x\n",
1453 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1454 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1455 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1457 /* Combine the hi/lo error bitmap words into a single 64-bit value */
1458 err_warn_bit_map = (u64)
1459 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1460 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1461 for (i = 0; i < 64; i++) {
1462 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1468 /* Check if REC TOV expired if this is a tape device */
1469 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1471 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1472 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1473 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1474 io_req->rx_buf_off =
1475 cqe->cqe_info.err_info.rx_buf_off;
1476 io_req->tx_buf_off =
1477 cqe->cqe_info.err_info.tx_buf_off;
1478 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1479 rval = qedf_send_rec(io_req);
1481 * We only want to abort the io_req if we
1482 * can't queue the REC command as we want to
1483 * keep the exchange open for recovery.
1493 init_completion(&io_req->abts_done);
1494 rval = qedf_initiate_abts(io_req, true);
1496 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1499 /* Cleanup a command when we receive an error detection completion */
1500 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1501 struct qedf_ioreq *io_req)
1505 if (io_req == NULL) {
1506 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1510 if (io_req->fcport == NULL) {
1511 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1516 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1517 "cqe is NULL for io_req %p\n", io_req);
1521 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1522 "xid=0x%x\n", io_req->xid);
1523 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1524 "err_warn_bitmap=%08x:%08x\n",
1525 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1526 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1527 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1528 "rx_buff_off=%08x, rx_id=%04x\n",
1529 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1530 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1531 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1533 /* When flush is active, let the cmds be flushed out from the cleanup context */
1534 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1535 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1536 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1537 QEDF_ERR(&qedf->dbg_ctx,
1538 "Dropping EQE for xid=0x%x as fcport is flushing",
1543 if (qedf->stop_io_on_error) {
1544 qedf_stop_all_io(qedf);
1548 init_completion(&io_req->abts_done);
1549 rval = qedf_initiate_abts(io_req, true);
1551 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1554 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1555 struct qedf_ioreq *els_req)
1557 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1558 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1559 kref_read(&els_req->refcount));
1562 * Need to distinguish this from a timeout when calling the
1565 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1567 clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1569 /* Cancel the timer */
1570 cancel_delayed_work_sync(&els_req->timeout_work);
1572 /* Call callback function to complete command */
1573 if (els_req->cb_func && els_req->cb_arg) {
1574 els_req->cb_func(els_req->cb_arg);
1575 els_req->cb_arg = NULL;
1578 /* Release kref for original initiate_els */
1579 kref_put(&els_req->refcount, qedf_release_cmd);
1582 /* A value of -1 for lun is a wild card that means flush all
1583 * active SCSI I/Os for the target.
1585 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1587 struct qedf_ioreq *io_req;
1588 struct qedf_ctx *qedf;
1589 struct qedf_cmd_mgr *cmd_mgr;
1591 unsigned long flags;
1597 QEDF_ERR(NULL, "fcport is NULL\n");
1601 /* Check that fcport is still offloaded */
1602 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1603 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1607 qedf = fcport->qedf;
1610 QEDF_ERR(NULL, "qedf is NULL.\n");
1614 /* Only wait for all commands to be queued in the Upload context */
1615 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1617 while (atomic_read(&fcport->ios_to_queue)) {
1618 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1619 "Waiting for %d I/Os to be queued\n",
1620 atomic_read(&fcport->ios_to_queue));
1621 if (wait_cnt == 0) {
1623 "%d IOs request could not be queued\n",
1624 atomic_read(&fcport->ios_to_queue));
1631 cmd_mgr = qedf->cmd_mgr;
1633 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1634 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1635 atomic_read(&fcport->num_active_ios), fcport,
1636 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1637 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1639 mutex_lock(&qedf->flush_mutex);
1641 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1643 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1644 fcport->lun_reset_lun = lun;
1647 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1648 io_req = &cmd_mgr->cmds[i];
1652 if (!io_req->fcport)
1655 spin_lock_irqsave(&cmd_mgr->lock, flags);
1657 if (io_req->alloc) {
1658 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1659 if (io_req->cmd_type == QEDF_SCSI_CMD)
1660 QEDF_ERR(&qedf->dbg_ctx,
1661 "Allocated but not queued, xid=0x%x\n",
1664 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1666 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1670 if (io_req->fcport != fcport)
1673 /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1674 * but RRQ is still pending.
1675 * Workaround: Within qedf_send_rrq, we check if the fcport is
1676 * NULL, and we drop the ref on the io_req to clean it up.
1678 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1679 refcount = kref_read(&io_req->refcount);
1680 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1681 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1682 io_req->xid, io_req->cmd_type, refcount);
1683 /* If RRQ work has been queued, try to cancel it and
1686 if (atomic_read(&io_req->state) ==
1687 QEDFC_CMD_ST_RRQ_WAIT) {
1688 if (cancel_delayed_work_sync
1689 (&io_req->rrq_work)) {
1690 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1691 "Putting reference for pending RRQ work xid=0x%x.\n",
1694 kref_put(&io_req->refcount,
1701 /* Only consider flushing ELS during target reset */
1702 if (io_req->cmd_type == QEDF_ELS &&
1704 rc = kref_get_unless_zero(&io_req->refcount);
1706 QEDF_ERR(&(qedf->dbg_ctx),
1707 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1708 io_req, io_req->xid);
1711 qedf_initiate_cleanup(io_req, false);
1713 qedf_flush_els_req(qedf, io_req);
1716 * Release the kref and go back to the top of the
1722 if (io_req->cmd_type == QEDF_ABTS) {
1724 rc = kref_get_unless_zero(&io_req->refcount);
1726 QEDF_ERR(&(qedf->dbg_ctx),
1727 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1728 io_req, io_req->xid);
1731 if (lun != -1 && io_req->lun != lun)
1734 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1735 "Flushing abort xid=0x%x.\n", io_req->xid);
1737 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1738 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1739 "Putting ref for cancelled RRQ work xid=0x%x.\n",
1741 kref_put(&io_req->refcount, qedf_release_cmd);
1744 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1745 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1746 "Putting ref for cancelled tmo work xid=0x%x.\n",
1748 qedf_initiate_cleanup(io_req, true);
1749 /* Notify eh_abort handler that ABTS is
1752 complete(&io_req->abts_done);
1753 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1755 kref_put(&io_req->refcount, qedf_release_cmd);
1761 if (!io_req->sc_cmd)
1763 if (!io_req->sc_cmd->device) {
1764 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1765 "Device backpointer NULL for sc_cmd=%p.\n",
1767 /* Put reference for non-existent scsi_cmnd */
1768 io_req->sc_cmd = NULL;
1769 qedf_initiate_cleanup(io_req, false);
1770 kref_put(&io_req->refcount, qedf_release_cmd);
1774 if (io_req->lun != lun)
1779 * Use kref_get_unless_zero in the unlikely case the command
1780 * we're about to flush was completed in the normal SCSI path
1782 rc = kref_get_unless_zero(&io_req->refcount);
1784 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1785 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1789 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1790 "Cleanup xid=0x%x.\n", io_req->xid);
1793 /* Clean up the task and return the I/O to the mid-layer */
1794 qedf_initiate_cleanup(io_req, true);
1797 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1801 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1802 "Flushed 0x%x I/Os, active=0x%x.\n",
1803 flush_cnt, atomic_read(&fcport->num_active_ios));
1804 /* Only wait for all commands to complete in the Upload context */
1805 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1807 while (atomic_read(&fcport->num_active_ios)) {
1808 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1809 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1811 atomic_read(&fcport->num_active_ios),
1813 if (wait_cnt == 0) {
1814 QEDF_ERR(&qedf->dbg_ctx,
1815 "Flushed %d I/Os, active=%d.\n",
1817 atomic_read(&fcport->num_active_ios));
1818 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1819 io_req = &cmd_mgr->cmds[i];
1820 if (io_req->fcport &&
1821 io_req->fcport == fcport) {
1823 kref_read(&io_req->refcount);
1824 set_bit(QEDF_CMD_DIRTY,
1826 QEDF_ERR(&qedf->dbg_ctx,
1827 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1828 io_req, io_req->xid,
1843 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1844 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1845 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1846 mutex_unlock(&qedf->flush_mutex);
1850 * Initiate an ABTS middle path command. Note that we don't have to initialize
1851 * the task context for an ABTS task.
1853 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1855 struct fc_lport *lport;
1856 struct qedf_rport *fcport = io_req->fcport;
1857 struct fc_rport_priv *rdata;
1858 struct qedf_ctx *qedf;
1861 unsigned long flags;
1862 struct fcoe_wqe *sqe;
1866 /* Sanity check qedf_rport before dereferencing any pointers */
1867 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1868 QEDF_ERR(NULL, "tgt not offloaded\n");
1873 qedf = fcport->qedf;
1874 rdata = fcport->rdata;
1876 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1877 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1882 lport = qedf->lport;
1884 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1885 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1887 goto drop_rdata_kref;
1890 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1891 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1893 goto drop_rdata_kref;
1896 /* Ensure room on SQ */
1897 if (!atomic_read(&fcport->free_sqes)) {
1898 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1900 goto drop_rdata_kref;
1903 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1904 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1906 goto drop_rdata_kref;
1909 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1910 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1911 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1912 QEDF_ERR(&qedf->dbg_ctx,
1913 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1914 io_req->xid, io_req->sc_cmd);
1916 goto drop_rdata_kref;
1919 kref_get(&io_req->refcount);
1922 qedf->control_requests++;
1923 qedf->packet_aborts++;
1925 /* Set the command type to abort */
1926 io_req->cmd_type = QEDF_ABTS;
1927 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1929 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1930 refcount = kref_read(&io_req->refcount);
1931 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1932 "ABTS io_req xid = 0x%x refcount=%d\n",
1935 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1937 spin_lock_irqsave(&fcport->rport_lock, flags);
1939 sqe_idx = qedf_get_sqe_idx(fcport);
1940 sqe = &fcport->sq[sqe_idx];
1941 memset(sqe, 0, sizeof(struct fcoe_wqe));
1942 io_req->task_params->sqe = sqe;
1944 init_initiator_abort_fcoe_task(io_req->task_params);
1945 qedf_ring_doorbell(fcport);
1947 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1950 kref_put(&rdata->kref, fc_rport_destroy);
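/*
 * Completion handler for an ABTS response.  BA_ACC arms the delayed RRQ
 * work for R_A_TOV before the exchange is released; BA_RJT and unknown
 * responses mark the abort failed and the command is completed with
 * DID_ERROR below when return_scsi_cmd_on_abts is set.
 */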
1955 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1956 struct qedf_ioreq *io_req)
1960 struct qedf_rport *fcport = io_req->fcport;
1962 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1963 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1965 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1967 /* This was added at a point when we were scheduling abts_compl &
1968 * cleanup_compl on different CPUs and there was a possibility of
1969 * the io_req being freed from the other context before we got here.
1972 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1973 "Dropping ABTS completion xid=0x%x as fcport is NULL",
1979 * When flush is active, let the cmds be completed from the cleanup
1982 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1983 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1984 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1985 "Dropping ABTS completion xid=0x%x as fcport is flushing",
1990 if (!cancel_delayed_work(&io_req->timeout_work)) {
1991 QEDF_ERR(&qedf->dbg_ctx,
1992 "Wasn't able to cancel abts timeout work.\n");
1996 case FC_RCTL_BA_ACC:
1997 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1998 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1999 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2000 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2002 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2003 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2008 * Don't release this cmd yet. It will be released
2009 * after we get the RRQ response.
2011 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2012 msecs_to_jiffies(qedf->lport->r_a_tov));
2013 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2015 /* For error cases let the cleanup return the command */
2016 case FC_RCTL_BA_RJT:
2017 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2018 "ABTS response - RJT\n");
2019 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2022 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2026 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2028 if (io_req->sc_cmd) {
2029 if (!io_req->return_scsi_cmd_on_abts)
2030 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2031 "Not call scsi_done for xid=0x%x.\n",
2033 if (io_req->return_scsi_cmd_on_abts)
2034 qedf_scsi_done(qedf, io_req, DID_ERROR);
2037 /* Notify eh_abort handler that ABTS is complete */
2038 complete(&io_req->abts_done);
2040 kref_put(&io_req->refcount, qedf_release_cmd);
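/*
 * Allocate the DMA resources for a middle-path exchange: one page each
 * for the request and response payloads plus a single scsi_sge BD per
 * direction, all freed again via qedf_free_mp_resc().
 */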
2043 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2045 struct qedf_mp_req *mp_req;
2046 struct scsi_sge *mp_req_bd;
2047 struct scsi_sge *mp_resp_bd;
2048 struct qedf_ctx *qedf = io_req->fcport->qedf;
2052 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2054 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2055 memset(mp_req, 0, sizeof(struct qedf_mp_req));
2057 if (io_req->cmd_type != QEDF_ELS) {
2058 mp_req->req_len = sizeof(struct fcp_cmnd);
2059 io_req->data_xfer_len = mp_req->req_len;
2061 mp_req->req_len = io_req->data_xfer_len;
2063 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2064 &mp_req->req_buf_dma, GFP_KERNEL);
2065 if (!mp_req->req_buf) {
2066 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2067 qedf_free_mp_resc(io_req);
2071 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2072 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2073 if (!mp_req->resp_buf) {
2074 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2076 qedf_free_mp_resc(io_req);
2080 /* Allocate and map mp_req_bd and mp_resp_bd */
2081 sz = sizeof(struct scsi_sge);
2082 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2083 &mp_req->mp_req_bd_dma, GFP_KERNEL);
2084 if (!mp_req->mp_req_bd) {
2085 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2086 qedf_free_mp_resc(io_req);
2090 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2091 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2092 if (!mp_req->mp_resp_bd) {
2093 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2094 qedf_free_mp_resc(io_req);
2099 addr = mp_req->req_buf_dma;
2100 mp_req_bd = mp_req->mp_req_bd;
2101 mp_req_bd->sge_addr.lo = U64_LO(addr);
2102 mp_req_bd->sge_addr.hi = U64_HI(addr);
2103 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2106 * MP buffer is either a task mgmt command or an ELS.
2107 * So the assumption is that it consumes a single bd
2108 * entry in the bd table
2110 mp_resp_bd = mp_req->mp_resp_bd;
2111 addr = mp_req->resp_buf_dma;
2112 mp_resp_bd->sge_addr.lo = U64_LO(addr);
2113 mp_resp_bd->sge_addr.hi = U64_HI(addr);
2114 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
2120 * Last ditch effort to clear the port if it's stuck. Used only after a
2121 * cleanup task times out.
2123 static void qedf_drain_request(struct qedf_ctx *qedf)
2125 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2126 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2130 /* Set bit to return all queuecommand requests as busy */
2131 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2133 /* Call qed drain request for function. Should be synchronous */
2134 qed_ops->common->drain(qedf->cdev);
2136 /* Settle time for CQEs to be returned */
2139 /* Unplug and continue */
2140 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2144 * Returns SUCCESS if the cleanup task does not time out, otherwise return
2147 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2148 bool return_scsi_cmd_on_abts)
2150 struct qedf_rport *fcport;
2151 struct qedf_ctx *qedf;
2154 unsigned long flags;
2155 struct fcoe_wqe *sqe;
2159 fcport = io_req->fcport;
2161 QEDF_ERR(NULL, "fcport is NULL.\n");
2165 /* Sanity check qedf_rport before dereferencing any pointers */
2166 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2167 QEDF_ERR(NULL, "tgt not offloaded\n");
2171 qedf = fcport->qedf;
2173 QEDF_ERR(NULL, "qedf is NULL.\n");
2177 if (io_req->cmd_type == QEDF_ELS) {
2181 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2182 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2183 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2184 "cleanup processing or already completed.\n",
2188 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2191 /* Ensure room on SQ */
2192 if (!atomic_read(&fcport->free_sqes)) {
2193 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2194 /* Need to make sure we clear the flag since it was set */
2195 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2199 if (io_req->cmd_type == QEDF_CLEANUP) {
2200 QEDF_ERR(&qedf->dbg_ctx,
2201 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2202 io_req->xid, io_req->cmd_type);
2203 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2207 refcount = kref_read(&io_req->refcount);
2209 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2210 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2211 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2212 refcount, fcport, fcport->rdata->ids.port_id);
2214 /* Cleanup cmds re-use the same TID as the original I/O */
2215 io_req->cmd_type = QEDF_CLEANUP;
2216 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2218 init_completion(&io_req->cleanup_done);
2220 spin_lock_irqsave(&fcport->rport_lock, flags);
2222 sqe_idx = qedf_get_sqe_idx(fcport);
2223 sqe = &fcport->sq[sqe_idx];
2224 memset(sqe, 0, sizeof(struct fcoe_wqe));
2225 io_req->task_params->sqe = sqe;
2227 init_initiator_cleanup_fcoe_task(io_req->task_params);
2228 qedf_ring_doorbell(fcport);
2230 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2232 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2233 QEDF_CLEANUP_TIMEOUT * HZ);
2238 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2239 "xid=%x.\n", io_req->xid);
2240 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2241 /* Issue a drain request if cleanup task times out */
2242 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2243 qedf_drain_request(qedf);
2246 /* If it TASK MGMT handle it, reference will be decreased
2247 * in qedf_execute_tmf
2249 if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2250 io_req->tm_flags == FCP_TMF_TGT_RESET) {
2251 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2252 io_req->sc_cmd = NULL;
2253 complete(&io_req->tm_done);
2256 if (io_req->sc_cmd) {
2257 if (!io_req->return_scsi_cmd_on_abts)
2258 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2259 "Not call scsi_done for xid=0x%x.\n",
2261 if (io_req->return_scsi_cmd_on_abts)
2262 qedf_scsi_done(qedf, io_req, DID_ERROR);
2266 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2268 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
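
/*
 * Completion handler run when the firmware returns a CQE for a cleanup
 * request; it clears the in-cleanup flag and wakes the waiter in
 * qedf_initiate_cleanup().
 */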
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
                  io_req->xid);

        clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

        /* Complete so we can finish cleaning up the I/O */
        complete(&io_req->cleanup_done);
}
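
/*
 * Build and issue an FCP task management command (LUN or target reset) on
 * the offloaded session, then wait up to QEDF_TM_TIMEOUT seconds for the
 * response. On success the remaining active I/Os for the LUN (or the whole
 * target) are flushed back to the SCSI midlayer.
 */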
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        uint8_t tm_flags)
{
        struct qedf_ioreq *io_req;
        struct fcoe_task_context *task;
        struct qedf_ctx *qedf = fcport->qedf;
        struct fc_lport *lport = qedf->lport;
        int rc = 0;
        uint16_t xid;
        int tmo = 0;
        int lun = 0;
        unsigned long flags;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        if (!sc_cmd) {
                QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
                return FAILED;
        }

        lun = (int)sc_cmd->device->lun;
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
                rc = FAILED;
                goto no_flush;
        }

        io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
        if (!io_req) {
                QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
                rc = -EAGAIN;
                goto no_flush;
        }

        if (tm_flags == FCP_TMF_LUN_RESET)
                qedf->lun_resets++;
        else if (tm_flags == FCP_TMF_TGT_RESET)
                qedf->target_resets++;

        /* Initialize rest of io_req fields */
        io_req->sc_cmd = sc_cmd;
        io_req->fcport = fcport;
        io_req->cmd_type = QEDF_TASK_MGMT_CMD;

        /* Record which cpu this request is associated with */
        io_req->cpu = smp_processor_id();

        /* Set TM flags */
        io_req->io_req_flags = QEDF_READ;
        io_req->data_xfer_len = 0;
        io_req->tm_flags = tm_flags;

        /* Default is to return a SCSI command when an error occurs */
        io_req->return_scsi_cmd_on_abts = false;

        /* Obtain exchange id */
        xid = io_req->xid;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
                  "0x%x\n", xid);

        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);

        init_completion(&io_req->tm_done);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        qedf_init_task(fcport, lport, io_req, task, sqe);
        qedf_ring_doorbell(fcport);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);

        set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
        tmo = wait_for_completion_timeout(&io_req->tm_done,
            QEDF_TM_TIMEOUT * HZ);

        if (!tmo) {
                rc = FAILED;
                QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
                /* Clear outstanding bit since command timed out */
                clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
                io_req->sc_cmd = NULL;
        } else {
                /* Check TMF response code */
                if (io_req->fcp_rsp_code == 0)
                        rc = SUCCESS;
                else
                        rc = FAILED;
        }
        /*
         * Double check that fcport has not gone into an uploading state before
         * executing the command flush for the LUN/target.
         */
        if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "fcport is uploading, not executing flush.\n");
                goto no_flush;
        }

        /* We do not need this io_req any more */
        kref_put(&io_req->refcount, qedf_release_cmd);

        if (tm_flags == FCP_TMF_LUN_RESET)
                qedf_flush_active_ios(fcport, lun);
        else
                qedf_flush_active_ios(fcport, -1);

no_flush:
        if (rc != SUCCESS) {
                QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
                rc = FAILED;
        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
                rc = SUCCESS;
        }
        return rc;
}
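
/*
 * Entry point for task management requests (presumably reached via the
 * SCSI error-handler reset callbacks). Validates the rport, lport and
 * session state while holding a reference on the rdata, then hands the
 * actual work to qedf_execute_tmf().
 */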
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
        struct qedf_ctx *qedf;
        struct fc_lport *lport = shost_priv(sc_cmd->device->host);
        int rc = SUCCESS;
        int rval;
        struct qedf_ioreq *io_req = NULL;
        int ref_cnt = 0;
        struct fc_rport_priv *rdata = fcport->rdata;

        QEDF_ERR(NULL,
                 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
                 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
                 rport->scsi_target_id, (int)sc_cmd->device->lun);

        if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
                QEDF_ERR(NULL, "stale rport\n");
                return FAILED;
        }

        QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
                 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
                 "LUN RESET");

        if (sc_cmd->SCp.ptr) {
                io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
                ref_cnt = kref_read(&io_req->refcount);
                QEDF_ERR(NULL,
                         "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
                         io_req, io_req->xid, ref_cnt);
        }

        rval = fc_remote_port_chkready(rport);
        if (rval) {
                QEDF_ERR(NULL, "device_reset rport not ready\n");
                rc = FAILED;
                goto tmf_err;
        }

        rc = fc_block_scsi_eh(sc_cmd);
        if (rc)
                goto tmf_err;

        if (!fcport) {
                QEDF_ERR(NULL, "device_reset: rport is NULL\n");
                rc = FAILED;
                goto tmf_err;
        }

        qedf = fcport->qedf;
        if (!qedf) {
                QEDF_ERR(NULL, "qedf is NULL.\n");
                rc = FAILED;
                goto tmf_err;
        }

        if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
                rc = SUCCESS;
                goto tmf_err;
        }

        if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
            test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
                rc = SUCCESS;
                goto tmf_err;
        }

        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
                rc = FAILED;
                goto tmf_err;
        }

        if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                if (!fcport->rdata)
                        QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
                                 fcport);
                else
                        QEDF_ERR(&qedf->dbg_ctx,
                                 "fcport %p port_id=%06x is uploading.\n",
                                 fcport, fcport->rdata->ids.port_id);
                rc = FAILED;
                goto tmf_err;
        }

        rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
        kref_put(&rdata->kref, fc_rport_destroy);
        return rc;
}
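
/*
 * TMF completion handler: parse the FCP_RSP carried in the CQE and wake
 * the waiter in qedf_execute_tmf().
 */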
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        struct fcoe_cqe_rsp_info *fcp_rsp;

        clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

        fcp_rsp = &cqe->cqe_info.rsp_info;
        qedf_parse_fcp_rsp(io_req, fcp_rsp);

        io_req->sc_cmd = NULL;
        complete(&io_req->tm_done);
}
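
/*
 * Handle an unsolicited frame received into a BDQ buffer. The frame is
 * copied into a freshly allocated fc_frame and handed to a workqueue so
 * libfc can process it in a non-atomic context; the BDQ producer index is
 * advanced in all cases so the firmware can reuse the buffer.
 */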
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
        struct fcoe_cqe *cqe)
{
        unsigned long flags;
        uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
        u32 payload_len, crc;
        struct fc_frame_header *fh;
        struct fc_frame *fp;
        struct qedf_io_work *io_work;
        u32 bdq_idx;
        void *bdq_addr;
        struct scsi_bd *p_bd_info;

        p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
                  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
                  le32_to_cpu(p_bd_info->address.hi),
                  le32_to_cpu(p_bd_info->address.lo),
                  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
                  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
                  qedf->bdq_prod_idx, pktlen);

        bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
        if (bdq_idx >= QEDF_BDQ_SIZE) {
                QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
                    bdq_idx);
                goto increment_prod;
        }

        bdq_addr = qedf->bdq[bdq_idx].buf_addr;
        if (!bdq_addr) {
                QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
                    "unsolicited packet.\n");
                goto increment_prod;
        }

        if (qedf_dump_frames) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
                    "BDQ frame is at addr=%p.\n", bdq_addr);
                print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
                    (void *)bdq_addr, pktlen, false);
        }

        /* Allocate frame */
        payload_len = pktlen - sizeof(struct fc_frame_header);
        fp = fc_frame_alloc(qedf->lport, payload_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
                goto increment_prod;
        }

        /* Copy data from BDQ buffer into fc_frame struct */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, (void *)bdq_addr, pktlen);

        QEDF_WARN(&qedf->dbg_ctx,
                  "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
                  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
                  fh->fh_type, fc_frame_payload_op(fp));

        /* Initialize the frame so libfc sees it as a valid frame */
        crc = fcoe_fc_crc(fp);
        fc_frame_init(fp);
        fr_dev(fp) = qedf->lport;
        fr_sof(fp) = FC_SOF_I3;
        fr_eof(fp) = FC_EOF_T;
        fr_crc(fp) = cpu_to_le32(~crc);

        /*
         * We need to return the frame back up to libfc in a non-atomic
         * context.
         */
        io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
        if (!io_work) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
                          "work for I/O completion.\n");
                fc_frame_free(fp);
                goto increment_prod;
        }
        memset(io_work, 0, sizeof(struct qedf_io_work));

        INIT_WORK(&io_work->work, qedf_fp_io_handler);

        /* Copy contents of CQE for deferred processing */
        memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

        io_work->qedf = qedf;
        io_work->fp = fp;

        queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
        spin_lock_irqsave(&qedf->hba_lock, flags);

        /* Increment producer to let f/w know we've handled the frame */
        qedf->bdq_prod_idx++;

        /* Producer index wraps at uint16_t boundary */
        if (qedf->bdq_prod_idx == 0xffff)
                qedf->bdq_prod_idx = 0;

        writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
        readw(qedf->bdq_primary_prod);
        writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
        readw(qedf->bdq_secondary_prod);

        spin_unlock_irqrestore(&qedf->hba_lock, flags);
}