2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2018 Cavium Inc.
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of this source tree.
9 #include <linux/spinlock.h>
10 #include <linux/vmalloc.h>
12 #include <scsi/scsi_tcq.h>
14 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
15 unsigned int timer_msec)
17 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
18 msecs_to_jiffies(timer_msec));
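/*
 * Usage sketch (illustrative): a caller such as qedf_initiate_abts()
 * arms the per-command timer with
 * qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT), queueing
 * timeout_work on timer_work_queue; qedf_cmd_timeout() below then
 * runs if no completion cancels the work within timer_msec ms.
 */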
21 static void qedf_cmd_timeout(struct work_struct *work)
24 struct qedf_ioreq *io_req =
25 container_of(work, struct qedf_ioreq, timeout_work.work);
26 struct qedf_ctx *qedf;
27 struct qedf_rport *fcport;
31 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
35 fcport = io_req->fcport;
36 if (!fcport) {
37 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
43 switch (io_req->cmd_type) {
46 QEDF_INFO(NULL, QEDF_LOG_IO,
47 "qedf is NULL for ABTS xid=0x%x.\n",
52 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
54 /* Cleanup timed out ABTS */
55 qedf_initiate_cleanup(io_req, true);
56 complete(&io_req->abts_done);
59 * Need to call kref_put for reference taken when initiate_abts
60 * was called since abts_compl won't be called now that we've
61 * cleaned up the task.
63 kref_put(&io_req->refcount, qedf_release_cmd);
65 /* Clear the in-abort bit now that we're done with the command */
66 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
69 * Now that the original I/O and the ABTS are complete, see
70 * if we need to reconnect to the target.
72 qedf_restart_rport(fcport);
76 QEDF_INFO(NULL, QEDF_LOG_IO,
77 "qedf is NULL for ELS xid=0x%x.\n",
81 /* ELS request no longer outstanding since it timed out */
82 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
84 kref_get(&io_req->refcount);
86 * Don't attempt to clean an ELS timeout as any subsequent
87 * ABTS or cleanup requests just hang. For now just free
88 * the resources of the original I/O and the RRQ
90 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
92 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
93 /* Call callback function to complete command */
94 if (io_req->cb_func && io_req->cb_arg) {
95 op = io_req->cb_arg->op;
96 io_req->cb_func(io_req->cb_arg);
97 io_req->cb_arg = NULL;
99 qedf_initiate_cleanup(io_req, true);
100 kref_put(&io_req->refcount, qedf_release_cmd);
102 case QEDF_SEQ_CLEANUP:
103 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
104 "xid=0x%x.\n", io_req->xid);
105 qedf_initiate_cleanup(io_req, true);
106 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
107 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
114 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
116 struct io_bdt *bdt_info;
117 struct qedf_ctx *qedf = cmgr->qedf;
120 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
123 struct qedf_ioreq *io_req;
125 num_ios = max_xid - min_xid + 1;
127 /* Free fcoe_bdt_ctx structures */
128 if (!cmgr->io_bdt_pool)
131 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
132 for (i = 0; i < num_ios; i++) {
133 bdt_info = cmgr->io_bdt_pool[i];
134 if (bdt_info->bd_tbl) {
135 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
136 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
137 bdt_info->bd_tbl = NULL;
141 /* Destroy io_bdt pool */
142 for (i = 0; i < num_ios; i++) {
143 kfree(cmgr->io_bdt_pool[i]);
144 cmgr->io_bdt_pool[i] = NULL;
147 kfree(cmgr->io_bdt_pool);
148 cmgr->io_bdt_pool = NULL;
152 for (i = 0; i < num_ios; i++) {
153 io_req = &cmgr->cmds[i];
154 kfree(io_req->sgl_task_params);
155 kfree(io_req->task_params);
156 /* Make sure we free per command sense buffer */
157 if (io_req->sense_buffer)
158 dma_free_coherent(&qedf->pdev->dev,
159 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
160 io_req->sense_buffer_dma);
161 cancel_delayed_work_sync(&io_req->rrq_work);
164 /* Free command manager itself */
168 static void qedf_handle_rrq(struct work_struct *work)
170 struct qedf_ioreq *io_req =
171 container_of(work, struct qedf_ioreq, rrq_work.work);
173 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
174 qedf_send_rrq(io_req);
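/*
 * Background (sketch): after an ABTS is accepted (BA_ACC),
 * qedf_process_abts_compl() queues rrq_work with a delay of the
 * lport's R_A_TOV; by the time this handler runs, the command moves
 * from QEDFC_CMD_ST_RRQ_WAIT to QEDFC_CMD_ST_RRQ_ACTIVE and an RRQ is
 * sent to reclaim the exchange.
 */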
178 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
180 struct qedf_cmd_mgr *cmgr;
181 struct io_bdt *bdt_info;
182 struct qedf_ioreq *io_req;
187 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
189 /* Make sure num_queues is already set before calling this function */
190 if (!qedf->num_queues) {
191 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
195 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
196 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
197 "max_xid 0x%x.\n", min_xid, max_xid);
201 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
202 "0x%x.\n", min_xid, max_xid);
204 num_ios = max_xid - min_xid + 1;
206 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
208 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
213 spin_lock_init(&cmgr->lock);
216 * Initialize I/O request fields.
220 for (i = 0; i < num_ios; i++) {
221 io_req = &cmgr->cmds[i];
222 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
226 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
228 /* Allocate DMA memory to hold sense buffer */
229 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
230 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
232 if (!io_req->sense_buffer)
235 /* Allocate task parameters to pass to f/w init functions */
236 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
238 if (!io_req->task_params) {
239 QEDF_ERR(&(qedf->dbg_ctx),
240 "Failed to allocate task_params for xid=0x%x\n",
246 * Allocate scatter/gather list info to pass to f/w init functions
249 io_req->sgl_task_params = kzalloc(
250 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
251 if (!io_req->sgl_task_params) {
252 QEDF_ERR(&(qedf->dbg_ctx),
253 "Failed to allocate sgl_task_params for xid=0x%x\n",
259 /* Allocate pool of io_bdts - one for each qedf_ioreq */
260 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
263 if (!cmgr->io_bdt_pool) {
264 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
268 for (i = 0; i < num_ios; i++) {
269 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
271 if (!cmgr->io_bdt_pool[i]) {
272 QEDF_WARN(&(qedf->dbg_ctx),
273 "Failed to alloc io_bdt_pool[%d].\n", i);
278 for (i = 0; i < num_ios; i++) {
279 bdt_info = cmgr->io_bdt_pool[i];
280 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
281 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
282 &bdt_info->bd_tbl_dma, GFP_KERNEL);
283 if (!bdt_info->bd_tbl) {
284 QEDF_WARN(&(qedf->dbg_ctx),
285 "Failed to alloc bdt_tbl[%d].\n", i);
289 atomic_set(&cmgr->free_list_cnt, num_ios);
290 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
291 "cmgr->free_list_cnt=%d.\n",
292 atomic_read(&cmgr->free_list_cnt));
297 qedf_cmd_mgr_free(cmgr);
301 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
303 struct qedf_ctx *qedf = fcport->qedf;
304 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
305 struct qedf_ioreq *io_req = NULL;
306 struct io_bdt *bd_tbl;
312 free_sqes = atomic_read(&fcport->free_sqes);
315 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
316 "Returning NULL, free_sqes=%d.\n ",
321 /* Limit the number of outstanding R/W tasks */
322 if ((atomic_read(&fcport->num_active_ios) >=
323 NUM_RW_TASKS_PER_CONNECTION)) {
324 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
325 "Returning NULL, num_active_ios=%d.\n",
326 atomic_read(&fcport->num_active_ios));
330 /* Keep GBL_RSVD_TASKS global TIDs in reserve for certain tasks */
331 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
332 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
333 "Returning NULL, free_list_cnt=%d.\n",
334 atomic_read(&cmd_mgr->free_list_cnt));
338 spin_lock_irqsave(&cmd_mgr->lock, flags);
339 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
340 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
342 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
345 /* Check to make sure command was previously freed */
350 if (i == FCOE_PARAMS_NUM_TASKS) {
351 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
355 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
356 QEDF_ERR(&qedf->dbg_ctx,
357 "io_req found to be dirty ox_id = 0x%x.\n",
360 /* Clear any flags now that we've reallocated the xid */
363 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
365 atomic_inc(&fcport->num_active_ios);
366 atomic_dec(&fcport->free_sqes);
368 atomic_dec(&cmd_mgr->free_list_cnt);
370 io_req->cmd_mgr = cmd_mgr;
371 io_req->fcport = fcport;
373 /* Clear any stale sc_cmd back pointer */
374 io_req->sc_cmd = NULL;
377 /* Hold the io_req against deletion */
378 kref_init(&io_req->refcount); /* ID: 001 */
379 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
381 /* Bind io_bdt for this io_req */
382 /* Have a static link between io_req and io_bdt_pool */
383 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
384 if (bd_tbl == NULL) {
385 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
386 kref_put(&io_req->refcount, qedf_release_cmd);
389 bd_tbl->io_req = io_req;
390 io_req->cmd_type = cmd_type;
391 io_req->tm_flags = 0;
393 /* Reset sequence offset data */
394 io_req->rx_buf_off = 0;
395 io_req->tx_buf_off = 0;
396 io_req->rx_id = 0xffff; /* No RX_ID assigned yet */
401 /* Record failure for stats and return NULL to caller */
402 qedf->alloc_failures++;
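/*
 * Reference-count sketch: a successful qedf_alloc_cmd() hands back an
 * io_req holding its initial kref (ID: 001 above); every path that is
 * done with the command must eventually drop that reference with
 * kref_put(&io_req->refcount, qedf_release_cmd).
 */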
406 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
408 struct qedf_mp_req *mp_req = &(io_req->mp_req);
409 struct qedf_ctx *qedf = io_req->fcport->qedf;
410 uint64_t sz = sizeof(struct scsi_sge);
413 if (mp_req->mp_req_bd) {
414 dma_free_coherent(&qedf->pdev->dev, sz,
415 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
416 mp_req->mp_req_bd = NULL;
418 if (mp_req->mp_resp_bd) {
419 dma_free_coherent(&qedf->pdev->dev, sz,
420 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
421 mp_req->mp_resp_bd = NULL;
423 if (mp_req->req_buf) {
424 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
425 mp_req->req_buf, mp_req->req_buf_dma);
426 mp_req->req_buf = NULL;
428 if (mp_req->resp_buf) {
429 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
430 mp_req->resp_buf, mp_req->resp_buf_dma);
431 mp_req->resp_buf = NULL;
435 void qedf_release_cmd(struct kref *ref)
437 struct qedf_ioreq *io_req =
438 container_of(ref, struct qedf_ioreq, refcount);
439 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
440 struct qedf_rport *fcport = io_req->fcport;
443 if (io_req->cmd_type == QEDF_SCSI_CMD)
444 WARN_ON(io_req->sc_cmd);
446 if (io_req->cmd_type == QEDF_ELS ||
447 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
448 qedf_free_mp_resc(io_req);
450 atomic_inc(&cmd_mgr->free_list_cnt);
451 atomic_dec(&fcport->num_active_ios);
452 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
453 if (atomic_read(&fcport->num_active_ios) < 0)
454 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
456 /* Increment task retry identifier now that the request is released */
457 io_req->task_retry_identifier++;
458 io_req->fcport = NULL;
460 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
462 spin_lock_irqsave(&cmd_mgr->lock, flags);
463 io_req->fcport = NULL;
465 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
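/*
 * Note: qedf_release_cmd() only runs via kref_put() on the final
 * reference. It returns the command to the free pool (free_list_cnt)
 * and bumps task_retry_identifier, presumably so the firmware can tell
 * a reused task apart from its prior incarnation.
 */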
468 static int qedf_map_sg(struct qedf_ioreq *io_req)
470 struct scsi_cmnd *sc = io_req->sc_cmd;
471 struct Scsi_Host *host = sc->device->host;
472 struct fc_lport *lport = shost_priv(host);
473 struct qedf_ctx *qedf = lport_priv(lport);
474 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
475 struct scatterlist *sg;
483 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
484 scsi_sg_count(sc), sc->sc_data_direction);
485 sg = scsi_sglist(sc);
487 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
489 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
490 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
492 scsi_for_each_sg(sc, sg, sg_count, i) {
493 sg_len = (u32)sg_dma_len(sg);
494 addr = (u64)sg_dma_address(sg);
495 end_addr = (u64)(addr + sg_len);
498 * Intermediate s/g element, so check whether the start and end
499 * addresses are page aligned. Only required for writes and only if
500 * the number of scatter/gather elements is 8 or more.
502 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
503 (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
504 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
506 bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
507 bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
508 bd[bd_count].sge_len = cpu_to_le32(sg_len);
511 byte_count += sg_len;
514 /* If neither FAST nor SLOW got set, default to FAST */
515 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
516 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
518 if (byte_count != scsi_bufflen(sc))
519 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
520 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
521 scsi_bufflen(sc), io_req->xid);
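/*
 * Worked example (illustrative): a WRITE mapped to 10 S/G elements
 * where some middle element is shorter than QEDF_PAGE_SIZE is demoted
 * to QEDF_IOREQ_SLOW_SGE above; any READ, or any command with 8 or
 * fewer elements, is marked QEDF_IOREQ_FAST_SGE up front and skips
 * the per-element check.
 */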
526 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
528 struct scsi_cmnd *sc = io_req->sc_cmd;
529 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
532 if (scsi_sg_count(sc)) {
533 bd_count = qedf_map_sg(io_req);
538 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
541 io_req->bd_tbl->bd_valid = bd_count;
546 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
547 struct fcp_cmnd *fcp_cmnd)
549 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
551 /* fcp_cmnd is 32 bytes */
552 memset(fcp_cmnd, 0, FCP_CMND_LEN);
554 /* 8 bytes: SCSI LUN info */
555 int_to_scsilun(sc_cmd->device->lun,
556 (struct scsi_lun *)&fcp_cmnd->fc_lun);
558 /* 4 bytes: flag info */
559 fcp_cmnd->fc_pri_ta = 0;
560 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
561 fcp_cmnd->fc_flags = io_req->io_req_flags;
562 fcp_cmnd->fc_cmdref = 0;
564 /* Populate data direction */
565 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
566 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
568 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
569 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
570 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
571 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
574 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
576 /* 16 bytes: CDB information */
577 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
578 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
580 /* 4 bytes: FCP data length */
581 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
584 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
585 struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
586 struct fcoe_wqe *sqe)
588 enum fcoe_task_type task_type;
589 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
590 struct io_bdt *bd_tbl = io_req->bd_tbl;
594 struct qedf_ctx *qedf = fcport->qedf;
595 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
596 struct regpair sense_data_buffer_phys_addr;
601 /* Note init_initiator_rw_fcoe_task memsets the task context */
602 io_req->task = task_ctx;
603 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
604 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
605 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
607 /* Set task type based on the DMA direction of the command */
608 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
609 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
611 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
612 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
613 tx_io_size = io_req->data_xfer_len;
615 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
616 rx_io_size = io_req->data_xfer_len;
620 /* Setup the fields for fcoe_task_params */
621 io_req->task_params->context = task_ctx;
622 io_req->task_params->sqe = sqe;
623 io_req->task_params->task_type = task_type;
624 io_req->task_params->tx_io_size = tx_io_size;
625 io_req->task_params->rx_io_size = rx_io_size;
626 io_req->task_params->conn_cid = fcport->fw_cid;
627 io_req->task_params->itid = io_req->xid;
628 io_req->task_params->cq_rss_number = cq_idx;
629 io_req->task_params->is_tape_device = fcport->dev_type;
631 /* Fill in information for scatter/gather list */
632 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
633 bd_count = bd_tbl->bd_valid;
634 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
635 io_req->sgl_task_params->sgl_phys_addr.lo =
636 U64_LO(bd_tbl->bd_tbl_dma);
637 io_req->sgl_task_params->sgl_phys_addr.hi =
638 U64_HI(bd_tbl->bd_tbl_dma);
639 io_req->sgl_task_params->num_sges = bd_count;
640 io_req->sgl_task_params->total_buffer_size =
641 scsi_bufflen(io_req->sc_cmd);
642 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
643 io_req->sgl_task_params->small_mid_sge = 1;
645 io_req->sgl_task_params->small_mid_sge = 0;
648 /* Fill in physical address of sense buffer */
649 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
650 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
652 /* fill FCP_CMND IU */
653 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
655 /* Swap fcp_cmnd since FC is big endian */
656 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
657 for (i = 0; i < cnt; i++) {
658 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
660 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
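/*
 * The loop above treats the 32-byte FCP_CMND IU as
 * sizeof(struct fcp_cmnd) / sizeof(u32) = 8 32-bit words and converts
 * each to big endian, since FC payloads are big endian on the wire.
 */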
662 init_initiator_rw_fcoe_task(io_req->task_params,
663 io_req->sgl_task_params,
664 sense_data_buffer_phys_addr,
665 io_req->task_retry_identifier, fcp_cmnd);
667 /* Increment SGL type counters */
668 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
669 qedf->slow_sge_ios++;
671 qedf->fast_sge_ios++;
674 void qedf_init_mp_task(struct qedf_ioreq *io_req,
675 struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
677 struct qedf_mp_req *mp_req = &(io_req->mp_req);
678 struct qedf_rport *fcport = io_req->fcport;
679 struct qedf_ctx *qedf = io_req->fcport->qedf;
680 struct fc_frame_header *fc_hdr;
681 struct fcoe_tx_mid_path_params task_fc_hdr;
682 struct scsi_sgl_task_params tx_sgl_task_params;
683 struct scsi_sgl_task_params rx_sgl_task_params;
685 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
686 "Initializing MP task for cmd_type=%d\n",
689 qedf->control_requests++;
691 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
692 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
693 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
694 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
696 /* Setup the task from io_req for easy reference */
697 io_req->task = task_ctx;
699 /* Setup the fields for fcoe_task_params */
700 io_req->task_params->context = task_ctx;
701 io_req->task_params->sqe = sqe;
702 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
703 io_req->task_params->tx_io_size = io_req->data_xfer_len;
704 /* rx_io_size tells the f/w how large a response buffer we have */
705 io_req->task_params->rx_io_size = PAGE_SIZE;
706 io_req->task_params->conn_cid = fcport->fw_cid;
707 io_req->task_params->itid = io_req->xid;
708 /* Return middle path commands on CQ 0 */
709 io_req->task_params->cq_rss_number = 0;
710 io_req->task_params->is_tape_device = fcport->dev_type;
712 fc_hdr = &(mp_req->req_fc_hdr);
713 /* Set OX_ID and RX_ID based on driver task id */
714 fc_hdr->fh_ox_id = io_req->xid;
715 fc_hdr->fh_rx_id = htons(0xffff);
717 /* Set up FC header information */
718 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
719 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
720 task_fc_hdr.type = fc_hdr->fh_type;
721 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
722 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
723 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
724 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
726 /* Set up s/g list parameters for request buffer */
727 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
728 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
729 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
730 tx_sgl_task_params.num_sges = 1;
731 /* Single BD entry, sized to the actual request transfer length */
732 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
733 tx_sgl_task_params.small_mid_sge = 0;
735 /* Set up s/g list parameters for response buffer */
736 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
737 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
738 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
739 rx_sgl_task_params.num_sges = 1;
740 /* Single PAGE_SIZE response buffer, matching the sg element size */
741 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
742 rx_sgl_task_params.small_mid_sge = 0;
746 * The last argument is 0 because the previous code did not request
747 * the FC header information.
749 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
752 &rx_sgl_task_params, 0);
755 /* The caller is presumed to hold fcport->rport_lock */
756 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
758 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
761 rval = fcport->sq_prod_idx;
763 /* Adjust ring index */
764 fcport->sq_prod_idx++;
765 fcport->fw_sq_prod_idx++;
766 if (fcport->sq_prod_idx == total_sqe)
767 fcport->sq_prod_idx = 0;
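/*
 * Worked example (hypothetical sizes): with sq_mem_size = 4096 and a
 * 64-byte struct fcoe_wqe, total_sqe = 64, so sq_prod_idx wraps
 * 63 -> 0 while fw_sq_prod_idx is not reset here; the latter is what
 * qedf_ring_doorbell() writes to the doorbell as dbell.sq_prod.
 */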
772 void qedf_ring_doorbell(struct qedf_rport *fcport)
774 struct fcoe_db_data dbell = { 0 };
778 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
779 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
780 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
781 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
783 dbell.sq_prod = fcport->fw_sq_prod_idx;
784 /* wmb makes sure that the BD data is updated before updating the
785 * producer; otherwise the firmware may read stale data from the BDs.
789 writel(*(u32 *)&dbell, fcport->p_doorbell);
791 * Fence required to flush the write-combined buffer, since another
792 * CPU may write to the same doorbell address and data may be lost
793 * due to the relaxed ordering nature of the write-combined BAR.
798 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
801 struct qedf_ctx *qedf = fcport->qedf;
802 struct qedf_io_log *io_log;
803 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
807 spin_lock_irqsave(&qedf->io_trace_lock, flags);
809 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
810 io_log->direction = direction;
811 io_log->task_id = io_req->xid;
812 io_log->port_id = fcport->rdata->ids.port_id;
813 io_log->lun = sc_cmd->device->lun;
814 io_log->op = op = sc_cmd->cmnd[0];
815 io_log->lba[0] = sc_cmd->cmnd[2];
816 io_log->lba[1] = sc_cmd->cmnd[3];
817 io_log->lba[2] = sc_cmd->cmnd[4];
818 io_log->lba[3] = sc_cmd->cmnd[5];
819 io_log->bufflen = scsi_bufflen(sc_cmd);
820 io_log->sg_count = scsi_sg_count(sc_cmd);
821 io_log->result = sc_cmd->result;
822 io_log->jiffies = jiffies;
823 io_log->refcount = kref_read(&io_req->refcount);
825 if (direction == QEDF_IO_TRACE_REQ) {
826 /* For requests we only care about the submission CPU */
827 io_log->req_cpu = io_req->cpu;
830 } else if (direction == QEDF_IO_TRACE_RSP) {
831 io_log->req_cpu = io_req->cpu;
832 io_log->int_cpu = io_req->int_cpu;
833 io_log->rsp_cpu = smp_processor_id();
836 io_log->sge_type = io_req->sge_type;
838 qedf->io_trace_idx++;
839 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
840 qedf->io_trace_idx = 0;
842 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
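/*
 * The trace entries live in a fixed ring of QEDF_IO_TRACE_SIZE slots;
 * io_trace_idx simply wraps, so only the most recent entries are
 * retained. Call sites gate tracing on the qedf_io_tracing flag.
 */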
845 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
847 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
848 struct Scsi_Host *host = sc_cmd->device->host;
849 struct fc_lport *lport = shost_priv(host);
850 struct qedf_ctx *qedf = lport_priv(lport);
851 struct e4_fcoe_task_context *task_ctx;
853 enum fcoe_task_type req_type = 0;
854 struct fcoe_wqe *sqe;
857 /* Initialize rest of io_req fields */
858 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
859 sc_cmd->SCp.ptr = (char *)io_req;
860 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
862 /* Record which cpu this request is associated with */
863 io_req->cpu = smp_processor_id();
865 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
866 req_type = FCOE_TASK_TYPE_READ_INITIATOR;
867 io_req->io_req_flags = QEDF_READ;
868 qedf->input_requests++;
869 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
870 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
871 io_req->io_req_flags = QEDF_WRITE;
872 qedf->output_requests++;
874 io_req->io_req_flags = 0;
875 qedf->control_requests++;
880 /* Build buffer descriptor list for firmware from sg list */
881 if (qedf_build_bd_list_from_sg(io_req)) {
882 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
884 /* qedf_release_cmd() frees io_req; clear the assigned sc_cmd first */
884 io_req->sc_cmd = NULL;
885 kref_put(&io_req->refcount, qedf_release_cmd);
889 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
890 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
891 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
892 /* qedf_release_cmd() frees io_req; clear the assigned sc_cmd first */
893 io_req->sc_cmd = NULL;
894 kref_put(&io_req->refcount, qedf_release_cmd);
898 /* Record LUN number for later use if we need it */
899 io_req->lun = (int)sc_cmd->device->lun;
901 /* Obtain free SQE */
902 sqe_idx = qedf_get_sqe_idx(fcport);
903 sqe = &fcport->sq[sqe_idx];
904 memset(sqe, 0, sizeof(struct fcoe_wqe));
906 /* Get the task context */
907 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
909 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
911 /* qedf_release_cmd() frees io_req; clear the assigned sc_cmd first */
912 io_req->sc_cmd = NULL;
913 kref_put(&io_req->refcount, qedf_release_cmd);
917 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
920 qedf_ring_doorbell(fcport);
922 /* Set that command is with the firmware now */
923 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
925 if (qedf_io_tracing && io_req->sc_cmd)
926 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
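/*
 * Submission sequence in short: build the BD list from the
 * scatterlist, claim an SQE slot (the caller holds
 * fcport->rport_lock), fill the firmware task context via
 * qedf_init_task(), ring the doorbell, and only then mark the command
 * QEDF_CMD_OUTSTANDING.
 */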
932 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
934 struct fc_lport *lport = shost_priv(host);
935 struct qedf_ctx *qedf = lport_priv(lport);
936 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
937 struct fc_rport_libfc_priv *rp = rport->dd_data;
938 struct qedf_rport *fcport;
939 struct qedf_ioreq *io_req;
942 unsigned long flags = 0;
945 num_sgs = scsi_sg_count(sc_cmd);
946 if (num_sgs > QEDF_MAX_BDS_PER_CMD) {
947 QEDF_ERR(&qedf->dbg_ctx,
948 "Number of SG elements %d exceeds the hardware limit of %d.\n",
949 num_sgs, QEDF_MAX_BDS_PER_CMD);
950 sc_cmd->result = DID_ERROR << 16;
951 sc_cmd->scsi_done(sc_cmd);
955 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
956 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
957 sc_cmd->result = DID_NO_CONNECT << 16;
958 sc_cmd->scsi_done(sc_cmd);
962 if (!qedf->pdev->msix_enabled) {
963 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
964 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
966 sc_cmd->result = DID_NO_CONNECT << 16;
967 sc_cmd->scsi_done(sc_cmd);
971 rval = fc_remote_port_chkready(rport);
973 sc_cmd->result = rval;
974 sc_cmd->scsi_done(sc_cmd);
978 /* Retry command if we are doing a qed drain operation */
979 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
980 rc = SCSI_MLQUEUE_HOST_BUSY;
984 if (lport->state != LPORT_ST_READY ||
985 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
986 rc = SCSI_MLQUEUE_HOST_BUSY;
990 /* rport and fcport are allocated together, so fcport should be non-NULL */
991 fcport = (struct qedf_rport *)&rp[1];
993 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
994 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
996 * Session is not offloaded yet. Let SCSI-ml retry the command.
999 rc = SCSI_MLQUEUE_TARGET_BUSY;
1003 atomic_inc(&fcport->ios_to_queue);
1005 if (fcport->retry_delay_timestamp) {
1006 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1007 fcport->retry_delay_timestamp = 0;
1009 /* If retry_delay timer is active, flow off the ML */
1010 rc = SCSI_MLQUEUE_TARGET_BUSY;
1011 atomic_dec(&fcport->ios_to_queue);
1016 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1018 rc = SCSI_MLQUEUE_HOST_BUSY;
1019 atomic_dec(&fcport->ios_to_queue);
1023 io_req->sc_cmd = sc_cmd;
1025 /* Take fcport->rport_lock for posting to fcport send queue */
1026 spin_lock_irqsave(&fcport->rport_lock, flags);
1027 if (qedf_post_io_req(fcport, io_req)) {
1028 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1029 /* Return SQE to pool */
1030 atomic_inc(&fcport->free_sqes);
1031 rc = SCSI_MLQUEUE_HOST_BUSY;
1033 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1034 atomic_dec(&fcport->ios_to_queue);
1040 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1041 struct fcoe_cqe_rsp_info *fcp_rsp)
1043 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1044 struct qedf_ctx *qedf = io_req->fcport->qedf;
1045 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1046 int fcp_sns_len = 0;
1047 int fcp_rsp_len = 0;
1048 uint8_t *rsp_info, *sense_data;
1050 io_req->fcp_status = FC_GOOD;
1051 io_req->fcp_resid = 0;
1052 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1053 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1054 io_req->fcp_resid = fcp_rsp->fcp_resid;
1056 io_req->scsi_comp_flags = rsp_flags;
1057 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1058 fcp_rsp->scsi_status_code;
1061 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1062 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1065 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1066 fcp_sns_len = fcp_rsp->fcp_sns_len;
1068 io_req->fcp_rsp_len = fcp_rsp_len;
1069 io_req->fcp_sns_len = fcp_sns_len;
1070 rsp_info = sense_data = io_req->sense_buffer;
1072 /* fetch fcp_rsp_code */
1073 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1074 /* Only for task management function */
1075 io_req->fcp_rsp_code = rsp_info[3];
1076 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1077 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1078 /* Adjust sense-data location. */
1079 sense_data += fcp_rsp_len;
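/*
 * Layout sketch of the DMA sense buffer as parsed above: when the
 * RSP_LEN_VALID flag is set, fcp_rsp_len bytes of response info come
 * first (byte 3 is the rsp_code used for task management), followed
 * by up to fcp_sns_len bytes of SCSI sense data.
 */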
1082 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1083 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1084 "Truncating sense buffer\n");
1085 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1088 /* The sense buffer can be NULL for TMF commands */
1089 if (sc_cmd->sense_buffer) {
1090 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1092 memcpy(sc_cmd->sense_buffer, sense_data,
1097 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1099 struct scsi_cmnd *sc = io_req->sc_cmd;
1101 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1102 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1103 scsi_sg_count(sc), sc->sc_data_direction);
1104 io_req->bd_tbl->bd_valid = 0;
1108 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1109 struct qedf_ioreq *io_req)
1112 struct e4_fcoe_task_context *task_ctx;
1113 struct scsi_cmnd *sc_cmd;
1114 struct fcoe_cqe_rsp_info *fcp_rsp;
1115 struct qedf_rport *fcport;
1117 u16 scope, qualifier = 0;
1118 u8 fw_residual_flag = 0;
1125 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1126 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1127 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1128 QEDF_ERR(&qedf->dbg_ctx,
1129 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1135 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1136 sc_cmd = io_req->sc_cmd;
1137 fcp_rsp = &cqe->cqe_info.rsp_info;
1140 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1144 if (!sc_cmd->SCp.ptr) {
1145 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1146 "another context.\n");
1150 if (!sc_cmd->device) {
1151 QEDF_ERR(&qedf->dbg_ctx,
1152 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1156 if (!sc_cmd->request) {
1157 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1158 "sc_cmd=%p.\n", sc_cmd);
1162 if (!sc_cmd->request->q) {
1163 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1164 "is not valid, sc_cmd=%p.\n", sc_cmd);
1168 fcport = io_req->fcport;
1171 * When flush is active, let the cmds be completed from the cleanup context.
1174 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1175 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1176 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1177 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1178 "Dropping good completion xid=0x%x as fcport is flushing",
1183 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1185 qedf_unmap_sg_list(qedf, io_req);
1187 /* Check for FCP transport error */
1188 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1189 QEDF_ERR(&(qedf->dbg_ctx),
1190 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1191 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1192 io_req->fcp_rsp_code);
1193 sc_cmd->result = DID_BUS_BUSY << 16;
1197 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1198 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1199 if (fw_residual_flag) {
1200 QEDF_ERR(&qedf->dbg_ctx,
1201 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1202 io_req->xid, fcp_rsp->rsp_flags.flags,
1204 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1205 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1207 if (io_req->cdb_status == 0)
1208 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1210 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1213 * Set resid to the whole buffer length so we won't try to reuse
1214 * any previously read data.
1216 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1220 switch (io_req->fcp_status) {
1222 if (io_req->cdb_status == 0) {
1223 /* Good I/O completion */
1224 sc_cmd->result = DID_OK << 16;
1226 refcount = kref_read(&io_req->refcount);
1227 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1228 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1229 "lba=%02x%02x%02x%02x cdb_status=%d "
1230 "fcp_resid=0x%x refcount=%d.\n",
1231 qedf->lport->host->host_no, sc_cmd->device->id,
1232 sc_cmd->device->lun, io_req->xid,
1233 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1234 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1235 io_req->cdb_status, io_req->fcp_resid,
1237 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1239 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1240 io_req->cdb_status == SAM_STAT_BUSY) {
1242 * Check whether we need to set retry_delay at
1243 * all based on retry_delay module parameter
1244 * and the status qualifier.
1248 scope = fcp_rsp->retry_delay_timer & 0xC000;
1250 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1252 if (qedf_retry_delay &&
1253 scope > 0 && qualifier > 0 &&
1254 qualifier <= 0x3FEF) {
1255 /* Check we don't go over the max */
1256 if (qualifier > QEDF_RETRY_DELAY_MAX)
1258 QEDF_RETRY_DELAY_MAX;
1259 fcport->retry_delay_timestamp =
1260 jiffies + (qualifier * HZ / 10);
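/*
 * Since qualifier * HZ / 10 jiffies equals qualifier * 100 ms, the
 * qualifier is effectively in 100 ms units; e.g. a value of 50
 * (illustrative) defers new I/O for about 5 seconds.
 */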
1263 if (io_req->cdb_status ==
1264 SAM_STAT_TASK_SET_FULL)
1265 qedf->task_set_fulls++;
1270 if (io_req->fcp_resid)
1271 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1274 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1275 io_req->fcp_status);
1280 if (qedf_io_tracing)
1281 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1284 * We wait till the end of the function to clear the
1285 * outstanding bit in case we need to send an abort
1287 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1289 io_req->sc_cmd = NULL;
1290 sc_cmd->SCp.ptr = NULL;
1291 sc_cmd->scsi_done(sc_cmd);
1292 kref_put(&io_req->refcount, qedf_release_cmd);
1295 /* Return a SCSI command in some other context besides a normal completion */
1296 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1300 struct scsi_cmnd *sc_cmd;
1306 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1307 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1308 "io_req:%p scsi_done handling already done\n",
1314 * We will be done with this command after this call, so clear the outstanding bit.
1317 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1320 sc_cmd = io_req->sc_cmd;
1323 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1327 if (!virt_addr_valid(sc_cmd)) {
1328 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1332 if (!sc_cmd->SCp.ptr) {
1333 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1334 "another context.\n");
1338 if (!sc_cmd->device) {
1339 QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1344 if (!virt_addr_valid(sc_cmd->device)) {
1345 QEDF_ERR(&qedf->dbg_ctx,
1346 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1350 if (!sc_cmd->sense_buffer) {
1351 QEDF_ERR(&qedf->dbg_ctx,
1352 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1357 if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1358 QEDF_ERR(&qedf->dbg_ctx,
1359 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1364 if (!sc_cmd->scsi_done) {
1365 QEDF_ERR(&qedf->dbg_ctx,
1366 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
1371 qedf_unmap_sg_list(qedf, io_req);
1373 sc_cmd->result = result << 16;
1374 refcount = kref_read(&io_req->refcount);
1375 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1376 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1377 "allowed=%d retries=%d refcount=%d.\n",
1378 qedf->lport->host->host_no, sc_cmd->device->id,
1379 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1380 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1381 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1385 * Set resid to the whole buffer length so we won't try to reuse any
1386 * previously read data
1388 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1390 if (qedf_io_tracing)
1391 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1393 io_req->sc_cmd = NULL;
1394 sc_cmd->SCp.ptr = NULL;
1395 sc_cmd->scsi_done(sc_cmd);
1396 kref_put(&io_req->refcount, qedf_release_cmd);
1401 * Clear the io_req->sc_cmd backpointer so we don't try to process it again.
1404 io_req->sc_cmd = NULL;
1405 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
1409 * Handle warning type CQE completions. This is mainly used for REC timer expiration.
1412 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1413 struct qedf_ioreq *io_req)
1416 struct qedf_rport *fcport = io_req->fcport;
1417 u64 err_warn_bit_map;
1423 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1424 "xid=0x%x\n", io_req->xid);
1425 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1426 "err_warn_bitmap=%08x:%08x\n",
1427 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1428 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1429 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1430 "rx_buff_off=%08x, rx_id=%04x\n",
1431 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1432 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1433 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1435 /* Combine the error bitmap halves into a single 64-bit value */
1436 err_warn_bit_map = (u64)
1437 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1438 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1439 for (i = 0; i < 64; i++) {
1440 if (err_warn_bit_map & ((u64)1 << i)) {
1446 /* Check if REC TOV expired if this is a tape device */
1447 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1449 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1450 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1451 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1452 io_req->rx_buf_off =
1453 cqe->cqe_info.err_info.rx_buf_off;
1454 io_req->tx_buf_off =
1455 cqe->cqe_info.err_info.tx_buf_off;
1456 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1457 rval = qedf_send_rec(io_req);
1459 * We only want to abort the io_req if we
1460 * can't queue the REC command as we want to
1461 * keep the exchange open for recovery.
1471 init_completion(&io_req->abts_done);
1472 rval = qedf_initiate_abts(io_req, true);
1474 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1477 /* Cleanup a command when we receive an error detection completion */
1478 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1479 struct qedf_ioreq *io_req)
1486 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1487 "xid=0x%x\n", io_req->xid);
1488 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1489 "err_warn_bitmap=%08x:%08x\n",
1490 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1491 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1492 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1493 "rx_buff_off=%08x, rx_id=%04x\n",
1494 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1495 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1496 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1498 if (qedf->stop_io_on_error) {
1499 qedf_stop_all_io(qedf);
1503 init_completion(&io_req->abts_done);
1504 rval = qedf_initiate_abts(io_req, true);
1506 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1509 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1510 struct qedf_ioreq *els_req)
1512 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1513 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1514 kref_read(&els_req->refcount));
1517 * Need to distinguish this from a timeout when calling the
1520 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1522 /* Cancel the timer */
1523 cancel_delayed_work_sync(&els_req->timeout_work);
1525 /* Call callback function to complete command */
1526 if (els_req->cb_func && els_req->cb_arg) {
1527 els_req->cb_func(els_req->cb_arg);
1528 els_req->cb_arg = NULL;
1531 /* Release kref for original initiate_els */
1532 kref_put(&els_req->refcount, qedf_release_cmd);
1535 /* A value of -1 for lun is a wildcard that means flush all
1536 * active SCSI I/Os for the target.
1538 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1540 struct qedf_ioreq *io_req;
1541 struct qedf_ctx *qedf;
1542 struct qedf_cmd_mgr *cmd_mgr;
1544 unsigned long flags;
1552 /* Check that fcport is still offloaded */
1553 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1554 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1558 qedf = fcport->qedf;
1561 QEDF_ERR(NULL, "qedf is NULL.\n");
1565 /* Only wait for all commands to be queued in the Upload context */
1566 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1568 while (atomic_read(&fcport->ios_to_queue)) {
1569 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1570 "Waiting for %d I/Os to be queued\n",
1571 atomic_read(&fcport->ios_to_queue));
1572 if (wait_cnt == 0) {
1574 "%d IOs request could not be queued\n",
1575 atomic_read(&fcport->ios_to_queue));
1582 cmd_mgr = qedf->cmd_mgr;
1584 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1585 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1586 atomic_read(&fcport->num_active_ios), fcport,
1587 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1588 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1590 mutex_lock(&qedf->flush_mutex);
1592 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1594 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1595 fcport->lun_reset_lun = lun;
1598 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1599 io_req = &cmd_mgr->cmds[i];
1603 if (!io_req->fcport)
1606 spin_lock_irqsave(&cmd_mgr->lock, flags);
1608 if (io_req->alloc) {
1609 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1610 if (io_req->cmd_type == QEDF_SCSI_CMD)
1611 QEDF_ERR(&qedf->dbg_ctx,
1612 "Allocated but not queued, xid=0x%x\n",
1615 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1617 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1621 if (io_req->fcport != fcport)
1624 /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1625 * but RRQ is still pending.
1626 * Workaround: Within qedf_send_rrq, we check if the fcport is
1627 * NULL, and we drop the ref on the io_req to clean it up.
1629 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1630 refcount = kref_read(&io_req->refcount);
1631 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1632 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1633 io_req->xid, io_req->cmd_type, refcount);
1634 /* If RRQ work has been queued, try to cancel it and
1637 if (atomic_read(&io_req->state) ==
1638 QEDFC_CMD_ST_RRQ_WAIT) {
1639 if (cancel_delayed_work_sync
1640 (&io_req->rrq_work)) {
1641 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1642 "Putting reference for pending RRQ work xid=0x%x.\n",
1645 kref_put(&io_req->refcount,
1652 /* Only consider flushing ELS during target reset */
1653 if (io_req->cmd_type == QEDF_ELS &&
1655 rc = kref_get_unless_zero(&io_req->refcount);
1657 QEDF_ERR(&(qedf->dbg_ctx),
1658 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1659 io_req, io_req->xid);
1663 qedf_flush_els_req(qedf, io_req);
1665 * Release the kref and go back to the top of the loop.
1671 if (io_req->cmd_type == QEDF_ABTS) {
1673 rc = kref_get_unless_zero(&io_req->refcount);
1675 QEDF_ERR(&(qedf->dbg_ctx),
1676 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1677 io_req, io_req->xid);
1680 if (lun != -1 && io_req->lun != lun)
1683 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1684 "Flushing abort xid=0x%x.\n", io_req->xid);
1686 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1687 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1688 "Putting ref for cancelled RRQ work xid=0x%x.\n",
1690 kref_put(&io_req->refcount, qedf_release_cmd);
1693 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1694 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1695 "Putting ref for cancelled tmo work xid=0x%x.\n",
1697 qedf_initiate_cleanup(io_req, true);
1698 /* Notify eh_abort handler that ABTS is complete */
1701 complete(&io_req->abts_done);
1702 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1704 kref_put(&io_req->refcount, qedf_release_cmd);
1710 if (!io_req->sc_cmd)
1712 if (!io_req->sc_cmd->device) {
1713 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1714 "Device backpointer NULL for sc_cmd=%p.\n",
1716 /* Put reference for non-existent scsi_cmnd */
1717 io_req->sc_cmd = NULL;
1718 qedf_initiate_cleanup(io_req, false);
1719 kref_put(&io_req->refcount, qedf_release_cmd);
1723 if (io_req->lun != lun)
1728 * Use kref_get_unless_zero in the unlikely case the command
1729 * we're about to flush was completed in the normal SCSI path
1731 rc = kref_get_unless_zero(&io_req->refcount);
1733 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1734 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1738 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1739 "Cleanup xid=0x%x.\n", io_req->xid);
1742 /* Clean up the task and return the I/O to the mid-layer */
1743 qedf_initiate_cleanup(io_req, true);
1746 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1750 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1751 "Flushed 0x%x I/Os, active=0x%x.\n",
1752 flush_cnt, atomic_read(&fcport->num_active_ios));
1753 /* Only wait for all commands to complete in the Upload context */
1754 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1756 while (atomic_read(&fcport->num_active_ios)) {
1757 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1758 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1760 atomic_read(&fcport->num_active_ios),
1762 if (wait_cnt == 0) {
1763 QEDF_ERR(&qedf->dbg_ctx,
1764 "Flushed %d I/Os, active=%d.\n",
1766 atomic_read(&fcport->num_active_ios));
1767 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1768 io_req = &cmd_mgr->cmds[i];
1769 if (io_req->fcport &&
1770 io_req->fcport == fcport) {
1772 kref_read(&io_req->refcount);
1773 set_bit(QEDF_CMD_DIRTY,
1775 QEDF_ERR(&qedf->dbg_ctx,
1776 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1777 io_req, io_req->xid,
1792 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1793 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1794 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1795 mutex_unlock(&qedf->flush_mutex);
1799 * Initiate an ABTS middle path command. Note that we don't have to initialize
1800 * the task context for an ABTS task.
1802 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1804 struct fc_lport *lport;
1805 struct qedf_rport *fcport = io_req->fcport;
1806 struct fc_rport_priv *rdata;
1807 struct qedf_ctx *qedf;
1811 unsigned long flags;
1812 struct fcoe_wqe *sqe;
1816 /* Sanity check qedf_rport before dereferencing any pointers */
1817 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1818 QEDF_ERR(NULL, "tgt not offloaded\n");
1823 qedf = fcport->qedf;
1824 rdata = fcport->rdata;
1826 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1827 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1832 r_a_tov = rdata->r_a_tov;
1833 lport = qedf->lport;
1835 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1836 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1838 goto drop_rdata_kref;
1841 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1842 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1844 goto drop_rdata_kref;
1847 /* Ensure room on SQ */
1848 if (!atomic_read(&fcport->free_sqes)) {
1849 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1851 goto drop_rdata_kref;
1854 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1855 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1857 goto drop_rdata_kref;
1860 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1861 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1862 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1863 QEDF_ERR(&qedf->dbg_ctx,
1864 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1865 io_req->xid, io_req->sc_cmd);
1867 goto drop_rdata_kref;
1870 kref_get(&io_req->refcount);
1873 qedf->control_requests++;
1874 qedf->packet_aborts++;
1876 /* Set the command type to abort */
1877 io_req->cmd_type = QEDF_ABTS;
1878 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1880 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1881 refcount = kref_read(&io_req->refcount);
1882 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1883 "ABTS io_req xid = 0x%x refcount=%d\n",
1886 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1888 spin_lock_irqsave(&fcport->rport_lock, flags);
1890 sqe_idx = qedf_get_sqe_idx(fcport);
1891 sqe = &fcport->sq[sqe_idx];
1892 memset(sqe, 0, sizeof(struct fcoe_wqe));
1893 io_req->task_params->sqe = sqe;
1895 init_initiator_abort_fcoe_task(io_req->task_params);
1896 qedf_ring_doorbell(fcport);
1898 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1901 kref_put(&rdata->kref, fc_rport_destroy);
1906 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1907 struct qedf_ioreq *io_req)
1912 struct qedf_rport *fcport = io_req->fcport;
1914 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1915 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1918 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1920 /* This was added at a point when we were scheduling abts_compl &
1921 * cleanup_compl on different CPUs and there was a possibility of
1922 * the io_req being freed from the other context before we got here.
1925 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1926 "Dropping ABTS completion xid=0x%x as fcport is NULL",
1932 * When flush is active, let the cmds be completed from the cleanup context.
1935 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1936 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1937 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1938 "Dropping ABTS completion xid=0x%x as fcport is flushing",
1943 if (!cancel_delayed_work(&io_req->timeout_work)) {
1944 QEDF_ERR(&qedf->dbg_ctx,
1945 "Wasn't able to cancel abts timeout work.\n");
1949 case FC_RCTL_BA_ACC:
1950 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1951 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1952 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1953 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
1955 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1956 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
1961 * Don't release this cmd yet. It will be released
1962 * after we get the RRQ response.
1964 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1965 msecs_to_jiffies(qedf->lport->r_a_tov));
1966 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
1968 /* For error cases let the cleanup return the command */
1969 case FC_RCTL_BA_RJT:
1970 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1971 "ABTS response - RJT\n");
1972 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1975 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1979 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1981 if (io_req->sc_cmd) {
1982 if (io_req->return_scsi_cmd_on_abts)
1983 qedf_scsi_done(qedf, io_req, DID_ERROR);
1986 /* Notify eh_abort handler that ABTS is complete */
1987 complete(&io_req->abts_done);
1989 kref_put(&io_req->refcount, qedf_release_cmd);
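/*
 * ABTS lifecycle sketch: qedf_initiate_abts() posts the abort and arms
 * a timer via qedf_cmd_timer_set(); on BA_ACC this handler parks the
 * command in QEDFC_CMD_ST_RRQ_WAIT and schedules rrq_work after
 * R_A_TOV, and the final reference is dropped only once the RRQ
 * exchange finishes (or on the RJT/error paths above).
 */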
1992 int qedf_init_mp_req(struct qedf_ioreq *io_req)
1994 struct qedf_mp_req *mp_req;
1995 struct scsi_sge *mp_req_bd;
1996 struct scsi_sge *mp_resp_bd;
1997 struct qedf_ctx *qedf = io_req->fcport->qedf;
2001 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2003 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2004 memset(mp_req, 0, sizeof(struct qedf_mp_req));
2006 if (io_req->cmd_type != QEDF_ELS) {
2007 mp_req->req_len = sizeof(struct fcp_cmnd);
2008 io_req->data_xfer_len = mp_req->req_len;
2010 mp_req->req_len = io_req->data_xfer_len;
2012 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2013 &mp_req->req_buf_dma, GFP_KERNEL);
2014 if (!mp_req->req_buf) {
2015 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2016 qedf_free_mp_resc(io_req);
2020 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2021 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2022 if (!mp_req->resp_buf) {
2023 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2025 qedf_free_mp_resc(io_req);
2029 /* Allocate and map mp_req_bd and mp_resp_bd */
2030 sz = sizeof(struct scsi_sge);
2031 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2032 &mp_req->mp_req_bd_dma, GFP_KERNEL);
2033 if (!mp_req->mp_req_bd) {
2034 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2035 qedf_free_mp_resc(io_req);
2039 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2040 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2041 if (!mp_req->mp_resp_bd) {
2042 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2043 qedf_free_mp_resc(io_req);
2048 addr = mp_req->req_buf_dma;
2049 mp_req_bd = mp_req->mp_req_bd;
2050 mp_req_bd->sge_addr.lo = U64_LO(addr);
2051 mp_req_bd->sge_addr.hi = U64_HI(addr);
2052 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2055 * MP buffer is either a task mgmt command or an ELS.
2056 * So the assumption is that it consumes a single bd
2057 * entry in the bd table
2059 mp_resp_bd = mp_req->mp_resp_bd;
2060 addr = mp_req->resp_buf_dma;
2061 mp_resp_bd->sge_addr.lo = U64_LO(addr);
2062 mp_resp_bd->sge_addr.hi = U64_HI(addr);
2063 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
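/*
 * Buffer layout sketch: the middle-path request and response each use
 * one QEDF_PAGE_SIZE DMA buffer described by a single scsi_sge, which
 * is why qedf_init_mp_task() sets num_sges = 1 for both directions.
 */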
2069 * Last-ditch effort to clear the port if it's stuck. Used only after a
2070 * cleanup task times out.
2072 static void qedf_drain_request(struct qedf_ctx *qedf)
2074 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2075 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2079 /* Set bit to return all queuecommand requests as busy */
2080 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2082 /* Call qed drain request for function. Should be synchronous */
2083 qed_ops->common->drain(qedf->cdev);
2085 /* Settle time for CQEs to be returned */
2088 /* Unplug and continue */
2089 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2093 * Returns SUCCESS if the cleanup task does not time out, otherwise returns FAILED.
2096 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2097 bool return_scsi_cmd_on_abts)
2099 struct qedf_rport *fcport;
2100 struct qedf_ctx *qedf;
2102 struct e4_fcoe_task_context *task;
2105 unsigned long flags;
2106 struct fcoe_wqe *sqe;
2110 fcport = io_req->fcport;
2112 QEDF_ERR(NULL, "fcport is NULL.\n");
2116 /* Sanity check qedf_rport before dereferencing any pointers */
2117 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2118 QEDF_ERR(NULL, "tgt not offloaded\n");
2123 qedf = fcport->qedf;
2125 QEDF_ERR(NULL, "qedf is NULL.\n");
2129 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2130 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2131 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2132 "cleanup processing or already completed.\n",
2136 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2138 /* Ensure room on SQ */
2139 if (!atomic_read(&fcport->free_sqes)) {
2140 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2141 /* Need to make sure we clear the flag since it was set */
2142 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2146 if (io_req->cmd_type == QEDF_CLEANUP) {
2147 QEDF_ERR(&qedf->dbg_ctx,
2148 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2149 io_req->xid, io_req->cmd_type);
2150 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2154 refcount = kref_read(&io_req->refcount);
2156 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2157 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2158 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2159 refcount, fcport, fcport->rdata->ids.port_id);
2161 /* Cleanup cmds re-use the same TID as the original I/O */
2163 io_req->cmd_type = QEDF_CLEANUP;
2164 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2166 task = qedf_get_task_mem(&qedf->tasks, xid);
2168 init_completion(&io_req->cleanup_done);
2170 spin_lock_irqsave(&fcport->rport_lock, flags);
2172 sqe_idx = qedf_get_sqe_idx(fcport);
2173 sqe = &fcport->sq[sqe_idx];
2174 memset(sqe, 0, sizeof(struct fcoe_wqe));
2175 io_req->task_params->sqe = sqe;
2177 init_initiator_cleanup_fcoe_task(io_req->task_params);
2178 qedf_ring_doorbell(fcport);
2180 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2182 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2183 QEDF_CLEANUP_TIMEOUT * HZ);
2188 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2189 "xid=%x.\n", io_req->xid);
2190 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2191 /* Issue a drain request if cleanup task times out */
2192 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2193 qedf_drain_request(qedf);
2196 /* If it TASK MGMT handle it, reference will be decreased
2197 * in qedf_execute_tmf
2199 if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2200 io_req->tm_flags == FCP_TMF_TGT_RESET) {
2201 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2202 io_req->sc_cmd = NULL;
2203 complete(&io_req->tm_done);
2206 if (io_req->sc_cmd) {
2207 if (io_req->return_scsi_cmd_on_abts)
2208 qedf_scsi_done(qedf, io_req, DID_ERROR);
2212 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2214 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
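/*
 * Firmware completion handler for a cleanup task: clear the in-cleanup
 * flag and wake the waiter in qedf_initiate_cleanup().
 */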
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		  io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->cleanup_done);
}

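/*
 * Build and post an FCP task management request (LUN or target reset) on
 * the rport's send queue, wait up to QEDF_TM_TIMEOUT seconds for the
 * response, then flush the outstanding I/Os for the LUN/target unless the
 * connection is being uploaded. Returns SUCCESS or FAILED.
 */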
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	int lun = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	lun = (int)sc_cmd->device->lun;
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		goto no_flush;
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF\n");
		rc = -EAGAIN;
		goto no_flush;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf->lun_resets++;
	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = false;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = 0x%x\n",
		  xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	tmo = wait_for_completion_timeout(&io_req->tm_done,
					  QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
		/* Clear outstanding bit since command timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	/*
	 * Double check that fcport has not gone into an uploading state before
	 * executing the command flush for the LUN/target.
	 */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is uploading, not executing flush.\n");
		goto no_flush;
	}

	/* We do not need this io_req any more */
	kref_put(&io_req->refcount, qedf_release_cmd);

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);
	else
		qedf_flush_active_ios(fcport, -1);

no_flush:
	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
	return rc;
}

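/*
 * SCSI error handler entry point for LUN and target reset. Validates the
 * rport/lport state, pins the remote port data with a kref for the
 * duration of the operation, and hands off to qedf_execute_tmf().
 */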
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
	int rc = SUCCESS;
	int rval;
	struct qedf_ioreq *io_req = NULL;
	int ref_cnt = 0;
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_ERR(NULL,
		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
		 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
		 (int)sc_cmd->device->lun);

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(NULL, "stale rport\n");
		return FAILED;
	}

	QEDF_ERR(NULL, "portid=%06x tm_flags=%s\n", rdata->ids.port_id,
		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
		 "LUN RESET");

	if (sc_cmd->SCp.ptr) {
		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
		ref_cnt = kref_read(&io_req->refcount);
		QEDF_ERR(NULL,
			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
			 io_req, io_req->xid, ref_cnt);
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto tmf_err;

	if (!fcport) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
		rc = SUCCESS;
		goto tmf_err;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		if (!fcport->rdata)
			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
				 fcport);
		else
			QEDF_ERR(&qedf->dbg_ctx,
				 "fcport %p port_id=%06x is uploading.\n",
				 fcport, fcport->rdata->ids.port_id);
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}

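/*
 * Firmware completion handler for a task management request: parse the
 * FCP_RSP from the CQE and wake the waiter in qedf_execute_tmf().
 */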
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}

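/*
 * Handle an unsolicited frame received through the BDQ: validate the
 * buffer index, copy the frame into a newly allocated fc_frame, and defer
 * it to qedf_io_wq so libfc can process it in a non-atomic context. The
 * BDQ producer index is advanced whether or not the frame is usable.
 */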
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
		  le32_to_cpu(p_bd_info->address.hi),
		  le32_to_cpu(p_bd_info->address.lo),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
		  qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
			 bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "bdq_addr is NULL, dropping unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
			  "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
			       (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx),
			  "Could not allocate work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);

increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	readw(qedf->bdq_primary_prod);		/* read back to flush the posted write */
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	readw(qedf->bdq_secondary_prod);	/* read back to flush the posted write */

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}