1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic FCoE Offload Driver
4 * Copyright (c) 2016-2018 Cavium Inc.
6 #include <linux/spinlock.h>
7 #include <linux/vmalloc.h>
9 #include <scsi/scsi_tcq.h>
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12 unsigned int timer_msec)
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
15 msecs_to_jiffies(timer_msec));
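/*
 * Delayed-work handler armed by qedf_cmd_timer_set(). It runs when an
 * outstanding ABTS, ELS or sequence cleanup request has not completed
 * within timer_msec and drives the per-command-type recovery below.
 */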
18 static void qedf_cmd_timeout(struct work_struct *work)
21 struct qedf_ioreq *io_req =
22 container_of(work, struct qedf_ioreq, timeout_work.work);
23 struct qedf_ctx *qedf;
24 struct qedf_rport *fcport;
26 fcport = io_req->fcport;
27 if (io_req->fcport == NULL) {
28 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
34 switch (io_req->cmd_type) {
37 QEDF_INFO(NULL, QEDF_LOG_IO,
38 "qedf is NULL for ABTS xid=0x%x.\n",
43 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
45 /* Cleanup timed out ABTS */
46 qedf_initiate_cleanup(io_req, true);
47 complete(&io_req->abts_done);
50 * Need to call kref_put for reference taken when initiate_abts
51 * was called since abts_compl won't be called now that we've
52 * cleaned up the task.
54 kref_put(&io_req->refcount, qedf_release_cmd);
56 /* Clear in abort bit now that we're done with the command */
57 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
60 * Now that the original I/O and the ABTS are complete see
61 * if we need to reconnect to the target.
63 qedf_restart_rport(fcport);
67 QEDF_INFO(NULL, QEDF_LOG_IO,
68 "qedf is NULL for ELS xid=0x%x.\n",
72 /* ELS request no longer outstanding since it timed out */
73 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
75 kref_get(&io_req->refcount);
77 * Don't attempt to clean an ELS timeout as any subsequent
78 * ABTS or cleanup requests just hang. For now just free
79 * the resources of the original I/O and the RRQ
81 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
83 qedf_initiate_cleanup(io_req, true);
84 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
85 /* Call callback function to complete command */
86 if (io_req->cb_func && io_req->cb_arg) {
87 io_req->cb_func(io_req->cb_arg);
88 io_req->cb_arg = NULL;
90 kref_put(&io_req->refcount, qedf_release_cmd);
92 case QEDF_SEQ_CLEANUP:
93 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
94 "xid=0x%x.\n", io_req->xid);
95 qedf_initiate_cleanup(io_req, true);
96 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
97 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
100 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
101 "Hit default case, xid=0x%x.\n", io_req->xid);
106 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
108 struct io_bdt *bdt_info;
109 struct qedf_ctx *qedf = cmgr->qedf;
112 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
115 struct qedf_ioreq *io_req;
117 num_ios = max_xid - min_xid + 1;
119 /* Free fcoe_bdt_ctx structures */
120 if (!cmgr->io_bdt_pool) {
121 QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
125 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
126 for (i = 0; i < num_ios; i++) {
127 bdt_info = cmgr->io_bdt_pool[i];
128 if (bdt_info->bd_tbl) {
129 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
130 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
131 bdt_info->bd_tbl = NULL;
135 /* Destroy io_bdt pool */
136 for (i = 0; i < num_ios; i++) {
137 kfree(cmgr->io_bdt_pool[i]);
138 cmgr->io_bdt_pool[i] = NULL;
141 kfree(cmgr->io_bdt_pool);
142 cmgr->io_bdt_pool = NULL;
146 for (i = 0; i < num_ios; i++) {
147 io_req = &cmgr->cmds[i];
148 kfree(io_req->sgl_task_params);
149 kfree(io_req->task_params);
150 /* Make sure we free per command sense buffer */
151 if (io_req->sense_buffer)
152 dma_free_coherent(&qedf->pdev->dev,
153 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
154 io_req->sense_buffer_dma);
155 cancel_delayed_work_sync(&io_req->rrq_work);
158 /* Free command manager itself */
162 static void qedf_handle_rrq(struct work_struct *work)
164 struct qedf_ioreq *io_req =
165 container_of(work, struct qedf_ioreq, rrq_work.work);
167 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
168 qedf_send_rrq(io_req);
172 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
174 struct qedf_cmd_mgr *cmgr;
175 struct io_bdt *bdt_info;
176 struct qedf_ioreq *io_req;
181 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
183 /* Make sure num_queues is already set before calling this function */
184 if (!qedf->num_queues) {
185 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
189 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
190 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
191 "max_xid 0x%x.\n", min_xid, max_xid);
195 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
196 "0x%x.\n", min_xid, max_xid);
198 num_ios = max_xid - min_xid + 1;
200 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
202 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
207 spin_lock_init(&cmgr->lock);
210 * Initialize I/O request fields.
214 for (i = 0; i < num_ios; i++) {
215 io_req = &cmgr->cmds[i];
216 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
220 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
222 /* Allocate DMA memory to hold sense buffer */
223 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
224 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
226 if (!io_req->sense_buffer) {
227 QEDF_ERR(&qedf->dbg_ctx,
228 "Failed to alloc sense buffer.\n");
232 /* Allocate task parameters to pass to f/w init functions */
233 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
235 if (!io_req->task_params) {
236 QEDF_ERR(&(qedf->dbg_ctx),
237 "Failed to allocate task_params for xid=0x%x\n",
243 * Allocate scatter/gather list info to pass to f/w init functions
246 io_req->sgl_task_params = kzalloc(
247 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
248 if (!io_req->sgl_task_params) {
249 QEDF_ERR(&(qedf->dbg_ctx),
250 "Failed to allocate sgl_task_params for xid=0x%x\n",
256 /* Allocate pool of io_bdts - one for each qedf_ioreq */
257 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
260 if (!cmgr->io_bdt_pool) {
261 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
265 for (i = 0; i < num_ios; i++) {
266 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
268 if (!cmgr->io_bdt_pool[i]) {
269 QEDF_WARN(&(qedf->dbg_ctx),
270 "Failed to alloc io_bdt_pool[%d].\n", i);
275 for (i = 0; i < num_ios; i++) {
276 bdt_info = cmgr->io_bdt_pool[i];
277 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
278 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
279 &bdt_info->bd_tbl_dma, GFP_KERNEL);
280 if (!bdt_info->bd_tbl) {
281 QEDF_WARN(&(qedf->dbg_ctx),
282 "Failed to alloc bdt_tbl[%d].\n", i);
286 atomic_set(&cmgr->free_list_cnt, num_ios);
287 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
288 "cmgr->free_list_cnt=%d.\n",
289 atomic_read(&cmgr->free_list_cnt));
294 qedf_cmd_mgr_free(cmgr);
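/*
 * Allocate a qedf_ioreq (and xid) for a new command. Allocation is
 * throttled three ways: the rport must have a free SQE, fewer than
 * NUM_RW_TASKS_PER_CONNECTION active I/Os, and GBL_RSVD_TASKS entries
 * are kept in reserve globally. A free slot is then located by scanning
 * the command array round-robin under cmd_mgr->lock.
 */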
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
300 struct qedf_ctx *qedf = fcport->qedf;
301 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
302 struct qedf_ioreq *io_req = NULL;
303 struct io_bdt *bd_tbl;
309 free_sqes = atomic_read(&fcport->free_sqes);
312 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
313 "Returning NULL, free_sqes=%d.\n ",
318 /* Limit the number of outstanding R/W tasks */
319 if ((atomic_read(&fcport->num_active_ios) >=
320 NUM_RW_TASKS_PER_CONNECTION)) {
321 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
322 "Returning NULL, num_active_ios=%d.\n",
323 atomic_read(&fcport->num_active_ios));
327 /* Limit global TIDs for certain tasks */
328 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
329 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
330 "Returning NULL, free_list_cnt=%d.\n",
331 atomic_read(&cmd_mgr->free_list_cnt));
335 spin_lock_irqsave(&cmd_mgr->lock, flags);
336 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
337 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
339 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
342 /* Check to make sure command was previously freed */
347 if (i == FCOE_PARAMS_NUM_TASKS) {
348 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
352 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
353 QEDF_ERR(&qedf->dbg_ctx,
354 "io_req found to be dirty ox_id = 0x%x.\n",
357 /* Clear any flags now that we've reallocated the xid */
360 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
362 atomic_inc(&fcport->num_active_ios);
363 atomic_dec(&fcport->free_sqes);
365 atomic_dec(&cmd_mgr->free_list_cnt);
367 io_req->cmd_mgr = cmd_mgr;
368 io_req->fcport = fcport;
370 /* Clear any stale sc_cmd back pointer */
371 io_req->sc_cmd = NULL;
374 /* Hold the io_req against deletion */
375 kref_init(&io_req->refcount); /* ID: 001 */
376 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
378 /* Bind io_bdt for this io_req */
379 /* Have a static link between io_req and io_bdt_pool */
380 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
381 if (bd_tbl == NULL) {
382 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
383 kref_put(&io_req->refcount, qedf_release_cmd);
386 bd_tbl->io_req = io_req;
387 io_req->cmd_type = cmd_type;
388 io_req->tm_flags = 0;
390 /* Reset sequence offset data */
391 io_req->rx_buf_off = 0;
392 io_req->tx_buf_off = 0;
393 io_req->rx_id = 0xffff; /* No RX_ID assigned yet */
398 /* Record failure for stats and return NULL to caller */
399 qedf->alloc_failures++;
403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
405 struct qedf_mp_req *mp_req = &(io_req->mp_req);
406 struct qedf_ctx *qedf = io_req->fcport->qedf;
407 uint64_t sz = sizeof(struct scsi_sge);
410 if (mp_req->mp_req_bd) {
411 dma_free_coherent(&qedf->pdev->dev, sz,
412 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
413 mp_req->mp_req_bd = NULL;
415 if (mp_req->mp_resp_bd) {
416 dma_free_coherent(&qedf->pdev->dev, sz,
417 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
418 mp_req->mp_resp_bd = NULL;
420 if (mp_req->req_buf) {
421 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
422 mp_req->req_buf, mp_req->req_buf_dma);
423 mp_req->req_buf = NULL;
425 if (mp_req->resp_buf) {
426 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
427 mp_req->resp_buf, mp_req->resp_buf_dma);
428 mp_req->resp_buf = NULL;
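/*
 * Final kref release handler for an io_req: frees any middle-path
 * resources, returns the entry to the free pool, drops the per-rport
 * active I/O count and bumps task_retry_identifier so the next use of
 * this xid presents a fresh retry id to the firmware.
 */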
432 void qedf_release_cmd(struct kref *ref)
434 struct qedf_ioreq *io_req =
435 container_of(ref, struct qedf_ioreq, refcount);
436 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
437 struct qedf_rport *fcport = io_req->fcport;
440 if (io_req->cmd_type == QEDF_SCSI_CMD) {
441 QEDF_WARN(&fcport->qedf->dbg_ctx,
442 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
443 io_req, io_req->xid);
444 WARN_ON(io_req->sc_cmd);
447 if (io_req->cmd_type == QEDF_ELS ||
448 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
449 qedf_free_mp_resc(io_req);
451 atomic_inc(&cmd_mgr->free_list_cnt);
452 atomic_dec(&fcport->num_active_ios);
453 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
454 if (atomic_read(&fcport->num_active_ios) < 0) {
455 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
459 /* Increment task retry identifier now that the request is released */
460 io_req->task_retry_identifier++;
461 io_req->fcport = NULL;
463 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
465 spin_lock_irqsave(&cmd_mgr->lock, flags);
466 io_req->fcport = NULL;
468 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
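/*
 * DMA-map the scatter/gather list and build the firmware SGE table.
 * Reads and short lists are treated as FAST SGLs; writes with 8 or more
 * elements where an intermediate element is shorter than QEDF_PAGE_SIZE
 * are marked SLOW (small_mid_sge is set in the SGL task params later).
 */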
471 static int qedf_map_sg(struct qedf_ioreq *io_req)
473 struct scsi_cmnd *sc = io_req->sc_cmd;
474 struct Scsi_Host *host = sc->device->host;
475 struct fc_lport *lport = shost_priv(host);
476 struct qedf_ctx *qedf = lport_priv(lport);
477 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
478 struct scatterlist *sg;
486 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
487 scsi_sg_count(sc), sc->sc_data_direction);
488 sg = scsi_sglist(sc);
490 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
492 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
493 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
495 scsi_for_each_sg(sc, sg, sg_count, i) {
496 sg_len = (u32)sg_dma_len(sg);
497 addr = (u64)sg_dma_address(sg);
500 * Intermediate s/g element so check if start address
501 * is page aligned. Only required for writes and only if the
502 * number of scatter/gather elements is 8 or more.
504 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
505 (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
506 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
508 bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
509 bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
510 bd[bd_count].sge_len = cpu_to_le32(sg_len);
513 byte_count += sg_len;
516 /* If neither FAST nor SLOW was set, default to FAST */
517 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
518 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
520 if (byte_count != scsi_bufflen(sc))
521 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
522 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
523 scsi_bufflen(sc), io_req->xid);
528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
530 struct scsi_cmnd *sc = io_req->sc_cmd;
531 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
534 if (scsi_sg_count(sc)) {
535 bd_count = qedf_map_sg(io_req);
540 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
543 io_req->bd_tbl->bd_valid = bd_count;
548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
549 struct fcp_cmnd *fcp_cmnd)
551 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
553 /* fcp_cmnd is 32 bytes */
554 memset(fcp_cmnd, 0, FCP_CMND_LEN);
556 /* 8 bytes: SCSI LUN info */
557 int_to_scsilun(sc_cmd->device->lun,
558 (struct scsi_lun *)&fcp_cmnd->fc_lun);
560 /* 4 bytes: flag info */
561 fcp_cmnd->fc_pri_ta = 0;
562 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
563 fcp_cmnd->fc_flags = io_req->io_req_flags;
564 fcp_cmnd->fc_cmdref = 0;
566 /* Populate data direction */
567 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
568 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
570 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
571 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
572 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
573 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
576 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
578 /* 16 bytes: CDB information */
579 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
580 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
582 /* 4 bytes: FCP data length */
583 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
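/*
 * Build the firmware task context for a regular SCSI command: fill in
 * fcoe_task_params and the SGL task params, select the completion queue
 * as smp_processor_id() % num_queues, byte-swap the FCP_CMND IU (FC is
 * big endian) and hand everything to init_initiator_rw_fcoe_task().
 */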
586 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
587 struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
588 struct fcoe_wqe *sqe)
590 enum fcoe_task_type task_type;
591 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
592 struct io_bdt *bd_tbl = io_req->bd_tbl;
596 struct qedf_ctx *qedf = fcport->qedf;
597 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
598 struct regpair sense_data_buffer_phys_addr;
603 /* Note init_initiator_rw_fcoe_task memsets the task context */
604 io_req->task = task_ctx;
605 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
606 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
607 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
609 /* Set task type based on the DMA direction of the command */
610 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
611 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
613 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
614 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
615 tx_io_size = io_req->data_xfer_len;
617 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
618 rx_io_size = io_req->data_xfer_len;
622 /* Setup the fields for fcoe_task_params */
623 io_req->task_params->context = task_ctx;
624 io_req->task_params->sqe = sqe;
625 io_req->task_params->task_type = task_type;
626 io_req->task_params->tx_io_size = tx_io_size;
627 io_req->task_params->rx_io_size = rx_io_size;
628 io_req->task_params->conn_cid = fcport->fw_cid;
629 io_req->task_params->itid = io_req->xid;
630 io_req->task_params->cq_rss_number = cq_idx;
631 io_req->task_params->is_tape_device = fcport->dev_type;
633 /* Fill in information for scatter/gather list */
634 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
635 bd_count = bd_tbl->bd_valid;
636 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
637 io_req->sgl_task_params->sgl_phys_addr.lo =
638 U64_LO(bd_tbl->bd_tbl_dma);
639 io_req->sgl_task_params->sgl_phys_addr.hi =
640 U64_HI(bd_tbl->bd_tbl_dma);
641 io_req->sgl_task_params->num_sges = bd_count;
642 io_req->sgl_task_params->total_buffer_size =
643 scsi_bufflen(io_req->sc_cmd);
644 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
645 io_req->sgl_task_params->small_mid_sge = 1;
647 io_req->sgl_task_params->small_mid_sge = 0;
650 /* Fill in physical address of sense buffer */
651 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
652 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
654 /* fill FCP_CMND IU */
655 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
657 /* Swap fcp_cmnd since FC is big endian */
658 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
659 for (i = 0; i < cnt; i++) {
660 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
662 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
664 init_initiator_rw_fcoe_task(io_req->task_params,
665 io_req->sgl_task_params,
666 sense_data_buffer_phys_addr,
667 io_req->task_retry_identifier, fcp_cmnd);
669 /* Increment SGL type counters */
670 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
671 qedf->slow_sge_ios++;
673 qedf->fast_sge_ios++;
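/*
 * Build the task context for a middle-path (ELS/TMF) request. These use
 * single-entry SGLs for the request and response buffers, complete on
 * CQ 0 and carry the FC header supplied in mp_req->req_fc_hdr.
 */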
676 void qedf_init_mp_task(struct qedf_ioreq *io_req,
677 struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
679 struct qedf_mp_req *mp_req = &(io_req->mp_req);
680 struct qedf_rport *fcport = io_req->fcport;
681 struct qedf_ctx *qedf = io_req->fcport->qedf;
682 struct fc_frame_header *fc_hdr;
683 struct fcoe_tx_mid_path_params task_fc_hdr;
684 struct scsi_sgl_task_params tx_sgl_task_params;
685 struct scsi_sgl_task_params rx_sgl_task_params;
687 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
688 "Initializing MP task for cmd_type=%d\n",
691 qedf->control_requests++;
693 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
694 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
695 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
696 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
698 /* Setup the task from io_req for easy reference */
699 io_req->task = task_ctx;
701 /* Setup the fields for fcoe_task_params */
702 io_req->task_params->context = task_ctx;
703 io_req->task_params->sqe = sqe;
704 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
705 io_req->task_params->tx_io_size = io_req->data_xfer_len;
706 /* rx_io_size tells the f/w how large a response buffer we have */
707 io_req->task_params->rx_io_size = PAGE_SIZE;
708 io_req->task_params->conn_cid = fcport->fw_cid;
709 io_req->task_params->itid = io_req->xid;
710 /* Return middle path commands on CQ 0 */
711 io_req->task_params->cq_rss_number = 0;
712 io_req->task_params->is_tape_device = fcport->dev_type;
714 fc_hdr = &(mp_req->req_fc_hdr);
715 /* Set OX_ID and RX_ID based on driver task id */
716 fc_hdr->fh_ox_id = io_req->xid;
717 fc_hdr->fh_rx_id = htons(0xffff);
719 /* Set up FC header information */
720 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
721 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
722 task_fc_hdr.type = fc_hdr->fh_type;
723 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
724 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
725 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
726 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
728 /* Set up s/g list parameters for request buffer */
729 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
730 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
731 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
732 tx_sgl_task_params.num_sges = 1;
733 /* Total buffer size is the middle-path request transfer length */
734 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
735 tx_sgl_task_params.small_mid_sge = 0;
737 /* Set up s/g list parameters for response buffer */
738 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
739 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
740 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
741 rx_sgl_task_params.num_sges = 1;
742 /* Response buffer is a single PAGE_SIZE s/g element */
743 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
744 rx_sgl_task_params.small_mid_sge = 0;
748 * The last argument is 0 because the previous code never requested the
749 * FC header information.
751 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
754 &rx_sgl_task_params, 0);
757 /* Presumed that fcport->rport_lock is held */
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
760 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
763 rval = fcport->sq_prod_idx;
765 /* Adjust ring index */
766 fcport->sq_prod_idx++;
767 fcport->fw_sq_prod_idx++;
768 if (fcport->sq_prod_idx == total_sqe)
769 fcport->sq_prod_idx = 0;
774 void qedf_ring_doorbell(struct qedf_rport *fcport)
776 struct fcoe_db_data dbell = { 0 };
780 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
781 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
782 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
783 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
785 dbell.sq_prod = fcport->fw_sq_prod_idx;
786 /* wmb makes sure that the BDs data is updated before updating the
787 * producer, otherwise FW may read old data from the BDs.
791 writel(*(u32 *)&dbell, fcport->p_doorbell);
793 * Fence required to flush the write combined buffer, since another
794 * CPU may write to the same doorbell address and data may be lost
795 * due to relaxed order nature of write combined bar.
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
803 struct qedf_ctx *qedf = fcport->qedf;
804 struct qedf_io_log *io_log;
805 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
809 spin_lock_irqsave(&qedf->io_trace_lock, flags);
811 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
812 io_log->direction = direction;
813 io_log->task_id = io_req->xid;
814 io_log->port_id = fcport->rdata->ids.port_id;
815 io_log->lun = sc_cmd->device->lun;
816 io_log->op = op = sc_cmd->cmnd[0];
817 io_log->lba[0] = sc_cmd->cmnd[2];
818 io_log->lba[1] = sc_cmd->cmnd[3];
819 io_log->lba[2] = sc_cmd->cmnd[4];
820 io_log->lba[3] = sc_cmd->cmnd[5];
821 io_log->bufflen = scsi_bufflen(sc_cmd);
822 io_log->sg_count = scsi_sg_count(sc_cmd);
823 io_log->result = sc_cmd->result;
824 io_log->jiffies = jiffies;
825 io_log->refcount = kref_read(&io_req->refcount);
827 if (direction == QEDF_IO_TRACE_REQ) {
828 /* For requests we only care about the submission CPU */
829 io_log->req_cpu = io_req->cpu;
832 } else if (direction == QEDF_IO_TRACE_RSP) {
833 io_log->req_cpu = io_req->cpu;
834 io_log->int_cpu = io_req->int_cpu;
835 io_log->rsp_cpu = smp_processor_id();
838 io_log->sge_type = io_req->sge_type;
840 qedf->io_trace_idx++;
841 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
842 qedf->io_trace_idx = 0;
844 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
847 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
849 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
850 struct Scsi_Host *host = sc_cmd->device->host;
851 struct fc_lport *lport = shost_priv(host);
852 struct qedf_ctx *qedf = lport_priv(lport);
853 struct e4_fcoe_task_context *task_ctx;
855 struct fcoe_wqe *sqe;
858 /* Initialize rest of io_req fields */
859 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
860 sc_cmd->SCp.ptr = (char *)io_req;
861 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
863 /* Record which cpu this request is associated with */
864 io_req->cpu = smp_processor_id();
866 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
867 io_req->io_req_flags = QEDF_READ;
868 qedf->input_requests++;
869 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
870 io_req->io_req_flags = QEDF_WRITE;
871 qedf->output_requests++;
873 io_req->io_req_flags = 0;
874 qedf->control_requests++;
879 /* Build buffer descriptor list for firmware from sg list */
880 if (qedf_build_bd_list_from_sg(io_req)) {
881 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
882 /* Release cmd will release io_req, but sc_cmd is assigned */
883 io_req->sc_cmd = NULL;
884 kref_put(&io_req->refcount, qedf_release_cmd);
888 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
889 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
890 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
891 /* Release cmd will release io_req, but sc_cmd is assigned */
892 io_req->sc_cmd = NULL;
893 kref_put(&io_req->refcount, qedf_release_cmd);
897 /* Record the LUN number for later use if we need it */
898 io_req->lun = (int)sc_cmd->device->lun;
900 /* Obtain free SQE */
901 sqe_idx = qedf_get_sqe_idx(fcport);
902 sqe = &fcport->sq[sqe_idx];
903 memset(sqe, 0, sizeof(struct fcoe_wqe));
905 /* Get the task context */
906 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
908 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
910 /* Release cmd will release io_req, but sc_cmd is assigned */
911 io_req->sc_cmd = NULL;
912 kref_put(&io_req->refcount, qedf_release_cmd);
916 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
919 qedf_ring_doorbell(fcport);
921 /* Set that command is with the firmware now */
922 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
924 if (qedf_io_tracing && io_req->sc_cmd)
925 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
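/*
 * .queuecommand entry point. Commands are failed or flow-controlled here
 * if the driver is unloading, MSI-X is unavailable, the remote port is
 * not ready, a qed drain is active, the link is down, the session is not
 * offloaded yet, or the target's retry_delay window is still open.
 */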
931 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
933 struct fc_lport *lport = shost_priv(host);
934 struct qedf_ctx *qedf = lport_priv(lport);
935 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
936 struct fc_rport_libfc_priv *rp = rport->dd_data;
937 struct qedf_rport *fcport;
938 struct qedf_ioreq *io_req;
941 unsigned long flags = 0;
944 num_sgs = scsi_sg_count(sc_cmd);
945 if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
946 QEDF_ERR(&qedf->dbg_ctx,
947 "Number of SG elements %d exceeds the hardware limit of %d.\n",
948 num_sgs, QEDF_MAX_BDS_PER_CMD);
949 sc_cmd->result = DID_ERROR << 16;
950 sc_cmd->scsi_done(sc_cmd);
954 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
955 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
956 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
957 "Returning DNC as unloading or stop io, flags 0x%lx.\n",
959 sc_cmd->result = DID_NO_CONNECT << 16;
960 sc_cmd->scsi_done(sc_cmd);
964 if (!qedf->pdev->msix_enabled) {
965 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
966 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
968 sc_cmd->result = DID_NO_CONNECT << 16;
969 sc_cmd->scsi_done(sc_cmd);
973 rval = fc_remote_port_chkready(rport);
975 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
976 "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
977 rval, rport->port_id);
978 sc_cmd->result = rval;
979 sc_cmd->scsi_done(sc_cmd);
983 /* Retry command if we are doing a qed drain operation */
984 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
985 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
986 rc = SCSI_MLQUEUE_HOST_BUSY;
990 if (lport->state != LPORT_ST_READY ||
991 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
992 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
993 rc = SCSI_MLQUEUE_HOST_BUSY;
997 /* rport and tgt are allocated together, so tgt should be non-NULL */
998 fcport = (struct qedf_rport *)&rp[1];
1000 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1001 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1003 * Session is not offloaded yet. Let SCSI-ml retry
1006 rc = SCSI_MLQUEUE_TARGET_BUSY;
1010 atomic_inc(&fcport->ios_to_queue);
1012 if (fcport->retry_delay_timestamp) {
1013 /* Take fcport->rport_lock for resetting the delay_timestamp */
1014 spin_lock_irqsave(&fcport->rport_lock, flags);
1015 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1016 fcport->retry_delay_timestamp = 0;
1018 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1019 /* If retry_delay timer is active, flow off the ML */
1020 rc = SCSI_MLQUEUE_TARGET_BUSY;
1021 atomic_dec(&fcport->ios_to_queue);
1024 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1027 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1029 rc = SCSI_MLQUEUE_HOST_BUSY;
1030 atomic_dec(&fcport->ios_to_queue);
1034 io_req->sc_cmd = sc_cmd;
1036 /* Take fcport->rport_lock for posting to fcport send queue */
1037 spin_lock_irqsave(&fcport->rport_lock, flags);
1038 if (qedf_post_io_req(fcport, io_req)) {
1039 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1040 /* Return SQE to pool */
1041 atomic_inc(&fcport->free_sqes);
1042 rc = SCSI_MLQUEUE_HOST_BUSY;
1044 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1045 atomic_dec(&fcport->ios_to_queue);
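/*
 * Decode the FCP_RSP carried in the CQE: residual count, SCSI status,
 * optional response-info bytes (fcp_rsp_code, task management only) and
 * sense data, which follows the response info in the sense buffer.
 */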
1051 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1052 struct fcoe_cqe_rsp_info *fcp_rsp)
1054 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1055 struct qedf_ctx *qedf = io_req->fcport->qedf;
1056 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1057 int fcp_sns_len = 0;
1058 int fcp_rsp_len = 0;
1059 uint8_t *rsp_info, *sense_data;
1061 io_req->fcp_status = FC_GOOD;
1062 io_req->fcp_resid = 0;
1063 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1064 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1065 io_req->fcp_resid = fcp_rsp->fcp_resid;
1067 io_req->scsi_comp_flags = rsp_flags;
1068 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1069 fcp_rsp->scsi_status_code;
1072 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1073 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1076 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1077 fcp_sns_len = fcp_rsp->fcp_sns_len;
1079 io_req->fcp_rsp_len = fcp_rsp_len;
1080 io_req->fcp_sns_len = fcp_sns_len;
1081 rsp_info = sense_data = io_req->sense_buffer;
1083 /* fetch fcp_rsp_code */
1084 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1085 /* Only for task management function */
1086 io_req->fcp_rsp_code = rsp_info[3];
1087 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1088 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1089 /* Adjust sense-data location. */
1090 sense_data += fcp_rsp_len;
1093 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1094 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1095 "Truncating sense buffer\n");
1096 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1099 /* The sense buffer can be NULL for TMF commands */
1100 if (sc_cmd->sense_buffer) {
1101 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1103 memcpy(sc_cmd->sense_buffer, sense_data,
1108 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1110 struct scsi_cmnd *sc = io_req->sc_cmd;
1112 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1113 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1114 scsi_sg_count(sc), sc->sc_data_direction);
1115 io_req->bd_tbl->bd_valid = 0;
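/*
 * Fast-path completion handler. Completions are dropped if the command
 * is already in abort/cleanup processing or the rport is flushing; a
 * firmware-detected underrun or an FCP protocol error is handled before
 * the normal fcp_status/cdb_status result mapping.
 */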
1119 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1120 struct qedf_ioreq *io_req)
1122 struct scsi_cmnd *sc_cmd;
1123 struct fcoe_cqe_rsp_info *fcp_rsp;
1124 struct qedf_rport *fcport;
1126 u16 scope, qualifier = 0;
1127 u8 fw_residual_flag = 0;
1128 unsigned long flags = 0;
1136 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1137 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1138 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1139 QEDF_ERR(&qedf->dbg_ctx,
1140 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1145 sc_cmd = io_req->sc_cmd;
1146 fcp_rsp = &cqe->cqe_info.rsp_info;
1149 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1153 if (!sc_cmd->SCp.ptr) {
1154 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1155 "another context.\n");
1159 if (!sc_cmd->device) {
1160 QEDF_ERR(&qedf->dbg_ctx,
1161 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1165 if (!sc_cmd->request) {
1166 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1167 "sc_cmd=%p.\n", sc_cmd);
1171 if (!sc_cmd->request->q) {
1172 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1173 "is not valid, sc_cmd=%p.\n", sc_cmd);
1177 fcport = io_req->fcport;
1180 * When flush is active, let the cmds be completed from the cleanup context
1183 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1184 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1185 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1186 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1187 "Dropping good completion xid=0x%x as fcport is flushing",
1192 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1194 qedf_unmap_sg_list(qedf, io_req);
1196 /* Check for FCP transport error */
1197 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1198 QEDF_ERR(&(qedf->dbg_ctx),
1199 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1200 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1201 io_req->fcp_rsp_code);
1202 sc_cmd->result = DID_BUS_BUSY << 16;
1206 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1207 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1208 if (fw_residual_flag) {
1209 QEDF_ERR(&qedf->dbg_ctx,
1210 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1211 io_req->xid, fcp_rsp->rsp_flags.flags,
1213 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1214 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1216 if (io_req->cdb_status == 0)
1217 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1219 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1222 * Set resid to the whole buffer length so we won't try to reuse
1223 * any previously read data.
1225 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1229 switch (io_req->fcp_status) {
1231 if (io_req->cdb_status == 0) {
1232 /* Good I/O completion */
1233 sc_cmd->result = DID_OK << 16;
1235 refcount = kref_read(&io_req->refcount);
1236 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1237 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1238 "lba=%02x%02x%02x%02x cdb_status=%d "
1239 "fcp_resid=0x%x refcount=%d.\n",
1240 qedf->lport->host->host_no, sc_cmd->device->id,
1241 sc_cmd->device->lun, io_req->xid,
1242 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1243 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1244 io_req->cdb_status, io_req->fcp_resid,
1246 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1248 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1249 io_req->cdb_status == SAM_STAT_BUSY) {
1251 * Check whether we need to set retry_delay at
1252 * all based on retry_delay module parameter
1253 * and the status qualifier.
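* The status qualifier packs the scope into its upper two bits and the
* retry delay qualifier into the lower 14 bits; the delay is applied
* below in 100 ms units (jiffies + qualifier * HZ / 10).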
1257 scope = fcp_rsp->retry_delay_timer & 0xC000;
1259 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1261 if (qedf_retry_delay)
1264 if (io_req->cdb_status ==
1265 SAM_STAT_TASK_SET_FULL)
1266 qedf->task_set_fulls++;
1271 if (io_req->fcp_resid)
1272 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1274 if (chk_scope == 1) {
1275 if ((scope == 1 || scope == 2) &&
1276 (qualifier > 0 && qualifier <= 0x3FEF)) {
1277 /* Check we don't go over the max */
1278 if (qualifier > QEDF_RETRY_DELAY_MAX) {
1279 qualifier = QEDF_RETRY_DELAY_MAX;
1280 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1282 (fcp_rsp->retry_delay_timer &
1285 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1286 "Scope = %d and qualifier = %d",
1288 /* Take fcport->rport_lock to
1289 * update the retry_delay_timestamp
1291 spin_lock_irqsave(&fcport->rport_lock, flags);
1292 fcport->retry_delay_timestamp =
1293 jiffies + (qualifier * HZ / 10);
1294 spin_unlock_irqrestore(&fcport->rport_lock,
1298 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1299 "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1305 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1306 io_req->fcp_status);
1311 if (qedf_io_tracing)
1312 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1315 * We wait till the end of the function to clear the
1316 * outstanding bit in case we need to send an abort
1318 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1320 io_req->sc_cmd = NULL;
1321 sc_cmd->SCp.ptr = NULL;
1322 sc_cmd->scsi_done(sc_cmd);
1323 kref_put(&io_req->refcount, qedf_release_cmd);
1326 /* Return a SCSI command in some other context besides a normal completion */
1327 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1330 struct scsi_cmnd *sc_cmd;
1334 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1338 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1339 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1340 "io_req:%p scsi_done handling already done\n",
1346 * We will be done with this command after this call so clear the
1349 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1351 sc_cmd = io_req->sc_cmd;
1354 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1358 if (!virt_addr_valid(sc_cmd)) {
1359 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1363 if (!sc_cmd->SCp.ptr) {
1364 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1365 "another context.\n");
1369 if (!sc_cmd->device) {
1370 QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1375 if (!virt_addr_valid(sc_cmd->device)) {
1376 QEDF_ERR(&qedf->dbg_ctx,
1377 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1381 if (!sc_cmd->sense_buffer) {
1382 QEDF_ERR(&qedf->dbg_ctx,
1383 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1388 if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1389 QEDF_ERR(&qedf->dbg_ctx,
1390 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1395 if (!sc_cmd->scsi_done) {
1396 QEDF_ERR(&qedf->dbg_ctx,
1397 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
1402 qedf_unmap_sg_list(qedf, io_req);
1404 sc_cmd->result = result << 16;
1405 refcount = kref_read(&io_req->refcount);
1406 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1407 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1408 "allowed=%d retries=%d refcount=%d.\n",
1409 qedf->lport->host->host_no, sc_cmd->device->id,
1410 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1411 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1412 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1416 * Set resid to the whole buffer length so we won't try to reuse any
1417 * previously read data
1419 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1421 if (qedf_io_tracing)
1422 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1424 io_req->sc_cmd = NULL;
1425 sc_cmd->SCp.ptr = NULL;
1426 sc_cmd->scsi_done(sc_cmd);
1427 kref_put(&io_req->refcount, qedf_release_cmd);
1432 * Clear the io_req->sc_cmd backpointer so we don't try to process
1435 io_req->sc_cmd = NULL;
1436 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
1440 * Handle warning type CQE completions. This is mainly used for REC timer expirations
1443 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1444 struct qedf_ioreq *io_req)
1447 struct qedf_rport *fcport = io_req->fcport;
1448 u64 err_warn_bit_map;
1452 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1453 "cqe is NULL for io_req %p xid=0x%x\n",
1454 io_req, io_req->xid);
1458 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1459 "xid=0x%x\n", io_req->xid);
1460 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1461 "err_warn_bitmap=%08x:%08x\n",
1462 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1463 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1464 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1465 "rx_buff_off=%08x, rx_id=%04x\n",
1466 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1467 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1468 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1470 /* Combine the hi/lo error bitmap words into a single 64-bit value */
1471 err_warn_bit_map = (u64)
1472 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1473 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1474 for (i = 0; i < 64; i++) {
1475 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1481 /* Check if REC TOV expired if this is a tape device */
1482 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1484 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1485 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1486 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1487 io_req->rx_buf_off =
1488 cqe->cqe_info.err_info.rx_buf_off;
1489 io_req->tx_buf_off =
1490 cqe->cqe_info.err_info.tx_buf_off;
1491 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1492 rval = qedf_send_rec(io_req);
1494 * We only want to abort the io_req if we
1495 * can't queue the REC command as we want to
1496 * keep the exchange open for recovery.
1506 init_completion(&io_req->abts_done);
1507 rval = qedf_initiate_abts(io_req, true);
1509 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1512 /* Cleanup a command when we receive an error detection completion */
1513 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1514 struct qedf_ioreq *io_req)
1518 if (io_req == NULL) {
1519 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1523 if (io_req->fcport == NULL) {
1524 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1529 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1530 "cqe is NULL for io_req %p\n", io_req);
1534 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1535 "xid=0x%x\n", io_req->xid);
1536 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1537 "err_warn_bitmap=%08x:%08x\n",
1538 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1539 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1540 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1541 "rx_buff_off=%08x, rx_id=%04x\n",
1542 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1543 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1544 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1546 /* When flush is active, let the cmds be flushed out from the cleanup context */
1547 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1548 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1549 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1550 QEDF_ERR(&qedf->dbg_ctx,
1551 "Dropping EQE for xid=0x%x as fcport is flushing",
1556 if (qedf->stop_io_on_error) {
1557 qedf_stop_all_io(qedf);
1561 init_completion(&io_req->abts_done);
1562 rval = qedf_initiate_abts(io_req, true);
1564 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1567 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1568 struct qedf_ioreq *els_req)
1570 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1571 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1572 kref_read(&els_req->refcount));
1575 * Need to distinguish this from a timeout when calling the
1578 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1580 clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1582 /* Cancel the timer */
1583 cancel_delayed_work_sync(&els_req->timeout_work);
1585 /* Call callback function to complete command */
1586 if (els_req->cb_func && els_req->cb_arg) {
1587 els_req->cb_func(els_req->cb_arg);
1588 els_req->cb_arg = NULL;
1591 /* Release kref for original initiate_els */
1592 kref_put(&els_req->refcount, qedf_release_cmd);
1595 /* A value of -1 for lun is a wild card that means flush all
1596 * active SCSI I/Os for the target.
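* Pending RRQ and timeout work is cancelled, a firmware cleanup is issued
* for each outstanding request on the rport and, during an upload, the
* function waits for ios_to_queue and num_active_ios to drain to zero.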
1598 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1600 struct qedf_ioreq *io_req;
1601 struct qedf_ctx *qedf;
1602 struct qedf_cmd_mgr *cmd_mgr;
1604 unsigned long flags;
1610 QEDF_ERR(NULL, "fcport is NULL\n");
1614 /* Check that fcport is still offloaded */
1615 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1616 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1620 qedf = fcport->qedf;
1623 QEDF_ERR(NULL, "qedf is NULL.\n");
1627 /* Only wait for all commands to be queued in the Upload context */
1628 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1630 while (atomic_read(&fcport->ios_to_queue)) {
1631 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1632 "Waiting for %d I/Os to be queued\n",
1633 atomic_read(&fcport->ios_to_queue));
1634 if (wait_cnt == 0) {
1636 "%d I/O requests could not be queued\n",
1637 atomic_read(&fcport->ios_to_queue));
1644 cmd_mgr = qedf->cmd_mgr;
1646 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1647 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1648 atomic_read(&fcport->num_active_ios), fcport,
1649 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1650 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1652 mutex_lock(&qedf->flush_mutex);
1654 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1656 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1657 fcport->lun_reset_lun = lun;
1660 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1661 io_req = &cmd_mgr->cmds[i];
1665 if (!io_req->fcport)
1668 spin_lock_irqsave(&cmd_mgr->lock, flags);
1670 if (io_req->alloc) {
1671 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1672 if (io_req->cmd_type == QEDF_SCSI_CMD)
1673 QEDF_ERR(&qedf->dbg_ctx,
1674 "Allocated but not queued, xid=0x%x\n",
1677 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1679 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1683 if (io_req->fcport != fcport)
1686 /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1687 * but RRQ is still pending.
1688 * Workaround: Within qedf_send_rrq, we check if the fcport is
1689 * NULL, and we drop the ref on the io_req to clean it up.
1691 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1692 refcount = kref_read(&io_req->refcount);
1693 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1694 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1695 io_req->xid, io_req->cmd_type, refcount);
1696 /* If RRQ work has been queued, try to cancel it and
1699 if (atomic_read(&io_req->state) ==
1700 QEDFC_CMD_ST_RRQ_WAIT) {
1701 if (cancel_delayed_work_sync
1702 (&io_req->rrq_work)) {
1703 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1704 "Putting reference for pending RRQ work xid=0x%x.\n",
1707 kref_put(&io_req->refcount,
1714 /* Only consider flushing ELS during target reset */
1715 if (io_req->cmd_type == QEDF_ELS &&
1717 rc = kref_get_unless_zero(&io_req->refcount);
1719 QEDF_ERR(&(qedf->dbg_ctx),
1720 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1721 io_req, io_req->xid);
1724 qedf_initiate_cleanup(io_req, false);
1726 qedf_flush_els_req(qedf, io_req);
1729 * Release the kref and go back to the top of the
1735 if (io_req->cmd_type == QEDF_ABTS) {
1737 rc = kref_get_unless_zero(&io_req->refcount);
1739 QEDF_ERR(&(qedf->dbg_ctx),
1740 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1741 io_req, io_req->xid);
1744 if (lun != -1 && io_req->lun != lun)
1747 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1748 "Flushing abort xid=0x%x.\n", io_req->xid);
1750 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1751 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1752 "Putting ref for cancelled RRQ work xid=0x%x.\n",
1754 kref_put(&io_req->refcount, qedf_release_cmd);
1757 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1758 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1759 "Putting ref for cancelled tmo work xid=0x%x.\n",
1761 qedf_initiate_cleanup(io_req, true);
1762 /* Notify eh_abort handler that ABTS is
1765 complete(&io_req->abts_done);
1766 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1768 kref_put(&io_req->refcount, qedf_release_cmd);
1774 if (!io_req->sc_cmd)
1776 if (!io_req->sc_cmd->device) {
1777 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1778 "Device backpointer NULL for sc_cmd=%p.\n",
1780 /* Put reference for non-existent scsi_cmnd */
1781 io_req->sc_cmd = NULL;
1782 qedf_initiate_cleanup(io_req, false);
1783 kref_put(&io_req->refcount, qedf_release_cmd);
1787 if (io_req->lun != lun)
1792 * Use kref_get_unless_zero in the unlikely case the command
1793 * we're about to flush was completed in the normal SCSI path
1795 rc = kref_get_unless_zero(&io_req->refcount);
1797 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1798 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1802 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1803 "Cleanup xid=0x%x.\n", io_req->xid);
1806 /* Clean up the task and return the I/O to the mid-layer */
1807 qedf_initiate_cleanup(io_req, true);
1810 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1814 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1815 "Flushed 0x%x I/Os, active=0x%x.\n",
1816 flush_cnt, atomic_read(&fcport->num_active_ios));
1817 /* Only wait for all commands to complete in the Upload context */
1818 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1820 while (atomic_read(&fcport->num_active_ios)) {
1821 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1822 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1824 atomic_read(&fcport->num_active_ios),
1826 if (wait_cnt == 0) {
1827 QEDF_ERR(&qedf->dbg_ctx,
1828 "Flushed %d I/Os, active=%d.\n",
1830 atomic_read(&fcport->num_active_ios));
1831 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1832 io_req = &cmd_mgr->cmds[i];
1833 if (io_req->fcport &&
1834 io_req->fcport == fcport) {
1836 kref_read(&io_req->refcount);
1837 set_bit(QEDF_CMD_DIRTY,
1839 QEDF_ERR(&qedf->dbg_ctx,
1840 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1841 io_req, io_req->xid,
1856 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1857 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1858 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1859 mutex_unlock(&qedf->flush_mutex);
1863 * Initiate an ABTS middle path command. Note that we don't have to initialize
1864 * the task context for an ABTS task.
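* An extra reference is taken on the io_req and on the rport rdata, a
* QEDF_ABORT_TIMEOUT timer is armed and the abort WQE is posted under
* rport_lock; the io_req reference is dropped by the ABTS completion or
* timeout handler.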
1866 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1868 struct fc_lport *lport;
1869 struct qedf_rport *fcport = io_req->fcport;
1870 struct fc_rport_priv *rdata;
1871 struct qedf_ctx *qedf;
1874 unsigned long flags;
1875 struct fcoe_wqe *sqe;
1879 /* Sanity check qedf_rport before dereferencing any pointers */
1880 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1881 QEDF_ERR(NULL, "tgt not offloaded\n");
1886 qedf = fcport->qedf;
1887 rdata = fcport->rdata;
1889 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1890 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1895 lport = qedf->lport;
1897 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1898 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1900 goto drop_rdata_kref;
1903 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1904 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1906 goto drop_rdata_kref;
1909 /* Ensure room on SQ */
1910 if (!atomic_read(&fcport->free_sqes)) {
1911 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1913 goto drop_rdata_kref;
1916 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1917 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1919 goto drop_rdata_kref;
1922 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1923 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1924 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1925 QEDF_ERR(&qedf->dbg_ctx,
1926 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1927 io_req->xid, io_req->sc_cmd);
1929 goto drop_rdata_kref;
1932 kref_get(&io_req->refcount);
1935 qedf->control_requests++;
1936 qedf->packet_aborts++;
1938 /* Set the command type to abort */
1939 io_req->cmd_type = QEDF_ABTS;
1940 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1942 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1943 refcount = kref_read(&io_req->refcount);
1944 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1945 "ABTS io_req xid = 0x%x refcount=%d\n",
1948 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1950 spin_lock_irqsave(&fcport->rport_lock, flags);
1952 sqe_idx = qedf_get_sqe_idx(fcport);
1953 sqe = &fcport->sq[sqe_idx];
1954 memset(sqe, 0, sizeof(struct fcoe_wqe));
1955 io_req->task_params->sqe = sqe;
1957 init_initiator_abort_fcoe_task(io_req->task_params);
1958 qedf_ring_doorbell(fcport);
1960 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1963 kref_put(&rdata->kref, fc_rport_destroy);
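/*
 * ABTS completion handler: BA_ACC schedules the RRQ after R_A_TOV while
 * keeping the command, BA_RJT (or an unknown R_CTL) lets the subsequent
 * cleanup return it, and completions are dropped while the rport is
 * flushing.
 */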
1968 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1969 struct qedf_ioreq *io_req)
1973 struct qedf_rport *fcport = io_req->fcport;
1975 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1976 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1978 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1980 /* This was added at a point when we were scheduling abts_compl &
1981 * cleanup_compl on different CPUs and there was a possibility of
1982 * the io_req being freed from the other context before we got here.
1985 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1986 "Dropping ABTS completion xid=0x%x as fcport is NULL",
1992 * When flush is active, let the cmds be completed from the cleanup
1995 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1996 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1997 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1998 "Dropping ABTS completion xid=0x%x as fcport is flushing",
2003 if (!cancel_delayed_work(&io_req->timeout_work)) {
2004 QEDF_ERR(&qedf->dbg_ctx,
2005 "Wasn't able to cancel abts timeout work.\n");
2009 case FC_RCTL_BA_ACC:
2010 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2011 "ABTS response - ACC Send RRQ after R_A_TOV\n");
2012 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2013 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2015 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2016 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2021 * Don't release this cmd yet. It will be released
2022 * after we get the RRQ response
2024 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2025 msecs_to_jiffies(qedf->lport->r_a_tov));
2026 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2028 /* For error cases let the cleanup return the command */
2029 case FC_RCTL_BA_RJT:
2030 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2031 "ABTS response - RJT\n");
2032 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2035 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2039 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2041 if (io_req->sc_cmd) {
2042 if (!io_req->return_scsi_cmd_on_abts)
2043 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2044 "Not calling scsi_done for xid=0x%x.\n",
2046 if (io_req->return_scsi_cmd_on_abts)
2047 qedf_scsi_done(qedf, io_req, DID_ERROR);
2050 /* Notify eh_abort handler that ABTS is complete */
2051 complete(&io_req->abts_done);
2053 kref_put(&io_req->refcount, qedf_release_cmd);
2056 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2058 struct qedf_mp_req *mp_req;
2059 struct scsi_sge *mp_req_bd;
2060 struct scsi_sge *mp_resp_bd;
2061 struct qedf_ctx *qedf = io_req->fcport->qedf;
2065 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2067 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2068 memset(mp_req, 0, sizeof(struct qedf_mp_req));
2070 if (io_req->cmd_type != QEDF_ELS) {
2071 mp_req->req_len = sizeof(struct fcp_cmnd);
2072 io_req->data_xfer_len = mp_req->req_len;
2074 mp_req->req_len = io_req->data_xfer_len;
2076 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2077 &mp_req->req_buf_dma, GFP_KERNEL);
2078 if (!mp_req->req_buf) {
2079 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2080 qedf_free_mp_resc(io_req);
2084 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2085 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2086 if (!mp_req->resp_buf) {
2087 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2089 qedf_free_mp_resc(io_req);
2093 /* Allocate and map mp_req_bd and mp_resp_bd */
2094 sz = sizeof(struct scsi_sge);
2095 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2096 &mp_req->mp_req_bd_dma, GFP_KERNEL);
2097 if (!mp_req->mp_req_bd) {
2098 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2099 qedf_free_mp_resc(io_req);
2103 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2104 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2105 if (!mp_req->mp_resp_bd) {
2106 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2107 qedf_free_mp_resc(io_req);
2112 addr = mp_req->req_buf_dma;
2113 mp_req_bd = mp_req->mp_req_bd;
2114 mp_req_bd->sge_addr.lo = U64_LO(addr);
2115 mp_req_bd->sge_addr.hi = U64_HI(addr);
2116 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2119 * MP buffer is either a task mgmt command or an ELS.
2120 * So the assumption is that it consumes a single bd
2121 * entry in the bd table
2123 mp_resp_bd = mp_req->mp_resp_bd;
2124 addr = mp_req->resp_buf_dma;
2125 mp_resp_bd->sge_addr.lo = U64_LO(addr);
2126 mp_resp_bd->sge_addr.hi = U64_HI(addr);
2127 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}
/*
 * Returns SUCCESS if the cleanup task does not time out, otherwise returns
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (io_req->cmd_type == QEDF_ELS) {
		goto process_els;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
		    "cleanup processing or already completed.\n",
		    io_req->xid);
		return SUCCESS;
	}
	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

process_els:
	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		/* Need to make sure we clear the flag since it was set */
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return FAILED;
	}

	if (io_req->cmd_type == QEDF_CLEANUP) {
		QEDF_ERR(&qedf->dbg_ctx,
		    "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
		    io_req->xid, io_req->cmd_type);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return SUCCESS;
	}

	refcount = kref_read(&io_req->refcount);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
	    "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
	    io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
	    refcount, fcport, fcport->rdata->ids.port_id);

	/* Cleanup cmds re-use the same TID as the original I/O */
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	init_completion(&io_req->cleanup_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
		    "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	/* If it is a task management command, handle it here; the reference
	 * will be dropped in qedf_execute_tmf.
	 */
	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
		complete(&io_req->tm_done);
	}

	if (io_req->sc_cmd) {
		if (!io_req->return_scsi_cmd_on_abts)
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
			    "Not calling scsi_done for xid=0x%x.\n",
			    io_req->xid);
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}
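
/* Completion handler for a cleanup task posted by qedf_initiate_cleanup(). */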
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
	    io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->cleanup_done);
}
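
/*
 * Build and post a task management request (LUN or target reset) on the
 * rport's SQ, wait for its completion, then flush the affected I/Os.
 */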
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	int lun = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
		return FAILED;
	}

	lun = (int)sc_cmd->device->lun;
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		goto no_flush;
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = -EAGAIN;
		goto no_flush;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf->lun_resets++;
	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = false;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
	    "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
		/* Clear outstanding bit since command timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}
	/*
	 * Double check that fcport has not gone into an uploading state before
	 * executing the command flush for the LUN/target.
	 */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
		    "fcport is uploading, not executing flush.\n");
		goto no_flush;
	}
	/* We do not need this io_req any more */
	kref_put(&io_req->refcount, qedf_release_cmd);

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);
	else
		qedf_flush_active_ios(fcport, -1);

no_flush:
	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
	return rc;
}
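
/*
 * SCSI error-handling entry point for LUN/target reset: validate the rport,
 * lport and session state before handing the request to qedf_execute_tmf().
 */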
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
	int rc = SUCCESS;
	int rval;
	struct qedf_ioreq *io_req = NULL;
	int ref_cnt = 0;
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_ERR(NULL,
	    "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
	    tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
	    rport->scsi_target_id, (int)sc_cmd->device->lun);

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(NULL, "stale rport\n");
		return FAILED;
	}

	QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
	    (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
	    "LUN RESET");

	if (sc_cmd->SCp.ptr) {
		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
		ref_cnt = kref_read(&io_req->refcount);
		QEDF_ERR(NULL,
		    "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
		    io_req, io_req->xid, ref_cnt);
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto tmf_err;

	if (!fcport) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
		rc = SUCCESS;
		goto tmf_err;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		if (!fcport->rdata)
			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
			    fcport);
		else
			QEDF_ERR(&qedf->dbg_ctx,
			    "fcport %p port_id=%06x is uploading.\n",
			    fcport, fcport->rdata->ids.port_id);
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}
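
/* Completion handler for a task management request; wakes qedf_execute_tmf(). */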
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
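
/*
 * Handle an unsolicited frame received via the BDQ: copy it into an fc_frame
 * and defer it to qedf_fp_io_handler() so libfc processes it in non-atomic
 * context, then advance the BDQ producer index.
 */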
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
	    "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
	    le32_to_cpu(p_bd_info->address.hi),
	    le32_to_cpu(p_bd_info->address.lo),
	    le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
	    le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
	    qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	QEDF_WARN(&qedf->dbg_ctx,
	    "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
	    ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
	    fh->fh_type, fc_frame_payload_op(fp));

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);

increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);