// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12 unsigned int timer_msec)
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
15 msecs_to_jiffies(timer_msec));
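
/*
 * Usage note (grounded in this file): qedf_initiate_abts() arms the abort
 * timer with qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT); when it
 * expires, qedf_cmd_timeout() below runs from qedf->timer_work_queue.
 */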
18 static void qedf_cmd_timeout(struct work_struct *work)
21 struct qedf_ioreq *io_req =
22 container_of(work, struct qedf_ioreq, timeout_work.work);
23 struct qedf_ctx *qedf;
24 struct qedf_rport *fcport;
28 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
32 fcport = io_req->fcport;
33 if (io_req->fcport == NULL) {
34 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
40 switch (io_req->cmd_type) {
43 QEDF_INFO(NULL, QEDF_LOG_IO,
44 "qedf is NULL for ABTS xid=0x%x.\n",
49 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
51 /* Cleanup timed out ABTS */
52 qedf_initiate_cleanup(io_req, true);
53 complete(&io_req->abts_done);
		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
60 kref_put(&io_req->refcount, qedf_release_cmd);
62 /* Clear in abort bit now that we're done with the command */
63 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
69 qedf_restart_rport(fcport);
73 QEDF_INFO(NULL, QEDF_LOG_IO,
74 "qedf is NULL for ELS xid=0x%x.\n",
78 /* ELS request no longer outstanding since it timed out */
79 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
81 kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ.
		 */
87 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
89 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
90 /* Call callback function to complete command */
91 if (io_req->cb_func && io_req->cb_arg) {
92 op = io_req->cb_arg->op;
93 io_req->cb_func(io_req->cb_arg);
94 io_req->cb_arg = NULL;
96 qedf_initiate_cleanup(io_req, true);
97 kref_put(&io_req->refcount, qedf_release_cmd);
99 case QEDF_SEQ_CLEANUP:
100 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
101 "xid=0x%x.\n", io_req->xid);
102 qedf_initiate_cleanup(io_req, true);
103 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
104 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
107 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
108 "Hit default case, xid=0x%x.\n", io_req->xid);
113 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
115 struct io_bdt *bdt_info;
116 struct qedf_ctx *qedf = cmgr->qedf;
119 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
122 struct qedf_ioreq *io_req;
124 num_ios = max_xid - min_xid + 1;
126 /* Free fcoe_bdt_ctx structures */
127 if (!cmgr->io_bdt_pool) {
128 QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
132 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
133 for (i = 0; i < num_ios; i++) {
134 bdt_info = cmgr->io_bdt_pool[i];
135 if (bdt_info->bd_tbl) {
136 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
137 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
138 bdt_info->bd_tbl = NULL;
142 /* Destroy io_bdt pool */
143 for (i = 0; i < num_ios; i++) {
144 kfree(cmgr->io_bdt_pool[i]);
145 cmgr->io_bdt_pool[i] = NULL;
148 kfree(cmgr->io_bdt_pool);
149 cmgr->io_bdt_pool = NULL;
153 for (i = 0; i < num_ios; i++) {
154 io_req = &cmgr->cmds[i];
155 kfree(io_req->sgl_task_params);
156 kfree(io_req->task_params);
157 /* Make sure we free per command sense buffer */
158 if (io_req->sense_buffer)
159 dma_free_coherent(&qedf->pdev->dev,
160 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
161 io_req->sense_buffer_dma);
162 cancel_delayed_work_sync(&io_req->rrq_work);
165 /* Free command manager itself */
169 static void qedf_handle_rrq(struct work_struct *work)
171 struct qedf_ioreq *io_req =
172 container_of(work, struct qedf_ioreq, rrq_work.work);
174 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
175 qedf_send_rrq(io_req);
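
/*
 * qedf_cmd_mgr_alloc() builds the per-adapter command manager: for every
 * possible task ID it initializes the timeout and RRQ delayed work, and
 * allocates a DMA sense buffer plus task/SGL parameter structures, then
 * allocates one io_bdt (BD table) per command.  Any failure unwinds through
 * qedf_cmd_mgr_free().
 */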
179 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
181 struct qedf_cmd_mgr *cmgr;
182 struct io_bdt *bdt_info;
183 struct qedf_ioreq *io_req;
188 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
190 /* Make sure num_queues is already set before calling this function */
191 if (!qedf->num_queues) {
192 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
196 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
197 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
198 "max_xid 0x%x.\n", min_xid, max_xid);
202 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
203 "0x%x.\n", min_xid, max_xid);
205 num_ios = max_xid - min_xid + 1;
207 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
209 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
214 spin_lock_init(&cmgr->lock);
	/*
	 * Initialize I/O request fields.
	 */
221 for (i = 0; i < num_ios; i++) {
222 io_req = &cmgr->cmds[i];
223 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
227 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
229 /* Allocate DMA memory to hold sense buffer */
230 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
231 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
233 if (!io_req->sense_buffer) {
234 QEDF_ERR(&qedf->dbg_ctx,
235 "Failed to alloc sense buffer.\n");
		/* Allocate task parameters to pass to f/w init functions */
240 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
242 if (!io_req->task_params) {
243 QEDF_ERR(&(qedf->dbg_ctx),
244 "Failed to allocate task_params for xid=0x%x\n",
		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
253 io_req->sgl_task_params = kzalloc(
254 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
255 if (!io_req->sgl_task_params) {
256 QEDF_ERR(&(qedf->dbg_ctx),
257 "Failed to allocate sgl_task_params for xid=0x%x\n",
263 /* Allocate pool of io_bdts - one for each qedf_ioreq */
264 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
267 if (!cmgr->io_bdt_pool) {
268 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
272 for (i = 0; i < num_ios; i++) {
273 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
275 if (!cmgr->io_bdt_pool[i]) {
276 QEDF_WARN(&(qedf->dbg_ctx),
277 "Failed to alloc io_bdt_pool[%d].\n", i);
282 for (i = 0; i < num_ios; i++) {
283 bdt_info = cmgr->io_bdt_pool[i];
284 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
285 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
286 &bdt_info->bd_tbl_dma, GFP_KERNEL);
287 if (!bdt_info->bd_tbl) {
288 QEDF_WARN(&(qedf->dbg_ctx),
289 "Failed to alloc bdt_tbl[%d].\n", i);
293 atomic_set(&cmgr->free_list_cnt, num_ios);
294 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
295 "cmgr->free_list_cnt=%d.\n",
296 atomic_read(&cmgr->free_list_cnt));
301 qedf_cmd_mgr_free(cmgr);
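
/*
 * qedf_alloc_cmd() hands out an io_req from the circular cmds[] array under
 * cmd_mgr->lock.  It refuses the allocation when the send queue is out of
 * SQEs, when the per-connection I/O limit is hit, or when only the reserved
 * global TIDs remain, so callers must handle a NULL return.
 */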
305 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
307 struct qedf_ctx *qedf = fcport->qedf;
308 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
309 struct qedf_ioreq *io_req = NULL;
310 struct io_bdt *bd_tbl;
316 free_sqes = atomic_read(&fcport->free_sqes);
319 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
320 "Returning NULL, free_sqes=%d.\n ",
325 /* Limit the number of outstanding R/W tasks */
326 if ((atomic_read(&fcport->num_active_ios) >=
327 NUM_RW_TASKS_PER_CONNECTION)) {
328 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
329 "Returning NULL, num_active_ios=%d.\n",
330 atomic_read(&fcport->num_active_ios));
	/* Limit global TIDs for certain tasks */
335 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
336 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
337 "Returning NULL, free_list_cnt=%d.\n",
338 atomic_read(&cmd_mgr->free_list_cnt));
342 spin_lock_irqsave(&cmd_mgr->lock, flags);
343 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
344 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
346 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
349 /* Check to make sure command was previously freed */
354 if (i == FCOE_PARAMS_NUM_TASKS) {
355 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
359 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
360 QEDF_ERR(&qedf->dbg_ctx,
361 "io_req found to be dirty ox_id = 0x%x.\n",
364 /* Clear any flags now that we've reallocated the xid */
367 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
369 atomic_inc(&fcport->num_active_ios);
370 atomic_dec(&fcport->free_sqes);
372 atomic_dec(&cmd_mgr->free_list_cnt);
374 io_req->cmd_mgr = cmd_mgr;
375 io_req->fcport = fcport;
377 /* Clear any stale sc_cmd back pointer */
378 io_req->sc_cmd = NULL;
381 /* Hold the io_req against deletion */
382 kref_init(&io_req->refcount); /* ID: 001 */
383 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
385 /* Bind io_bdt for this io_req */
386 /* Have a static link between io_req and io_bdt_pool */
387 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
388 if (bd_tbl == NULL) {
389 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
390 kref_put(&io_req->refcount, qedf_release_cmd);
393 bd_tbl->io_req = io_req;
394 io_req->cmd_type = cmd_type;
395 io_req->tm_flags = 0;
397 /* Reset sequence offset data */
398 io_req->rx_buf_off = 0;
399 io_req->tx_buf_off = 0;
400 io_req->rx_id = 0xffff; /* No OX_ID */
405 /* Record failure for stats and return NULL to caller */
406 qedf->alloc_failures++;
410 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
412 struct qedf_mp_req *mp_req = &(io_req->mp_req);
413 struct qedf_ctx *qedf = io_req->fcport->qedf;
414 uint64_t sz = sizeof(struct scsi_sge);
417 if (mp_req->mp_req_bd) {
418 dma_free_coherent(&qedf->pdev->dev, sz,
419 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
420 mp_req->mp_req_bd = NULL;
422 if (mp_req->mp_resp_bd) {
423 dma_free_coherent(&qedf->pdev->dev, sz,
424 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
425 mp_req->mp_resp_bd = NULL;
427 if (mp_req->req_buf) {
428 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
429 mp_req->req_buf, mp_req->req_buf_dma);
430 mp_req->req_buf = NULL;
432 if (mp_req->resp_buf) {
433 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
434 mp_req->resp_buf, mp_req->resp_buf_dma);
435 mp_req->resp_buf = NULL;
439 void qedf_release_cmd(struct kref *ref)
441 struct qedf_ioreq *io_req =
442 container_of(ref, struct qedf_ioreq, refcount);
443 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
444 struct qedf_rport *fcport = io_req->fcport;
447 if (io_req->cmd_type == QEDF_SCSI_CMD) {
448 QEDF_WARN(&fcport->qedf->dbg_ctx,
449 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
450 io_req, io_req->xid);
451 WARN_ON(io_req->sc_cmd);
454 if (io_req->cmd_type == QEDF_ELS ||
455 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
456 qedf_free_mp_resc(io_req);
458 atomic_inc(&cmd_mgr->free_list_cnt);
459 atomic_dec(&fcport->num_active_ios);
460 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
461 if (atomic_read(&fcport->num_active_ios) < 0) {
462 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
466 /* Increment task retry identifier now that the request is released */
467 io_req->task_retry_identifier++;
468 io_req->fcport = NULL;
470 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
472 spin_lock_irqsave(&cmd_mgr->lock, flags);
473 io_req->fcport = NULL;
475 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
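
/*
 * qedf_map_sg() DMA-maps the command's scatterlist and copies each element
 * into the firmware SGE table.  Requests are tagged QEDF_IOREQ_FAST_SGE
 * unless an intermediate element of a large write is shorter than
 * QEDF_PAGE_SIZE, in which case the request falls back to the slow SGE path.
 */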
478 static int qedf_map_sg(struct qedf_ioreq *io_req)
480 struct scsi_cmnd *sc = io_req->sc_cmd;
481 struct Scsi_Host *host = sc->device->host;
482 struct fc_lport *lport = shost_priv(host);
483 struct qedf_ctx *qedf = lport_priv(lport);
484 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
485 struct scatterlist *sg;
493 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
494 scsi_sg_count(sc), sc->sc_data_direction);
495 sg = scsi_sglist(sc);
497 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
499 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
500 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
502 scsi_for_each_sg(sc, sg, sg_count, i) {
503 sg_len = (u32)sg_dma_len(sg);
504 addr = (u64)sg_dma_address(sg);
505 end_addr = (u64)(addr + sg_len);
		/*
		 * Intermediate s/g element, so check if the start and end
		 * addresses are page aligned.  Only required for writes and
		 * only if the number of scatter/gather elements is 8 or more.
		 */
512 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
513 (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
514 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
516 bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
517 bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
518 bd[bd_count].sge_len = cpu_to_le32(sg_len);
521 byte_count += sg_len;
	/* If neither FAST nor SLOW was set above, default to FAST */
525 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
526 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
528 if (byte_count != scsi_bufflen(sc))
529 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
530 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
531 scsi_bufflen(sc), io_req->xid);
536 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
538 struct scsi_cmnd *sc = io_req->sc_cmd;
539 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
542 if (scsi_sg_count(sc)) {
543 bd_count = qedf_map_sg(io_req);
548 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
551 io_req->bd_tbl->bd_valid = bd_count;
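
/*
 * qedf_build_fcp_cmnd() fills the 32-byte FCP_CMND IU: 8 bytes of LUN,
 * 4 bytes of task attributes/flags, the 16-byte CDB (skipped for task
 * management commands) and the 4-byte FCP data length.
 */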
556 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
557 struct fcp_cmnd *fcp_cmnd)
559 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
561 /* fcp_cmnd is 32 bytes */
562 memset(fcp_cmnd, 0, FCP_CMND_LEN);
564 /* 8 bytes: SCSI LUN info */
565 int_to_scsilun(sc_cmd->device->lun,
566 (struct scsi_lun *)&fcp_cmnd->fc_lun);
568 /* 4 bytes: flag info */
569 fcp_cmnd->fc_pri_ta = 0;
570 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
571 fcp_cmnd->fc_flags = io_req->io_req_flags;
572 fcp_cmnd->fc_cmdref = 0;
574 /* Populate data direction */
575 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
576 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
578 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
579 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
580 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
581 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
584 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
586 /* 16 bytes: CDB information */
587 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
588 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
590 /* 4 bytes: FCP data length */
591 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
594 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
595 struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
596 struct fcoe_wqe *sqe)
598 enum fcoe_task_type task_type;
599 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
600 struct io_bdt *bd_tbl = io_req->bd_tbl;
604 struct qedf_ctx *qedf = fcport->qedf;
605 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
606 struct regpair sense_data_buffer_phys_addr;
611 /* Note init_initiator_rw_fcoe_task memsets the task context */
612 io_req->task = task_ctx;
613 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
614 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
615 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	/* Set task type based on the DMA direction of the command */
618 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
619 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
621 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
622 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
623 tx_io_size = io_req->data_xfer_len;
625 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
626 rx_io_size = io_req->data_xfer_len;
630 /* Setup the fields for fcoe_task_params */
631 io_req->task_params->context = task_ctx;
632 io_req->task_params->sqe = sqe;
633 io_req->task_params->task_type = task_type;
634 io_req->task_params->tx_io_size = tx_io_size;
635 io_req->task_params->rx_io_size = rx_io_size;
636 io_req->task_params->conn_cid = fcport->fw_cid;
637 io_req->task_params->itid = io_req->xid;
638 io_req->task_params->cq_rss_number = cq_idx;
639 io_req->task_params->is_tape_device = fcport->dev_type;
641 /* Fill in information for scatter/gather list */
642 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
643 bd_count = bd_tbl->bd_valid;
644 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
645 io_req->sgl_task_params->sgl_phys_addr.lo =
646 U64_LO(bd_tbl->bd_tbl_dma);
647 io_req->sgl_task_params->sgl_phys_addr.hi =
648 U64_HI(bd_tbl->bd_tbl_dma);
649 io_req->sgl_task_params->num_sges = bd_count;
650 io_req->sgl_task_params->total_buffer_size =
651 scsi_bufflen(io_req->sc_cmd);
652 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
653 io_req->sgl_task_params->small_mid_sge = 1;
655 io_req->sgl_task_params->small_mid_sge = 0;
658 /* Fill in physical address of sense buffer */
659 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
660 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
662 /* fill FCP_CMND IU */
663 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
665 /* Swap fcp_cmnd since FC is big endian */
666 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
667 for (i = 0; i < cnt; i++) {
668 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
670 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
672 init_initiator_rw_fcoe_task(io_req->task_params,
673 io_req->sgl_task_params,
674 sense_data_buffer_phys_addr,
675 io_req->task_retry_identifier, fcp_cmnd);
677 /* Increment SGL type counters */
678 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
679 qedf->slow_sge_ios++;
681 qedf->fast_sge_ios++;
684 void qedf_init_mp_task(struct qedf_ioreq *io_req,
685 struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
687 struct qedf_mp_req *mp_req = &(io_req->mp_req);
688 struct qedf_rport *fcport = io_req->fcport;
689 struct qedf_ctx *qedf = io_req->fcport->qedf;
690 struct fc_frame_header *fc_hdr;
691 struct fcoe_tx_mid_path_params task_fc_hdr;
692 struct scsi_sgl_task_params tx_sgl_task_params;
693 struct scsi_sgl_task_params rx_sgl_task_params;
695 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
696 "Initializing MP task for cmd_type=%d\n",
699 qedf->control_requests++;
701 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
702 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
703 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
704 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
706 /* Setup the task from io_req for easy reference */
707 io_req->task = task_ctx;
709 /* Setup the fields for fcoe_task_params */
710 io_req->task_params->context = task_ctx;
711 io_req->task_params->sqe = sqe;
712 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
713 io_req->task_params->tx_io_size = io_req->data_xfer_len;
714 /* rx_io_size tells the f/w how large a response buffer we have */
715 io_req->task_params->rx_io_size = PAGE_SIZE;
716 io_req->task_params->conn_cid = fcport->fw_cid;
717 io_req->task_params->itid = io_req->xid;
718 /* Return middle path commands on CQ 0 */
719 io_req->task_params->cq_rss_number = 0;
720 io_req->task_params->is_tape_device = fcport->dev_type;
722 fc_hdr = &(mp_req->req_fc_hdr);
723 /* Set OX_ID and RX_ID based on driver task id */
724 fc_hdr->fh_ox_id = io_req->xid;
725 fc_hdr->fh_rx_id = htons(0xffff);
727 /* Set up FC header information */
728 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
729 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
730 task_fc_hdr.type = fc_hdr->fh_type;
731 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
732 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
733 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
734 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
736 /* Set up s/g list parameters for request buffer */
737 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
738 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
739 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
740 tx_sgl_task_params.num_sges = 1;
741 /* Set PAGE_SIZE for now since sg element is that size ??? */
742 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
743 tx_sgl_task_params.small_mid_sge = 0;
	/* Set up s/g list parameters for the response buffer */
746 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
747 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
748 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
749 rx_sgl_task_params.num_sges = 1;
750 /* Set PAGE_SIZE for now since sg element is that size ??? */
751 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
752 rx_sgl_task_params.small_mid_sge = 0;
	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
759 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
762 &rx_sgl_task_params, 0);
765 /* Presumed that fcport->rport_lock is held */
766 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
768 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
771 rval = fcport->sq_prod_idx;
773 /* Adjust ring index */
774 fcport->sq_prod_idx++;
775 fcport->fw_sq_prod_idx++;
776 if (fcport->sq_prod_idx == total_sqe)
777 fcport->sq_prod_idx = 0;
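
/*
 * Typical submission sequence used throughout this file (illustrative,
 * see qedf_initiate_abts() for a concrete instance), with
 * fcport->rport_lock held:
 *
 *	sqe_idx = qedf_get_sqe_idx(fcport);
 *	sqe = &fcport->sq[sqe_idx];
 *	memset(sqe, 0, sizeof(struct fcoe_wqe));
 *	... populate the WQE / task context ...
 *	qedf_ring_doorbell(fcport);
 */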
782 void qedf_ring_doorbell(struct qedf_rport *fcport)
784 struct fcoe_db_data dbell = { 0 };
788 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
789 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
790 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
791 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
793 dbell.sq_prod = fcport->fw_sq_prod_idx;
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
799 writel(*(u32 *)&dbell, fcport->p_doorbell);
	/*
	 * Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	wmb();
808 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
811 struct qedf_ctx *qedf = fcport->qedf;
812 struct qedf_io_log *io_log;
813 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
817 spin_lock_irqsave(&qedf->io_trace_lock, flags);
819 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
820 io_log->direction = direction;
821 io_log->task_id = io_req->xid;
822 io_log->port_id = fcport->rdata->ids.port_id;
823 io_log->lun = sc_cmd->device->lun;
824 io_log->op = op = sc_cmd->cmnd[0];
825 io_log->lba[0] = sc_cmd->cmnd[2];
826 io_log->lba[1] = sc_cmd->cmnd[3];
827 io_log->lba[2] = sc_cmd->cmnd[4];
828 io_log->lba[3] = sc_cmd->cmnd[5];
829 io_log->bufflen = scsi_bufflen(sc_cmd);
830 io_log->sg_count = scsi_sg_count(sc_cmd);
831 io_log->result = sc_cmd->result;
832 io_log->jiffies = jiffies;
833 io_log->refcount = kref_read(&io_req->refcount);
835 if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
837 io_log->req_cpu = io_req->cpu;
840 } else if (direction == QEDF_IO_TRACE_RSP) {
841 io_log->req_cpu = io_req->cpu;
842 io_log->int_cpu = io_req->int_cpu;
843 io_log->rsp_cpu = smp_processor_id();
846 io_log->sge_type = io_req->sge_type;
848 qedf->io_trace_idx++;
849 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
850 qedf->io_trace_idx = 0;
852 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
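
/*
 * qedf_post_io_req() builds the BD list for the SCSI command, reserves an
 * SQE, initializes the firmware task context and rings the doorbell.  The
 * caller (qedf_queuecommand) holds fcport->rport_lock across this call.
 */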
855 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
857 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
858 struct Scsi_Host *host = sc_cmd->device->host;
859 struct fc_lport *lport = shost_priv(host);
860 struct qedf_ctx *qedf = lport_priv(lport);
861 struct e4_fcoe_task_context *task_ctx;
863 enum fcoe_task_type req_type = 0;
864 struct fcoe_wqe *sqe;
	/* Initialize rest of io_req fields */
868 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
869 sc_cmd->SCp.ptr = (char *)io_req;
870 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
872 /* Record which cpu this request is associated with */
873 io_req->cpu = smp_processor_id();
875 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
876 req_type = FCOE_TASK_TYPE_READ_INITIATOR;
877 io_req->io_req_flags = QEDF_READ;
878 qedf->input_requests++;
879 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
880 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
881 io_req->io_req_flags = QEDF_WRITE;
882 qedf->output_requests++;
884 io_req->io_req_flags = 0;
885 qedf->control_requests++;
890 /* Build buffer descriptor list for firmware from sg list */
891 if (qedf_build_bd_list_from_sg(io_req)) {
892 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
893 /* Release cmd will release io_req, but sc_cmd is assigned */
894 io_req->sc_cmd = NULL;
895 kref_put(&io_req->refcount, qedf_release_cmd);
899 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
900 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
901 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
902 /* Release cmd will release io_req, but sc_cmd is assigned */
903 io_req->sc_cmd = NULL;
904 kref_put(&io_req->refcount, qedf_release_cmd);
	/* Record LUN number for later use if we need it */
909 io_req->lun = (int)sc_cmd->device->lun;
911 /* Obtain free SQE */
912 sqe_idx = qedf_get_sqe_idx(fcport);
913 sqe = &fcport->sq[sqe_idx];
914 memset(sqe, 0, sizeof(struct fcoe_wqe));
916 /* Get the task context */
917 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
919 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
921 /* Release cmd will release io_req, but sc_cmd is assigned */
922 io_req->sc_cmd = NULL;
923 kref_put(&io_req->refcount, qedf_release_cmd);
927 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
930 qedf_ring_doorbell(fcport);
932 /* Set that command is with the firmware now */
933 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
935 if (qedf_io_tracing && io_req->sc_cmd)
936 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
942 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
944 struct fc_lport *lport = shost_priv(host);
945 struct qedf_ctx *qedf = lport_priv(lport);
946 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
947 struct fc_rport_libfc_priv *rp = rport->dd_data;
948 struct qedf_rport *fcport;
949 struct qedf_ioreq *io_req;
952 unsigned long flags = 0;
955 num_sgs = scsi_sg_count(sc_cmd);
956 if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
957 QEDF_ERR(&qedf->dbg_ctx,
958 "Number of SG elements %d exceeds what hardware limitation of %d.\n",
959 num_sgs, QEDF_MAX_BDS_PER_CMD);
960 sc_cmd->result = DID_ERROR;
961 sc_cmd->scsi_done(sc_cmd);
965 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
966 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
967 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
968 "Returning DNC as unloading or stop io, flags 0x%lx.\n",
970 sc_cmd->result = DID_NO_CONNECT << 16;
971 sc_cmd->scsi_done(sc_cmd);
975 if (!qedf->pdev->msix_enabled) {
976 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
977 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
979 sc_cmd->result = DID_NO_CONNECT << 16;
980 sc_cmd->scsi_done(sc_cmd);
984 rval = fc_remote_port_chkready(rport);
986 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
987 "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
988 rval, rport->port_id);
989 sc_cmd->result = rval;
990 sc_cmd->scsi_done(sc_cmd);
994 /* Retry command if we are doing a qed drain operation */
995 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
996 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
997 rc = SCSI_MLQUEUE_HOST_BUSY;
1001 if (lport->state != LPORT_ST_READY ||
1002 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1003 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
1004 rc = SCSI_MLQUEUE_HOST_BUSY;
1008 /* rport and tgt are allocated together, so tgt should be non-NULL */
1009 fcport = (struct qedf_rport *)&rp[1];
1011 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1012 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
1017 rc = SCSI_MLQUEUE_TARGET_BUSY;
1021 atomic_inc(&fcport->ios_to_queue);
1023 if (fcport->retry_delay_timestamp) {
1024 /* Take fcport->rport_lock for resetting the delay_timestamp */
1025 spin_lock_irqsave(&fcport->rport_lock, flags);
1026 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1027 fcport->retry_delay_timestamp = 0;
1029 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1030 /* If retry_delay timer is active, flow off the ML */
1031 rc = SCSI_MLQUEUE_TARGET_BUSY;
1032 atomic_dec(&fcport->ios_to_queue);
1035 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1038 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1040 rc = SCSI_MLQUEUE_HOST_BUSY;
1041 atomic_dec(&fcport->ios_to_queue);
1045 io_req->sc_cmd = sc_cmd;
1047 /* Take fcport->rport_lock for posting to fcport send queue */
1048 spin_lock_irqsave(&fcport->rport_lock, flags);
1049 if (qedf_post_io_req(fcport, io_req)) {
1050 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1051 /* Return SQE to pool */
1052 atomic_inc(&fcport->free_sqes);
1053 rc = SCSI_MLQUEUE_HOST_BUSY;
1055 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1056 atomic_dec(&fcport->ios_to_queue);
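
/*
 * qedf_parse_fcp_rsp() decodes the FCP_RSP IU from the completion: residual
 * count, SCSI status, the optional rsp_info code (task management responses)
 * and any sense data, which is copied into the midlayer sense buffer.
 */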
1062 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1063 struct fcoe_cqe_rsp_info *fcp_rsp)
1065 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1066 struct qedf_ctx *qedf = io_req->fcport->qedf;
1067 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1068 int fcp_sns_len = 0;
1069 int fcp_rsp_len = 0;
1070 uint8_t *rsp_info, *sense_data;
1072 io_req->fcp_status = FC_GOOD;
1073 io_req->fcp_resid = 0;
1074 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1075 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1076 io_req->fcp_resid = fcp_rsp->fcp_resid;
1078 io_req->scsi_comp_flags = rsp_flags;
1079 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1080 fcp_rsp->scsi_status_code;
1083 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1084 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1087 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1088 fcp_sns_len = fcp_rsp->fcp_sns_len;
1090 io_req->fcp_rsp_len = fcp_rsp_len;
1091 io_req->fcp_sns_len = fcp_sns_len;
1092 rsp_info = sense_data = io_req->sense_buffer;
1094 /* fetch fcp_rsp_code */
1095 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1096 /* Only for task management function */
1097 io_req->fcp_rsp_code = rsp_info[3];
1098 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1099 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1100 /* Adjust sense-data location. */
1101 sense_data += fcp_rsp_len;
1104 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1105 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1106 "Truncating sense buffer\n");
1107 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1110 /* The sense buffer can be NULL for TMF commands */
1111 if (sc_cmd->sense_buffer) {
1112 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1114 memcpy(sc_cmd->sense_buffer, sense_data,
1119 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1121 struct scsi_cmnd *sc = io_req->sc_cmd;
1123 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1124 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1125 scsi_sg_count(sc), sc->sc_data_direction);
1126 io_req->bd_tbl->bd_valid = 0;
1130 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1131 struct qedf_ioreq *io_req)
1134 struct e4_fcoe_task_context *task_ctx;
1135 struct scsi_cmnd *sc_cmd;
1136 struct fcoe_cqe_rsp_info *fcp_rsp;
1137 struct qedf_rport *fcport;
1139 u16 scope, qualifier = 0;
1140 u8 fw_residual_flag = 0;
1141 unsigned long flags = 0;
1149 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1150 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1151 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1152 QEDF_ERR(&qedf->dbg_ctx,
1153 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1159 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1160 sc_cmd = io_req->sc_cmd;
1161 fcp_rsp = &cqe->cqe_info.rsp_info;
1164 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1168 if (!sc_cmd->SCp.ptr) {
1169 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1170 "another context.\n");
1174 if (!sc_cmd->device) {
1175 QEDF_ERR(&qedf->dbg_ctx,
1176 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1180 if (!sc_cmd->request) {
1181 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1182 "sc_cmd=%p.\n", sc_cmd);
1186 if (!sc_cmd->request->q) {
1187 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1188 "is not valid, sc_cmd=%p.\n", sc_cmd);
1192 fcport = io_req->fcport;
	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context.
	 */
1198 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1199 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1200 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1201 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1202 "Dropping good completion xid=0x%x as fcport is flushing",
1207 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1209 qedf_unmap_sg_list(qedf, io_req);
1211 /* Check for FCP transport error */
1212 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1213 QEDF_ERR(&(qedf->dbg_ctx),
1214 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1215 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1216 io_req->fcp_rsp_code);
1217 sc_cmd->result = DID_BUS_BUSY << 16;
1221 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1222 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1223 if (fw_residual_flag) {
1224 QEDF_ERR(&qedf->dbg_ctx,
1225 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1226 io_req->xid, fcp_rsp->rsp_flags.flags,
1228 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1229 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1231 if (io_req->cdb_status == 0)
1232 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1234 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
1240 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1244 switch (io_req->fcp_status) {
1246 if (io_req->cdb_status == 0) {
1247 /* Good I/O completion */
1248 sc_cmd->result = DID_OK << 16;
1250 refcount = kref_read(&io_req->refcount);
1251 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1252 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1253 "lba=%02x%02x%02x%02x cdb_status=%d "
1254 "fcp_resid=0x%x refcount=%d.\n",
1255 qedf->lport->host->host_no, sc_cmd->device->id,
1256 sc_cmd->device->lun, io_req->xid,
1257 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1258 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1259 io_req->cdb_status, io_req->fcp_resid,
1261 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1263 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1264 io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */
1272 scope = fcp_rsp->retry_delay_timer & 0xC000;
1274 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1276 if (qedf_retry_delay)
1279 if (io_req->cdb_status ==
1280 SAM_STAT_TASK_SET_FULL)
1281 qedf->task_set_fulls++;
1286 if (io_req->fcp_resid)
1287 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1289 if (chk_scope == 1) {
1290 if ((scope == 1 || scope == 2) &&
1291 (qualifier > 0 && qualifier <= 0x3FEF)) {
1292 /* Check we don't go over the max */
1293 if (qualifier > QEDF_RETRY_DELAY_MAX) {
1294 qualifier = QEDF_RETRY_DELAY_MAX;
1295 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1297 (fcp_rsp->retry_delay_timer &
1300 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1301 "Scope = %d and qualifier = %d",
				/* Take fcport->rport_lock to
				 * update the retry_delay_timestamp.
				 */
1306 spin_lock_irqsave(&fcport->rport_lock, flags);
1307 fcport->retry_delay_timestamp =
1308 jiffies + (qualifier * HZ / 10);
1309 spin_unlock_irqrestore(&fcport->rport_lock,
1313 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1314 "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1320 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1321 io_req->fcp_status);
1326 if (qedf_io_tracing)
1327 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
	/*
	 * We wait till the end of the function to clear the
	 * outstanding bit in case we need to send an abort.
	 */
1333 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1335 io_req->sc_cmd = NULL;
1336 sc_cmd->SCp.ptr = NULL;
1337 sc_cmd->scsi_done(sc_cmd);
1338 kref_put(&io_req->refcount, qedf_release_cmd);
1341 /* Return a SCSI command in some other context besides a normal completion */
1342 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1346 struct scsi_cmnd *sc_cmd;
1350 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1354 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1355 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1356 "io_req:%p scsi_done handling already done\n",
	/*
	 * We will be done with this command after this call so clear the
	 * outstanding bit.
	 */
1365 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1368 sc_cmd = io_req->sc_cmd;
1371 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1375 if (!virt_addr_valid(sc_cmd)) {
1376 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1380 if (!sc_cmd->SCp.ptr) {
1381 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1382 "another context.\n");
1386 if (!sc_cmd->device) {
1387 QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1392 if (!virt_addr_valid(sc_cmd->device)) {
1393 QEDF_ERR(&qedf->dbg_ctx,
1394 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1398 if (!sc_cmd->sense_buffer) {
1399 QEDF_ERR(&qedf->dbg_ctx,
1400 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1405 if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1406 QEDF_ERR(&qedf->dbg_ctx,
1407 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1412 if (!sc_cmd->scsi_done) {
1413 QEDF_ERR(&qedf->dbg_ctx,
1414 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
1419 qedf_unmap_sg_list(qedf, io_req);
1421 sc_cmd->result = result << 16;
1422 refcount = kref_read(&io_req->refcount);
1423 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1424 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1425 "allowed=%d retries=%d refcount=%d.\n",
1426 qedf->lport->host->host_no, sc_cmd->device->id,
1427 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1428 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1429 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data.
	 */
1436 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1438 if (qedf_io_tracing)
1439 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1441 io_req->sc_cmd = NULL;
1442 sc_cmd->SCp.ptr = NULL;
1443 sc_cmd->scsi_done(sc_cmd);
1444 kref_put(&io_req->refcount, qedf_release_cmd);
	/*
	 * Clear the io_req->sc_cmd backpointer so we don't try to process
	 * this again.
	 */
1452 io_req->sc_cmd = NULL;
1453 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * expiration.
 */
1460 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1461 struct qedf_ioreq *io_req)
1464 struct qedf_rport *fcport = io_req->fcport;
1465 u64 err_warn_bit_map;
1469 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1470 "cqe is NULL for io_req %p xid=0x%x\n",
1471 io_req, io_req->xid);
1475 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1476 "xid=0x%x\n", io_req->xid);
1477 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1478 "err_warn_bitmap=%08x:%08x\n",
1479 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1480 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1481 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1482 "rx_buff_off=%08x, rx_id=%04x\n",
1483 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1484 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1485 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
	/* Normalize the error bitmap value to just an unsigned int */
1488 err_warn_bit_map = (u64)
1489 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1490 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1491 for (i = 0; i < 64; i++) {
1492 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1498 /* Check if REC TOV expired if this is a tape device */
1499 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1501 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1502 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1503 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1504 io_req->rx_buf_off =
1505 cqe->cqe_info.err_info.rx_buf_off;
1506 io_req->tx_buf_off =
1507 cqe->cqe_info.err_info.tx_buf_off;
1508 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1509 rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
1523 init_completion(&io_req->abts_done);
1524 rval = qedf_initiate_abts(io_req, true);
1526 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1529 /* Cleanup a command when we receive an error detection completion */
1530 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1531 struct qedf_ioreq *io_req)
1536 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1537 "cqe is NULL for io_req %p\n", io_req);
1541 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1542 "xid=0x%x\n", io_req->xid);
1543 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1544 "err_warn_bitmap=%08x:%08x\n",
1545 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1546 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1547 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1548 "rx_buff_off=%08x, rx_id=%04x\n",
1549 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1550 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1551 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1553 if (qedf->stop_io_on_error) {
1554 qedf_stop_all_io(qedf);
1558 init_completion(&io_req->abts_done);
1559 rval = qedf_initiate_abts(io_req, true);
1561 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1564 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1565 struct qedf_ioreq *els_req)
1567 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1568 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1569 kref_read(&els_req->refcount));
	/*
	 * Need to distinguish this from a timeout when calling the
	 * completion callback.
	 */
1575 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1577 /* Cancel the timer */
1578 cancel_delayed_work_sync(&els_req->timeout_work);
1580 /* Call callback function to complete command */
1581 if (els_req->cb_func && els_req->cb_arg) {
1582 els_req->cb_func(els_req->cb_arg);
1583 els_req->cb_arg = NULL;
1586 /* Release kref for original initiate_els */
1587 kref_put(&els_req->refcount, qedf_release_cmd);
/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
1593 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1595 struct qedf_ioreq *io_req;
1596 struct qedf_ctx *qedf;
1597 struct qedf_cmd_mgr *cmd_mgr;
1599 unsigned long flags;
1605 QEDF_ERR(NULL, "fcport is NULL\n");
1609 /* Check that fcport is still offloaded */
1610 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1611 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1615 qedf = fcport->qedf;
1618 QEDF_ERR(NULL, "qedf is NULL.\n");
1622 /* Only wait for all commands to be queued in the Upload context */
1623 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1625 while (atomic_read(&fcport->ios_to_queue)) {
1626 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1627 "Waiting for %d I/Os to be queued\n",
1628 atomic_read(&fcport->ios_to_queue));
1629 if (wait_cnt == 0) {
1631 "%d IOs request could not be queued\n",
1632 atomic_read(&fcport->ios_to_queue));
1639 cmd_mgr = qedf->cmd_mgr;
1641 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1642 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1643 atomic_read(&fcport->num_active_ios), fcport,
1644 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1645 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1647 mutex_lock(&qedf->flush_mutex);
1649 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1651 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1652 fcport->lun_reset_lun = lun;
1655 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1656 io_req = &cmd_mgr->cmds[i];
1660 if (!io_req->fcport)
1663 spin_lock_irqsave(&cmd_mgr->lock, flags);
1665 if (io_req->alloc) {
1666 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1667 if (io_req->cmd_type == QEDF_SCSI_CMD)
1668 QEDF_ERR(&qedf->dbg_ctx,
1669 "Allocated but not queued, xid=0x%x\n",
1672 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1674 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1678 if (io_req->fcport != fcport)
		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
		 * but RRQ is still pending.
		 * Workaround: Within qedf_send_rrq, we check if the fcport is
		 * NULL, and we drop the ref on the io_req to clean it up.
		 */
1686 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1687 refcount = kref_read(&io_req->refcount);
1688 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1689 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1690 io_req->xid, io_req->cmd_type, refcount);
			/* If RRQ work has been queued, try to cancel it and
			 * put the reference taken for it.
			 */
1694 if (atomic_read(&io_req->state) ==
1695 QEDFC_CMD_ST_RRQ_WAIT) {
1696 if (cancel_delayed_work_sync
1697 (&io_req->rrq_work)) {
1698 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1699 "Putting reference for pending RRQ work xid=0x%x.\n",
1702 kref_put(&io_req->refcount,
1709 /* Only consider flushing ELS during target reset */
1710 if (io_req->cmd_type == QEDF_ELS &&
1712 rc = kref_get_unless_zero(&io_req->refcount);
1714 QEDF_ERR(&(qedf->dbg_ctx),
1715 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1716 io_req, io_req->xid);
1720 qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
1728 if (io_req->cmd_type == QEDF_ABTS) {
1730 rc = kref_get_unless_zero(&io_req->refcount);
1732 QEDF_ERR(&(qedf->dbg_ctx),
1733 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1734 io_req, io_req->xid);
1737 if (lun != -1 && io_req->lun != lun)
1740 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1741 "Flushing abort xid=0x%x.\n", io_req->xid);
1743 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1744 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1745 "Putting ref for cancelled RRQ work xid=0x%x.\n",
1747 kref_put(&io_req->refcount, qedf_release_cmd);
1750 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1751 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1752 "Putting ref for cancelled tmo work xid=0x%x.\n",
1754 qedf_initiate_cleanup(io_req, true);
				/* Notify eh_abort handler that ABTS is
				 * complete
				 */
1758 complete(&io_req->abts_done);
1759 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1761 kref_put(&io_req->refcount, qedf_release_cmd);
1767 if (!io_req->sc_cmd)
1769 if (!io_req->sc_cmd->device) {
1770 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1771 "Device backpointer NULL for sc_cmd=%p.\n",
1773 /* Put reference for non-existent scsi_cmnd */
1774 io_req->sc_cmd = NULL;
1775 qedf_initiate_cleanup(io_req, false);
1776 kref_put(&io_req->refcount, qedf_release_cmd);
1780 if (io_req->lun != lun)
1785 * Use kref_get_unless_zero in the unlikely case the command
1786 * we're about to flush was completed in the normal SCSI path
1788 rc = kref_get_unless_zero(&io_req->refcount);
1790 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1791 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1795 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1796 "Cleanup xid=0x%x.\n", io_req->xid);
		/* Clean up the task and return the I/O to the mid-layer */
1800 qedf_initiate_cleanup(io_req, true);
1803 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1807 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1808 "Flushed 0x%x I/Os, active=0x%x.\n",
1809 flush_cnt, atomic_read(&fcport->num_active_ios));
1810 /* Only wait for all commands to complete in the Upload context */
1811 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1813 while (atomic_read(&fcport->num_active_ios)) {
1814 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1815 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1817 atomic_read(&fcport->num_active_ios),
1819 if (wait_cnt == 0) {
1820 QEDF_ERR(&qedf->dbg_ctx,
1821 "Flushed %d I/Os, active=%d.\n",
1823 atomic_read(&fcport->num_active_ios));
1824 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1825 io_req = &cmd_mgr->cmds[i];
1826 if (io_req->fcport &&
1827 io_req->fcport == fcport) {
1829 kref_read(&io_req->refcount);
1830 set_bit(QEDF_CMD_DIRTY,
1832 QEDF_ERR(&qedf->dbg_ctx,
1833 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1834 io_req, io_req->xid,
1849 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1850 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1851 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1852 mutex_unlock(&qedf->flush_mutex);
/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
1859 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1861 struct fc_lport *lport;
1862 struct qedf_rport *fcport = io_req->fcport;
1863 struct fc_rport_priv *rdata;
1864 struct qedf_ctx *qedf;
1868 unsigned long flags;
1869 struct fcoe_wqe *sqe;
1873 /* Sanity check qedf_rport before dereferencing any pointers */
1874 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1875 QEDF_ERR(NULL, "tgt not offloaded\n");
1880 qedf = fcport->qedf;
1881 rdata = fcport->rdata;
1883 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1884 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1889 r_a_tov = rdata->r_a_tov;
1890 lport = qedf->lport;
1892 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1893 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1895 goto drop_rdata_kref;
1898 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1899 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1901 goto drop_rdata_kref;
1904 /* Ensure room on SQ */
1905 if (!atomic_read(&fcport->free_sqes)) {
1906 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1908 goto drop_rdata_kref;
1911 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1912 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1914 goto drop_rdata_kref;
1917 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1918 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1919 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1920 QEDF_ERR(&qedf->dbg_ctx,
1921 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1922 io_req->xid, io_req->sc_cmd);
1924 goto drop_rdata_kref;
1927 kref_get(&io_req->refcount);
1930 qedf->control_requests++;
1931 qedf->packet_aborts++;
1933 /* Set the command type to abort */
1934 io_req->cmd_type = QEDF_ABTS;
1935 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1937 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1938 refcount = kref_read(&io_req->refcount);
1939 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1940 "ABTS io_req xid = 0x%x refcount=%d\n",
1943 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1945 spin_lock_irqsave(&fcport->rport_lock, flags);
1947 sqe_idx = qedf_get_sqe_idx(fcport);
1948 sqe = &fcport->sq[sqe_idx];
1949 memset(sqe, 0, sizeof(struct fcoe_wqe));
1950 io_req->task_params->sqe = sqe;
1952 init_initiator_abort_fcoe_task(io_req->task_params);
1953 qedf_ring_doorbell(fcport);
1955 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1958 kref_put(&rdata->kref, fc_rport_destroy);
1963 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1964 struct qedf_ioreq *io_req)
1969 struct qedf_rport *fcport = io_req->fcport;
1971 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1972 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1975 r_ctl = cqe->cqe_info.abts_info.r_ctl;
	/* This was added at a point when we were scheduling abts_compl &
	 * cleanup_compl on different CPUs and there was a possibility of
	 * the io_req to be freed from the other context before we got here.
	 */
1982 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1983 "Dropping ABTS completion xid=0x%x as fcport is NULL",
	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context.
	 */
1992 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1993 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1994 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1995 "Dropping ABTS completion xid=0x%x as fcport is flushing",
2000 if (!cancel_delayed_work(&io_req->timeout_work)) {
2001 QEDF_ERR(&qedf->dbg_ctx,
2002 "Wasn't able to cancel abts timeout work.\n");
2006 case FC_RCTL_BA_ACC:
2007 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2008 "ABTS response - ACC Send RRQ after R_A_TOV\n");
2009 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2010 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2012 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2013 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response.
		 */
2021 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2022 msecs_to_jiffies(qedf->lport->r_a_tov));
2023 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2025 /* For error cases let the cleanup return the command */
2026 case FC_RCTL_BA_RJT:
2027 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2028 "ABTS response - RJT\n");
2029 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2032 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2036 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2038 if (io_req->sc_cmd) {
2039 if (!io_req->return_scsi_cmd_on_abts)
2040 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2041 "Not call scsi_done for xid=0x%x.\n",
2043 if (io_req->return_scsi_cmd_on_abts)
2044 qedf_scsi_done(qedf, io_req, DID_ERROR);
2047 /* Notify eh_abort handler that ABTS is complete */
2048 complete(&io_req->abts_done);
2050 kref_put(&io_req->refcount, qedf_release_cmd);
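
/*
 * qedf_init_mp_req() allocates the DMA resources used by middle path
 * commands (ELS and task management): one page each for the request and
 * response buffers plus a single-entry BD table for either direction.
 * qedf_free_mp_resc() releases them.
 */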
2053 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2055 struct qedf_mp_req *mp_req;
2056 struct scsi_sge *mp_req_bd;
2057 struct scsi_sge *mp_resp_bd;
2058 struct qedf_ctx *qedf = io_req->fcport->qedf;
2062 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2064 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2065 memset(mp_req, 0, sizeof(struct qedf_mp_req));
2067 if (io_req->cmd_type != QEDF_ELS) {
2068 mp_req->req_len = sizeof(struct fcp_cmnd);
2069 io_req->data_xfer_len = mp_req->req_len;
2071 mp_req->req_len = io_req->data_xfer_len;
2073 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2074 &mp_req->req_buf_dma, GFP_KERNEL);
2075 if (!mp_req->req_buf) {
2076 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2077 qedf_free_mp_resc(io_req);
2081 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2082 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2083 if (!mp_req->resp_buf) {
2084 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2086 qedf_free_mp_resc(io_req);
2090 /* Allocate and map mp_req_bd and mp_resp_bd */
2091 sz = sizeof(struct scsi_sge);
2092 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2093 &mp_req->mp_req_bd_dma, GFP_KERNEL);
2094 if (!mp_req->mp_req_bd) {
2095 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2096 qedf_free_mp_resc(io_req);
2100 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2101 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2102 if (!mp_req->mp_resp_bd) {
2103 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2104 qedf_free_mp_resc(io_req);
2109 addr = mp_req->req_buf_dma;
2110 mp_req_bd = mp_req->mp_req_bd;
2111 mp_req_bd->sge_addr.lo = U64_LO(addr);
2112 mp_req_bd->sge_addr.hi = U64_HI(addr);
2113 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table.
	 */
2120 mp_resp_bd = mp_req->mp_resp_bd;
2121 addr = mp_req->resp_buf_dma;
2122 mp_resp_bd->sge_addr.lo = U64_LO(addr);
2123 mp_resp_bd->sge_addr.hi = U64_HI(addr);
2124 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
2133 static void qedf_drain_request(struct qedf_ctx *qedf)
2135 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2136 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2140 /* Set bit to return all queuecommand requests as busy */
2141 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2143 /* Call qed drain request for function. Should be synchronous */
2144 qed_ops->common->drain(qedf->cdev);
2146 /* Settle time for CQEs to be returned */
2149 /* Unplug and continue */
2150 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILED.
 */
2157 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2158 bool return_scsi_cmd_on_abts)
2159 {
2160 struct qedf_rport *fcport;
2161 struct qedf_ctx *qedf;
2162 uint16_t xid;
2163 struct e4_fcoe_task_context *task;
2164 int tmo = 0;
2165 int rc = SUCCESS;
2166 unsigned long flags;
2167 struct fcoe_wqe *sqe;
2168 u16 sqe_idx;
2169 int refcount = 0;
2171 fcport = io_req->fcport;
2172 if (!fcport) {
2173 QEDF_ERR(NULL, "fcport is NULL.\n");
2174 return SUCCESS;
2175 }
2177 /* Sanity check qedf_rport before dereferencing any pointers */
2178 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2179 QEDF_ERR(NULL, "tgt not offloaded\n");
2181 return SUCCESS;
2182 }
2184 qedf = fcport->qedf;
2185 if (!qedf) {
2186 QEDF_ERR(NULL, "qedf is NULL.\n");
2187 return SUCCESS;
2188 }
2190 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2191 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2192 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2193 "cleanup processing or already completed.\n",
2194 io_req->xid);
2195 return SUCCESS;
2196 }
2197 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2199 /* Ensure room on SQ */
2200 if (!atomic_read(&fcport->free_sqes)) {
2201 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2202 /* Need to make sure we clear the flag since it was set */
2203 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2204 return FAILED;
2205 }
2207 if (io_req->cmd_type == QEDF_CLEANUP) {
2208 QEDF_ERR(&qedf->dbg_ctx,
2209 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2210 io_req->xid, io_req->cmd_type);
2211 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2212 return SUCCESS;
2213 }
2215 refcount = kref_read(&io_req->refcount);
2217 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2218 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2219 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2220 refcount, fcport, fcport->rdata->ids.port_id);
2222 /* Cleanup cmds re-use the same TID as the original I/O */
2223 xid = io_req->xid;
2224 io_req->cmd_type = QEDF_CLEANUP;
2225 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2227 task = qedf_get_task_mem(&qedf->tasks, xid);
2229 init_completion(&io_req->cleanup_done);
2231 spin_lock_irqsave(&fcport->rport_lock, flags);
2233 sqe_idx = qedf_get_sqe_idx(fcport);
2234 sqe = &fcport->sq[sqe_idx];
2235 memset(sqe, 0, sizeof(struct fcoe_wqe));
2236 io_req->task_params->sqe = sqe;
2238 init_initiator_cleanup_fcoe_task(io_req->task_params);
2239 qedf_ring_doorbell(fcport);
2241 spin_unlock_irqrestore(&fcport->rport_lock, flags);
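/* Wait for qedf_process_cleanup_compl() to signal that firmware dropped the task */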
2243 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2244 QEDF_CLEANUP_TIMEOUT * HZ);
2246 if (!tmo) {
2247 rc = FAILED;
2249 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2250 "xid=%x.\n", io_req->xid);
2251 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2252 /* Issue a drain request if cleanup task times out */
2253 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2254 qedf_drain_request(qedf);
2255 }
2257 /* If it is TASK MGMT, handle it here; the reference will be decreased
2258 * in qedf_execute_tmf()
2259 */
2260 if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2261 io_req->tm_flags == FCP_TMF_TGT_RESET) {
2262 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2263 io_req->sc_cmd = NULL;
2264 complete(&io_req->tm_done);
2265 }
2267 if (io_req->sc_cmd) {
2268 if (!io_req->return_scsi_cmd_on_abts)
2269 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2270 "Not call scsi_done for xid=0x%x.\n",
2272 if (io_req->return_scsi_cmd_on_abts)
2273 qedf_scsi_done(qedf, io_req, DID_ERROR);
2277 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2279 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2284 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2285 struct qedf_ioreq *io_req)
2286 {
2287 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2288 io_req->xid);
2290 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2292 /* Complete so we can finish cleaning up the I/O */
2293 complete(&io_req->cleanup_done);
2294 }
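/*
 * qedf_execute_tmf() - issue an FCP task management request (LUN or target
 * reset) on an offloaded session, wait up to QEDF_TM_TIMEOUT seconds for the
 * response, then flush any I/O still active against the affected LUN/target.
 */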
2296 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2297 uint8_t tm_flags)
2298 {
2299 struct qedf_ioreq *io_req;
2300 struct e4_fcoe_task_context *task;
2301 struct qedf_ctx *qedf = fcport->qedf;
2302 struct fc_lport *lport = qedf->lport;
2303 int rc = 0;
2304 uint16_t xid;
2305 int tmo = 0;
2306 int lun = 0;
2307 unsigned long flags;
2308 struct fcoe_wqe *sqe;
2309 u16 sqe_idx;
2311 if (!sc_cmd) {
2312 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
2313 return FAILED;
2314 }
2316 lun = (int)sc_cmd->device->lun;
2317 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2318 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2319 rc = FAILED;
2320 return FAILED;
2321 }
2323 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2324 if (!io_req) {
2325 QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
2326 rc = FAILED;
2327 goto reset_tmf_err;
2328 }
2330 if (tm_flags == FCP_TMF_LUN_RESET)
2331 qedf->lun_resets++;
2332 else if (tm_flags == FCP_TMF_TGT_RESET)
2333 qedf->target_resets++;
2335 /* Initialize rest of io_req fields */
2336 io_req->sc_cmd = sc_cmd;
2337 io_req->fcport = fcport;
2338 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2340 /* Record which cpu this request is associated with */
2341 io_req->cpu = smp_processor_id();
2344 io_req->io_req_flags = QEDF_READ;
2345 io_req->data_xfer_len = 0;
2346 io_req->tm_flags = tm_flags;
2348 /* Default is to return a SCSI command when an error occurs */
2349 io_req->return_scsi_cmd_on_abts = false;
2351 /* Obtain exchange id */
2352 xid = io_req->xid;
2354 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2355 "0x%x\n", xid);
2357 /* Initialize task context for this IO request */
2358 task = qedf_get_task_mem(&qedf->tasks, xid);
2360 init_completion(&io_req->tm_done);
2362 spin_lock_irqsave(&fcport->rport_lock, flags);
2364 sqe_idx = qedf_get_sqe_idx(fcport);
2365 sqe = &fcport->sq[sqe_idx];
2366 memset(sqe, 0, sizeof(struct fcoe_wqe));
2368 qedf_init_task(fcport, lport, io_req, task, sqe);
2369 qedf_ring_doorbell(fcport);
2371 spin_unlock_irqrestore(&fcport->rport_lock, flags);
2373 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
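/* qedf_process_tmf_compl() parses the FCP_RSP and completes tm_done */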
2374 tmo = wait_for_completion_timeout(&io_req->tm_done,
2375 QEDF_TM_TIMEOUT * HZ);
2377 if (!tmo) {
2378 rc = FAILED;
2379 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2380 /* Clear outstanding bit since command timed out */
2381 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2382 io_req->sc_cmd = NULL;
2383 }
2384 /* Check TMF response code */
2385 if (io_req->fcp_rsp_code == 0)
2386 rc = SUCCESS;
2387 else
2388 rc = FAILED;
2390 /*
2391 * Double check that fcport has not gone into an uploading state before
2392 * executing the command flush for the LUN/target.
2393 */
2394 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2395 QEDF_ERR(&qedf->dbg_ctx,
2396 "fcport is uploading, not executing flush.\n");
2399 /* We do not need this io_req any more */
2400 kref_put(&io_req->refcount, qedf_release_cmd);
2403 if (tm_flags == FCP_TMF_LUN_RESET)
2404 qedf_flush_active_ios(fcport, lun);
2406 qedf_flush_active_ios(fcport, -1);
2409 if (rc != SUCCESS) {
2410 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2411 rc = FAILED;
2412 } else {
2413 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2414 rc = SUCCESS;
2415 }
2416 reset_tmf_err:
2417 return rc;
2418 }
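/*
 * qedf_initiate_tmf() - SCSI error-handling entry point for LUN and target
 * reset. Validates the rport, lport and connection state, then hands the
 * request to qedf_execute_tmf(); a reference on the rport data is held for
 * the duration of the operation.
 */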
2419 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2420 {
2421 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2422 struct fc_rport_libfc_priv *rp = rport->dd_data;
2423 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2424 struct qedf_ctx *qedf;
2425 struct fc_lport *lport = shost_priv(sc_cmd->device->host);
2426 int rc = SUCCESS;
2427 int rval;
2428 struct qedf_ioreq *io_req = NULL;
2429 int ref_cnt = 0;
2430 struct fc_rport_priv *rdata = fcport->rdata;
2433 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2434 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
2435 rport->scsi_target_id, (int)sc_cmd->device->lun);
2437 if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2438 QEDF_ERR(NULL, "stale rport\n");
2439 return FAILED;
2440 }
2442 QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
2443 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2444 "LUN RESET");
2446 if (sc_cmd->SCp.ptr) {
2447 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
2448 ref_cnt = kref_read(&io_req->refcount);
2449 QEDF_ERR(NULL,
2450 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2451 io_req, io_req->xid, ref_cnt);
2452 }
2454 rval = fc_remote_port_chkready(rport);
2455 if (rval) {
2456 QEDF_ERR(NULL, "device_reset rport not ready\n");
2457 rc = FAILED;
2458 goto tmf_err;
2459 }
2461 rc = fc_block_scsi_eh(sc_cmd);
2462 if (rc)
2463 goto tmf_err;
2465 if (!fcport) {
2466 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2467 rc = FAILED;
2468 goto tmf_err;
2469 }
2471 qedf = fcport->qedf;
2473 if (!qedf) {
2474 QEDF_ERR(NULL, "qedf is NULL.\n");
2475 rc = FAILED;
2476 goto tmf_err;
2477 }
2479 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2480 QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2481 rc = SUCCESS;
2482 goto tmf_err;
2483 }
2485 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2486 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2487 rc = SUCCESS;
2488 goto tmf_err;
2489 }
2491 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2492 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2493 rc = SUCCESS;
2494 goto tmf_err;
2495 }
2497 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2498 if (!fcport->rdata)
2499 QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2500 fcport);
2501 else
2502 QEDF_ERR(&qedf->dbg_ctx,
2503 "fcport %p port_id=%06x is uploading.\n",
2504 fcport, fcport->rdata->ids.port_id);
2505 rc = SUCCESS;
2506 goto tmf_err;
2507 }
2509 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2511 tmf_err:
2512 kref_put(&rdata->kref, fc_rport_destroy);
2513 return rc;
2514 }
2516 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2517 struct qedf_ioreq *io_req)
2518 {
2519 struct fcoe_cqe_rsp_info *fcp_rsp;
2521 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2523 fcp_rsp = &cqe->cqe_info.rsp_info;
2524 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2526 io_req->sc_cmd = NULL;
2527 complete(&io_req->tm_done);
2528 }
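/*
 * qedf_process_unsol_compl() - handle an unsolicited frame received into a
 * BDQ buffer. The frame is copied into a newly allocated fc_frame and
 * deferred to a workqueue so libfc can process it outside of the fastpath
 * interrupt context, then the BDQ producer index is advanced so firmware can
 * reuse the buffer.
 */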
2530 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2531 struct fcoe_cqe *cqe)
2532 {
2533 unsigned long flags;
2534 uint16_t tmp;
2535 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2536 u32 payload_len, crc;
2537 struct fc_frame_header *fh;
2538 struct fc_frame *fp;
2539 struct qedf_io_work *io_work;
2540 u32 bdq_idx;
2541 void *bdq_addr;
2542 struct scsi_bd *p_bd_info;
2544 p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2545 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2546 "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2547 le32_to_cpu(p_bd_info->address.hi),
2548 le32_to_cpu(p_bd_info->address.lo),
2549 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2550 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2551 qedf->bdq_prod_idx, pktlen);
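/* The low opaque dword echoes which BDQ buffer the firmware placed this frame in */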
2553 bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2554 if (bdq_idx >= QEDF_BDQ_SIZE) {
2555 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2556 bdq_idx);
2557 goto increment_prod;
2558 }
2560 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2561 if (!bdq_addr) {
2562 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2563 "unsolicited packet.\n");
2564 goto increment_prod;
2565 }
2567 if (qedf_dump_frames) {
2568 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2569 "BDQ frame is at addr=%p.\n", bdq_addr);
2570 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2571 (void *)bdq_addr, pktlen, false);
2572 }
2574 /* Allocate frame */
2575 payload_len = pktlen - sizeof(struct fc_frame_header);
2576 fp = fc_frame_alloc(qedf->lport, payload_len);
2577 if (!fp) {
2578 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2579 goto increment_prod;
2580 }
2582 /* Copy data from BDQ buffer into fc_frame struct */
2583 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2584 memcpy(fh, (void *)bdq_addr, pktlen);
2586 QEDF_WARN(&qedf->dbg_ctx,
2587 "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
2588 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2589 fh->fh_type, fc_frame_payload_op(fp));
2591 /* Initialize the frame so libfc sees it as a valid frame */
2592 crc = fcoe_fc_crc(fp);
2593 fc_frame_init(fp);
2594 fr_dev(fp) = qedf->lport;
2595 fr_sof(fp) = FC_SOF_I3;
2596 fr_eof(fp) = FC_EOF_T;
2597 fr_crc(fp) = cpu_to_le32(~crc);
2599 /*
2600 * We need to return the frame back up to libfc in a non-atomic
2601 * context
2602 */
2603 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2604 if (!io_work) {
2605 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2606 "work for I/O completion.\n");
2607 fc_frame_free(fp);
2608 goto increment_prod;
2609 }
2610 memset(io_work, 0, sizeof(struct qedf_io_work));
2612 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2614 /* Copy contents of CQE for deferred processing */
2615 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2617 io_work->qedf = qedf;
2618 io_work->fp = fp;
2620 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
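/* Error paths above jump here so the BDQ buffer is always handed back to firmware */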
2621 increment_prod:
2622 spin_lock_irqsave(&qedf->hba_lock, flags);
2624 /* Increment producer to let f/w know we've handled the frame */
2625 qedf->bdq_prod_idx++;
2627 /* Producer index wraps at uint16_t boundary */
2628 if (qedf->bdq_prod_idx == 0xffff)
2629 qedf->bdq_prod_idx = 0;
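/*
 * Write the new producer index to the primary and secondary BDQ doorbells;
 * the read-back after each write flushes the posted write before the
 * hba_lock is released.
 */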
2631 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2632 tmp = readw(qedf->bdq_primary_prod);
2633 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2634 tmp = readw(qedf->bdq_secondary_prod);
2636 spin_unlock_irqrestore(&qedf->hba_lock, flags);
2637 }