// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
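/*
 * Register an FC-NVMe remote port with the FC-NVMe transport once PRLI
 * reports NVMe target or discovery support on the fabric port.
 */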
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);

	qla2x00_rel_sp(sp);
}
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}
/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}
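/*
 * Worker that issues an ABTS for an outstanding NVMe srb. The caller takes
 * an extra kref before scheduling this work; it is dropped here unless the
 * driver is configured to hold the command until the ABTS completes.
 */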
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

	/*
	 * Return before decreasing the kref so that outstanding I/O requests
	 * are held until the ABTS completes. That kref is dropped in
	 * qla24xx_abort_sp_done().
	 */
	if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
		return;

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
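/*
 * .ls_req entry point: DMA-map the LS payload, wrap it in an SRB_NVME_LS
 * srb and hand it to the firmware via qla2x00_start_sp().
 */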
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
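/*
 * Build a Command Type NVME IOCB for the FCP request on the qpair's request
 * queue, adding Continuation Type 1 IOCBs when the scatter/gather list needs
 * more than the single DSD carried in the command IOCB.
 */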
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}

	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}
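/*
 * .fcp_io entry point: validate rport/qpair state, wrap the request in an
 * SRB_NVME_CMD srb and queue it through qla2x00_start_nvme_mq().
 */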
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO queue. This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned. If the dev comes back quickly we won't exhaust
	 * the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}
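/* FC-NVMe transport template: entry points and limits advertised by this driver. */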
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}
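/*
 * Register the HBA as an FC-NVMe local port. At least three request queues
 * are required so that hardware queues can be reserved for NVMe traffic.
 */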
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	if (ha->max_req_queues < 3) {
		if (!ha->flags.max_req_queue_warned)
			ql_log(ql_log_info, vha, 0x2120,
			    "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
			    __func__, ha->max_req_queues);
		ha->flags.max_req_queue_warned = 1;
		return ret;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}
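/*
 * Tune the Abort IOCB for NVMe when the driver waits for the ABTS to
 * complete: request a driver-specified retry count and response timeout.
 */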
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}
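/*
 * Log the completion status returned by the firmware for an NVMe Abort IOCB
 * once ABTS processing has finished.
 */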
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	uint16_t comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	/* N_Port handle is not currently logged in */
	case CS_TIMEOUT:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_UNAVAILABLE:
	/* Firmware found that the port name changed */
	case CS_PORT_LOGGED_OUT:
	/* BA_RJT was received for the ABTS */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
		    "Abort I/O IOCB completed with error, comp_status=%x\n",
		    comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		    "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
		    abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		    "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		    abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
		    "IOCB request is completed successfully comp_status=%x\n",
		    comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
		    "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
		    "Invalid Abort IO IOCB Completion Status %x\n",
		    comp_status);
		break;
	}
}
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}