// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;
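
/*
 * Register a remote FC port with the FC-NVMe transport. Called after a
 * PRLI completes; the port is registered only if it advertises the NVMe
 * target or discovery service and has not been registered already.
 */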
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = fcport->dev_loss_tmo;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name, req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
				       fcport->dev_loss_tmo);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;

	return 0;
}
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			    "Returning existing qpair of %p for idx=%x\n",
			    *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			    "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}
/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);

	return;
}
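
/*
 * Deferred abort handler: issue an ABTS for the outstanding command via
 * the firmware. When ABTS-wait is enabled, the final kref_put is left to
 * the abort completion path so the command is held until the ABTS
 * completes.
 */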
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;
	bool io_wait_for_abort_done;
	uint32_t handle;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc,
	    fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	/*
	 * sp may not be valid after abort_command if the return code is
	 * either SUCCESS or ERR_FROM_FW, so cache the values here.
	 */
	io_wait_for_abort_done = ql2xabts_wait_nvme &&
					QLA_ABTS_WAIT_ENABLED(sp);
	handle = sp->handle;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, handle, fcport, rval);

	/*
	 * If async tmf is enabled, the abort callback is called only on
	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
	 */
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;

	/*
	 * Return before decreasing the kref so that I/O requests are
	 * held until the ABTS completes. The kref is decreased in
	 * qla24xx_abort_sp_done().
	 */
	if (abts_done_called && io_wait_for_abort_done)
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
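
/*
 * Issue an NVMe Link Service request (e.g. Create Association) to a
 * remote port.
 */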
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}
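
/*
 * Transport entry point for aborting an outstanding FCP command. The
 * abort itself is deferred to qla_nvme_abort_work(); cmd_lock and the
 * kref guard against racing with command completion.
 */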
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
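
/*
 * Build a Command Type NVME IOCB for the request described by fd and
 * post it on the qpair's request ring, adding Continuation Type 1 IOCBs
 * when the scatter list needs more DSDs than fit in the command IOCB.
 */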
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
				(sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CNMD IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* ignore nvme async cmd due to long timeout */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}
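
/* Transport entry point for issuing an NVMe FCP command. */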
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO queue. This
	 * happens when the link goes away and fw hasn't notified us yet,
	 * but IO's are being returned. If the dev comes back quickly we
	 * won't exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}
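
/* Map blk-mq hardware queues onto the PCI function's interrupt vectors. */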
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
    struct blk_mq_queue_map *map)
{
	struct scsi_qla_host *vha = lport->private;
	int rc;

	rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
	if (rc)
		ql_log(ql_log_warn, vha, 0x21de,
		       "pci map queue failed 0x%x", rc);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}
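
/* FC-NVMe transport template: capabilities and entry points of this HBA. */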
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req         = qla_nvme_ls_req,
	.ls_abort       = qla_nvme_ls_abort,
	.fcp_io         = qla_nvme_post_cmd,
	.fcp_abort      = qla_nvme_fcp_abort,
	.map_queues     = qla_nvme_map_queues,
	.max_hw_queues  = DEF_NVME_HW_QUEUES,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};
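
/*
 * Unregister a remote port from the FC-NVMe transport and wait for the
 * transport's remoteport_delete callback to complete.
 */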
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}
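
/*
 * When ABTS-wait is enabled, ask the firmware to use the driver-specified
 * ABTS retry count and response timeout in the Abort IOCB.
 */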
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}
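
/* Log the completion status of an Abort IOCB issued for an NVMe command. */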
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	uint16_t comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	/* Firmware timed out waiting for the ABTS to complete */
	case CS_TIMEOUT:
	/* N_Port handle is not currently logged in */
	case CS_PORT_UNAVAILABLE:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_LOGGED_OUT:
	/* Firmware found that the port name changed */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		       "IOCB request is completed successfully comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		       "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}