/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
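/*
 * Defined near the bottom of this file; forward-declared here because
 * qla_nvme_alloc_queue() and qla_nvme_register_hba() reference its
 * fields (e.g. max_hw_queues) before the definition.
 */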
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}
/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}
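/*
 * Note that LS completion is deferred to a workqueue: sp->done only
 * records the status and schedules qla_nvme_ls_complete(), so the
 * final kref_put() and the transport's done() callback run in process
 * context rather than in the IOCB completion path.
 */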
/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}
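/*
 * Abort pattern shared by qla_nvme_ls_abort() and qla_nvme_fcp_abort()
 * below: under cmd_lock, bail out if the command already completed
 * (priv->sp cleared), otherwise take an extra reference so the srb
 * cannot be freed while qla_nvme_abort_work() is pending; the worker
 * drops that reference through sp->put_fn when it is done.
 */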
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
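/*
 * qla2x00_start_nvme_mq() turns an nvmefc_fcp_req into a COMMAND_NVME
 * IOCB on the qpair's request ring: reserve a handle and ring space,
 * build the command packet (direction flags, N_Port fields, command
 * and response IU addresses), chain the payload scatterlist through
 * the inline DSD plus Continuation Type 1 IOCBs (five DSDs each), then
 * advance the ring index and write the doorbell register.
 */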
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}

	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* Set transfer direction; fd->io_dir == 0 means no data transfer */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if (vha->flags.nvme_first_burst &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
				    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* N_Port fields */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME response IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME command IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}
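/*
 * Both queue-full cases above return -EBUSY, which qla_nvme_post_cmd()
 * passes back to the FC-NVMe transport; the transport is expected to
 * treat this as a transient resource shortage and retry the request.
 */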
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return rval;
	}

	fcport = qla_rport->fcport;

	if (!qpair || !fcport || !qpair->fw_started || fcport->deleted)
		return rval;

	vha = fcport->vha;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return rval;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO Q. This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned. If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}
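/*
 * Both delete callbacks above complete() a completion that the
 * corresponding unregister path (qla_nvme_unregister_remote_port() or
 * qla_nvme_delete() below) waits on, making local and remote port
 * teardown synchronous from the driver's point of view.
 */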
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req         = qla_nvme_ls_req,
	.ls_abort       = qla_nvme_ls_abort,
	.fcp_io         = qla_nvme_post_cmd,
	.fcp_abort      = qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};
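/*
 * max_hw_queues and dma_boundary above are defaults only:
 * qla_nvme_register_hba() clamps max_hw_queues to the number of
 * request queues the HBA actually provides and copies the SCSI host's
 * dma_boundary before registering the local port.
 */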
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);
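	/*
	 * When the driver itself is being removed, force an immediate
	 * loss of the remote port: a zero dev_loss_tmo asks the transport
	 * not to wait out the usual devloss grace period.
	 */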
	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}
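/*
 * Called lazily from qla_nvme_register_remote() when the first
 * NVMe-capable remote port is discovered and no local port has been
 * registered yet.
 */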
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	if (ha->max_req_queues < 3) {
		if (!ha->flags.max_req_queue_warned)
			ql_log(ql_log_info, vha, 0x2120,
			    "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
			    __func__, ha->max_req_queues);
		ha->flags.max_req_queue_warned = 1;
		return ret;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	/* Zero-fill so fields we do not set (e.g. dev_loss_tmo) are not garbage. */
	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}