Merge branch '5.12/scsi-fixes' into 5.13/scsi-staging
author		Martin K. Petersen <martin.petersen@oracle.com>
		Tue, 13 Apr 2021 01:41:54 +0000 (21:41 -0400)
committer	Martin K. Petersen <martin.petersen@oracle.com>
		Tue, 13 Apr 2021 01:41:54 +0000 (21:41 -0400)
Resolve a couple of conflicts between the 5.12 fixes branch and the
5.13 staging tree (iSCSI target and UFS).

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
221 files changed:
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
block/blk-mq-sched.c
block/blk-mq.c
block/blk-mq.h
block/kyber-iosched.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/message/fusion/lsi/mpi.h
drivers/message/fusion/lsi/mpi_ioc.h
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptdebug.h
drivers/message/fusion/mptlan.c
drivers/message/fusion/mptsas.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-sas.c
drivers/scsi/3w-xxxx.c
drivers/scsi/53c700.c
drivers/scsi/BusLogic.c
drivers/scsi/FlashPoint.c
drivers/scsi/a100u2w.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/aacraid/commctrl.c
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/rx.c
drivers/scsi/advansys.c
drivers/scsi/aic94xx/aic94xx.h
drivers/scsi/aic94xx/aic94xx_dump.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_sds.c
drivers/scsi/atp870u.c
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcs.h
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_sysfs.c
drivers/scsi/csiostor/csio_hw_t5.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxlflash/main.c
drivers/scsi/cxlflash/superpipe.c
drivers/scsi/cxlflash/vlun.c
drivers/scsi/dc395x.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/esas2r/esas2r_log.c
drivers/scsi/esp_scsi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/fnic_trace.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/initio.c
drivers/scsi/ipr.c
drivers/scsi/isci/host.c
drivers/scsi/isci/phy.c
drivers/scsi/isci/port.c
drivers/scsi/isci/port_config.c
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/remote_node_context.c
drivers/scsi/isci/remote_node_table.c
drivers/scsi/isci/request.c
drivers/scsi/isci/task.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_disc.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mac53c94.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/scsi/megaraid/megaraid_mm.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/mvumi.c
drivers/scsi/myrb.c
drivers/scsi/myrs.c
drivers/scsi/nsp32.c
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/pmcraid.c
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_dbg.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_mr.h
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/sg.c
drivers/scsi/sim710.c
drivers/scsi/smartpqi/smartpqi.h
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/smartpqi/smartpqi_sas_transport.c
drivers/scsi/smartpqi/smartpqi_sis.c
drivers/scsi/smartpqi/smartpqi_sis.h
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/cdns-pltfrm.c
drivers/scsi/ufs/ufs-debugfs.c
drivers/scsi/ufs/ufs-debugfs.h
drivers/scsi/ufs/ufs-exynos.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-sysfs.c
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.c
drivers/target/loopback/tcm_loop.h
drivers/target/sbp/sbp_target.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_iblock.h
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_rd.h
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_stat.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/usb/gadget/function/f_tcm.c
drivers/vhost/scsi.c
drivers/xen/xen-scsiback.c
include/linux/blk-mq.h
include/linux/hyperv.h
include/linux/sbitmap.h
include/scsi/libfcoe.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/scsi/scsi_dh.h
include/scsi/scsi_host.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/trace/events/ufs.h
lib/sbitmap.c

index 415ccdd..d8fd4df 100644
@@ -14,6 +14,8 @@ Required properties:
                            "qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
                            "qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
                            "qcom,sm8150-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+                           "qcom,sm8250-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+                           "qcom,sm8350-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
 - interrupts        : <interrupt mapping for UFS host controller IRQ>
 - reg               : <registers mapping>
 
index ddb65e9..712b7f0 100644
@@ -131,6 +131,7 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 
        do {
                struct request *rq;
+               int budget_token;
 
                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;
@@ -140,12 +141,13 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
                        break;
                }
 
-               if (!blk_mq_get_dispatch_budget(q))
+               budget_token = blk_mq_get_dispatch_budget(q);
+               if (budget_token < 0)
                        break;
 
                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
-                       blk_mq_put_dispatch_budget(q);
+                       blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
@@ -157,6 +159,8 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
                        break;
                }
 
+               blk_mq_set_rq_budget_token(rq, budget_token);
+
                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
@@ -230,6 +234,8 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
        struct request *rq;
 
        do {
+               int budget_token;
+
                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
@@ -238,12 +244,13 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;
 
-               if (!blk_mq_get_dispatch_budget(q))
+               budget_token = blk_mq_get_dispatch_budget(q);
+               if (budget_token < 0)
                        break;
 
                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
-                       blk_mq_put_dispatch_budget(q);
+                       blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
@@ -255,6 +262,8 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
                        break;
                }
 
+               blk_mq_set_rq_budget_token(rq, budget_token);
+
                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
index d4d7c1c..2e825a7 100644
@@ -1277,10 +1277,15 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
                                                  bool need_budget)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+       int budget_token = -1;
 
-       if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) {
-               blk_mq_put_driver_tag(rq);
-               return PREP_DISPATCH_NO_BUDGET;
+       if (need_budget) {
+               budget_token = blk_mq_get_dispatch_budget(rq->q);
+               if (budget_token < 0) {
+                       blk_mq_put_driver_tag(rq);
+                       return PREP_DISPATCH_NO_BUDGET;
+               }
+               blk_mq_set_rq_budget_token(rq, budget_token);
        }
 
        if (!blk_mq_get_driver_tag(rq)) {
@@ -1297,7 +1302,7 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
                         * together during handling partial dispatch
                         */
                        if (need_budget)
-                               blk_mq_put_dispatch_budget(rq->q);
+                               blk_mq_put_dispatch_budget(rq->q, budget_token);
                        return PREP_DISPATCH_NO_TAG;
                }
        }
@@ -1307,12 +1312,16 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
 
 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
 static void blk_mq_release_budgets(struct request_queue *q,
-               unsigned int nr_budgets)
+               struct list_head *list)
 {
-       int i;
+       struct request *rq;
 
-       for (i = 0; i < nr_budgets; i++)
-               blk_mq_put_dispatch_budget(q);
+       list_for_each_entry(rq, list, queuelist) {
+               int budget_token = blk_mq_get_rq_budget_token(rq);
+
+               if (budget_token >= 0)
+                       blk_mq_put_dispatch_budget(q, budget_token);
+       }
 }
 
 /*
@@ -1410,7 +1419,8 @@ out:
                        (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
                bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
 
-               blk_mq_release_budgets(q, nr_budgets);
+               if (nr_budgets)
+                       blk_mq_release_budgets(q, list);
 
                spin_lock(&hctx->lock);
                list_splice_tail_init(list, &hctx->dispatch);
@@ -2009,6 +2019,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
        struct request_queue *q = rq->q;
        bool run_queue = true;
+       int budget_token;
 
        /*
         * RCU or SRCU read lock is needed before checking quiesced flag.
@@ -2026,11 +2037,14 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
        if (q->elevator && !bypass_insert)
                goto insert;
 
-       if (!blk_mq_get_dispatch_budget(q))
+       budget_token = blk_mq_get_dispatch_budget(q);
+       if (budget_token < 0)
                goto insert;
 
+       blk_mq_set_rq_budget_token(rq, budget_token);
+
        if (!blk_mq_get_driver_tag(rq)) {
-               blk_mq_put_dispatch_budget(q);
+               blk_mq_put_dispatch_budget(q, budget_token);
                goto insert;
        }
 
@@ -2702,7 +2716,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
                goto free_cpumask;
 
        if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
-                               gfp, node))
+                               gfp, node, false, false))
                goto free_ctxs;
        hctx->nr_ctx = 0;
 
index 3616453..9ce64bc 100644
@@ -187,17 +187,34 @@ unsigned int blk_mq_in_flight(struct request_queue *q,
 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);
 
-static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
+static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
+                                             int budget_token)
 {
        if (q->mq_ops->put_budget)
-               q->mq_ops->put_budget(q);
+               q->mq_ops->put_budget(q, budget_token);
 }
 
-static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
+static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
 {
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
-       return true;
+       return 0;
+}
+
+static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
+{
+       if (token < 0)
+               return;
+
+       if (rq->q->mq_ops->set_rq_budget_token)
+               rq->q->mq_ops->set_rq_budget_token(rq, token);
+}
+
+static inline int blk_mq_get_rq_budget_token(struct request *rq)
+{
+       if (rq->q->mq_ops->get_rq_budget_token)
+               return rq->q->mq_ops->get_rq_budget_token(rq);
+       return -1;
 }
 
 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
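[ Note: the blk-mq.h hunk above is the heart of this series: the dispatch
  budget contract changes from a yes/no answer (bool) to an integer token,
  so a driver -- SCSI, in this series, judging by the scsi_lib.c and
  sbitmap entries in the file list -- can hand out distinguishable budget
  slots and later take back the exact slot a request held. Below is a
  minimal user-space analogue of that contract; budget_pool, budget_get
  and budget_put are hypothetical names, not the kernel API.

	#include <stdio.h>

	#define POOL_SIZE 4

	/* Illustrative analogue only -- not the kernel implementation. */
	struct budget_pool {
		unsigned char in_use[POOL_SIZE];
	};

	/* Return a token >= 0 on success, -1 when no budget is available
	 * (mirrors blk_mq_get_dispatch_budget() returning a token or < 0). */
	static int budget_get(struct budget_pool *p)
	{
		for (int i = 0; i < POOL_SIZE; i++) {
			if (!p->in_use[i]) {
				p->in_use[i] = 1;
				return i;
			}
		}
		return -1;
	}

	/* Release exactly the slot named by the token (mirrors
	 * blk_mq_put_dispatch_budget(q, budget_token)). */
	static void budget_put(struct budget_pool *p, int token)
	{
		if (token >= 0 && token < POOL_SIZE)
			p->in_use[token] = 0;
	}

	int main(void)
	{
		struct budget_pool pool = {0};
		int t1 = budget_get(&pool);
		int t2 = budget_get(&pool);

		printf("tokens: %d %d\n", t1, t2);
		budget_put(&pool, t1);	/* release by token, not by count */
		printf("reacquired: %d\n", budget_get(&pool));
		return 0;
	}

  This is also why the blk-mq.c hunk rewrites blk_mq_release_budgets():
  it can no longer return nr_budgets anonymous units, it has to walk the
  request list and return each request's own stashed token via
  blk_mq_get_rq_budget_token(). ]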
index 33d34d6..8969e12 100644
@@ -478,7 +478,8 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
-                                     ilog2(8), GFP_KERNEL, hctx->numa_node)) {
+                                     ilog2(8), GFP_KERNEL, hctx->numa_node,
+                                     false, false)) {
                        while (--i >= 0)
                                sbitmap_free(&khd->kcq_map[i]);
                        goto err_kcqs;
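[ Note: both sbitmap_init_node() call sites in this merge gain two
  trailing booleans. In the 5.13 sbitmap API these are round_robin and
  alloc_hint (an assumption here, since the sbitmap.h hunk itself is not
  shown in this excerpt -- only listed among the changed files); ctx_map
  and kcq_map want neither, hence false, false. A runnable sketch of what
  a round-robin allocation hint buys, using a plain word as the bitmap --
  illustrative, not the sbitmap implementation:

	#include <stdio.h>

	#define BITS 8

	/* Find a free bit, starting the scan at *hint and advancing the
	 * hint past each grant (round_robin == true behavior); with
	 * round_robin false the scan would always restart at bit 0. */
	static int alloc_bit(unsigned int *map, unsigned int *hint)
	{
		for (int i = 0; i < BITS; i++) {
			unsigned int bit = (*hint + i) % BITS;

			if (!(*map & (1u << bit))) {
				*map |= 1u << bit;
				*hint = (bit + 1) % BITS;
				return bit;
			}
		}
		return -1;
	}

	int main(void)
	{
		unsigned int map = 0, hint = 0;

		for (int i = 0; i < 4; i++)
			printf("got bit %d\n", alloc_bit(&map, &hint));
		return 0;
	}
]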
index 6be60aa..51c386a 100644
@@ -1528,16 +1528,20 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
                goto busy;
        }
 
-       rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
-                              &send_ioctx->sense_data[0],
-                              scsilun_to_int(&srp_cmd->lun), data_len,
-                              TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
-                              sg, sg_cnt, NULL, 0, NULL, 0);
+       rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
+                            scsilun_to_int(&srp_cmd->lun), data_len,
+                            TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
        if (rc != 0) {
                pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
                         srp_cmd->tag);
                goto busy;
        }
+
+       if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
+                              GFP_KERNEL))
+               return;
+
+       target_submit(cmd);
        return;
 
 busy:
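[ Note: the srpt hunk splits the all-in-one target_submit_cmd_map_sgls()
  into three stages: target_init_cmd() (take the command/session
  reference; failure still goes to the driver's busy path),
  target_submit_prep() (CDB parsing and SGL mapping), and
  target_submit(). Evidently the prep stage completes the command itself
  when it fails -- that is why its failure path is a bare "return" rather
  than "goto busy". A toy C analogue of that ownership rule, with
  hypothetical names (cmd_init/cmd_prep/cmd_submit):

	#include <stdio.h>

	struct cmd { int failed; };	/* toy command, not a se_cmd */

	static int cmd_init(struct cmd *c)	/* like target_init_cmd() */
	{
		c->failed = 0;
		return 0;		/* nonzero: caller must clean up */
	}

	/* Like target_submit_prep(): on failure the command is completed
	 * with an error *here*, so ownership has passed and the caller
	 * must not touch it again. */
	static int cmd_prep(struct cmd *c, int bad_cdb)
	{
		if (bad_cdb) {
			c->failed = 1;
			printf("prep: failing command internally\n");
			return -1;
		}
		return 0;
	}

	static void cmd_submit(struct cmd *c)	/* like target_submit() */
	{
		printf("submitted (failed=%d)\n", c->failed);
	}

	int main(void)
	{
		struct cmd c;

		if (cmd_init(&c))
			return 1;	/* srpt: goto busy */
		if (cmd_prep(&c, 0))
			return 0;	/* srpt: bare return, no cleanup */
		cmd_submit(&c);
		return 0;
	}
]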
index a575545..eccbe54 100644
@@ -424,8 +424,8 @@ typedef struct _SGE_TRANSACTION32
     U8                      ContextSize;
     U8                      DetailsLength;
     U8                      Flags;
-    U32                     TransactionContext[1];
-    U32                     TransactionDetails[1];
+    U32                     TransactionContext;
+    U32                     TransactionDetails[];
 } SGE_TRANSACTION32, MPI_POINTER PTR_SGE_TRANSACTION32,
   SGETransaction32_t, MPI_POINTER pSGETransaction32_t;
 
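[ Note: TransactionContext shrinks from a one-element array to a scalar,
  and TransactionDetails becomes a C99 flexible array member. Unlike the
  old [1] idiom, a flexible array contributes nothing to sizeof -- which
  is exactly why the mptlan.c hunks further down can drop the
  MPT_LAN_TRANSACTION32_SIZE "- sizeof(u32)" correction and index the
  context without [0]. A small standalone demonstration:

	#include <stdio.h>
	#include <stdlib.h>

	struct old_style {
		unsigned int context;
		unsigned int details[1];  /* pre-C99 trick: sizeof counts it */
	};

	struct new_style {
		unsigned int context;
		unsigned int details[];   /* flexible: sizeof excludes it */
	};

	int main(void)
	{
		printf("old: %zu, new: %zu\n",
		       sizeof(struct old_style), sizeof(struct new_style));

		/* Allocation no longer needs a "- sizeof(u32)" correction: */
		struct new_style *p = malloc(sizeof(*p) + 3 * sizeof(unsigned int));
		if (!p)
			return 1;
		p->context = 42;	/* scalar now, not p->context[0] */
		p->details[2] = 7;
		free(p);
		return 0;
	}
]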
index c249f29..1534460 100644
@@ -448,7 +448,7 @@ typedef struct _MSG_EVENT_NOTIFY_REPLY
      U32                    IOCLogInfo;                 /* 10h */
      U32                    Event;                      /* 14h */
      U32                    EventContext;               /* 18h */
-     U32                    Data[1];                    /* 1Ch */
+     U32                    Data[];                    /* 1Ch */
 } MSG_EVENT_NOTIFY_REPLY, MPI_POINTER PTR_MSG_EVENT_NOTIFY_REPLY,
   EventNotificationReply_t, MPI_POINTER pEventNotificationReply_t;
 
index 549797d..fa9b122 100644
@@ -4974,7 +4974,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
 
        if (hdr.PageLength > 0) {
                data_sz = hdr.PageLength * 4;
-               ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+               ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
                rc = -ENOMEM;
                if (ppage0_alloc) {
                        memset((u8 *)ppage0_alloc, 0, data_sz);
@@ -5020,7 +5020,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
 
        data_sz = hdr.PageLength * 4;
        rc = -ENOMEM;
-       ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+       ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
        if (ppage1_alloc) {
                memset((u8 *)ppage1_alloc, 0, data_sz);
                cfg.physAddr = page1_dma;
@@ -5321,7 +5321,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
        /* Read the config page */
        data_sz = hdr.PageLength * 4;
        rc = -ENOMEM;
-       ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+       ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
        if (ppage_alloc) {
                memset((u8 *)ppage_alloc, 0, data_sz);
                cfg.physAddr = page_dma;
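[ Note: the three mptbase.c hunks drop casts on pci_alloc_consistent(),
  which returns void *. In C (unlike C++) a void * converts implicitly to
  any object-pointer type, so the cast is pure noise -- and historically
  it could even mask a missing prototype by silently casting an implicit
  int return. The same reasoning applies to malloc():

	#include <stdlib.h>

	int main(void)
	{
		/* Idiomatic C: no cast, and sizeof(*p) tracks p's type. */
		int *p = malloc(16 * sizeof(*p));

		if (!p)
			return 1;
		free(p);
		return 0;
	}
]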
index 813d463..b9e0376 100644
@@ -274,7 +274,7 @@ typedef union _MPT_FRAME_TRACKER {
        } linkage;
        /*
         * NOTE: When request frames are free, on the linkage structure
-        * contets are valid.  All other values are invalid.
+        * contents are valid.  All other values are invalid.
         * In particular, do NOT reply on offset [2]
         * (in words) being the * message context.
         * The message context must be reset (computed via base address
index 2205dca..c281b13 100644
 
 #ifdef CONFIG_FUSION_LOGGING
 #define MPT_CHECK_LOGGING(IOC, CMD, BITS)                      \
-{                                                              \
+do {                                                           \
        if (IOC->debug_level & BITS)                            \
                CMD;                                            \
-}
+} while (0)
 #else
-#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)                      \
+do { } while (0)
 #endif
 
 
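[ Note: the mptdebug.h hunk wraps MPT_CHECK_LOGGING in do { ... } while (0)
  and gives the disabled variant the same shape. A braces-only macro breaks
  when used as the single statement before an else, and a macro that
  expands to nothing breaks "MPT_CHECK_LOGGING(...);" in the same spot;
  do-while(0) makes the macro behave as exactly one statement either way.
  A compilable illustration -- LOG is a hypothetical macro, not the MPT one:

	#include <stdio.h>

	#define DEBUG 1

	#if DEBUG
	#define LOG(msg) do { printf("%s\n", msg); } while (0)
	#else
	#define LOG(msg) do { } while (0)
	#endif

	int main(void)
	{
		int fast = 0;

		/* With a braces-only macro, the ';' after the expanded
		 * block would terminate the if, orphaning this 'else'
		 * ("else without a previous if" compile error). */
		if (fast)
			LOG("fast path");
		else
			LOG("slow path");
		return 0;
	}
]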
index 7d3784a..3261cac 100644
@@ -72,9 +72,6 @@ MODULE_VERSION(my_VERSION);
 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
        (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
 
-#define MPT_LAN_TRANSACTION32_SIZE \
-       (sizeof(SGETransaction32_t) - sizeof(u32))
-
 /*
  *  Fusion MPT LAN private structures
  */
@@ -745,7 +742,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
        pTrans->ContextSize   = sizeof(u32);
        pTrans->DetailsLength = 2 * sizeof(u32);
        pTrans->Flags         = 0;
-       pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+       pTrans->TransactionContext = cpu_to_le32(ctx);
 
 //     dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
 //                     IOC_AND_NETDEV_NAMES_s_s(dev),
@@ -1159,7 +1156,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
                        __func__, buckets, curr));
 
        max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
-                       (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+                       (sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
 
        while (buckets) {
                mf = mpt_get_msg_frame(LanCtx, mpt_dev);
@@ -1234,7 +1231,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
                        pTrans->ContextSize   = sizeof(u32);
                        pTrans->DetailsLength = 0;
                        pTrans->Flags         = 0;
-                       pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+                       pTrans->TransactionContext = cpu_to_le32(ctx);
 
                        pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
 
index 5eb0b33..1857869 100644
@@ -3442,14 +3442,12 @@ mptsas_expander_event_add(MPT_ADAPTER *ioc,
        __le64 sas_address;
 
        port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
-       if (!port_info)
-               BUG();
+       BUG_ON(!port_info);
        port_info->num_phys = (expander_data->NumPhys) ?
            expander_data->NumPhys : 1;
        port_info->phy_info = kcalloc(port_info->num_phys,
            sizeof(struct mptsas_phyinfo), GFP_KERNEL);
-       if (!port_info->phy_info)
-               BUG();
+       BUG_ON(!port_info->phy_info);
        memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
        for (i = 0; i < port_info->num_phys; i++) {
                port_info->phy_info[i].portinfo = port_info;
@@ -3781,7 +3779,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
                                                printk(MYIOC_s_DEBUG_FMT
                                                "SDEV OUTSTANDING CMDS"
                                                "%d\n", ioc->name,
-                                               atomic_read(&sdev->device_busy)));
+                                               scsi_device_busy(sdev)));
                                }
 
                        }
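[ Note: besides folding the open-coded "if (!ptr) BUG();" into the
  canonical BUG_ON(!ptr), the last mptsas hunk swaps a raw
  atomic_read(&sdev->device_busy) for the scsi_device_busy() accessor --
  presumably so the busy count's representation can change behind the
  helper (scsi_device.h and the sbitmap files are in this merge's file
  list). The design point, in a trivial runnable sketch with hypothetical
  names (device/device_busy):

	#include <stdio.h>

	/* Toy device: the busy state is private; readers go through the
	 * accessor, so the representation can change (here: a counter
	 * replaced by a bit mask) without touching any caller. */
	struct device {
		unsigned int busy_bits;		/* was: a plain counter */
	};

	static int device_busy(const struct device *d)
	{
		return __builtin_popcount(d->busy_bits);	/* gcc/clang */
	}

	int main(void)
	{
		struct device d = { .busy_bits = 0x0b };

		printf("busy: %d\n", device_busy(&d));
		return 0;
	}
]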
index b96e82d..47028f5 100644
@@ -939,13 +939,13 @@ out:
 /* This function will empty the response queue */
 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
 {
-       u32 status_reg_value, response_que_value;
+       u32 status_reg_value;
        int count = 0, retval = 1;
 
        status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 
        while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
-               response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+               readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
                status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
                count++;
        }
@@ -1698,9 +1698,6 @@ out:
 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
 {
        int heads, sectors, cylinders;
-       TW_Device_Extension *tw_dev;
-
-       tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
 
        if (capacity >= 0x200000) {
                heads = 255;
@@ -1809,14 +1806,11 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
        u32 num_sectors = 0x0;
        int i, sg_count;
        struct scsi_cmnd *srb = NULL;
-       struct scatterlist *sglist = NULL, *sg;
+       struct scatterlist *sg;
        int retval = 1;
 
-       if (tw_dev->srb[request_id]) {
+       if (tw_dev->srb[request_id])
                srb = tw_dev->srb[request_id];
-               if (scsi_sglist(srb))
-                       sglist = scsi_sglist(srb);
-       }
 
        /* Initialize command packet */
        full_command_packet = tw_dev->command_packet_virt[request_id];
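[ Note: in twa_empty_response_queue() the response_que_value variable goes
  away but the readl() stays: reading the response-queue register is what
  pops an entry off the hardware FIFO, so the load is performed purely for
  its side effect and the value is discarded. The 3w-xxxx inl() loop below
  gets the same treatment. In plain C the analogue is a volatile read,
  which the compiler must not delete even though the result is unused:

	#include <stdio.h>

	/* Stand-in for a device register; a real driver would use
	 * readl()/inl() on an MMIO or port address. */
	static volatile unsigned int fake_fifo;

	int main(void)
	{
		/* Each read "drains" one entry; volatile keeps the unused
		 * loads alive, which is why the drivers can drop the result
		 * variable but must keep the readl()/inl() calls. */
		for (int i = 0; i < 3; i++)
			(void)fake_fifo;

		printf("drained 3 entries\n");
		return 0;
	}
]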
index 3db0e42..4fde39d 100644
@@ -295,14 +295,11 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
        TW_Command_Apache *command_packet;
        int i, sg_count;
        struct scsi_cmnd *srb = NULL;
-       struct scatterlist *sglist = NULL, *sg;
+       struct scatterlist *sg;
        int retval = 1;
 
-       if (tw_dev->srb[request_id]) {
+       if (tw_dev->srb[request_id])
                srb = tw_dev->srb[request_id];
-               if (scsi_sglist(srb))
-                       sglist = scsi_sglist(srb);
-       }
 
        /* Initialize command packet */
        full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -863,7 +860,6 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
        TW_Command_Full *full_command_packet;
        unsigned short error;
        char *error_str;
-       int retval = 1;
 
        header = tw_dev->sense_buffer_virt[i];
        full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -895,7 +891,7 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
                goto out;
        }
 out:
-       return retval;
+       return 1;
 } /* End twl_fill_sense() */
 
 /* This function will free up device extension resources */
@@ -1408,9 +1404,6 @@ out:
 static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
 {
        int heads, sectors;
-       TW_Device_Extension *tw_dev;
-
-       tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
 
        if (capacity >= 0x200000) {
                heads = 255;
index d90b9fc..a729288 100644
@@ -460,12 +460,12 @@ static int tw_check_errors(TW_Device_Extension *tw_dev)
 /* This function will empty the response que */
 static void tw_empty_response_que(TW_Device_Extension *tw_dev)
 {
-       u32 status_reg_value, response_que_value;
+       u32 status_reg_value;
 
        status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
 
        while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
-               response_que_value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+               inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
                status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
        }
 } /* End tw_empty_response_que() */
@@ -1342,10 +1342,8 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
                             sector_t capacity, int geom[])
 {
        int heads, sectors, cylinders;
-       TW_Device_Extension *tw_dev;
 
        dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n");
-       tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
 
        heads = 64;
        sectors = 32;
index 3242ff6..4fd91f8 100644
@@ -980,7 +980,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                        NCR_700_set_tag_neg_state(SCp->device,
                                                  NCR_700_FINISHED_TAG_NEGOTIATION);
                        
-               /* check for contingent allegiance contitions */
+               /* check for contingent allegiance conditions */
                if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
                   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
                        struct NCR_700_command_slot *slot =
index ccb061a..0ac3f71 100644
@@ -3578,7 +3578,7 @@ Target    Requested Completed  Requested Completed  Requested Completed\n\
 /*
   blogic_msg prints Driver Messages.
 */
-
+__printf(2, 4)
 static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
                        struct blogic_adapter *adapter, ...)
 {
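[ Note: blogic_msg() takes its format string as argument 2 but its
  variadic arguments start at position 4, with the adapter pointer in
  between -- hence __printf(2, 4) rather than the usual (n, n+1). With
  the annotation in place the compiler type-checks the format against the
  trailing arguments. A standalone version using the underlying GCC
  attribute; msg() is a hypothetical logger, not the BusLogic one:

	#include <stdarg.h>
	#include <stdio.h>

	struct adapter { int id; };

	/* Format string is parameter 2; the variadic args it describes
	 * begin at parameter 4 (parameter 3, the adapter, is skipped). */
	__attribute__((format(printf, 2, 4)))
	static void msg(int level, const char *fmt, struct adapter *a, ...)
	{
		va_list ap;

		va_start(ap, a);
		printf("[%d/adapter%d] ", level, a->id);
		vprintf(fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		struct adapter a = { .id = 0 };

		msg(1, "queue depth %d\n", &a, 31);
		/* msg(1, "queue depth %d\n", &a, "oops");  <- would warn */
		return 0;
	}
]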
index 24ace18..0464e37 100644
@@ -1615,7 +1615,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
 
        unsigned char thisCard;
        CALL_BK_FN callback;
-       unsigned char TID;
        struct sccb *pSaveSCCB;
        struct sccb_mgr_tar_info *currTar_Info;
 
@@ -1652,9 +1651,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
                        }
 
                        else {
-
-                               TID = p_Sccb->TargID;
-
                                if (p_Sccb->Sccb_tag) {
                                        MDISABLE_INT(ioport);
                                        if (((struct sccb_card *)pCurrCard)->
@@ -4534,7 +4530,7 @@ static void FPT_phaseBusFree(u32 port, unsigned char p_card)
  *
  * Function: Auto Load Default Map
  *
- * Description: Load the Automation RAM with the defualt map values.
+ * Description: Load the Automation RAM with the default map values.
  *
  *---------------------------------------------------------------------*/
 static void FPT_autoLoadDefaultMap(u32 p_port)
index 66c5143..028af6b 100644
@@ -269,7 +269,7 @@ static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
 }
 
 /**
- *     orc_exec_s            -       Queue an SCB with the HA
+ *     orc_exec_scb            -       Queue an SCB with the HA
  *     @host: host adapter the SCB belongs to
  *     @scb: SCB to queue for execution
  */
@@ -586,7 +586,7 @@ static int orc_reset_scsi_bus(struct orc_host * host)
  *     orc_device_reset        -       device reset handler
  *     @host: host to reset
  *     @cmd: command causing the reset
- *     @target; target device
+ *     @target: target device
  *
  *     Reset registers, reset a hanging bus and kill active and disconnected
  *     commands for target w/o soft reset
@@ -727,7 +727,7 @@ static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
        spin_unlock_irqrestore(&(host->allocation_lock), flags);
 }
 
-/**
+/*
  *     orchid_abort_scb        -       abort a command
  *
  *     Abort a queued command that has been passed to the firmware layer
@@ -902,7 +902,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
 }
 
 /**
- *     inia100_queue           -       queue command with host
+ *     inia100_queue_lck               -       queue command with host
  *     @cmd: Command block
  *     @done: Completion function
  *
@@ -1088,8 +1088,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
        unsigned long port, bios;
        int error = -ENODEV;
        u32 sz;
-       unsigned long biosaddr;
-       char *bios_phys;
 
        if (pci_enable_device(pdev))
                goto out;
@@ -1139,9 +1137,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
                goto out_free_scb_array;
        }
 
-       biosaddr = host->BIOScfg;
-       biosaddr = (biosaddr << 4);
-       bios_phys = phys_to_virt(biosaddr);
        if (init_orchid(host)) {        /* Initialize orchid chip */
                printk("inia100: initial orchid fail!!\n");
                goto out_free_escb_array;
index 4ca5e13..8e06604 100644
@@ -786,8 +786,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
 }
 
 /**
- *     aac_probe_container             -       query a logical volume
- * @scsicmd: the scsi command block
+ *     aac_probe_container_callback1   -       query a logical volume
+ *     @scsicmd: the scsi command block
  *
  *     Queries the controller about the given volume. The volume information
  *     is updated in the struct fsa_dev_info structure rather than returned.
@@ -838,7 +838,7 @@ struct scsi_inq {
 };
 
 /**
- *     InqStrCopy      -       string merge
+ *     inqstrcpy       -       string merge
  *     @a:     string to copy from
  *     @b:     string to copy to
  *
@@ -1804,7 +1804,7 @@ static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
 }
 
 /**
- *     aac_get_safw_ciss_luns()        Process topology change
+ *     aac_get_safw_ciss_luns() - Process topology change
  *     @dev:           aac_dev structure
  *
  *     Execute a CISS REPORT PHYS LUNS and process the results into
index 1b1da16..e7cc927 100644
@@ -472,7 +472,7 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
 
 
 /**
- * aac_send_raw_scb
+ * aac_send_raw_srb()
  *     @dev:   adapter is being processed
  *     @arg:   arguments to the send call
  */
index 0ae0d1f..54eb4d4 100644
@@ -323,7 +323,7 @@ void aac_fib_init(struct fib *fibptr)
 }
 
 /**
- *     fib_deallocate          -       deallocate a fib
+ *     fib_dealloc             -       deallocate a fib
  *     @fibptr: fib to deallocate
  *
  *     Will deallocate and return to the free pool the FIB pointed to by the
@@ -1950,7 +1950,7 @@ void aac_src_reinit_aif_worker(struct work_struct *work)
 }
 
 /**
- *     aac_handle_sa_aif       Handle a message from the firmware
+ *     aac_handle_sa_aif -     Handle a message from the firmware
  *     @dev: Which adapter this fib is from
  *     @fibptr: Pointer to fibptr from adapter
  *
index cdccf9a..e06ff83 100644
@@ -532,7 +532,7 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm)
 }
 
 /**
- *     aac_rx_init     -       initialize an i960 based AAC card
+ *     _aac_rx_init    -       initialize an i960 based AAC card
  *     @dev: device to configure
  *
  *     Allocate and set up resources for the i960 based AAC variants. The 
index ec56278..e9516de 100644
@@ -1812,7 +1812,7 @@ typedef struct adv_req {
  * Field naming convention:
  *
  *  *_able indicates both whether a feature should be enabled or disabled
- *  and whether a device isi capable of the feature. At initialization
+ *  and whether a device is capable of the feature. At initialization
  *  this field may be set, but later if a device is found to be incapable
  *  of the feature, the field is cleared.
  */
index 98978bc..8f24180 100644
@@ -33,7 +33,7 @@
 #ifdef ASD_DEBUG
 #define ASD_DPRINTK asd_printk
 #else
-#define ASD_DPRINTK(fmt, ...)
+#define ASD_DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 #endif
 
 /* 2*ITNL timeout + 1 second */
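[ Note: defining the disabled ASD_DPRINTK as an empty macro meant its
  arguments were never format-checked, and variables used only in debug
  prints could trigger unused warnings in !ASD_DEBUG builds; routing it
  through no_printk() keeps full printf-style checking while compiling to
  nothing. A standalone equivalent (DPRINTK here is a hypothetical macro):

	#include <stdio.h>

	/* User-space stand-in for the kernel's no_printk(): checked like
	 * printf, trivially optimized away. */
	__attribute__((format(printf, 1, 2)))
	static inline int no_printk(const char *fmt, ...)
	{
		return 0;
	}

	#define DEBUG 0

	#if DEBUG
	#define DPRINTK(fmt, ...) printf(fmt, ##__VA_ARGS__)
	#else
	#define DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
	#endif

	int main(void)
	{
		int phy = 3;

		DPRINTK("phy %d reset\n", phy);	/* phy stays "used";
						 * %d is still checked */
		return 0;
	}
]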
index 7c4c53a..552f191 100644
@@ -720,154 +720,8 @@ static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
        PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
 }
 
-#if 0
-
 /**
- * asd_dump_ddb_site -- dump a CSEQ DDB site
- * @asd_ha: pointer to host adapter structure
- * @site_no: site number of interest
- */
-void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
-{
-       if (site_no >= asd_ha->hw_prof.max_ddbs)
-               return;
-
-#define DDB_FIELDB(__name)                                        \
-       asd_ddbsite_read_byte(asd_ha, site_no,                    \
-                             offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-#define DDB2_FIELDB(__name)                                       \
-       asd_ddbsite_read_byte(asd_ha, site_no,                    \
-                             offsetof(struct asd_ddb_stp_sata_target_port, __name))
-#define DDB_FIELDW(__name)                                        \
-       asd_ddbsite_read_word(asd_ha, site_no,                    \
-                             offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-
-#define DDB_FIELDD(__name)                                         \
-       asd_ddbsite_read_dword(asd_ha, site_no,                    \
-                              offsetof(struct asd_ddb_ssp_smp_target_port, __name))
-
-       asd_printk("DDB: 0x%02x\n", site_no);
-       asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
-       asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
-       asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
-       asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
-       asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
-       asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
-       asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
-       asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
-       asd_printk("Pathway Blocked Count: 0x%02x\n",
-                  DDB_FIELDB(pathway_blocked_count));
-       asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
-       asd_printk("more_compat_features: 0x%08x\n",
-                  DDB_FIELDD(more_compat_features));
-       asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
-       asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
-       asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
-       asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
-       asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
-       asd_printk("Active Task Count: 0x%04x\n",
-                  DDB_FIELDW(active_task_count));
-       asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
-       asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
-       asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
-}
-
-void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
-{
-#define DDB0_FIELDB(__name)                                  \
-       asd_ddbsite_read_byte(asd_ha, 0,                     \
-                             offsetof(struct asd_ddb_seq_shared, __name))
-#define DDB0_FIELDW(__name)                                  \
-       asd_ddbsite_read_word(asd_ha, 0,                     \
-                             offsetof(struct asd_ddb_seq_shared, __name))
-
-#define DDB0_FIELDD(__name)                                  \
-       asd_ddbsite_read_dword(asd_ha,0 ,                    \
-                              offsetof(struct asd_ddb_seq_shared, __name))
-
-#define DDB0_FIELDA(__name, _o)                              \
-       asd_ddbsite_read_byte(asd_ha, 0,                     \
-                             offsetof(struct asd_ddb_seq_shared, __name)+_o)
-
-
-       asd_printk("DDB: 0\n");
-       asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
-       asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
-       asd_printk("q_free_ddb_cnt:%04x\n",  DDB0_FIELDW(q_free_ddb_cnt));
-       asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
-       asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
-       asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
-       asd_printk("smp_conn_tag:%04x\n",    DDB0_FIELDW(smp_conn_tag));
-       asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
-       asd_printk("est_nexus_buf_thresh:%04x\n",
-                  DDB0_FIELDW(est_nexus_buf_thresh));
-       asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
-       asd_printk("phy_is_up:%02x\n",       DDB0_FIELDB(phy_is_up));
-       asd_printk("port_map_by_links:%02x %02x %02x %02x "
-                  "%02x %02x %02x %02x\n",
-                  DDB0_FIELDA(port_map_by_links, 0),
-                  DDB0_FIELDA(port_map_by_links, 1),
-                  DDB0_FIELDA(port_map_by_links, 2),
-                  DDB0_FIELDA(port_map_by_links, 3),
-                  DDB0_FIELDA(port_map_by_links, 4),
-                  DDB0_FIELDA(port_map_by_links, 5),
-                  DDB0_FIELDA(port_map_by_links, 6),
-                  DDB0_FIELDA(port_map_by_links, 7));
-}
-
-static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
-{
-
-#define SCB_FIELDB(__name)                                                 \
-       asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header)   \
-                             + offsetof(struct initiate_ssp_task, __name))
-#define SCB_FIELDW(__name)                                                 \
-       asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header)   \
-                             + offsetof(struct initiate_ssp_task, __name))
-#define SCB_FIELDD(__name)                                                 \
-       asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header)  \
-                              + offsetof(struct initiate_ssp_task, __name))
-
-       asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
-       asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
-       asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
-       asd_printk("Target Port Xfer Tag: 0x%04x.\n",
-                  SCB_FIELDW(ssp_frame.tptt));
-       asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
-       asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
-}
-
-/**
- * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
- * @asd_ha: pointer to host adapter struct
- */
-void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
-{
-       u16     site_no;
-
-       for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
-               u8 opcode;
-
-               if (!SCB_SITE_VALID(site_no))
-                       continue;
-
-               /* We are only interested in SCB sites currently used.
-                */
-               opcode = asd_scbsite_read_byte(asd_ha, site_no,
-                                              offsetof(struct scb_header,
-                                                       opcode));
-               if (opcode == 0xFF)
-                       continue;
-
-               asd_printk("\nSCB: 0x%x\n", site_no);
-               asd_dump_scb_site(asd_ha, site_no);
-       }
-}
-
-#endif  /*  0  */
-
-/**
- * ads_dump_seq_state -- dump CSEQ and LSEQ states
+ * asd_dump_seq_state -- dump CSEQ and LSEQ states
  * @asd_ha: pointer to host adapter structure
  * @lseq_mask: mask of LSEQs of interest
  */
@@ -908,42 +762,4 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
        spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
 }
 
-#if 0
-
-static void asd_dump_scb(struct asd_ascb *ascb, int ind)
-{
-       asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
-                  "index:%d, opcode:0x%02x\n",
-                  ind, ascb->dma_scb.vaddr,
-                  (unsigned long long)ascb->dma_scb.dma_handle,
-                  (unsigned long long)
-                  le64_to_cpu(ascb->scb->header.next_scb),
-                  le16_to_cpu(ascb->scb->header.index),
-                  ascb->scb->header.opcode);
-}
-
-void asd_dump_scb_list(struct asd_ascb *ascb, int num)
-{
-       int i = 0;
-
-       asd_printk("dumping %d scbs:\n", num);
-
-       asd_dump_scb(ascb, i++);
-       --num;
-
-       if (num > 0 && !list_empty(&ascb->list)) {
-               struct list_head *el;
-
-               list_for_each(el, &ascb->list) {
-                       struct asd_ascb *s = list_entry(el, struct asd_ascb,
-                                                       list);
-                       asd_dump_scb(s, i++);
-                       if (--num <= 0)
-                               break;
-               }
-       }
-}
-
-#endif  /*  0  */
-
 #endif /* ASD_DEBUG */
index 9256ab7..3dd1101 100644
@@ -903,7 +903,7 @@ static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
 }
 
 /**
- * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR)
+ * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
  * @asd_ha: pointer to host adapter structure
  */
 static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
@@ -1144,7 +1144,7 @@ static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
 }
 
 /**
- * asd_start_timers -- (add and) start timers of SCBs
+ * asd_start_scb_timers -- (add and) start timers of SCBs
  * @list: pointer to struct list_head of the scbs
  *
  * If an SCB in the @list has no timer function, assign the default
index 105adba..297a667 100644
@@ -1244,7 +1244,7 @@ int asd_chk_write_status(struct asd_ha_struct *asd_ha,
 }
 
 /**
- * asd_hwi_erase_nv_sector - Erase the flash memory sectors.
+ * asd_erase_nv_sector - Erase the flash memory sectors.
  * @asd_ha: pointer to the host adapter structure
  * @flash_addr: pointer to offset from flash memory
  * @size: total bytes to erase.
index da6ca2b..9d179cd 100644
@@ -612,7 +612,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 /**
- *     atp870u_queuecommand    -       Queue SCSI command
+ *     atp870u_queuecommand_lck -      Queue SCSI command
  *     @req_p: request block
  *     @done: completion function
  *
@@ -711,16 +711,15 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
 
 static DEF_SCSI_QCMD(atp870u_queuecommand)
 
-/**
+/*
  *     send_s870       -       send a command to the controller
- *     @host: host
  *
  *     On entry there is work queued to be done. We move some of that work to the
  *     controller itself.
  *
  *     Caller holds the host lock.
  */
-static void send_s870(struct atp_unit *dev,unsigned char c)
+static void send_s870(struct atp_unit *dev, unsigned char c)
 {
        struct scsi_cmnd *workreq = NULL;
        unsigned int i;//,k;
index a13c203..0e935c4 100644
@@ -295,7 +295,7 @@ void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
 }
 
 /**
- * beiscsi_set_vlan_tag()- Set the VLAN TAG
+ * beiscsi_iface_config_vlan()- Set the VLAN TAG
  * @shost: Scsi Host for the driver instance
  * @iface_param: Interface paramters
  *
index 90fcddb..22cf7f4 100644
@@ -4926,13 +4926,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
        schedule_work(&phba->boot_work);
 }
 
-/**
+#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
+/*
+ * beiscsi_show_boot_tgt_info()
  * Boot flag info for iscsi-utilities
  * Bit 0 Block valid flag
  * Bit 1 Firmware booting selected
  */
-#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
-
 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
 {
        struct beiscsi_hba *phba = data;
index 0d49285..462717b 100644
@@ -1256,7 +1256,7 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
 }
 
 /**
- * beiscsi_phys_port()- Display Physical Port Identifier
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
  * @dev: ptr to device not used.
  * @attr: device attribute, not used.
  * @buf: contains formatted text port identifier
index d536270..0314e4b 100644
@@ -1193,7 +1193,7 @@ enum {
 };
 
 /*
- * defintions for CT reason code
+ * definitions for CT reason code
  */
 enum {
        CT_RSN_INV_CMD          = 0x01,
@@ -1240,7 +1240,7 @@ enum {
 };
 
 /*
- * defintions for the explanation code for all servers
+ * definitions for the explanation code for all servers
  */
 enum {
        CT_EXP_AUTH_EXCEPTION           = 0xF1,
index 3e117fe..c1baf5c 100644
@@ -217,9 +217,6 @@ struct bfa_vf_event_s {
        u32        undefined;
 };
 
-struct bfa_fcs_s;
-struct bfa_fcs_fabric_s;
-
 /*
  * @todo : need to move to a global config file.
  * Maximum Rports supported per port (physical/logical).
index 49a1415..b12afcc 100644
@@ -1408,7 +1408,7 @@ static void     bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
                                               u32 resid_len,
                                               struct fchs_s *rsp_fchs);
 static void     bfa_fcs_lport_fdmi_timeout(void *arg);
-static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
                                                  u8 *pyld);
 static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
                                                  u8 *pyld);
@@ -1887,6 +1887,8 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
                bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
                                          (u8 *) ((struct ct_hdr_s *) pyld
                                                       + 1));
+       if (attr_len < 0)
+               return;
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, (len + attr_len), &fchs,
@@ -1896,17 +1898,20 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
 }
 
-static          u16
+static int
 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 {
        struct bfa_fcs_lport_s *port = fdmi->ms->port;
-       struct bfa_fcs_fdmi_hba_attr_s hba_attr;
-       struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+       struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr;
        struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
        struct fdmi_attr_s *attr;
+       int        len;
        u8        *curr_ptr;
-       u16        len, count;
-       u16     templen;
+       u16     templen, count;
+
+       fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL);
+       if (!fcs_hba_attr)
+               return -ENOMEM;
 
        /*
         * get hba attributes
@@ -2148,6 +2153,9 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
        len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
 
        rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
+
+       kfree(fcs_hba_attr);
+
        return len;
 }
 
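[ Note: two things happen in the RHBA hunk: the large
  bfa_fcs_fdmi_hba_attr_s moves off the kernel stack onto the heap
  (kzalloc/kfree), and the builder's return type widens from u16 to int
  so -ENOMEM can travel back -- the caller now checks attr_len < 0 and
  bails instead of sending a garbage payload. The shape in miniature,
  with hypothetical names (big_attrs/build_payload):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct big_attrs { char blob[4096]; };	/* too big for the stack */

	/* Was effectively "u16 build(...)": it could only return a
	 * length. Returning int leaves room for negative error codes
	 * alongside valid lengths. */
	static int build_payload(char *out)
	{
		struct big_attrs *attrs = calloc(1, sizeof(*attrs));

		if (!attrs)
			return -ENOMEM;

		memset(attrs->blob, 0xab, sizeof(attrs->blob));
		memcpy(out, attrs->blob, 16);
		free(attrs);
		return 16;			/* payload length */
	}

	int main(void)
	{
		char out[16];
		int len = build_payload(out);

		if (len < 0)
			return 1;	/* caller can now see the failure */
		printf("built %d bytes\n", len);
		return 0;
	}
]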
index fc51542..be8dfbe 100644
@@ -3409,7 +3409,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
 
        drv_fcxp->port = fcs_port->bfad_port;
 
-       if (drv_fcxp->port->bfad == 0)
+       if (!drv_fcxp->port->bfad)
                drv_fcxp->port->bfad = bfad;
 
        /* Fetch the bfa_rport - if nexus needed */
index 16bb6d2..8863a74 100644
@@ -1796,7 +1796,7 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 /**
  * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
  *
- * @handle:    transport handle pointing to adapter struture
+ * @handle:    transport handle pointing to adapter structure
  */
 static int bnx2fc_ulp_get_stats(void *handle)
 {
index b37b0a9..0103f81 100644
@@ -1331,7 +1331,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
 }
 
 /**
- * bnx2fc_indicae_kcqe - process KCQE
+ * bnx2fc_indicate_kcqe() - process KCQE
  *
  * @context:   adapter structure pointer
  * @kcq:       kcqe pointer
index a3e2a38..9200b71 100644
@@ -819,7 +819,7 @@ mem_alloc_failure:
 }
 
 /**
- * bnx2i_free_session_resc - free qp resources for the session
+ * bnx2fc_free_session_resc - free qp resources for the session
  *
  * @hba:       adapter structure pointer
  * @tgt:       bnx2fc_rport structure pointer
index bad396e..43e8a1d 100644
@@ -2206,10 +2206,8 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
 {
        struct bnx2i_conn *bnx2i_conn;
        u32 iscsi_cid;
-       char warn_notice[] = "iscsi_warning";
-       char error_notice[] = "iscsi_error";
-       char additional_notice[64];
-       char *message;
+       const char *additional_notice = "";
+       const char *message;
        int need_recovery;
        u64 err_mask64;
 
@@ -2224,133 +2222,132 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
 
        if (err_mask64 & iscsi_error_mask) {
                need_recovery = 0;
-               message = warn_notice;
+               message = "iscsi_warning";
        } else {
                need_recovery = 1;
-               message = error_notice;
+               message = "iscsi_error";
        }
 
        switch (iscsi_err->completion_status) {
        case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
-               strcpy(additional_notice, "hdr digest err");
+               additional_notice = "hdr digest err";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
-               strcpy(additional_notice, "data digest err");
+               additional_notice = "data digest err";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
-               strcpy(additional_notice, "wrong opcode rcvd");
+               additional_notice = "wrong opcode rcvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
-               strcpy(additional_notice, "AHS len > 0 rcvd");
+               additional_notice = "AHS len > 0 rcvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
-               strcpy(additional_notice, "invalid ITT rcvd");
+               additional_notice = "invalid ITT rcvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
-               strcpy(additional_notice, "wrong StatSN rcvd");
+               additional_notice = "wrong StatSN rcvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
-               strcpy(additional_notice, "wrong DataSN rcvd");
+               additional_notice = "wrong DataSN rcvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
-               strcpy(additional_notice, "pend R2T violation");
+               additional_notice = "pend R2T violation";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
-               strcpy(additional_notice, "ERL0, UO");
+               additional_notice = "ERL0, UO";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
-               strcpy(additional_notice, "ERL0, U1");
+               additional_notice = "ERL0, U1";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
-               strcpy(additional_notice, "ERL0, U2");
+               additional_notice = "ERL0, U2";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
-               strcpy(additional_notice, "ERL0, U3");
+               additional_notice = "ERL0, U3";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
-               strcpy(additional_notice, "ERL0, U4");
+               additional_notice = "ERL0, U4";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
-               strcpy(additional_notice, "ERL0, U5");
+               additional_notice = "ERL0, U5";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
-               strcpy(additional_notice, "ERL0, U6");
+               additional_notice = "ERL0, U6";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
-               strcpy(additional_notice, "invalid resi len");
+               additional_notice = "invalid resi len";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
-               strcpy(additional_notice, "MRDSL violation");
+               additional_notice = "MRDSL violation";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
-               strcpy(additional_notice, "F-bit not set");
+               additional_notice = "F-bit not set";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
-               strcpy(additional_notice, "invalid TTT");
+               additional_notice = "invalid TTT";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
-               strcpy(additional_notice, "invalid DataSN");
+               additional_notice = "invalid DataSN";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
-               strcpy(additional_notice, "burst len violation");
+               additional_notice = "burst len violation";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
-               strcpy(additional_notice, "buf offset violation");
+               additional_notice = "buf offset violation";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
-               strcpy(additional_notice, "invalid LUN field");
+               additional_notice = "invalid LUN field";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
-               strcpy(additional_notice, "invalid R2TSN field");
+               additional_notice = "invalid R2TSN field";
                break;
 #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0      \
        ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
        case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
-               strcpy(additional_notice, "invalid cmd len1");
+               additional_notice = "invalid cmd len1";
                break;
 #define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1      \
        ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
        case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
-               strcpy(additional_notice, "invalid cmd len2");
+               additional_notice = "invalid cmd len2";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
-               strcpy(additional_notice,
-                      "pend r2t exceeds MaxOutstandingR2T value");
+               additional_notice = "pend r2t exceeds MaxOutstandingR2T value";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
-               strcpy(additional_notice, "TTT is rsvd");
+               additional_notice = "TTT is rsvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
-               strcpy(additional_notice, "MBL violation");
+               additional_notice = "MBL violation";
                break;
 #define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO        \
        ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
        case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
-               strcpy(additional_notice, "data seg len != 0");
+               additional_notice = "data seg len != 0";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
-               strcpy(additional_notice, "reject pdu len error");
+               additional_notice = "reject pdu len error";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
-               strcpy(additional_notice, "async pdu len error");
+               additional_notice = "async pdu len error";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
-               strcpy(additional_notice, "nopin pdu len error");
+               additional_notice = "nopin pdu len error";
                break;
 #define BNX2_ERR_PEND_R2T_IN_CLEANUP                   \
        ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
        case BNX2_ERR_PEND_R2T_IN_CLEANUP:
-               strcpy(additional_notice, "pend r2t in cleanup");
+               additional_notice = "pend r2t in cleanup";
                break;
 
        case ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
-               strcpy(additional_notice, "IP fragments rcvd");
+               additional_notice = "IP fragments rcvd";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
-               strcpy(additional_notice, "IP options error");
+               additional_notice = "IP options error";
                break;
        case ISCSI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
-               strcpy(additional_notice, "urgent flag error");
+               additional_notice = "urgent flag error";
                break;
        default:
                printk(KERN_ALERT "iscsi_err - unknown err %x\n",
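The bnx2i hunks above all apply one pattern: instead of strcpy()ing a message into a fixed-size additional_notice buffer, the code now points a const char * at the string literal itself, removing the copy and any risk of overflowing the buffer. A minimal userspace sketch of the pattern, with illustrative names and error codes:

    #include <stdio.h>

    /* Hypothetical error codes standing in for the ISCSI_KCQE_* values. */
    enum { ERR_OPCODE = 1, ERR_ITT = 2 };

    static const char *notice_for(unsigned int err)
    {
            switch (err) {
            case ERR_OPCODE:
                    return "wrong opcode rcvd";   /* literal lives in rodata */
            case ERR_ITT:
                    return "invalid ITT rcvd";
            default:
                    return "";
            }
    }

    int main(void)
    {
            /* No fixed buffer and no strcpy(): just point at the literal. */
            printf("iscsi_err - %s\n", notice_for(ERR_OPCODE));
            return 0;
    }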
index 3dc7900..bea0007 100644 (file)
@@ -104,7 +104,7 @@ static ssize_t bnx2i_show_ccell_info(struct device *dev,
 
 
 /**
- * bnx2i_get_link_state - set command cell (HQ) size
+ * bnx2i_set_ccell_info - set command cell (HQ) size
  * @dev:       device pointer
  * @attr:      device attribute (unused)
  * @buf:       buffer to return current SQ size parameter
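The bnx2i_sysfs hunk is the first of many kernel-doc fixes in this merge: the name on the opening line of a /** comment must match the function it documents, and every parameter needs an @name line. A minimal sketch of a well-formed kernel-doc block, using a hypothetical function and type:

    struct widget { int level; };           /* hypothetical type */

    /**
     * frob_widget() - set a widget's frobnication level
     * @w:     widget to adjust
     * @level: new level; must be non-negative
     *
     * The name on the first line must match the function being documented,
     * and every parameter needs a matching @name line, or W=1 builds and
     * scripts/kernel-doc will warn -- which is what most hunks here fix.
     *
     * Return: 0 on success, -EINVAL if @level is negative.
     */
    static int frob_widget(struct widget *w, int level)
    {
            if (level < 0)
                    return -22;             /* -EINVAL */
            w->level = level;
            return 0;
    }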
index 1df8891..86fded9 100644 (file)
@@ -244,7 +244,7 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
  *
  * Reads/writes an [almost] arbitrary memory region in the firmware: the
  * firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries.  The memory is transferred as a raw byte sequence
+ * 32-bit boundaries.  The memory is transferred as a raw byte sequence
  * from/to the firmware's memory.  If this memory contains data
  * structures which contain multi-byte integers, it's the caller's
  * responsibility to perform appropriate byte order conversions.
index 55e74da..56b9ad0 100644 (file)
@@ -147,9 +147,9 @@ csio_scsi_itnexus_loss_error(uint16_t error)
        case FW_ERR_RDEV_LOST:
        case FW_ERR_RDEV_LOGO:
        case FW_ERR_RDEV_IMPL_LOGO:
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
 /*
index 37d9935..203f938 100644 (file)
@@ -1177,7 +1177,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
 }
 
 /**
- * cxgb3i_setup_conn_digest - setup conn. digest setting
+ * ddp_setup_conn_digest - setup conn. digest setting
  * @csk: cxgb tcp socket
  * @tid: connection id
  * @hcrc: header digest enabled
index e72440d..dc36531 100644 (file)
@@ -1357,7 +1357,7 @@ cxlflash_sync_err_irq_exit:
 
 /**
  * process_hrrq() - process the read-response queue
- * @afu:       AFU associated with the host.
+ * @hwq:       HWQ associated with the host.
  * @doneq:     Queue of commands harvested from the RRQ.
  * @budget:    Threshold of RRQ entries to process.
  *
@@ -1997,7 +1997,7 @@ out:
 /**
  * init_mc() - create and register as the master context
  * @cfg:       Internal structure associated with the host.
- * index:      HWQ Index of the master context.
+ * @index:     HWQ Index of the master context.
  *
  * Return: 0 on success, -errno on failure
  */
@@ -3294,7 +3294,7 @@ static char *decode_hioctl(unsigned int cmd)
 /**
  * cxlflash_lun_provision() - host LUN provisioning handler
  * @cfg:       Internal structure associated with the host.
- * @arg:       Kernel copy of userspace ioctl data structure.
+ * @lunprov:   Kernel copy of userspace ioctl data structure.
  *
  * Return: 0 on success, -errno on failure
  */
@@ -3385,7 +3385,7 @@ out:
 /**
  * cxlflash_afu_debug() - host AFU debug handler
  * @cfg:       Internal structure associated with the host.
- * @arg:       Kernel copy of userspace ioctl data structure.
+ * @afu_dbg:   Kernel copy of userspace ioctl data structure.
  *
  * For debug requests requiring a data buffer, always provide an aligned
  * (cache line) buffer to the AFU to appease any alignment requirements.
index 5dddf67..ee11ec3 100644 (file)
@@ -30,7 +30,7 @@ struct cxlflash_global global;
 
 /**
  * marshal_rele_to_resize() - translate release to resize structure
- * @rele:      Source structure from which to translate/copy.
+ * @release:   Source structure from which to translate/copy.
  * @resize:    Destination structure for the translate/copy.
  */
 static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
@@ -44,7 +44,7 @@ static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
 /**
  * marshal_det_to_rele() - translate detach to release structure
  * @detach:    Destination structure for the translate/copy.
- * @rele:      Source structure from which to translate/copy.
+ * @release:   Source structure from which to translate/copy.
  */
 static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
                                struct dk_cxlflash_release *release)
@@ -517,7 +517,7 @@ void rhte_checkin(struct ctx_info *ctxi,
 }
 
 /**
- * rhte_format1() - populates a RHTE for format 1
+ * rht_format1() - populates a RHTE for format 1
  * @rhte:      RHTE to populate.
  * @lun_id:    LUN ID of LUN associated with RHTE.
  * @perm:      Desired permissions for RHTE.
index f1406ac..01917b2 100644 (file)
@@ -41,7 +41,7 @@ static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
 /**
  * marshal_clone_to_rele() - translate clone to release structure
  * @clone:     Source structure from which to translate/copy.
- * @rele:      Destination structure for the translate/copy.
+ * @release:   Destination structure for the translate/copy.
  */
 static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
                                  struct dk_cxlflash_release *release)
@@ -229,7 +229,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
 
 /**
  * validate_alloc() - validates the specified block has been allocated
- * @ba_lun_info:       LUN info owning the block allocator.
+ * @bali:              LUN info owning the block allocator.
  * @aun:               Block to validate.
  *
  * Return: 0 on success, -1 on failure
@@ -300,7 +300,7 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
 /**
  * ba_clone() - Clone a chunk of the block allocation table
  * @ba_lun:    Block allocator from which to allocate a block.
- * @to_free:   Block to free.
+ * @to_clone:  Block to clone.
  *
  * Return: 0 on success, -1 on failure
  */
@@ -361,7 +361,7 @@ void cxlflash_ba_terminate(struct ba_lun *ba_lun)
 
 /**
  * init_vlun() - initializes a LUN for virtual use
- * @lun_info:  LUN information structure that owns the block allocator.
+ * @lli:       LUN information structure that owns the block allocator.
  *
  * Return: 0 on success, -errno on failure
  */
index 3ea345c..1e9ec4d 100644 (file)
@@ -958,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
 
 
 /**
- * dc395x_queue_command - queue scsi command passed from the mid
+ * dc395x_queue_command_lck - queue scsi command passed from the mid
  * layer, invoke 'done' on completion
  *
  * @cmd: pointer to scsi command object
@@ -2918,7 +2918,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
        } else {
                if ((srb->state & (SRB_START_ + SRB_MSGOUT))
                    || !(srb->
-                        state & (SRB_DISCONNECT + SRB_COMPLETED))) {
+                        state & (SRB_DISCONNECT | SRB_COMPLETED))) {
                        /*
                         * Selection time out 
                         * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
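The second dc395x change above is a correctness fix, not a rename: a flag mask must be built with bitwise OR, not arithmetic addition. With distinct single-bit flags the two happen to produce the same value, but '+' breaks silently if a flag is composite or appears twice, while '|' is idempotent and states the intent. A small standalone illustration with made-up flag values:

    #include <stdio.h>

    #define SRB_DISCONNECT  0x0400  /* illustrative values, single bits */
    #define SRB_COMPLETED   0x0800

    int main(void)
    {
            unsigned int state = SRB_DISCONNECT;

            /* Correct: build the mask with bitwise OR. */
            if (state & (SRB_DISCONNECT | SRB_COMPLETED))
                    printf("disconnected or completed\n");

            /*
             * With distinct single-bit flags, '+' happens to produce the
             * same mask, but (F + F) tests the wrong bit while (F | F)
             * still tests F -- '|' is the safe, idiomatic form.
             */
            return 0;
    }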
@@ -4248,7 +4248,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
 
 
 /**
- * adapter_init_host - Initialize the scsi host instance based on
+ * adapter_init_scsi_host - Initialize the scsi host instance based on
  * values that we have already stored in the adapter instance. There's
  * some mention that a lot of these are deprecated, so we won't use
  * them (we'll use the ones in the adapter instance) but we'll fill
@@ -4336,13 +4336,14 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
 
 
 /**
- * init_adapter - Grab the resource for the card, setup the adapter
+ * adapter_init - Grab the resource for the card, setup the adapter
  * information, set the card into a known state, create the various
  * tables etc etc. This basically gets all adapter information all up
  * to date, initialised and gets the chip in sync with it.
  *
- * @host:      This hosts adapter structure
+ * @acb:       The adapter which we are to init.
  * @io_port:   The base I/O port
+ * @io_port_len: The I/O port size
  * @irq:       IRQ
  *
  * Returns 0 if the initialization succeeds, any other value on
index ea436a1..e6fde27 100644 (file)
@@ -515,6 +515,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        struct scsi_sense_hdr sense_hdr;
        struct alua_port_group *tmp_pg;
        int len, k, off, bufflen = ALUA_RTPG_SIZE;
+       int group_id_old, state_old, pref_old, valid_states_old;
        unsigned char *desc, *buff;
        unsigned err, retval;
        unsigned int tpg_desc_tbl_off;
@@ -522,6 +523,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        unsigned long flags;
        bool transitioning_sense = false;
 
+       group_id_old = pg->group_id;
+       state_old = pg->state;
+       pref_old = pg->pref;
+       valid_states_old = pg->valid_states;
+
        if (!pg->expiry) {
                unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
 
@@ -573,10 +579,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                 * even though it shouldn't according to T10.
                 * The retry without rtpg_ext_hdr_req set
                 * handles this.
+                * Note:  some arrays return a sense key of ILLEGAL_REQUEST
+                * with ASC 00h if they don't support the extended header.
                 */
                if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
-                   sense_hdr.sense_key == ILLEGAL_REQUEST &&
-                   sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+                   sense_hdr.sense_key == ILLEGAL_REQUEST) {
                        pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
                        goto retry;
                }
@@ -686,17 +693,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        if (transitioning_sense)
                pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
 
-       sdev_printk(KERN_INFO, sdev,
-                   "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
-                   ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
-                   pg->pref ? "preferred" : "non-preferred",
-                   pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
-                   pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
-                   pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
-                   pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
-                   pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
-                   pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
-                   pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+       if (group_id_old != pg->group_id || state_old != pg->state ||
+               pref_old != pg->pref || valid_states_old != pg->valid_states)
+               sdev_printk(KERN_INFO, sdev,
+                       "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
+                       ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+                       pg->pref ? "preferred" : "non-preferred",
+                       pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+                       pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+                       pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+                       pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+                       pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+                       pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+                       pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
 
        switch (pg->state) {
        case SCSI_ACCESS_STATE_TRANSITIONING:
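The alua_rtpg() change snapshots group_id, state, pref and valid_states before the update and emits the long port-group log line only when one of them actually changed, so periodic RTPG polling no longer floods the log with identical messages. A compact userspace sketch of the snapshot-and-compare pattern, with hypothetical field names:

    #include <stdio.h>

    struct pg { int group_id, state, pref; };   /* hypothetical fields */

    /* Apply a fresh reading and log only if something actually changed. */
    static void update_and_log(struct pg *pg, struct pg fresh)
    {
            struct pg old = *pg;        /* snapshot before the update */

            *pg = fresh;
            if (old.group_id != pg->group_id || old.state != pg->state ||
                old.pref != pg->pref)
                    printf("port group %02x state %d %s\n", pg->group_id,
                           pg->state, pg->pref ? "preferred" : "non-preferred");
    }

    int main(void)
    {
            struct pg pg = { 0, 0, 0 };

            update_and_log(&pg, (struct pg){ 1, 2, 1 });   /* logs once */
            update_and_log(&pg, (struct pg){ 1, 2, 1 });   /* silent */
            return 0;
    }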
index b545798..d6c87a0 100644 (file)
@@ -101,6 +101,11 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
        }
 }
 
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
 /*
  * the master logging function.  this function will format the message as
  * outlined by the formatting string, the input device information and the
@@ -170,6 +175,8 @@ static int esas2r_log_master(const long level,
        return 0;
 }
 
+#pragma GCC diagnostic pop
+
 /*
  * formats and logs a message to the system log.
  *
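The esas2r_log change brackets the logging helper with GCC diagnostic push/pop so that -Wsuggest-attribute=format is ignored only for that one function; the #ifndef __clang__ guard matters because clang does not know that GCC-specific option and would warn about the unknown warning name. A self-contained sketch of the same construct:

    #include <stdarg.h>
    #include <stdio.h>

    #pragma GCC diagnostic push
    #ifndef __clang__
    /* GCC-only warning; clang would complain about the unknown option. */
    #pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
    #endif

    /* Forwards a caller-supplied format string; without the pragma, GCC
     * would suggest adding __attribute__((format(printf, 1, 2))). */
    static int log_msg(const char *fmt, ...)
    {
            va_list ap;
            int n;

            va_start(ap, fmt);
            n = vprintf(fmt, ap);
            va_end(ap);
            return n;
    }

    #pragma GCC diagnostic pop

    int main(void)
    {
            return log_msg("hello %s\n", "world") < 0;
    }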
index 007ccef..342535a 100644 (file)
@@ -647,7 +647,7 @@ static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
        ent->sense_ptr = NULL;
 }
 
-/* When a contingent allegiance conditon is created, we force feed a
+/* When a contingent allegiance condition is created, we force feed a
  * REQUEST_SENSE command to the device to fetch the sense data.  I
  * tried many other schemes, relying on the scsi error handling layer
  * to send out the REQUEST_SENSE automatically, but this was difficult
@@ -1341,7 +1341,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
        bytes_sent -= esp->send_cmd_residual;
 
        /*
-        * The am53c974 has a DMA 'pecularity'. The doc states:
+        * The am53c974 has a DMA 'peculiarity'. The doc states:
         * In some odd byte conditions, one residual byte will
         * be left in the SCSI FIFO, and the FIFO Flags will
         * never count to '0 '. When this happens, the residual
index 03bf49a..89ec735 100644 (file)
@@ -2771,7 +2771,7 @@ static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
 }
 
 /**
- * fcoe_vport_set_symbolic_name() - append vport string to symbolic name
+ * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
  * @vport: fc_vport with a new symbolic name string
  *
  * After generating a new symbolic name string, a new RSPN_ID request is
index 5ea426e..1756a0a 100644 (file)
@@ -1302,7 +1302,7 @@ drop:
 }
 
 /**
- * fcoe_ctlr_recv_els() - Handle an incoming link reset frame
+ * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
  * @fip: The FCoE controller that received the frame
  * @skb: The received FIP packet
  *
@@ -2952,7 +2952,7 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
 }
 
 /**
- * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
+ * fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
  * @fip: The FCoE controller
  * @frport: The newly-parsed FCoE rport from the Discovery Request
  *
index 6c04936..e732650 100644 (file)
@@ -58,8 +58,7 @@ int fnic_debugfs_init(void)
                                                fnic_trace_debugfs_root);
 
        /* Allocate memory to structure */
-       fc_trc_flag = (struct fc_trace_flag_type *)
-               vmalloc(sizeof(struct fc_trace_flag_type));
+       fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type));
 
        if (fc_trc_flag) {
                fc_trc_flag->fc_row_file = 0;
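The fnic_debugfs hunk drops the explicit cast on vmalloc(): in C a void * converts implicitly to any object pointer, so the cast is pure noise and can even hide a missing prototype. The same idiom with malloc() in a standalone sketch (the struct here is just a stand-in):

    #include <stdlib.h>

    struct trace_flags { int fc_row_file; };    /* stand-in structure */

    int main(void)
    {
            /* Idiomatic C: no cast on the void * that malloc() returns,
             * and sizeof(*f) stays correct if the type ever changes. */
            struct trace_flags *f = malloc(sizeof(*f));

            if (!f)
                    return 1;
            f->fc_row_file = 0;
            free(f);
            return 0;
    }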
index e0cee4d..1885218 100644 (file)
@@ -296,7 +296,7 @@ void fnic_handle_event(struct work_struct *work)
 }
 
 /**
- * Check if the Received FIP FLOGI frame is rejected
+ * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
  * @fip: The FCoE controller that received the frame
  * @skb: The received FIP frame
  *
@@ -1343,9 +1343,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
        if (list_empty(&fnic->vlans)) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                /* no vlans available, try again */
-               if (printk_ratelimit())
-                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
-                                 "Start VLAN Discovery\n");
+               if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
+                       if (printk_ratelimit())
+                               shost_printk(KERN_DEBUG, fnic->lport->host,
+                                               "Start VLAN Discovery\n");
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                return;
        }
@@ -1363,9 +1364,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
        case FIP_VLAN_FAILED:
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                /* if all vlans are in failed state, restart vlan disc */
-               if (printk_ratelimit())
-                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
-                                 "Start VLAN Discovery\n");
+               if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
+                       if (printk_ratelimit())
+                               shost_printk(KERN_DEBUG, fnic->lport->host,
+                                         "Start VLAN Discovery\n");
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                break;
        case FIP_VLAN_SENT:
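Both fnic_fcs hunks reorder the checks so the cheap log-level mask test runs before printk_ratelimit(): printk_ratelimit() updates its internal state every time it is called, so calling it first would burn rate-limit budget even when the message is then suppressed by the level check. A userspace sketch of the ordering, with a toy one-message-per-second limiter and made-up mask names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define LOG_FCS 0x1                     /* made-up log-level bit */
    static unsigned int log_mask = LOG_FCS;

    /* Toy limiter: at most one message per second; note that every
     * call updates its state -- just like printk_ratelimit(). */
    static bool rate_ok(void)
    {
            static time_t last = (time_t)-1;
            time_t now = time(NULL);

            if (now == last)
                    return false;
            last = now;
            return true;
    }

    static void log_fcs(const char *msg)
    {
            /* Mask first, limiter second: a disabled message must not
             * consume budget that enabled messages may need later. */
            if ((log_mask & LOG_FCS) && rate_ok())
                    printf("DEBUG: %s\n", msg);
    }

    int main(void)
    {
            log_fcs("Start VLAN Discovery");
            return 0;
    }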
index 186c3ab..786f9d2 100644 (file)
@@ -1100,9 +1100,6 @@ static int __init fnic_init_module(void)
                goto err_create_fnic_workq;
        }
 
-       spin_lock_init(&fnic_list_lock);
-       INIT_LIST_HEAD(&fnic_list);
-
        fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
        if (!fnic_fip_queue) {
                printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
index 3674496..e619a82 100644 (file)
@@ -173,7 +173,7 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
 }
 
 
-/**
+/*
  * __fnic_set_state_flags
  * Sets/Clears bits in fnic's state_flags
  **/
@@ -2287,7 +2287,7 @@ clean_pending_aborts_end:
        return ret;
 }
 
-/**
+/*
  * fnic_scsi_host_start_tag
  * Allocates tagid from host's tag list
  **/
@@ -2307,7 +2307,7 @@ fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
        return dummy->tag;
 }
 
-/**
+/*
  * fnic_scsi_host_end_tag
  * frees tag allocated by fnic_scsi_host_start_tag.
  **/
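These fnic_scsi hunks (and similar ones in ibmvscsi and isci below) demote comment openers from /** to /*: the double-star form is reserved for kernel-doc, and the parser warns when such a comment does not follow the kernel-doc layout. Roughly, as a hypothetical illustration:

    /* Plain comment: free-form text, the kernel-doc parser ignores it. */
    static void helper(void) { }

    /**
     * widget_reset() - put the (hypothetical) widget back in a known state
     *
     * Use the double-star opener only when the body follows the kernel-doc
     * layout; otherwise W=1 builds warn, which is why these hunks demote
     * the openers of free-form comments.
     */
    static void widget_reset(void) { helper(); }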
index 9d52d83..4a7536b 100644 (file)
@@ -153,7 +153,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
                        if (rd_idx > (fnic_max_trace_entries-1))
                                rd_idx = 0;
                        /*
-                        * Continure dumpping trace buffer entries into
+                        * Continue dumping trace buffer entries into
                         * memory file till rd_idx reaches write index
                         */
                        if (rd_idx == wr_idx)
@@ -189,7 +189,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
                                  tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
-                        * Continue dumpping trace buffer entries into
+                        * Continue dumping trace buffer entries into
                         * memory file till rd_idx reaches write index
                         */
                        if (rd_idx == wr_idx)
@@ -632,7 +632,7 @@ void fnic_fc_trace_free(void)
  * fnic_fc_ctlr_set_trace_data:
  *       Maintain rd & wr idx accordingly and set data
  * Passed parameters:
- *       host_no: host number accociated with fnic
+ *       host_no: host number associated with fnic
  *       frame_type: send_frame, rece_frame or link event
  *       fc_frame: pointer to fc_frame
  *       frame_len: Length of the fc_frame
@@ -715,13 +715,13 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
  * fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
  * Passed parameter:
  *       @fnic_dbgfs_t: pointer to debugfs trace buffer
- *       rdata_flag: 1 => Unformated file
- *                   0 => formated file
+ *       rdata_flag: 1 => Unformatted file
+ *                   0 => formatted file
  * Description:
  *       This routine will copy the trace data to memory file with
  *       proper formatting and also copy to another memory
- *       file without formatting for further procesing.
- * Retrun Value:
+ *       file without formatting for further processing.
+ * Return Value:
  *       Number of bytes that were dumped into fnic_dbgfs_t
  */
 
@@ -785,10 +785,10 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
  *      @fc_trace_hdr_t: pointer to trace data
  *      @fnic_dbgfs_t: pointer to debugfs trace buffer
  *      @orig_len: pointer to len
- *      rdata_flag: 0 => Formated file, 1 => Unformated file
+ *      rdata_flag: 0 => Formatted file, 1 => Unformatted file
  * Description:
  *      This routine will format and copy the passed trace data
- *      for formated file or unformated file accordingly.
+ *      for formatted file or unformatted file accordingly.
  */
 
 void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
index bb64e32..067c7c4 100644 (file)
@@ -326,6 +326,7 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
 
 /**
  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vhost:      ibmvfc host struct
  * @vfc_cmd:   ibmvfc command struct
  *
  * Return value:
@@ -650,8 +651,6 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
 /**
  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
  * @tgt:               ibmvfc target struct
- * @job_step:  job step to perform
- *
  **/
 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
 {
@@ -768,6 +767,8 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
 /**
  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
  * @vhost:     ibmvfc host who owns the event pool
+ * @queue:      ibmvfc queue struct
+ * @size:       pool size
  *
  * Returns zero on success.
  **/
@@ -820,6 +821,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
 /**
  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
  * @vhost:     ibmvfc host who owns the event pool
+ * @queue:      ibmvfc queue struct
  *
  **/
 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
@@ -1414,6 +1416,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
 
 /**
  * ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost:      ibmvfc host struct
  *
  * Return value:
  *     none
@@ -1484,7 +1487,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 
 /**
  * ibmvfc_get_event - Gets the next free event in pool
- * @vhost:     ibmvfc host struct
+ * @queue:      ibmvfc queue struct
  *
  * Returns a free event from the pool.
  **/
@@ -1631,7 +1634,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
 
 /**
  * ibmvfc_timeout - Internal command timeout handler
- * @evt:       struct ibmvfc_event that timed out
+ * @t: struct ibmvfc_event that timed out
  *
  * Called when an internally generated command times out
  **/
@@ -1892,8 +1895,8 @@ static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct s
 
 /**
  * ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @shost:     scsi host struct
  * @cmnd:      struct scsi_cmnd to be executed
- * @done:      Callback function to be called when cmnd is completed
  *
  * Returns:
  *     0 on success / other on failure
@@ -2324,7 +2327,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
 /**
  * ibmvfc_match_rport - Match function for specified remote port
  * @evt:       ibmvfc event struct
- * @device:    device to match (rport)
+ * @rport:     device to match
  *
  * Returns:
  *     1 if event matches rport / 0 if event does not match rport
@@ -3176,8 +3179,9 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
  * @crq:       Command/Response queue
  * @vhost:     ibmvfc host struct
+ * @evt_doneq: Event done queue
  *
- **/
+**/
 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
                              struct list_head *evt_doneq)
 {
@@ -3358,7 +3362,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
  * ibmvfc_change_queue_depth - Change the device's queue depth
  * @sdev:      scsi device struct
  * @qdepth:    depth to set
- * @reason:    calling context
  *
  * Return value:
  *     actual depth set
@@ -3430,6 +3433,7 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
 /**
  * ibmvfc_show_log_level - Show the adapter's error logging level
  * @dev:       class device struct
+ * @attr:      unused
  * @buf:       buffer
  *
  * Return value:
@@ -3452,7 +3456,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
 /**
  * ibmvfc_store_log_level - Change the adapter's error logging level
  * @dev:       class device struct
+ * @attr:      unused
  * @buf:       buffer
+ * @count:      buffer size
  *
  * Return value:
  *     number of bytes printed to buffer
@@ -3530,7 +3536,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t off, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ibmvfc_host *vhost = shost_priv(shost);
        unsigned long flags = 0;
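The one-line ibmvfc change replaces an open-coded container_of() with the kobj_to_dev() helper; naming common container_of() conversions makes call sites shorter and lets the compiler type-check the argument. A standalone sketch of the idea (the kernel defines container_of() and kobj_to_dev() for real; these are simplified stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    struct kobj { const char *name; };
    struct device { int id; struct kobj kobj; };

    /* Simplified stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Named helper, analogous to the kernel's kobj_to_dev(). */
    static struct device *kobj_to_dev(struct kobj *kobj)
    {
            return container_of(kobj, struct device, kobj);
    }

    int main(void)
    {
            struct device d = { .id = 7, .kobj = { "dev0" } };

            printf("id=%d\n", kobj_to_dev(&d.kobj)->id);
            return 0;
    }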
@@ -4162,6 +4168,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
 /**
  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
  * @tgt:               ibmvfc target struct
+ * @done:              Routine to call when the event is responded to
  *
  * Returns:
  *     Allocated and initialized ibmvfc_event struct
@@ -4478,7 +4485,7 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
 
 /**
  * ibmvfc_adisc_timeout - Handle an ADISC timeout
- * @tgt:               ibmvfc target struct
+ * @t:         ibmvfc target struct
  *
  * If an ADISC times out, send a cancel. If the cancel times
  * out, reset the CRQ. When the ADISC comes back as cancelled,
@@ -4681,7 +4688,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
 /**
  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
  * @vhost:             ibmvfc host struct
- * @scsi_id:   SCSI ID to allocate target for
+ * @target:            Holds SCSI ID to allocate target for and the WWPN
  *
  * Returns:
  *     0 on success / other on failure
@@ -5111,7 +5118,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 
 /**
  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
- * @vhost:             ibmvfc host struct
+ * @evt:               ibmvfc event struct
  *
  **/
 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
index 29fcc44..f33f566 100644 (file)
@@ -130,9 +130,10 @@ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
 }
 
 /**
- * release_crq_queue: - Deallocates data and unregisters CRQ
- * @queue:     crq_queue to initialize and register
- * @host_data: ibmvscsi_host_data of host
+ * ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
+ * @queue:             crq_queue to initialize and register
+ * @hostdata:          ibmvscsi_host_data of host
+ * @max_requests:      maximum requests (unused)
  *
  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
  * the crq with the hypervisor.
@@ -276,10 +277,9 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
 }
 
 /**
- * reset_crq_queue: - resets a crq after a failure
+ * ibmvscsi_reset_crq_queue() - resets a crq after a failure
  * @queue:     crq_queue to initialize and register
  * @hostdata:  ibmvscsi_host_data of host
- *
  */
 static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
@@ -314,9 +314,10 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
 }
 
 /**
- * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
- * @queue:     crq_queue to initialize and register
- * @hostdata:  ibmvscsi_host_data of host
+ * ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
+ * @queue:             crq_queue to initialize and register
+ * @hostdata:          ibmvscsi_host_data of host
+ * @max_requests:      maximum requests (unused)
  *
  * Allocates a page for messages, maps it for dma, and registers
  * the crq with the hypervisor.
@@ -404,10 +405,9 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
 }
 
 /**
- * reenable_crq_queue: - reenables a crq after
+ * ibmvscsi_reenable_crq_queue() - reenables a crq after
  * @queue:     crq_queue to initialize and register
  * @hostdata:  ibmvscsi_host_data of host
- *
  */
 static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
@@ -439,7 +439,7 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
  * @hostdata:  ibmvscsi_host_data who owns the event pool
  *
  * Returns zero on success.
-*/
+ */
 static int initialize_event_pool(struct event_pool *pool,
                                 int size, struct ibmvscsi_host_data *hostdata)
 {
@@ -478,12 +478,12 @@ static int initialize_event_pool(struct event_pool *pool,
 }
 
 /**
- * release_event_pool: - Frees memory of an event pool of a host
+ * release_event_pool() - Frees memory of an event pool of a host
  * @pool:      event_pool to be released
  * @hostdata:  ibmvscsi_host_data who owns the event pool
  *
  * Returns zero on success.
-*/
+ */
 static void release_event_pool(struct event_pool *pool,
                               struct ibmvscsi_host_data *hostdata)
 {
@@ -526,11 +526,10 @@ static int valid_event_struct(struct event_pool *pool,
 }
 
 /**
- * ibmvscsi_free-event_struct: - Changes status of event to "free"
+ * free_event_struct() - Changes status of event to "free"
  * @pool:      event_pool that contains the event
  * @evt:       srp_event_struct to be modified
- *
-*/
+ */
 static void free_event_struct(struct event_pool *pool,
                                       struct srp_event_struct *evt)
 {
@@ -547,7 +546,7 @@ static void free_event_struct(struct event_pool *pool,
 }
 
 /**
- * get_evt_struct: - Gets the next free event in pool
+ * get_event_struct() - Gets the next free event in pool
  * @pool:      event_pool that contains the events to be searched
  *
  * Returns the next event in "free" state, and NULL if none are free.
@@ -575,7 +574,7 @@ static struct srp_event_struct *get_event_struct(struct event_pool *pool)
 /**
  * init_event_struct: Initialize fields in an event struct that are always 
  *                    required.
- * @evt:        The event
+ * @evt_struct: The event
  * @done:       Routine to call when the event is responded to
  * @format:     SRP or MAD format
  * @timeout:    timeout value set in the CRQ
@@ -597,7 +596,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
  * Routines for receiving SCSI responses from the hosting partition
  */
 
-/**
+/*
  * set_srp_direction: Set the fields in the srp related to data
  *     direction and number of buffers based on the direction in
  *     the scsi_cmnd and the number of buffers
@@ -632,9 +631,9 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 /**
  * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
  * @cmd:       srp_cmd whose additional_data member will be unmapped
+ * @evt_struct: the event
  * @dev:       device for which the memory is mapped
- *
-*/
+ */
 static void unmap_cmd_data(struct srp_cmd *cmd,
                           struct srp_event_struct *evt_struct,
                           struct device *dev)
@@ -671,6 +670,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
 /**
  * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
  * @cmd:       struct scsi_cmnd with the scatterlist
+ * @evt_struct:        struct srp_event_struct to map
  * @srp_cmd:   srp_cmd that contains the memory descriptor
  * @dev:       device for which to map dma memory
  *
@@ -717,8 +717,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 
        /* get indirect table */
        if (!evt_struct->ext_list) {
-               evt_struct->ext_list = (struct srp_direct_buf *)
-                       dma_alloc_coherent(dev,
+               evt_struct->ext_list = dma_alloc_coherent(dev,
                                           SG_ALL * sizeof(struct srp_direct_buf),
                                           &evt_struct->ext_list_token, 0);
                if (!evt_struct->ext_list) {
@@ -745,6 +744,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
 /**
  * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
  * @cmd:       struct scsi_cmnd with the memory to be mapped
+ * @evt_struct:        struct srp_event_struct to map
  * @srp_cmd:   srp_cmd that contains the memory descriptor
  * @dev:       dma device for which to map dma memory
  *
@@ -778,6 +778,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 /**
  * purge_requests: Our virtual adapter just shut down.  purge any sent requests
  * @hostdata:    the adapter
+ * @error_code:  error code to return as the 'result'
  */
 static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 {
@@ -838,7 +839,7 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
 
 /**
  * ibmvscsi_timeout - Internal command timeout handler
- * @evt_struct:        struct srp_event_struct that timed out
+ * @t: struct srp_event_struct that timed out
  *
  * Called when an internally generated command times out
 */
@@ -1034,8 +1035,8 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
 }
 
 /**
- * ibmvscsi_queue: - The queuecommand function of the scsi template 
- * @cmd:       struct scsi_cmnd to be executed
+ * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template
+ * @cmnd:      struct scsi_cmnd to be executed
  * @done:      Callback function to be called when cmd is completed
 */
 static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
@@ -1342,7 +1343,7 @@ static void fast_fail_rsp(struct srp_event_struct *evt_struct)
 }
 
 /**
- * init_host - Start host initialization
+ * enable_fast_fail() - Start host initialization
  * @hostdata:  ibmvscsi_host_data of host
  *
  * Returns zero if successful.
@@ -1456,16 +1457,15 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 };
 
-/**
- * init_adapter: Start virtual adapter initialization sequence
- *
+/*
+ * init_adapter() - Start virtual adapter initialization sequence
  */
 static void init_adapter(struct ibmvscsi_host_data *hostdata)
 {
        send_mad_adapter_info(hostdata);
 }
 
-/**
+/*
  * sync_completion: Signal that a synchronous command has completed
  * Note that after returning from this call, the evt_struct is freed.
  * the caller waiting on this completion shouldn't touch the evt_struct
@@ -1480,8 +1480,8 @@ static void sync_completion(struct srp_event_struct *evt_struct)
        complete(&evt_struct->comp);
 }
 
-/**
- * ibmvscsi_abort: Abort a command...from scsi host template
+/*
+ * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
  * send this over to the server and wait synchronously for the response
  */
 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
@@ -1618,7 +1618,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
        return SUCCESS;
 }
 
-/**
+/*
  * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host 
  * template send this over to the server and wait synchronously for the 
  * response
@@ -1884,7 +1884,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * ibmvscsi_change_queue_depth - Change the device's queue depth
  * @sdev:      scsi device struct
  * @qdepth:    depth to set
- * @reason:    calling context
  *
  * Return value:
  *     actual depth set
@@ -2214,7 +2213,7 @@ static int ibmvscsi_work(void *data)
        return 0;
 }
 
-/**
+/*
  * Called by bus code for each adapter
  */
 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -2376,7 +2375,7 @@ static int ibmvscsi_resume(struct device *dev)
        return 0;
 }
 
-/**
+/*
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
  * support.
  */
index cc3908c..f3dc60f 100644 (file)
@@ -128,10 +128,10 @@ static bool connection_broken(struct scsi_info *vscsi)
  * This function calls h_free_q then frees the interrupt bit etc.
  * It must release the lock before doing so because of the time it can take
  * for h_free_crq in PHYP
- * NOTE: the caller must make sure that state and or flags will prevent
- *      interrupt handler from scheduling work.
- * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
- *      we can't do it here, because we don't have the lock
+ * NOTE: the caller must make sure that state and/or flags will prevent
+ *        interrupt handler from scheduling work.
+ *       * anyone calling this function may need to set the CRQ_CLOSED flag
+ *        we can't do it here, because we don't have the lock
  *
  * EXECUTION ENVIRONMENT:
  *     Process level
@@ -2670,7 +2670,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
        u64 data_len = 0;
        enum dma_data_direction dir;
        int attr = 0;
-       int rc = 0;
 
        nexus = vscsi->tport.ibmv_nexus;
        /*
@@ -2725,17 +2724,9 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
 
        srp->lun.scsi_lun[0] &= 0x3f;
 
-       rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
-                              cmd->sense_buf, scsilun_to_int(&srp->lun),
-                              data_len, attr, dir, 0);
-       if (rc) {
-               dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
-               spin_lock_bh(&vscsi->intr_lock);
-               list_del(&cmd->list);
-               ibmvscsis_free_cmd_resources(vscsi, cmd);
-               spin_unlock_bh(&vscsi->intr_lock);
-               goto fail;
-       }
+       target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+                         cmd->sense_buf, scsilun_to_int(&srp->lun),
+                         data_len, attr, dir, 0);
        return;
 
 fail:
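The ibmvscsis_parse_cmd() hunk removes the error handling around target_submit_cmd() because the target core changed that interface to return void; failures are now reported asynchronously through the command's completion path rather than as a synchronous return code. A toy sketch of that calling convention, with invented types:

    #include <stdio.h>

    struct cmd {
            int status;
            void (*done)(struct cmd *);
    };

    /* The submit path returns void: any failure is delivered through the
     * command's ->done() callback, never as a synchronous return code. */
    static void submit_cmd(struct cmd *c)
    {
            c->status = 0;                  /* pretend the backend accepted it */
            c->done(c);
    }

    static void cmd_done(struct cmd *c)
    {
            printf("completed, status %d\n", c->status);
    }

    int main(void)
    {
            struct cmd c = { .status = -1, .done = cmd_done };

            submit_cmd(&c);                 /* nothing to check here */
            return 0;
    }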
index 814acc5..9b75e19 100644 (file)
@@ -546,7 +546,6 @@ static int initio_reset_scsi(struct initio_host * host, int seconds)
 /**
  *     initio_init             -       set up an InitIO host adapter
  *     @host: InitIO host adapter
- *     @num_scbs: Number of SCBS
  *     @bios_addr: BIOS address
  *
  *     Set up the host adapter and devices according to the configuration
@@ -866,17 +865,16 @@ static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_b
 
 struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
 {
-       struct scsi_ctrl_blk *tmp, *prev;
+       struct scsi_ctrl_blk *tmp;
        u16 scbp_tarlun;
 
 
-       prev = tmp = host->first_busy;
+       tmp = host->first_busy;
        while (tmp != NULL) {
                scbp_tarlun = (tmp->lun << 8) | (tmp->target);
                if (scbp_tarlun == tarlun) {    /* Unlink this SCB              */
                        break;
                }
-               prev = tmp;
                tmp = tmp->next;
        }
 #if DEBUG_QUEUE
@@ -1888,7 +1886,7 @@ static int int_initio_scsi_rst(struct initio_host * host)
 }
 
 /**
- *     int_initio_scsi_resel   -       Reselection occurred
+ *     int_initio_resel        -       Reselection occurred
  *     @host: InitIO host adapter
  *
  *     A SCSI reselection event has been signalled and the interrupt
@@ -2602,7 +2600,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
 }
 
 /**
- *     i91u_queuecommand       -       Queue a new command if possible
+ *     i91u_queuecommand_lck   -       Queue a new command if possible
  *     @cmd: SCSI command block from the mid layer
  *     @done: Completion handler
  *
@@ -2651,9 +2649,9 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
 }
 
 /**
- *     i91u_biospararm                 -       return the "logical geometry
+ *     i91u_biosparam                -       return the "logical geometry
  *     @sdev: SCSI device
- *     @dev; Matching block device
+ *     @dev: Matching block device
  *     @capacity: Sector size of drive
  *     @info_array: Return space for BIOS geometry
  *
@@ -2728,10 +2726,8 @@ static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
        }
 }
 
-/**
+/*
  *     i91uSCBPost             -       SCSI callback
- *     @host: Pointer to host adapter control block.
- *     @cmnd: Pointer to SCSI control block.
  *
  *     This is the callback routine called when the tulip finishes a
  *     SCSI command.
index e451102..30c30a1 100644 (file)
@@ -5321,7 +5321,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
 }
 
 /**
- * ipr_eh_dev_reset - Reset the device
+ * __ipr_eh_dev_reset - Reset the device
  * @scsi_cmd:  scsi command struct
  *
  * This function issues a device reset to the affected device.
@@ -5583,7 +5583,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 }
 
 /**
- * ipr_eh_abort - Abort a single op
+ * ipr_scan_finished - Report whether scan is done
  * @shost:           scsi host struct
  * @elapsed_time:    elapsed time
  *
@@ -5606,7 +5606,7 @@ static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time
 }
 
 /**
- * ipr_eh_host_reset - Reset the host adapter
+ * ipr_eh_abort - Reset the host adapter
  * @scsi_cmd:  scsi command struct
  *
  * Return value:
@@ -6715,7 +6715,7 @@ static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
 }
 
 /**
- * ipr_info - Get information about the card/driver
+ * ipr_ioa_info - Get information about the card/driver
  * @host:      scsi host struct
  *
  * Return value:
index 7ebfa3c..d690d9c 100644 (file)
 
 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100
 
-/**
- *
- *
+/*
  * The number of milliseconds to wait while a given phy is consuming power
  * before allowing another set of phys to consume power. Ultimately, this will
  * be specified by OEM parameter.
  */
 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
 
-/**
+/*
  * NORMALIZE_PUT_POINTER() -
  *
  * This macro will normalize the completion queue put pointer so its value can
        ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
 
 
-/**
+/*
  * NORMALIZE_EVENT_POINTER() -
  *
  * This macro will normalize the completion queue event entry so its value can
                >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
        )
 
-/**
+/*
  * NORMALIZE_GET_POINTER() -
  *
  * This macro will normalize the completion queue get pointer so its value can
 #define NORMALIZE_GET_POINTER(x) \
        ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
 
-/**
+/*
  * NORMALIZE_GET_POINTER_CYCLE_BIT() -
  *
  * This macro will normalize the completion queue cycle pointer so it matches
 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
        ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
 
-/**
+/*
  * COMPLETION_QUEUE_CYCLE_BIT() -
  *
  * This macro will return the cycle bit of the completion queue entry
@@ -637,7 +635,7 @@ irqreturn_t isci_error_isr(int vec, void *data)
 /**
  * isci_host_start_complete() - This function is called by the core library,
  *    through the ISCI Module, to indicate controller start status.
- * @isci_host: This parameter specifies the ISCI host object
+ * @ihost: This parameter specifies the ISCI host object
  * @completion_status: This parameter specifies the completion status from the
  *    core library.
  *
@@ -670,7 +668,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
  *    use any timeout value, but this method provides the suggested minimum
  *    start timeout value.  The returned value is based upon empirical
  *    information determined as a result of interoperability testing.
- * @controller: the handle to the controller object for which to return the
+ * @ihost: the handle to the controller object for which to return the
  *    suggested start timeout.
  *
  * This method returns the number of milliseconds for the suggested start
@@ -893,7 +891,7 @@ bool is_controller_start_complete(struct isci_host *ihost)
 
 /**
  * sci_controller_start_next_phy - start phy
- * @scic: controller
+ * @ihost: controller
  *
  * If all the phys have been started, then attempt to transition the
  * controller to the READY state and inform the user
@@ -1145,7 +1143,7 @@ void isci_host_completion_routine(unsigned long data)
  *    controller has been quiesced. This method will ensure that all IO
  *    requests are quiesced, phys are stopped, and all additional operation by
  *    the hardware is halted.
- * @controller: the handle to the controller object to stop.
+ * @ihost: the handle to the controller object to stop.
  * @timeout: This parameter specifies the number of milliseconds in which the
  *    stop operation should complete.
  *
@@ -1174,7 +1172,7 @@ static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
  *    considered destructive.  In other words, all current operations are wiped
  *    out.  No IO completions for outstanding devices occur.  Outstanding IO
  *    requests are not aborted or completed at the actual remote device.
- * @controller: the handle to the controller object to reset.
+ * @ihost: the handle to the controller object to reset.
  *
  * Indicate if the controller reset method succeeded or failed in some way.
  * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
@@ -1331,7 +1329,7 @@ static inline void sci_controller_starting_state_exit(struct sci_base_state_mach
 /**
  * sci_controller_set_interrupt_coalescence() - This method allows the user to
  *    configure the interrupt coalescence.
- * @controller: This parameter represents the handle to the controller object
+ * @ihost: This parameter represents the handle to the controller object
  *    for which its interrupt coalesce register is overridden.
  * @coalesce_number: Used to control the number of entries in the Completion
  *    Queue before an interrupt is generated. If the number of entries exceed
@@ -2479,12 +2477,13 @@ struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
 }
 
 /**
+ * sci_controller_allocate_remote_node_context()
  * This method allocates a remote node index and reserves the remote node
  *    context space for use. This method can fail if there are no more remote
  *    node index available.
- * @scic: This is the controller object which contains the set of
+ * @ihost: This is the controller object which contains the set of
  *    free remote node ids
- * @sci_dev: This is the device object which is requesting the a remote node
+ * @idev: This is the device object which is requesting a remote node
  *    id
  * @node_id: This is the remote node id that is assigned to the device if one
  *    is available
@@ -2709,11 +2708,11 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
 /**
  * sci_controller_start_task() - This method is called by the SCIC user to
  *    send/start a framework task management request.
- * @controller: the handle to the controller object for which to start the task
+ * @ihost: the handle to the controller object for which to start the task
  *    management request.
- * @remote_device: the handle to the remote device object for which to start
+ * @idev: the handle to the remote device object for which to start
  *    the task management request.
- * @task_request: the handle to the task request object to start.
+ * @ireq: the handle to the task request object to start.
  */
 enum sci_status sci_controller_start_task(struct isci_host *ihost,
                                          struct isci_remote_device *idev,
index 1b87d90..aa87873 100644 (file)
@@ -339,10 +339,11 @@ done:
 }
 
 /**
- * This method returns the port currently containing this phy. If the phy is
- *    currently contained by the dummy port, then the phy is considered to not
- *    be part of a port.
- * @sci_phy: This parameter specifies the phy for which to retrieve the
+ * phy_get_non_dummy_port() - This method returns the port currently containing
+ * this phy. If the phy is currently contained by the dummy port, then the phy
+ * is considered to not be part of a port.
+ *
+ * @iphy: This parameter specifies the phy for which to retrieve the
  *    containing port.
  *
  * This method returns a handle to a port that contains the supplied phy.
@@ -360,12 +361,8 @@ struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
        return iphy->owning_port;
 }
 
-/**
- * This method will assign a port to the phy object.
- * @out]: iphy This parameter specifies the phy for which to assign a port
- *    object.
- *
- *
+/*
+ * sci_phy_set_port() - This method will assign a port to the phy object.
  */
 void sci_phy_set_port(
        struct isci_phy *iphy,
@@ -398,11 +395,11 @@ enum sci_status sci_phy_initialize(struct isci_phy *iphy,
 }
 
 /**
- * This method assigns the direct attached device ID for this phy.
+ * sci_phy_setup_transport() - This method assigns the direct attached device ID for this phy.
  *
- * @iphy The phy for which the direct attached device id is to
+ * @iphy: The phy for which the direct attached device id is to
  *       be assigned.
- * @device_id The direct attached device ID to assign to the phy.
+ * @device_id: The direct attached device ID to assign to the phy.
  *       This will either be the RNi for the device or an invalid RNi if there
  *       is no current device assigned to the phy.
  */
@@ -597,7 +594,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
 /**
  * sci_phy_complete_link_training - perform processing common to
  *    all protocols upon completion of link training.
- * @sci_phy: This parameter specifies the phy object for which link training
+ * @iphy: This parameter specifies the phy object for which link training
  *    has completed.
  * @max_link_rate: This parameter specifies the maximum link rate to be
  *    associated with this phy.
@@ -1167,8 +1164,8 @@ static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine
 }
 
 /**
- *
- * @sci_phy: This is the struct isci_phy object to stop.
+ * scu_link_layer_stop_protocol_engine()
+ * @iphy: This is the struct isci_phy object to stop.
  *
  * This method will stop the struct isci_phy object. This does not reset the
  * protocol engine it just suspends it and places it in a state where it will
@@ -1219,7 +1216,8 @@ static void scu_link_layer_start_oob(struct isci_phy *iphy)
 }
 
 /**
- *
+ * scu_link_layer_tx_hard_reset()
+ * @iphy: This is the struct isci_phy object to stop.
  *
  * This method will transmit a hard reset request on the specified phy. The SCU
  * hardware requires that we reset the OOB state machine and set the hard reset
@@ -1420,7 +1418,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
 /**
  * isci_phy_control() - This function is one of the SAS Domain Template
  *    functions. This is a phy management function.
- * @phy: This parameter specifies the sphy being controlled.
+ * @sas_phy: This parameter specifies the sphy being controlled.
  * @func: This parameter specifies the phy control function being invoked.
  * @buf: This parameter is specific to the phy function being invoked.
  *
index 448a8c3..1609aba 100644 (file)
@@ -62,7 +62,7 @@
 
 #undef C
 #define C(a) (#a)
-const char *port_state_name(enum sci_port_states state)
+static const char *port_state_name(enum sci_port_states state)
 {
        static const char * const strings[] = PORT_STATES;
 
@@ -115,9 +115,9 @@ static u32 sci_port_get_phys(struct isci_port *iport)
 /**
  * sci_port_get_properties() - This method simply returns the properties
  *    regarding the port, such as: physical index, protocols, sas address, etc.
- * @port: this parameter specifies the port for which to retrieve the physical
+ * @iport: this parameter specifies the port for which to retrieve the physical
  *    index.
- * @properties: This parameter specifies the properties structure into which to
+ * @prop: This parameter specifies the properties structure into which to
  *    copy the requested information.
  *
  * Indicate if the user specified a valid port. SCI_SUCCESS This value is
@@ -233,8 +233,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
  * isci_port_link_down() - This function is called by the sci core when a link
  *    becomes inactive.
  * @isci_host: This parameter specifies the isci host object.
- * @phy: This parameter specifies the isci phy with the active link.
- * @port: This parameter specifies the isci port with the active link.
+ * @isci_phy: This parameter specifies the isci phy with the active link.
+ * @isci_port: This parameter specifies the isci port with the active link.
  *
  */
 static void isci_port_link_down(struct isci_host *isci_host,
@@ -308,7 +308,7 @@ static void port_state_machine_change(struct isci_port *iport,
 /**
  * isci_port_hard_reset_complete() - This function is called by the sci core
  *    when the hard reset complete notification has been received.
- * @port: This parameter specifies the sci port with the active link.
+ * @isci_port: This parameter specifies the sci port with the active link.
  * @completion_status: This parameter specifies the core status for the reset
  *    process.
  *
@@ -395,9 +395,10 @@ bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
 }
 
 /**
- *
- * @sci_port: This is the port object for which to determine if the phy mask
+ * sci_port_is_phy_mask_valid()
+ * @iport: This is the port object for which to determine if the phy mask
  *    can be supported.
+ * @phy_mask: Phy mask belonging to this port
  *
  * This method will return a true value if the port's phy mask can be supported
  * by the SCU. The following is a list of valid PHY mask configurations for
@@ -533,7 +534,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
 /**
  * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
  *
- * @sci_port: logical port on which we need to create the remote node context
+ * @iport: logical port on which we need to create the remote node context
  * @rni: remote node index for this remote node context.
  *
  * This routine will construct a dummy remote node context data structure
@@ -677,8 +678,8 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
 
 /**
  * sci_port_general_link_up_handler - phy can be assigned to port?
- * @sci_port: sci_port object for which has a phy that has gone link up.
- * @sci_phy: This is the struct isci_phy object that has gone link up.
+ * @iport: sci_port object which has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
  * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
  *
  * Determine if this phy can be assigned to this port. If the phy is
@@ -716,10 +717,11 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
 
 
 /**
+ * sci_port_is_wide()
  * This method returns false if the port only has a single phy object assigned.
  *     If there are no phys or more than one phy then the method will return
  *    true.
- * @sci_port: The port for which the wide port condition is to be checked.
+ * @iport: The port for which the wide port condition is to be checked.
  *
  * bool true is returned if this is a wide port. false is returned if
  * this is a narrow port.
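
Per that description, the wide-port test reduces to "anything but exactly one
phy". A sketch consistent with the comment, assuming the port keeps its phys
in a phy_table[SCI_MAX_PHYS] array as elsewhere in isci (the helper name is
illustrative):

    static bool example_port_is_wide(struct isci_port *iport)
    {
            u32 index, phy_count = 0;

            for (index = 0; index < SCI_MAX_PHYS; index++)
                    if (iport->phy_table[index])
                            phy_count++;

            /* false only when exactly one phy is assigned */
            return phy_count != 1;
    }
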
@@ -739,12 +741,13 @@ static bool sci_port_is_wide(struct isci_port *iport)
 }
 
 /**
+ * sci_port_link_detected()
  * This method is called by the PHY object when the link is detected. if the
  *    port wants the PHY to continue on to the link up state then the port
  *    layer must return true.  If the port object returns false the phy object
  *    must halt its attempt to go link up.
- * @sci_port: The port associated with the phy object.
- * @sci_phy: The phy object that is trying to go link up.
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
  *
  * true if the phy object can continue to the link up condition. true Is
  * returned if this phy can continue to the ready state. false Is returned if
@@ -817,10 +820,8 @@ done:
 
 /* --------------------------------------------------------------------------- */
 
-/**
+/*
  * This function updates the hardware's VIIT entry for this port.
- *
- *
  */
 static void sci_port_update_viit_entry(struct isci_port *iport)
 {
@@ -874,7 +875,7 @@ static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
 
 /**
  * sci_port_post_dummy_request() - post dummy/workaround request
- * @sci_port: port to post task
+ * @iport: port to post task
  *
  * Prevent the hardware scheduler from posting new requests to the front
  * of the scheduler queue causing a starvation problem for currently
@@ -899,10 +900,11 @@ static void sci_port_post_dummy_request(struct isci_port *iport)
 }
 
 /**
- * This routine will abort the dummy request.  This will alow the hardware to
+ * sci_port_abort_dummy_request()
+ * This routine will abort the dummy request.  This will allow the hardware to
  * power down parts of the silicon to save power.
  *
- * @sci_port: The port on which the task must be aborted.
+ * @iport: The port on which the task must be aborted.
  *
  */
 static void sci_port_abort_dummy_request(struct isci_port *iport)
@@ -923,8 +925,8 @@ static void sci_port_abort_dummy_request(struct isci_port *iport)
 }
 
 /**
- *
- * @sci_port: This is the struct isci_port object to resume.
+ * sci_port_resume_port_task_scheduler()
+ * @iport: This is the struct isci_port object to resume.
  *
  * This method will resume the port task scheduler for this port object. none
  */
@@ -1014,8 +1016,8 @@ static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
 }
 
 /**
- *
- * @object: This is the object which is cast to a struct isci_port object.
+ * sci_port_ready_substate_operational_exit()
+ * @sm: This is the object which is cast to a struct isci_port object.
  *
  * This method will perform the actions required by the struct isci_port on
  * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
@@ -1186,9 +1188,9 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
 }
 
 /**
- * sci_port_add_phy() -
- * @sci_port: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * sci_port_add_phy()
+ * @iport: This parameter specifies the port in which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
  *
  * This method will add a PHY to the selected port. This method returns an
  * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
@@ -1257,9 +1259,9 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 }
 
 /**
- * sci_port_remove_phy() -
- * @sci_port: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * sci_port_remove_phy()
+ * @iport: This parameter specifies the port from which the phy will be removed.
+ * @iphy: This parameter is the phy which is to be removed from the port.
  *
  * This method will remove the PHY from the selected PORT. This method returns
  * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
index b1c1975..c382a25 100644 (file)
@@ -73,7 +73,7 @@ enum SCIC_SDS_APC_ACTIVITY {
  * ****************************************************************************** */
 
 /**
- *
+ * sci_sas_address_compare()
  * @address_one: A SAS Address to be compared.
  * @address_two: A SAS Address to be compared.
  *
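
The comparator is a three-way compare in the memcmp style. A sketch, under the
assumption that sci_sas_address carries the address as high/low u32 halves
(the function name here is illustrative):

    static s32 example_sas_address_compare(struct sci_sas_address one,
                                           struct sci_sas_address two)
    {
            /* Order by the high half first, then the low half. */
            if (one.high > two.high)
                    return 1;
            if (one.high < two.high)
                    return -1;
            if (one.low > two.low)
                    return 1;
            if (one.low < two.low)
                    return -1;
            return 0;       /* the two addresses are equal */
    }
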
@@ -102,9 +102,9 @@ static s32 sci_sas_address_compare(
 }
 
 /**
- *
- * @controller: The controller object used for the port search.
- * @phy: The phy object to match.
+ * sci_port_configuration_agent_find_port()
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
  *
  * This routine will find a matching port for the phy.  This means that the
  * port and phy both have the same broadcast sas address and same received sas
@@ -145,8 +145,8 @@ static struct isci_port *sci_port_configuration_agent_find_port(
 }
 
 /**
- *
- * @controller: This is the controller object that contains the port agent
+ * sci_port_configuration_agent_validate_ports()
+ * @ihost: This is the controller object that contains the port agent
  * @port_agent: This is the port configuration agent for the controller.
  *
  * This routine will validate that the port configuration is correct for the SCU
@@ -373,15 +373,16 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost,
 }
 
 /**
- *
- * @controller: This is the controller object that receives the link down
+ * sci_mpc_agent_link_down()
+ * @ihost: This is the controller object that receives the link down
  *    notification.
- * @port: This is the port object associated with the phy.  If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
  *    associated port this is a NULL.  The port is an invalid
  *    handle only if the phy was never part of this port.  This happens when
  *    the phy is not broadcasting the same SAS address as the other phys in the
  *    assigned port.
- * @phy: This is the phy object which has gone link down.
+ * @iphy: This is the phy object which has gone link down.
  *
  * This function handles the manual port configuration link down notifications.
  * Since all ports and phys are associated at initialization time we just turn
@@ -590,11 +591,12 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
 
 /**
  * sci_apc_agent_link_up - handle apc link up events
- * @scic: This is the controller object that receives the link up
+ * @ihost: This is the controller object that receives the link up
  *    notification.
- * @sci_port: This is the port object associated with the phy.  If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy.  If there is no
  *    associated port this is a NULL.
- * @sci_phy: This is the phy object which has gone link up.
+ * @iphy: This is the phy object which has gone link up.
  *
  * This method handles the automatic port configuration for link up
  * notifications. Is it possible to get a link down notification from a phy
@@ -620,9 +622,10 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
 }
 
 /**
- *
- * @controller: This is the controller object that receives the link down
+ * sci_apc_agent_link_down()
+ * @ihost: This is the controller object that receives the link down
  *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
  * @iport: This is the port object associated with the phy.  If there is no
  *    associated port this is a NULL.
  * @iphy: This is the phy object which has gone link down.
@@ -697,9 +700,7 @@ done:
  * Public port configuration agent routines
  * ****************************************************************************** */
 
-/**
- *
- *
+/*
  * This method will construct the port configuration agent for operation. This
  * call is universal for both manual port configuration and automatic port
  * configuration modes.
index c3f540b..866950a 100644 (file)
@@ -288,8 +288,9 @@ enum sci_status isci_remote_device_terminate_requests(
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready. We mark the isci device as ready (not
 *    "ready_for_io") and signal the waiting proccess.
-* @isci_host: This parameter specifies the isci host object.
-* @isci_device: This parameter specifies the remote device
+* @ihost: This parameter specifies the isci host object.
+* @idev: This parameter specifies the remote device
+* @reason: Reason to switch on
 *
 * sci_lock is held on entrance to this function.
 */
@@ -1000,7 +1001,7 @@ static void sci_remote_device_initial_state_enter(struct sci_base_state_machine
 
 /**
  * sci_remote_device_destruct() - free remote node context and destruct
- * @remote_device: This parameter specifies the remote device to be destructed.
+ * @idev: This parameter specifies the remote device to be destructed.
  *
  * Remote device objects are a limited resource.  As such, they must be
  * protected.  Thus calls to construct and destruct are mutually exclusive and
@@ -1236,8 +1237,8 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
 
 /**
  * sci_remote_device_construct() - common construction
- * @sci_port: SAS/SATA port through which this device is accessed.
- * @sci_dev: remote device to construct
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
  *
  * This routine just performs benign initialization and does not
  * allocate the remote_node_context which is left to
@@ -1256,7 +1257,7 @@ static void sci_remote_device_construct(struct isci_port *iport,
                                               SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
 }
 
-/**
+/*
  * sci_remote_device_da_construct() - construct direct attached device.
  *
  * The information (e.g. IAF, Signature FIS, etc.) necessary to build
@@ -1294,7 +1295,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
        return SCI_SUCCESS;
 }
 
-/**
+/*
  * sci_remote_device_ea_construct() - construct expander attached device
  *
  * Remote node context(s) is/are a global resource allocated by this
@@ -1384,7 +1385,7 @@ static bool isci_remote_device_test_resume_done(
        return done;
 }
 
-void isci_remote_device_wait_for_resume_from_abort(
+static void isci_remote_device_wait_for_resume_from_abort(
        struct isci_host *ihost,
        struct isci_remote_device *idev)
 {
@@ -1439,7 +1440,7 @@ enum sci_status isci_remote_device_resume_from_abort(
  * sci_remote_device_start() - This method will start the supplied remote
  *    device.  This method enables normal IO requests to flow through to the
  *    remote device.
- * @remote_device: This parameter specifies the device to be started.
+ * @idev: This parameter specifies the device to be started.
  * @timeout: This parameter specifies the number of milliseconds in which the
  *    start operation should complete.
  *
@@ -1501,10 +1502,11 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
 }
 
 /**
+ * isci_remote_device_alloc()
  * This function builds the isci_remote_device when a libsas dev_found message
  *    is received.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port connected to this device.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
  *
  * pointer to new isci_remote_device.
  */
@@ -1549,8 +1551,8 @@ void isci_remote_device_release(struct kref *kref)
 /**
  * isci_remote_device_stop() - This function is called internally to stop the
  *    remote device.
- * @isci_host: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
  *
  * The status of the ihost request to stop.
  */
@@ -1585,8 +1587,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 /**
  * isci_remote_device_gone() - This function is called by libsas when a domain
  *    device is removed.
- * @domain_device: This parameter specifies the libsas domain device.
- *
+ * @dev: This parameter specifies the libsas domain device.
  */
 void isci_remote_device_gone(struct domain_device *dev)
 {
@@ -1606,7 +1607,7 @@ void isci_remote_device_gone(struct domain_device *dev)
  *    device is discovered. A remote device object is created and started. The
  *    function then sleeps until the sci core device started message is
  *    received.
- * @domain_device: This parameter specifies the libsas domain device.
+ * @dev: This parameter specifies the libsas domain device.
  *
  * status, zero indicates success.
  */
index 68333f5..77ba029 100644 (file)
@@ -74,7 +74,7 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
 #undef C
 
 /**
- *
+ * sci_remote_node_context_is_ready()
  * @sci_rnc: The state of the remote node context object to check.
  *
  * This method will return true if the remote node context is in a READY state
@@ -163,12 +163,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
        rnc->ssp.oaf_source_zone_group = 0;
        rnc->ssp.oaf_more_compatibility_features = 0;
 }
-/**
- *
- * @sci_rnc:
- * @callback:
- * @callback_parameter:
- *
+/*
  * This method will setup the remote node context object so it will transition
  * to its ready state.  If the remote node context is already setup to
  * transition to its final state then this function does nothing. none
@@ -202,9 +197,7 @@ static void sci_remote_node_context_setup_to_destroy(
        wake_up(&ihost->eventq);
 }
 
-/**
- *
- *
+/*
  * This method just calls the user callback function and then resets the
  * callback.
  */
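
The call-then-reset idiom is normally written so the callback slot is cleared
before the invocation, which keeps a callback that re-arms the RNC from seeing
stale completion state. A generic sketch; the typedef name is illustrative and
the field names are assumptions:

    typedef void (*example_rnc_callback)(void *cookie);

    static void example_notify_user(struct sci_remote_node_context *sci_rnc)
    {
            example_rnc_callback cb = sci_rnc->user_callback;

            sci_rnc->user_callback = NULL;  /* reset before invoking */
            if (cb)
                    cb(sci_rnc->user_cookie);
    }
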
index 301b314..1bcaf52 100644 (file)
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/**
+/*
  * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
  *    public, protected, and private methods.
- *
- *
  */
 #include "remote_node_table.h"
 #include "remote_node_context.h"
 
 /**
- *
+ * sci_remote_node_table_get_group_index()
  * @remote_node_table: This is the remote node index table from which the
  *    selection will be made.
  * @group_table_index: This is the index to the group table from which to
@@ -98,10 +96,10 @@ static u32 sci_remote_node_table_get_group_index(
 }
 
 /**
- *
- * @out]: remote_node_table This the remote node table in which to clear the
+ * sci_remote_node_table_clear_group_index()
+ * @remote_node_table: This is the remote node table in which to clear the
  *    selector.
- * @set_index: This is the remote node selector in which the change will be
+ * @group_table_index: This is the remote node selector in which the change will be
  *    made.
  * @group_index: This is the bit index in the table to be modified.
  *
@@ -128,8 +126,8 @@ static void sci_remote_node_table_clear_group_index(
 }
 
 /**
- *
- * @out]: remote_node_table This the remote node table in which to set the
+ * sci_remote_node_table_set_group_index()
+ * @remote_node_table: This is the remote node table in which to set the
  *    selector.
  * @group_table_index: This is the remote node selector in which the change
  *    will be made.
@@ -158,8 +156,8 @@ static void sci_remote_node_table_set_group_index(
 }
 
 /**
- *
- * @out]: remote_node_table This is the remote node table in which to modify
+ * sci_remote_node_table_set_node_index()
+ * @remote_node_table: This is the remote node table in which to modify
  *    the remote node availability.
  * @remote_node_index: This is the remote node index that is being returned to
  *    the table.
@@ -191,8 +189,8 @@ static void sci_remote_node_table_set_node_index(
 }
 
 /**
- *
- * @out]: remote_node_table This is the remote node table from which to clear
+ * sci_remote_node_table_clear_node_index()
+ * @remote_node_table: This is the remote node table from which to clear
  *    the available remote node bit.
  * @remote_node_index: This is the remote node index which is to be cleared
  *    from the table.
@@ -224,8 +222,8 @@ static void sci_remote_node_table_clear_node_index(
 }
 
 /**
- *
- * @out]: remote_node_table The remote node table from which the slot will be
+ * sci_remote_node_table_clear_group()
+ * @remote_node_table: The remote node table from which the slot will be
  *    cleared.
  * @group_index: The index for the slot that is to be cleared.
  *
@@ -252,9 +250,8 @@ static void sci_remote_node_table_clear_group(
        remote_node_table->available_remote_nodes[dword_location] = dword_value;
 }
 
-/**
- *
- * @remote_node_table:
+/*
+ * sci_remote_node_table_set_group()
  *
  * This method sets an entire remote node group in the remote node table.
  */
@@ -280,7 +277,7 @@ static void sci_remote_node_table_set_group(
 }
 
 /**
- *
+ * sci_remote_node_table_get_group_value()
  * @remote_node_table: This is the remote node table that for which the group
  *    value is to be returned.
  * @group_index: This is the group index to use to find the group value.
@@ -307,8 +304,8 @@ static u8 sci_remote_node_table_get_group_value(
 }
 
 /**
- *
- * @out]: remote_node_table The remote that which is to be initialized.
+ * sci_remote_node_table_initialize()
+ * @remote_node_table: The remote node table which is to be initialized.
  * @remote_node_entries: The number of entries to put in the table.
  *
  * This method will initialize the remote node table for use. none
@@ -365,10 +362,10 @@ void sci_remote_node_table_initialize(
 }
 
 /**
- *
- * @out]: remote_node_table The remote node table from which to allocate a
+ * sci_remote_node_table_allocate_single_remote_node()
+ * @remote_node_table: The remote node table from which to allocate a
  *    remote node.
- * @table_index: The group index that is to be used for the search.
+ * @group_table_index: The group index that is to be used for the search.
  *
  * This method will allocate a single RNi from the remote node table.  The
  * table index will determine from which remote node group table to search.
@@ -425,10 +422,10 @@ static u16 sci_remote_node_table_allocate_single_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_allocate_triple_remote_node()
  * @remote_node_table: This is the remote node table from which to allocate the
  *    remote node entries.
- * @group_table_index: THis is the group table index which must equal two (2)
+ * @group_table_index: This is the group table index which must equal two (2)
  *    for this operation.
  *
  * This method will allocate three consecutive remote node context entries. If
@@ -462,7 +459,7 @@ static u16 sci_remote_node_table_allocate_triple_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_allocate_remote_node()
  * @remote_node_table: This is the remote node table from which the remote node
  *    allocation is to take place.
  * @remote_node_count: This is the remote node count which is one of
@@ -505,9 +502,10 @@ u16 sci_remote_node_table_allocate_remote_node(
 }
 
 /**
- *
- * @remote_node_table:
- *
+ * sci_remote_node_table_release_single_remote_node()
+ * @remote_node_table: This is the remote node table from which the remote node
+ *    release is to take place.
+ * @remote_node_index: This is the remote node index that is being released.
  * This method will free a single remote node index back to the remote node
  * table.  This routine will update the remote node groups.
  */
@@ -550,9 +548,10 @@ static void sci_remote_node_table_release_single_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_release_triple_remote_node()
  * @remote_node_table: This is the remote node table to which the remote node
  *    index is to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
  *
  * This method will release a group of three consecutive remote nodes back to
  * the free remote nodes.
@@ -573,11 +572,12 @@ static void sci_remote_node_table_release_triple_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_release_remote_node_index()
  * @remote_node_table: The remote node table to which the remote node index is
  *    to be freed.
  * @remote_node_count: This is the count of consecutive remote nodes that are
  *    to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
  *
  * This method will release the remote node index back into the remote node
  * table free pool.
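
All of the set/clear helpers above are conventional u32-bitmap arithmetic over
the available_remote_nodes array: index / 32 selects the dword and index % 32
selects the bit within it. A generic sketch of the two primitive operations
(the helper names and the bare array parameter are illustrative):

    /* Mark a remote node index available (set its bit). */
    static void example_mark_free(u32 *available, u16 index)
    {
            available[index / 32] |= 1U << (index % 32);
    }

    /* Mark a remote node index in use (clear its bit). */
    static void example_mark_busy(u32 *available, u16 index)
    {
            available[index / 32] &= ~(1U << (index % 32));
    }
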
index 58e6216..e7c6cb4 100644 (file)
@@ -207,11 +207,8 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
                SCI_CONTROLLER_INVALID_IO_TAG;
 }
 
-/**
+/*
  * This method will fill in the SCU Task Context for any type of SSP request.
- * @sci_req:
- * @task_context:
- *
  */
 static void scu_ssp_request_construct_task_context(
        struct isci_request *ireq,
@@ -410,10 +407,8 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
        tc->ref_tag_seed_gen = 0;
 }
 
-/**
+/*
  * This method will fill in the SCU Task Context for a SSP IO request.
- * @sci_req:
- *
  */
 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
                                                      enum dma_data_direction dir,
@@ -456,17 +451,16 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
 }
 
 /**
- * This method will fill in the SCU Task Context for a SSP Task request.  The
- *    following important settings are utilized: -# priority ==
- *    SCU_TASK_PRIORITY_HIGH.  This ensures that the task request is issued
- *    ahead of other task destined for the same Remote Node. -# task_type ==
- *    SCU_TASK_TYPE_IOREAD.  This simply indicates that a normal request type
- *    (i.e. non-raw frame) is being utilized to perform task management. -#
- *    control_frame == 1.  This ensures that the proper endianess is set so
- *    that the bytes are transmitted in the right order for a task frame.
- * @sci_req: This parameter specifies the task request object being
- *    constructed.
- *
+ * scu_ssp_task_request_construct_task_context() - This method will fill in
+ *    the SCU Task Context for a SSP Task request.  The following important
+ *    settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH.  This
+ *    ensures that the task request is issued ahead of other task destined
+ *    for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD.  This
+ *    simply indicates that a normal request type (i.e. non-raw frame) is
+ *    being utilized to perform task management. -# control_frame == 1.  This
+ *    ensures that the proper endianness is set so that the bytes are
+ *    transmitted in the right order for a task frame.
+ * @ireq: This parameter specifies the task request object being constructed.
  */
 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
 {
@@ -484,9 +478,10 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
 }
 
 /**
+ * scu_sata_request_construct_task_context()
  * This method will fill in the SCU Task Context for any type of SATA
  *    request.  This is called from the various SATA constructors.
- * @sci_req: The general IO request object which is to be used in
+ * @ireq: The general IO request object which is to be used in
  *    constructing the SCU task context.
  * @task_context: The buffer pointer for the SCU task context which is being
  *    constructed.
@@ -593,9 +588,9 @@ static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
        return SCI_SUCCESS;
 }
 
-/**
- *
- * @sci_req: This parameter specifies the request to be constructed as an
+/*
+ * sci_stp_optimized_request_construct()
+ * @ireq: This parameter specifies the request to be constructed as an
  *    optimized request.
  * @optimized_task_type: This parameter specifies whether the request is to be
  *    an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
@@ -778,11 +773,11 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
        return status;
 }
 
+#define SCU_TASK_CONTEXT_SRAM 0x200000
 /**
  * sci_req_tx_bytes - bytes transferred when reply underruns request
  * @ireq: request that was terminated early
  */
-#define SCU_TASK_CONTEXT_SRAM 0x200000
 static u32 sci_req_tx_bytes(struct isci_request *ireq)
 {
        struct isci_host *ihost = ireq->owning_controller;
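
The reshuffle above matters because kernel-doc binds a /** block to the next
declaration it sees; with the #define sitting between the comment and the
function, the header was effectively documenting SCU_TASK_CONTEXT_SRAM rather
than sci_req_tx_bytes(). After the move the pairing reads:

    #define SCU_TASK_CONTEXT_SRAM 0x200000
    /**
     * sci_req_tx_bytes - bytes transferred when reply underruns request
     * @ireq: request that was terminated early
     */
    static u32 sci_req_tx_bytes(struct isci_request *ireq);
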
@@ -1396,10 +1391,10 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
 }
 
 /**
- *
- * @stp_request: The request that is used for the SGL processing.
- * @data_buffer: The buffer of data to be copied.
- * @length: The length of the data transfer.
+ * sci_stp_request_pio_data_in_copy_data_buffer()
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
  *
  * Copy the data from the buffer for the length specified to the IO request SGL
  * specified data region. enum sci_status
@@ -1443,8 +1438,8 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
 }
 
 /**
- *
- * @sci_req: The PIO DATA IN request that is to receive the data.
+ * sci_stp_request_pio_data_in_copy_data()
+ * @stp_req: The PIO DATA IN request that is to receive the data.
  * @data_buffer: The buffer to copy from.
  *
  * Copy the data buffer to the io request data region. enum sci_status
@@ -2452,7 +2447,7 @@ sci_io_request_tc_completion(struct isci_request *ireq,
  * isci_request_process_response_iu() - This function sets the status and
  *    response iu, in the task struct, from the request object for the upper
  *    layer driver.
- * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
  * @resp_iu: This parameter points to the response iu of the completed request.
  * @dev: This parameter specifies the linux device struct.
  *
@@ -2485,6 +2480,7 @@ static void isci_request_process_response_iu(
  * isci_request_set_open_reject_status() - This function prepares the I/O
  *    completion for OPEN_REJECT conditions.
  * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
  * @response_ptr: This parameter specifies the service response for the I/O.
  * @status_ptr: This parameter specifies the exec status for the I/O.
  * @open_rej_reason: This parameter specifies the encoded reason for the
@@ -2509,7 +2505,9 @@ static void isci_request_set_open_reject_status(
 /**
  * isci_request_handle_controller_specific_errors() - This function decodes
  *    controller-specific I/O completion error conditions.
+ * @idev: Remote device
  * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
  * @response_ptr: This parameter specifies the service response for the I/O.
  * @status_ptr: This parameter specifies the exec status for the I/O.
  *
@@ -3326,7 +3324,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq)
  * @ihost: This parameter specifies the ISCI host object
  * @request: This parameter points to the isci_request object allocated in the
  *    request construct function.
- * @sci_device: This parameter is the handle for the sci core's remote device
+ * @idev: This parameter is the handle for the sci core's remote device
  *    object that is the destination for this request.
  *
  * SCI_SUCCESS on successful completion, or specific failure code.
index 26fa1a4..62062ed 100644 (file)
@@ -369,7 +369,7 @@ static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
        tmf->io_tag = old_request->io_tag;
 }
 
-/**
+/*
  * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
  *    Template functions.
  * @lun: This parameter specifies the lun to be reset.
@@ -668,7 +668,6 @@ int isci_task_clear_task_set(
  *    returned, libsas turns this into a LUN reset; when FUNC_FAILED is
  *    returned, libsas will turn this into a target reset
  * @task: This parameter specifies the sas task being queried.
- * @lun: This parameter specifies the lun associated with this request.
  *
  * status, zero indicates success.
  */
index b43b5f6..509eacd 100644 (file)
@@ -2248,7 +2248,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
 EXPORT_SYMBOL(fc_slave_alloc);
 
 /**
- * fc_fcp_destory() - Tear down the FCP layer for a given local port
+ * fc_fcp_destroy() - Tear down the FCP layer for a given local port
  * @lport: The local port that no longer needs the FCP layer
  */
 void fc_fcp_destroy(struct fc_lport *lport)
index 2282654..78bd317 100644 (file)
@@ -703,7 +703,7 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
 }
 
 /**
- * fc_rport_enter_ready() - Enter the ready state and start discovery
+ * fc_lport_enter_ready() - Enter the ready state and start discovery
  * @lport: The local port that is ready
  */
 static void fc_lport_enter_ready(struct fc_lport *lport)
@@ -747,7 +747,7 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
 }
 
 /**
- * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint
+ * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
  * @lport: The local port which will have its Port ID set.
  * @port_id: The new port ID.
  *
@@ -1393,7 +1393,7 @@ static struct fc_rport_operations fc_lport_rport_ops = {
 };
 
 /**
- * fc_rport_enter_dns() - Create a fc_rport for the name server
+ * fc_lport_enter_dns() - Create a fc_rport for the name server
  * @lport: The local port requesting a remote port for the name server
  */
 static void fc_lport_enter_dns(struct fc_lport *lport)
@@ -1509,7 +1509,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
 }
 
 /**
- * fc_rport_enter_fdmi() - Create a fc_rport for the management server
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
  * @lport: The local port requesting a remote port for the management server
  */
 static void fc_lport_enter_fdmi(struct fc_lport *lport)
@@ -1640,7 +1640,7 @@ err:
 EXPORT_SYMBOL(fc_lport_logo_resp);
 
 /**
- * fc_rport_enter_logo() - Logout of the fabric
+ * fc_lport_enter_logo() - Logout of the fabric
  * @lport: The local port to be logged out
  */
 static void fc_lport_enter_logo(struct fc_lport *lport)
@@ -1782,7 +1782,7 @@ err:
 EXPORT_SYMBOL(fc_lport_flogi_resp);
 
 /**
- * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
+ * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
  * @lport: Fibre Channel local port to be logged in to the fabric
  */
 static void fc_lport_enter_flogi(struct fc_lport *lport)
index 5600320..cd0fb8c 100644 (file)
@@ -1486,7 +1486,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 }
 
 /**
- * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
  * @sp:               The sequence the ADISC response was on
  * @fp:               The ADISC response frame
  * @rdata_arg: The remote port that sent the ADISC response
index 024e5a5..c6f527d 100644 (file)
@@ -35,46 +35,40 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
        /* ts->resp == SAS_TASK_COMPLETE */
        /* task delivered, what happened afterwards? */
        switch (ts->stat) {
-               case SAS_DEV_NO_RESPONSE:
-                       return AC_ERR_TIMEOUT;
-
-               case SAS_INTERRUPTED:
-               case SAS_PHY_DOWN:
-               case SAS_NAK_R_ERR:
-                       return AC_ERR_ATA_BUS;
-
-
-               case SAS_DATA_UNDERRUN:
-                       /*
-                        * Some programs that use the taskfile interface
-                        * (smartctl in particular) can cause underrun
-                        * problems.  Ignore these errors, perhaps at our
-                        * peril.
-                        */
-                       return 0;
-
-               case SAS_DATA_OVERRUN:
-               case SAS_QUEUE_FULL:
-               case SAS_DEVICE_UNKNOWN:
-               case SAS_SG_ERR:
-                       return AC_ERR_INVALID;
-
-               case SAS_OPEN_TO:
-               case SAS_OPEN_REJECT:
-                       pr_warn("%s: Saw error %d.  What to do?\n",
-                               __func__, ts->stat);
-                       return AC_ERR_OTHER;
-
-               case SAM_STAT_CHECK_CONDITION:
-               case SAS_ABORTED_TASK:
-                       return AC_ERR_DEV;
-
-               case SAS_PROTO_RESPONSE:
-                       /* This means the ending_fis has the error
-                        * value; return 0 here to collect it */
-                       return 0;
-               default:
-                       return 0;
+       case SAS_DEV_NO_RESPONSE:
+               return AC_ERR_TIMEOUT;
+       case SAS_INTERRUPTED:
+       case SAS_PHY_DOWN:
+       case SAS_NAK_R_ERR:
+               return AC_ERR_ATA_BUS;
+       case SAS_DATA_UNDERRUN:
+               /*
+                * Some programs that use the taskfile interface
+                * (smartctl in particular) can cause underrun
+                * problems.  Ignore these errors, perhaps at our
+                * peril.
+                */
+               return 0;
+       case SAS_DATA_OVERRUN:
+       case SAS_QUEUE_FULL:
+       case SAS_DEVICE_UNKNOWN:
+       case SAS_SG_ERR:
+               return AC_ERR_INVALID;
+       case SAS_OPEN_TO:
+       case SAS_OPEN_REJECT:
+               pr_warn("%s: Saw error %d.  What to do?\n",
+                       __func__, ts->stat);
+               return AC_ERR_OTHER;
+       case SAM_STAT_CHECK_CONDITION:
+       case SAS_ABORTED_TASK:
+               return AC_ERR_DEV;
+       case SAS_PROTO_RESPONSE:
+               /* This means the ending_fis has the error
+                * value; return 0 here to collect it
+                */
+               return 0;
+       default:
+               return 0;
        }
 }
 
index 161c9b3..9f5068f 100644 (file)
@@ -75,7 +75,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
                struct dev_to_host_fis *fis =
                        (struct dev_to_host_fis *) dev->frame_rcvd;
                if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
-                   fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
+                   fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96
                    && (fis->device & ~0x10) == 0)
                        dev->dev_type = SAS_SATA_PM;
                else
index 8d6bcc1..6d583e8 100644 (file)
@@ -553,7 +553,7 @@ static int sas_ex_manuf_info(struct domain_device *dev)
 
        mi_req[1] = SMP_REPORT_MANUF_INFO;
 
-       res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
+       res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp, MI_RESP_SIZE);
        if (res) {
                pr_notice("MI: ex %016llx failed:0x%x\n",
                          SAS_ADDR(dev->sas_addr), res);
@@ -594,13 +594,13 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
 
        pc_req[1] = SMP_PHY_CONTROL;
        pc_req[9] = phy_id;
-       pc_req[10]= phy_func;
+       pc_req[10] = phy_func;
        if (rates) {
                pc_req[32] = rates->minimum_linkrate << 4;
                pc_req[33] = rates->maximum_linkrate << 4;
        }
 
-       res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
+       res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp, PC_RESP_SIZE);
        if (res) {
                pr_err("ex %016llx phy%02d PHY control failed: %d\n",
                       SAS_ADDR(dev->sas_addr), phy_id, res);
@@ -678,7 +678,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
        req[9] = phy->number;
 
        res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
-                                   resp, RPEL_RESP_SIZE);
+                              resp, RPEL_RESP_SIZE);
 
        if (res)
                goto out;
@@ -714,7 +714,7 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
        rps_req[9] = phy_id;
 
        res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
-                                   rps_resp, RPS_RESP_SIZE);
+                              rps_resp, RPS_RESP_SIZE);
 
        /* 0x34 is the FIS type for the D2H fis.  There's a potential
         * standards cockup here.  sas-2 explicitly specifies the FIS
@@ -1506,7 +1506,8 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
        if (res)
                return res;
        if (include ^ present)
-               return sas_configure_set(dev, phy_id, sas_addr, index,include);
+               return sas_configure_set(dev, phy_id, sas_addr, index,
+                                        include);
 
        return res;
 }
index 6ba5fa0..f8de0d1 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -782,6 +782,7 @@ struct lpfc_hba {
 #define HBA_NEEDS_CFG_PORT     0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
 #define HBA_HBEAT_INP          0x4000000 /* mbox HBEAT is in progress */
 #define HBA_HBEAT_TMO          0x8000000 /* HBEAT initiated after timeout */
+#define HBA_FLOGI_OUTSTANDING  0x10000000 /* FLOGI is outstanding */
 
        uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
        struct lpfc_dmabuf slim2p;
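
hba_flag is an ordinary bitmask, and the new bit follows the set-on-issue,
clear-on-completion lifecycle visible in the lpfc_els.c hunks further down:

    /* lpfc_issue_els_flogi(), once the fabric IOCB is accepted: */
    phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

    /* lpfc_cmpl_els_flogi(), on the common exit path: */
    phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;

    /* and any other path can test whether a FLOGI is still in flight: */
    bool flogi_in_flight = phba->hba_flag & HBA_FLOGI_OUTSTANDING;
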
index bdd9a29..59ca32d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -512,11 +512,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                "6314 Catching potential buffer "
                                "overflow > PAGE_SIZE = %lu bytes\n",
                                PAGE_SIZE);
-               strlcpy(buf + PAGE_SIZE - 1 -
-                       strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
+               strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
                        LPFC_NVME_INFO_MORE_STR,
-                       strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
-                       + 1);
+                       sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
        }
 
        return len;
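
The swap from strnlen() to sizeof works because LPFC_NVME_INFO_MORE_STR is a
string-literal macro: sizeof on a literal is a compile-time constant equal to
strlen() + 1 (it counts the terminating NUL), whereas strnlen() rescans the
string at run time. A standalone illustration, with a hypothetical value for
the macro:

    #include <stdio.h>
    #include <string.h>

    #define MORE_STR "\nCould be more info...\n"   /* hypothetical value */

    int main(void)
    {
            /* sizeof includes the NUL terminator; strlen does not. */
            printf("sizeof: %zu\n", sizeof(MORE_STR));  /* strlen + 1 */
            printf("strlen: %zu\n", strlen(MORE_STR));
            return 0;
    }
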
@@ -864,7 +862,7 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
 }
 
 /**
- * lpfc_state_show - Return the link state of the port
+ * lpfc_link_state_show - Return the link state of the port
  * @dev: class converted to a Scsi_host structure.
  * @attr: device attribute, not used.
  * @buf: on return contains text describing the state of the link.
@@ -3819,7 +3817,7 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
                      LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
 
 /**
- * lpfc_tgt_queue_depth_store: Sets an attribute value.
+ * lpfc_tgt_queue_depth_set: Sets an attribute value.
  * @vport: lpfc vport structure pointer.
  * @val: integer attribute value.
  *
@@ -4004,7 +4002,7 @@ LPFC_ATTR(topology, 0, 0, 6,
        "Select Fibre Channel topology");
 
 /**
- * lpfc_topology_set - Set the adapters topology field
+ * lpfc_topology_store - Set the adapters topology field
  * @dev: class device that is converted into a scsi_host.
  * @attr: device attribute, not used.
  * @buf: buffer for passing information.
@@ -4457,7 +4455,7 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = {
 # Value range is [0,16]. Default value is 0.
 */
 /**
- * lpfc_link_speed_set - Set the adapters link speed
+ * lpfc_link_speed_store - Set the adapters link speed
  * @dev: Pointer to class device.
  * @attr: Unused.
  * @buf: Data buffer.
@@ -4858,7 +4856,7 @@ lpfc_param_show(sriov_nr_virtfn)
 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
 
 /**
- * lpfc_request_firmware_store - Request for Linux generic firmware upgrade
+ * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
  *
  * @dev: class device that is converted into a Scsi_host.
  * @attr: device attribute, not used.
@@ -5222,7 +5220,7 @@ lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
 
 /**
- * lpfc_state_show - Display current driver CPU affinity
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
  * @dev: class converted to a Scsi_host structure.
  * @attr: device attribute, not used.
  * @buf: on return contains text describing the state of the link.
index b974d39..503540c 100644 (file)
@@ -3580,7 +3580,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
  * @phba: Pointer to HBA context object.
  *
  * This routine cleans up and resets BSG handling of multi-buffer mbox
@@ -3869,7 +3869,7 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
 }
 
 /**
- * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
  * @phba: Pointer to HBA context object.
  * @job: Pointer to the job object.
  * @nemb_tp: Enumerate of non-embedded mailbox command type.
@@ -4360,7 +4360,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
 }
 
 /**
- * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
  * @phba: Pointer to HBA context object.
  *
  * This routine is for requesting to abort a pass-through mailbox command with
index a0aad48..eb4cf36 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -103,6 +103,8 @@ int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
 struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
 struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
 int  lpfc_nlp_put(struct lpfc_nodelist *);
+void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+                         struct lpfc_iocbq *rspiocb);
 int  lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
 struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
 void lpfc_disc_list_loopmap(struct lpfc_vport *);
index dd0b432..37b0c20 100644 (file)
@@ -137,11 +137,11 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 
 /**
- * lpfc_ct_reject_event : Issue reject for unhandled CT MIB commands
- * @ndlp : pointer to a node-list data structure.
- * ct_req : pointer to the CT request data structure.
- * rx_id : rx_id of the received UNSOL CT command
- * ox_id : ox_id of the UNSOL CT command
+ * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
+ * @ndlp: pointer to a node-list data structure.
+ * @ct_req: pointer to the CT request data structure.
+ * @rx_id: rx_id of the received UNSOL CT command
+ * @ox_id: ox_id of the UNSOL CT command
  *
  * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
  * a reject response. Reject response is sent for the unhandled commands.
@@ -272,7 +272,7 @@ ct_exit:
 /**
  * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
  * @phba: pointer to lpfc hba data structure.
- * @ctiocb: pointer to lpfc CT command iocb data structure.
+ * @ctiocbq: pointer to lpfc CT command iocb data structure.
  *
  * This routine is used for processing the IOCB associated with a unsolicited
  * CT MIB request. It first determines whether there is an existing ndlp that
index 46a8f2d..658a962 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2007-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -381,7 +381,7 @@ skipit:
 static int lpfc_debugfs_last_xripool;
 
 /**
- * lpfc_debugfs_common_xri_data - Dump Hardware Queue info to a buffer
+ * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
  * @phba: The HBA to gather host buffer info from.
  * @buf: The buffer to dump log into.
  * @size: The maximum amount of data to process.
@@ -869,7 +869,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                                "WWNN x%llx ",
                                wwn_to_u64(ndlp->nlp_nodename.u.wwn));
                if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
-                       len += scnprintf(buf+len, size-len, "RPI:%03d ",
+                       len += scnprintf(buf+len, size-len, "RPI:%04d ",
                                        ndlp->nlp_rpi);
                else
                        len += scnprintf(buf+len, size-len, "RPI:none ");
@@ -895,7 +895,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                if (ndlp->nlp_type & NLP_NVME_INITIATOR)
                        len += scnprintf(buf + len,
                                        size - len, "NVME_INITIATOR ");
-               len += scnprintf(buf+len, size-len, "refcnt:%x",
+               len += scnprintf(buf+len, size-len, "refcnt:%d",
                        kref_read(&ndlp->kref));
                if (iocnt) {
                        i = atomic_read(&ndlp->cmd_pending);
@@ -904,8 +904,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                                        i, ndlp->cmd_qdepth);
                        outio += i;
                }
-               len += scnprintf(buf + len, size - len, "defer:%x ",
-                       ndlp->nlp_defer_did);
+               len += scnprintf(buf+len, size-len, " xpt:x%x",
+                                ndlp->fc4_xpt_flags);
+               if (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)
+                       len += scnprintf(buf+len, size-len, " defer:%x",
+                                        ndlp->nlp_defer_did);
                len +=  scnprintf(buf+len, size-len, "\n");
        }
        spin_unlock_irq(shost->host_lock);
@@ -5151,7 +5154,7 @@ error_out:
  * This routine is to get the available extent information.
  *
  * Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
  **/
 static int
 lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
@@ -5202,7 +5205,7 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
  * This routine is to get the allocated extent information.
  *
  * Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
  **/
 static int
 lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
@@ -5274,7 +5277,7 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
  * This routine is to get the driver extent information.
  *
  * Returns:
- * overall lenth of the data read into the internal buffer.
+ * overall length of the data read into the internal buffer.
  **/
 static int
 lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
index 8ce13ef..08999aa 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -159,7 +159,6 @@ struct lpfc_node_rrq {
        uint16_t rxid;
        uint32_t         nlp_DID;               /* FC D_ID of entry */
        struct lpfc_vport *vport;
-       struct lpfc_nodelist *ndlp;
        unsigned long rrq_stop_time;
 };
 
index f0a7581..a04546e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1182,7 +1182,8 @@ flogifail:
        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
        spin_unlock_irq(&phba->hbalock);
 
-       lpfc_nlp_put(ndlp);
+       if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+               lpfc_nlp_put(ndlp);
        if (!lpfc_error_lost_link(irsp)) {
                /* FLOGI failed, so just use loop map to make discovery list */
                lpfc_disc_list_loopmap(vport);
@@ -1199,6 +1200,7 @@ flogifail:
                lpfc_issue_clear_la(phba, vport);
        }
 out:
+       phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
        lpfc_els_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(ndlp);
 }
@@ -1249,10 +1251,9 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * function field. The lpfc_issue_fabric_iocb routine is invoked to send
  * out FLOGI ELS command with one outstanding fabric IOCB at a time.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the FLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the FLOGI ELS command.
  *
  * Return code
  *   0 - successfully issued flogi iocb for @vport
@@ -1341,14 +1342,19 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                phba->sli3_options, 0, 0);
 
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto out;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_issue_fabric_iocb(phba, elsiocb);
-       if (rc == IOCB_ERROR)
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
                lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
-       phba->hba_flag |= HBA_FLOGI_ISSUED;
+       phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
 
        /* Check for a deferred FLOGI ACC condition */
        if (phba->defer_flogi_acc_flag) {
@@ -1376,11 +1382,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                vport->fc_myDID = did;
        }
 
-       if (!rc)
-               return 0;
- out:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
+       return 0;
 }
 
 /**
@@ -1423,9 +1425,14 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
                icmd = &iocb->iocb;
                if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
                        ndlp = (struct lpfc_nodelist *)(iocb->context1);
-                       if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+                       if (ndlp && ndlp->nlp_DID == Fabric_DID) {
+                               if ((phba->pport->fc_flag & FC_PT2PT) &&
+                                   !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
+                                       iocb->fabric_iocb_cmpl =
+                                               lpfc_ignore_els_cmpl;
                                lpfc_sli_issue_abort_iotag(phba, pring, iocb,
                                                           NULL);
+                       }
                }
        }
        /* Make sure HBA is alive */
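
For the pt2pt case the abort handler now swaps the fabric completion to lpfc_ignore_els_cmpl before issuing the abort, so the normal FLOGI completion logic never runs for an exchange the driver has deliberately abandoned. A sketch of the swap-callback-before-cancel idea, using hypothetical types:

#include <stdio.h>

struct req;
typedef void (*cmpl_fn)(struct req *);
struct req { cmpl_fn cmpl; };

static void normal_cmpl(struct req *r) { (void)r; printf("normal completion\n"); }
static void ignore_cmpl(struct req *r) { (void)r; /* drop silently */ }

/* Cancelling still invokes the request's completion; swapping the
 * callback first keeps the abandoned request from driving state. */
static void cancel(struct req *r)
{
        r->cmpl(r);
}

int main(void)
{
        struct req r = { .cmpl = normal_cmpl };

        r.cmpl = ignore_cmpl;   /* swap before cancelling */
        cancel(&r);
        return 0;
}
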
@@ -1600,7 +1607,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
        struct lpfc_nodelist *new_ndlp;
        struct serv_parm *sp;
        uint8_t  name[sizeof(struct lpfc_name)];
-       uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+       uint32_t keepDID = 0, keep_nlp_flag = 0;
        uint32_t keep_new_nlp_flag = 0;
        uint16_t keep_nlp_state;
        u32 keep_nlp_fc4_type = 0;
@@ -1622,7 +1629,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
        new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
 
        /* return immediately if the WWPN matches ndlp */
-       if (new_ndlp == ndlp)
+       if (!new_ndlp || (new_ndlp == ndlp))
                return ndlp;
 
        if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -1641,30 +1648,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
                         (new_ndlp ? new_ndlp->nlp_flag : 0),
                         (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
 
-       if (!new_ndlp) {
-               rc = memcmp(&ndlp->nlp_portname, name,
-                           sizeof(struct lpfc_name));
-               if (!rc) {
-                       if (active_rrqs_xri_bitmap)
-                               mempool_free(active_rrqs_xri_bitmap,
-                                            phba->active_rrq_pool);
-                       return ndlp;
-               }
-               new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
-               if (!new_ndlp) {
-                       if (active_rrqs_xri_bitmap)
-                               mempool_free(active_rrqs_xri_bitmap,
-                                            phba->active_rrq_pool);
-                       return ndlp;
-               }
-       } else {
-               keepDID = new_ndlp->nlp_DID;
-               if (phba->sli_rev == LPFC_SLI_REV4 &&
-                   active_rrqs_xri_bitmap)
-                       memcpy(active_rrqs_xri_bitmap,
-                              new_ndlp->active_rrqs_xri_bitmap,
-                              phba->cfg_rrq_xri_bitmap_sz);
-       }
+       keepDID = new_ndlp->nlp_DID;
+
+       if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
+               memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
+                      phba->cfg_rrq_xri_bitmap_sz);
 
        /* At this point in this routine, we know new_ndlp will be
          * returned. However, any previous GID_FTs that were done
@@ -1849,7 +1837,7 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        struct lpfc_vport *vport = cmdiocb->vport;
        IOCB_t *irsp;
-       struct lpfc_nodelist *ndlp;
+       struct lpfc_nodelist *ndlp = cmdiocb->context1;
        struct lpfc_node_rrq *rrq;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -1862,22 +1850,12 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                irsp->ulpStatus, irsp->un.ulpWord[4],
                irsp->un.elsreq64.remoteID);
 
-       ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
-       if (!ndlp || ndlp != rrq->ndlp) {
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-                                "2882 RRQ completes to NPort x%x "
-                                "with no ndlp. Data: x%x x%x x%x\n",
-                                irsp->un.elsreq64.remoteID,
-                                irsp->ulpStatus, irsp->un.ulpWord[4],
-                                irsp->ulpIoTag);
-               goto out;
-       }
-
        /* rrq completes to NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "2880 RRQ completes to NPort x%x "
+                        "2880 RRQ completes to DID x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
-                        ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+                        irsp->un.elsreq64.remoteID,
+                        irsp->ulpStatus, irsp->un.ulpWord[4],
                         irsp->ulpTimeout, rrq->xritag, rrq->rxid);
 
        if (irsp->ulpStatus) {
@@ -1893,10 +1871,8 @@ lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                         ndlp->nlp_DID, irsp->ulpStatus,
                                         irsp->un.ulpWord[4]);
        }
-out:
-       if (rrq)
-               lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 
+       lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        lpfc_els_free_iocb(phba, cmdiocb);
        lpfc_nlp_put(ndlp);
        return;
@@ -1912,7 +1888,7 @@ out:
  * ndlp on the vport node list that matches the remote node ID from the
  * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
  * ignored and command IOCB released. The PLOGI response IOCB status is
- * checked for error conditons. If there is error status reported, PLOGI
+ * checked for error conditions. If there is error status reported, PLOGI
  * retry shall be attempted by invoking the lpfc_els_retry() routine.
  * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
  * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
@@ -2063,13 +2039,12 @@ out_freeiocb:
  * This routine issues a Port Login (PLOGI) command to a remote N_Port
  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
- * This routine constructs the proper feilds of the PLOGI IOCB and invokes
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PLOGI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding
+ * the ndlp and the reference to ndlp will be stored into the context1 field
+ * of the IOCB for the completion callback function to the PLOGI ELS command.
  *
  * Return code
  *   0 - Successfully issued a plogi for @vport
@@ -2087,29 +2062,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        int ret;
 
        ndlp = lpfc_findnode_did(vport, did);
+       if (!ndlp)
+               return 1;
 
-       if (ndlp) {
-               /* Defer the processing of the issue PLOGI until after the
-                * outstanding UNREG_RPI mbox command completes, unless we
-                * are going offline. This logic does not apply for Fabric DIDs
-                */
-               if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
-                   ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
-                   !(vport->fc_flag & FC_OFFLINE_MODE)) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                                        "4110 Issue PLOGI x%x deferred "
-                                        "on NPort x%x rpi x%x Data: x%px\n",
-                                        ndlp->nlp_defer_did, ndlp->nlp_DID,
-                                        ndlp->nlp_rpi, ndlp);
-
-                       /* We can only defer 1st PLOGI */
-                       if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
-                               ndlp->nlp_defer_did = did;
-                       return 0;
-               }
+       /* Defer the processing of the issue PLOGI until after the
+        * outstanding UNREG_RPI mbox command completes, unless we
+        * are going offline. This logic does not apply for Fabric DIDs
+        */
+       if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+           ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+           !(vport->fc_flag & FC_OFFLINE_MODE)) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+                                "4110 Issue PLOGI x%x deferred "
+                                "on NPort x%x rpi x%x Data: x%px\n",
+                                ndlp->nlp_defer_did, ndlp->nlp_DID,
+                                ndlp->nlp_rpi, ndlp);
+
+               /* We can only defer 1st PLOGI */
+               if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
+                       ndlp->nlp_defer_did = did;
+               return 0;
        }
 
-       /* If ndlp is not NULL, we will bump the reference count on it */
        cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
        elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
                                     ELS_CMD_PLOGI);
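
The rewrite of lpfc_issue_els_plogi() is a pure guard-clause refactor: failing fast when the lookup finds no ndlp removes one nesting level from the deferral logic and makes the deleted "If ndlp is not NULL" comment moot, since a NULL node can no longer reach the code below. The shape of the refactor as a self-contained sketch with hypothetical helpers:

#include <stdio.h>

struct node { int defer_pending; };

static int must_defer(struct node *n)      { return n->defer_pending; }
static int record_deferral(struct node *n) { (void)n; return 0; }
static int build_and_send(struct node *n)  { (void)n; return 0; }

/* Guard clause first: everything below may assume a valid node. */
static int issue_plogi_like(struct node *n)
{
        if (!n)
                return 1;
        if (must_defer(n))
                return record_deferral(n);
        return build_and_send(n);
}

int main(void)
{
        struct node n = { 0 };

        printf("%d %d\n", issue_plogi_like(NULL), issue_plogi_like(&n));
        return 0;
}
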
@@ -2165,19 +2139,19 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
                              "Issue PLOGI:     did:x%x refcnt %d",
                              did, kref_read(&ndlp->kref), 0);
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto io_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (ret) {
+               lpfc_els_free_iocb(phba, elsiocb);
                lpfc_nlp_put(ndlp);
-               goto io_err;
+               return 1;
        }
-       return 0;
 
- io_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
+       return 0;
 }
 
 /**
@@ -2306,10 +2280,9 @@ out:
  * is put to the IOCB completion callback func field before invoking the
  * routine lpfc_sli_issue_iocb() to send out PRLI command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PRLI ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the PRLI ELS command.
  *
  * Return code
  *   0 - successfully issued prli iocb command for @vport
@@ -2471,12 +2444,17 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                              "Issue PRLI:  did:x%x refcnt %d",
                              ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto io_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               goto err;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto node_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               goto err;
+       }
 
 
        /* The driver supports 2 FC4 types.  Make sure
@@ -2488,13 +2466,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        else
                return 0;
 
- node_err:
-       lpfc_nlp_put(ndlp);
- io_err:
+err:
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~NLP_PRLI_SND;
        spin_unlock_irq(&ndlp->lock);
-       lpfc_els_free_iocb(phba, elsiocb);
        return 1;
 }
 
@@ -2733,10 +2708,9 @@ out:
  * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
  * to issue the ADISC ELS command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the ADISC ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the ADISC ELS command.
  *
  * Return code
  *   0 - successfully issued adisc
@@ -2778,24 +2752,27 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        ndlp->nlp_flag |= NLP_ADISC_SND;
        spin_unlock_irq(&ndlp->lock);
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               goto err;
+       }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue ADISC:   did:x%x refcnt %d",
                              ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               goto err;
+       }
+
        return 0;
 
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
+err:
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~NLP_ADISC_SND;
        spin_unlock_irq(&ndlp->lock);
-       lpfc_els_free_iocb(phba, elsiocb);
        return 1;
 }
 
@@ -2808,8 +2785,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
  * This routine is the completion function for issuing the ELS Logout (LOGO)
  * command. If no error status was reported from the LOGO response, the
  * state machine of the associated ndlp shall be invoked for transition with
- * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
- * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ * respect to NLP_EVT_CMPL_LOGO event.
  **/
 static void
 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -2946,10 +2922,9 @@ out:
  * payload of the IOCB, properly sets up the @ndlp state, and invokes the
  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the LOGO ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
  *
  * Callers of this routine are expected to unregister the RPI first
  *
@@ -2996,15 +2971,20 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
        spin_unlock_irq(&ndlp->lock);
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               goto err;
+       }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue LOGO:      did:x%x refcnt %d",
                              ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               goto err;
+       }
 
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_prev_state = ndlp->nlp_state;
@@ -3012,13 +2992,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
        return 0;
 
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
+err:
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~NLP_LOGO_SND;
        spin_unlock_irq(&ndlp->lock);
-       lpfc_els_free_iocb(phba, elsiocb);
        return 1;
 }
 
@@ -3183,10 +3160,9 @@ out:
  * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
  * routine is invoked to send the SCR IOCB.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the SCR ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the SCR ELS command.
  *
  * Return code
  *   0 - Successfully issued scr command
@@ -3234,25 +3210,24 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
        phba->fc_stat.elsXmitSCR++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue SCR:     did:x%x refcnt %d",
                              ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
        /* Keep the ndlp just in case RDF is being sent */
        return 0;
-
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
 }
 
 /**
@@ -3266,10 +3241,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
  *  in point-to-point mode. When sent to the Fabric Controller, it will
  *  replay the RSCN to registered recipients.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RSCN ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RSCN ELS command.
  *
  * Return code
  *   0 - Successfully issued RSCN command
@@ -3334,16 +3308,21 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
        phba->fc_stat.elsXmitRSCN++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue RSCN:       did:x%x",
                              ndlp->nlp_DID, 0, 0);
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
        /* This will cause the callback-function lpfc_cmpl_els_cmd to
         * trigger the release of node.
@@ -3351,11 +3330,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
        if (!(vport->fc_flag & FC_PT2PT))
                lpfc_nlp_put(ndlp);
        return 0;
-io_err:
-       lpfc_nlp_put(ndlp);
-node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
 }
 
 /**
@@ -3371,10 +3345,9 @@ node_err:
  * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
  * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PARPR ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the FARPR ELS command.
  *
  * Return code
  *   0 - Successfully issued farpr command
@@ -3450,8 +3423,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
                 * lpfc_els_free_iocb routine to trigger the release of
                 * the node.
                 */
-               lpfc_nlp_put(ndlp);
                lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
                return 1;
        }
        /* This will cause the callback-function lpfc_cmpl_els_cmd to
@@ -3469,10 +3442,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
  * This routine issues an ELS RDF to the Fabric Controller to register
  * for diagnostic functions.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RDF ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RDF ELS command.
  *
  * Return code
  *   0 - Successfully issued rdf command
@@ -3531,23 +3503,22 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
 
        elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return -EIO;
+       }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                              "Issue RDF:     did:x%x refcnt %d",
                              ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return -EIO;
+       }
        return 0;
-
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return -EIO;
 }
 
 /**
@@ -3784,7 +3755,7 @@ lpfc_link_reset(struct lpfc_vport *vport)
  * This routine makes a retry decision on an ELS command IOCB, which has
  * failed. The following ELS IOCBs use this function for retrying the command
  * when a previously issued command responded with error status: FLOGI, PLOGI,
- * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * PRLI, ADISC and FDISC. Based on the ELS command type and the
  * returned error status, it makes the decision whether a retry shall be
  * issued for the command, and whether a retry shall be made immediately or
  * delayed. In the former case, the corresponding ELS command issuing-function
@@ -3829,12 +3800,12 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                did = irsp->un.elsreq64.remoteID;
                ndlp = lpfc_findnode_did(vport, did);
                if (!ndlp && (cmd != ELS_CMD_PLOGI))
-                       return 1;
+                       return 0;
        }
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Retry ELS:       wd7:x%x wd4:x%x did:x%x",
-               *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+               *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);
 
        switch (irsp->ulpStatus) {
        case IOSTAT_FCP_RSP_ERROR:
@@ -4684,10 +4655,10 @@ out:
  * field of the IOCB for the completion callback function to issue the
  * mailbox command to the HBA later when callback is invoked.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the corresponding response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the corresponding
+ * response ELS IOCB command.
  *
  * Return code
  *   0 - Successfully issued acc response
@@ -4834,12 +4805,17 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 
        phba->fc_stat.elsXmitACC++;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
        /* Xmit ELS ACC response tag <ulpIoTag> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -4850,12 +4826,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
                         ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                         ndlp->nlp_rpi, vport->fc_flag);
        return 0;
-
-io_err:
-       lpfc_nlp_put(ndlp);
-node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
 }
 
 /**
@@ -4871,10 +4841,10 @@ node_err:
  * context_un.mbox field of the IOCB for the completion callback function
  * to issue to the HBA later.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the reject response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the reject response
+ * ELS IOCB command.
  *
  * Return code
  *   0 - Successfully issued reject response
@@ -4927,20 +4897,19 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
        phba->fc_stat.elsXmitLSRJT++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
        return 0;
-
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
 }
 
 /**
@@ -4953,10 +4922,10 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
  * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the ADISC Accept response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the ADISC Accept response
+ * ELS IOCB command.
  *
  * Return code
  *   0 - Successfully issued acc adisc response
@@ -5010,12 +4979,17 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
        phba->fc_stat.elsXmitACC++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
        /* Xmit ELS ACC response tag <ulpIoTag> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -5026,12 +5000,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
                         ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
                         ndlp->nlp_rpi, vport->fc_flag);
        return 0;
-
-io_err:
-       lpfc_nlp_put(ndlp);
-node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
 }
 
 /**
@@ -5044,10 +5012,10 @@ node_err:
  * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the PRLI Accept response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the PRLI Accept response
+ * ELS IOCB command.
  *
  * Return code
  *   0 - Successfully issued acc prli response
@@ -5185,19 +5153,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
        phba->fc_stat.elsXmitACC++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        elsiocb->context1 =  lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
-       return 0;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
+       return 0;
 }
 
 /**
@@ -5210,17 +5178,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
  * This routine issues a Request Node Identification Data (RNID) Accept
  * (ACC) response. It constructs the RNID ACC response command according to
  * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
- * issue the response. Note that this command does not need to hold the ndlp
- * reference count for the callback. So, the ndlp reference count taken by
- * the lpfc_prep_els_iocb() routine is put back and the context1 field of
- * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
- * there is no ndlp reference available.
- *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function. However, for the RNID Accept Response ELS command,
- * this is undone later by this routine after the IOCB is allocated.
+ * issue the response.
+ *
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function.
  *
  * Return code
  *   0 - Successfully issued acc rnid response
@@ -5292,20 +5254,19 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
        phba->fc_stat.elsXmitACC++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
        return 0;
-
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
 }
 
 /**
@@ -5407,19 +5368,19 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
        phba->fc_stat.elsXmitACC++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        elsiocb->context1 =  lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
-       return 0;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
+       return 0;
 }
 
 /**
@@ -6063,8 +6024,8 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
-               lpfc_nlp_put(ndlp);
                lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
        }
 
        goto free_rdp_context;
@@ -6095,8 +6056,8 @@ error:
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
-               lpfc_nlp_put(ndlp);
                lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
        }
 
 free_rdp_context:
@@ -6308,16 +6269,16 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        phba->fc_stat.elsXmitACC++;
 
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
-
-       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (!rc)
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
                goto out;
+       }
 
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+       }
  out:
        kfree(lcb_context);
        return;
@@ -6353,8 +6314,8 @@ error:
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
-               lpfc_nlp_put(ndlp);
                lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
        }
 free_lcb_context:
        kfree(lcb_context);
@@ -7342,16 +7303,16 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
  *
  * This routine is the completion callback function for the MBX_READ_LNK_STAT
  * mailbox command. This callback function is to actually send the Accept
- * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
- * mailbox command, constructs the RPS response with the link statistics
+ * mailbox command, constructs the RLS response with the link statistics
  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
- * response to the RPS.
+ * response to the RLS.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPS Accept Response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RLS Accept Response
+ * ELS IOCB command.
  *
  **/
 static void
@@ -7420,18 +7381,17 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+       }
        return;
-
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
 }
 
 /**
@@ -7510,10 +7470,10 @@ reject_out:
  * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
  * Value (RTV) unsolicited IOCB event.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RTV Accept Response ELS IOCB command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RTV Accept Response
+ * ELS IOCB command.
  *
  * Return codes
  *   0 - Successfully processed rtv iocb (currently always return 0)
@@ -7580,8 +7540,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
-               lpfc_nlp_put(ndlp);
                lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
        }
        return 0;
 
@@ -7619,9 +7579,6 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        uint16_t cmdsize;
        int ret;
 
-
-       if (ndlp != rrq->ndlp)
-               ndlp = rrq->ndlp;
        if (!ndlp)
                return 1;
 
@@ -7651,9 +7608,9 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                did, rrq->xritag, rrq->rxid);
        elsiocb->context_un.rrq = rrq;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
-       elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+
+       lpfc_nlp_get(ndlp);
+       elsiocb->context1 = ndlp;
 
        ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (ret == IOCB_ERROR)
@@ -7661,9 +7618,8 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        return 0;
 
  io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
        lpfc_els_free_iocb(phba, elsiocb);
+       lpfc_nlp_put(ndlp);
        return 1;
 }
 
@@ -7704,10 +7660,10 @@ lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
  * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
  * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the RPL Accept Response ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the RPL Accept Response
+ * ELS command.
  *
  * Return code
  *   0 - Successfully issued ACC RPL ELS command
@@ -7760,19 +7716,19 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
 
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
-       return 0;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
 
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
-       lpfc_els_free_iocb(phba, elsiocb);
-       return 1;
+       return 0;
 }
 
 /**
@@ -9598,10 +9554,9 @@ out:
  * routine to issue the IOCB, which makes sure only one outstanding fabric
  * IOCB will be sent off HBA at any given time.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the FDISC ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the FDISC ELS command.
  *
  * Return code
  *   0 - Successfully issued fdisc iocb command
@@ -9678,11 +9633,14 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                did, 0, 0);
 
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
                goto err_out;
+       }
 
        rc = lpfc_issue_fabric_iocb(phba, elsiocb);
        if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
                lpfc_nlp_put(ndlp);
                goto err_out;
        }
@@ -9691,7 +9649,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        return 0;
 
  err_out:
-       lpfc_els_free_iocb(phba, elsiocb);
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                         "0256 Issue FDISC: Cannot send IOCB\n");
@@ -9757,10 +9714,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  *
  * This routine issues a LOGO ELS command to an @ndlp off a @vport.
  *
- * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
- * will be incremented by 1 for holding the ndlp and the reference to ndlp
- * will be stored into the context1 field of the IOCB for the completion
- * callback function to the LOGO ELS command.
+ * Note that the ndlp reference count will be incremented by 1 for holding the
+ * ndlp and the reference to ndlp will be stored into the context1 field of
+ * the IOCB for the completion callback function to the LOGO ELS command.
  *
  * Return codes
  *   0 - Successfully issued logo off the @vport
@@ -9799,20 +9755,23 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        ndlp->nlp_flag |= NLP_LOGO_SND;
        spin_unlock_irq(&ndlp->lock);
        elsiocb->context1 = lpfc_nlp_get(ndlp);
-       if (!elsiocb->context1)
-               goto node_err;
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               goto err;
+       }
+
        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-       if (rc == IOCB_ERROR)
-               goto io_err;
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               goto err;
+       }
        return 0;
 
- io_err:
-       lpfc_nlp_put(ndlp);
- node_err:
+err:
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~NLP_LOGO_SND;
        spin_unlock_irq(&ndlp->lock);
-       lpfc_els_free_iocb(phba, elsiocb);
        return 1;
 }
 
@@ -10074,7 +10033,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
  * list, removes each IOCB associated with the @vport off the list, sets the
- * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
  * associated with the IOCB.
  **/
 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
@@ -10107,7 +10066,7 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
  * list, removes each IOCB associated with the @ndlp off the list, sets the
- * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
  * associated with the IOCB.
  **/
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
@@ -10144,7 +10103,7 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
  * This routine aborts all the IOCBs currently on the driver internal
  * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
  * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
- * list, removes IOCBs off the list, set the status feild to
+ * list, removes IOCBs off the list, sets the status field to
  * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
  * the IOCB.
  **/
index 48ca4a6..3b5cd23 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1486,7 +1486,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
 }
 
 /**
- * lpfc_update_fcf_record - Update driver fcf record
+ * __lpfc_update_fcf_record - Update driver fcf record
  * @phba: pointer to lpfc hba data structure.
  * @fcf_rec: pointer to driver fcf record.
  * @new_fcf_record: pointer to hba fcf record.
@@ -6081,12 +6081,12 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
                 * Translate the physical vpi to the logical vpi.  The
                 * vport stores the logical vpi.
                 */
-               for (i = 0; i < phba->max_vpi; i++) {
+               for (i = 0; i <= phba->max_vpi; i++) {
                        if (vpi == phba->vpi_ids[i])
                                break;
                }
 
-               if (i >= phba->max_vpi) {
+               if (i > phba->max_vpi) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "2936 Could not find Vport mapped "
                                        "to vpi %d\n", vpi);
index 71f340d..5ea43c5 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
index 135d8e8..9aa907c 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
        lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 }
 
-/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
  * @phba: pointer to lpfc hba data structure.
- * @link_mbox: pointer to CONFIG_LINK mailbox object
+ * @login_mbox: pointer to REG_RPI mailbox object
  *
- * This routine is only called if we are SLI3, direct connect pt2pt
- * mode and the remote NPort issues the PLOGI after link up.
+ * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
  */
 static void
-lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
 {
-       LPFC_MBOXQ_t *login_mbox;
-       MAILBOX_t *mb = &link_mbox->u.mb;
        struct lpfc_iocbq *save_iocb;
        struct lpfc_nodelist *ndlp;
+       MAILBOX_t *mb = &login_mbox->u.mb;
+
        int rc;
 
-       ndlp = link_mbox->ctx_ndlp;
-       login_mbox = link_mbox->context3;
+       ndlp = login_mbox->ctx_ndlp;
        save_iocb = login_mbox->context3;
-       link_mbox->context3 = NULL;
-       login_mbox->context3 = NULL;
 
-       /* Check for CONFIG_LINK error */
-       if (mb->mbxStatus) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
-                               mb->mbxStatus);
-               mempool_free(login_mbox, phba->mbox_mem_pool);
-               mempool_free(link_mbox, phba->mbox_mem_pool);
-               kfree(save_iocb);
-               return;
-       }
-
-       /* Now that CONFIG_LINK completed, and our SID is configured,
-        * we can now proceed with sending the PLOGI ACC.
-        */
-       rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
-                             save_iocb, ndlp, login_mbox);
-       if (rc) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "4576 PLOGI ACC fails pt2pt discovery: %x\n",
-                               rc);
-               mempool_free(login_mbox, phba->mbox_mem_pool);
+       if (mb->mbxStatus == MBX_SUCCESS) {
+               /* Now that REG_RPI completed successfully,
+                * we can now proceed with sending the PLOGI ACC.
+                */
+               rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+                                     save_iocb, ndlp, NULL);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+                                       "4576 PLOGI ACC fails pt2pt discovery: "
+                                       "DID %x Data: %x\n", ndlp->nlp_DID, rc);
+               }
        }
 
-       mempool_free(link_mbox, phba->mbox_mem_pool);
+       /* Now process the REG_RPI cmpl */
+       lpfc_mbx_cmpl_reg_login(phba, login_mbox);
+       ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
        kfree(save_iocb);
 }
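
lpfc_defer_plogi_acc() collapses the two SLI-specific deferral helpers that follow into a single REG_RPI completion: the ACC for the received PLOGI goes out only when the mailbox reports MBX_SUCCESS, after which the normal reg_login completion always runs and the saved command IOCB is released. A tiny user-space model of deferring a response into a completion callback; the structs here are hypothetical, not the lpfc mailbox API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical mailbox: carries a status, a saved request to answer
 * later, and a completion callback run when the "hardware" finishes. */
struct mbox {
        int status;                     /* 0 == success */
        char saved_req[32];             /* copy of the original PLOGI */
        void (*cmpl)(struct mbox *);
};

static void reg_login_cmpl(struct mbox *m)
{
        (void)m;
        printf("reg_login bookkeeping done\n");
}

/* Deferred ACC: answer the saved request only on success, then always
 * run the normal completion and release the saved copy. */
static void defer_acc_cmpl(struct mbox *m)
{
        if (m->status == 0)
                printf("ACC sent for %s\n", m->saved_req);
        reg_login_cmpl(m);
        free(m);
}

int main(void)
{
        struct mbox *m = malloc(sizeof(*m));

        m->status = 0;
        strcpy(m->saved_req, "PLOGI from 0x0000ef");
        m->cmpl = defer_acc_cmpl;
        m->cmpl(m);                     /* simulate mailbox completion */
        return 0;
}
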
 
-/**
- * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
- * @phba: Pointer to HBA context object.
- * @pmb: Pointer to mailbox object.
- *
- * This function provides the unreg rpi mailbox completion handler for a tgt.
- * The routine frees the memory resources associated with the completed
- * mailbox command and transmits the ELS ACC.
- *
- * This routine is only called if we are SLI4, acting in target
- * mode and the remote NPort issues the PLOGI after link up.
- **/
-static void
-lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
-{
-       struct lpfc_vport *vport = pmb->vport;
-       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
-       LPFC_MBOXQ_t *mbox = pmb->context3;
-       struct lpfc_iocbq *piocb = NULL;
-       int rc;
-
-       if (mbox) {
-               pmb->context3 = NULL;
-               piocb = mbox->context3;
-               mbox->context3 = NULL;
-       }
-
-       /*
-        * Complete the unreg rpi mbx request, and update flags.
-        * This will also restart any deferred events.
-        */
-       lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
-
-       if (!piocb) {
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-                                "4578 PLOGI ACC fail\n");
-               if (mbox)
-                       mempool_free(mbox, phba->mbox_mem_pool);
-               return;
-       }
-
-       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
-       if (rc) {
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-                                "4579 PLOGI ACC fail %x\n", rc);
-               if (mbox)
-                       mempool_free(mbox, phba->mbox_mem_pool);
-       }
-       kfree(piocb);
-}
-
 static int
 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
               struct lpfc_iocbq *cmdiocb)
@@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct lpfc_iocbq *save_iocb;
        struct ls_rjt stat;
        uint32_t vid, flag;
-       u16 rpi;
-       int rc, defer_acc;
+       int rc;
 
        memset(&stat, 0, sizeof (struct ls_rjt));
        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        else
                ndlp->nlp_fcp_info |= CLASS3;
 
-       defer_acc = 0;
        ndlp->nlp_class_sup = 0;
        if (sp->cls1.classValid)
                ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -523,6 +458,16 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                /* rcv'ed PLOGI decides what our NPortId will be */
                vport->fc_myDID = icmd->un.rcvels.parmRo;
 
+               /* If there is an outstanding FLOGI, abort it now.
+                * The remote NPort is not going to ACC our FLOGI
+                * if its already issuing a PLOGI for pt2pt mode.
+                * This indicates our FLOGI was dropped; however, we
+                * must have ACCed the remote NPort's FLOGI to us
+                * to make it here.
+                */
+               if (phba->hba_flag & HBA_FLOGI_OUTSTANDING)
+                       lpfc_els_abort_flogi(phba);
+
                ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
                if (sp->cmn.edtovResolution) {
                        /* E_D_TOV ticks are in nanoseconds */
@@ -539,27 +484,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
                memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
 
-               /* Issue config_link / reg_vfi to account for updated TOV's */
-
+               /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
+                * to account for updated TOV's / parameters
+                */
                if (phba->sli_rev == LPFC_SLI_REV4)
                        lpfc_issue_reg_vfi(vport);
                else {
-                       defer_acc = 1;
                        link_mbox = mempool_alloc(phba->mbox_mem_pool,
                                                  GFP_KERNEL);
                        if (!link_mbox)
                                goto out;
                        lpfc_config_link(phba, link_mbox);
-                       link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+                       link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        link_mbox->vport = vport;
                        link_mbox->ctx_ndlp = ndlp;
 
-                       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
-                       if (!save_iocb)
+                       rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+                       if (rc == MBX_NOT_FINISHED) {
+                               mempool_free(link_mbox, phba->mbox_mem_pool);
                                goto out;
-                       /* Save info from cmd IOCB used in rsp */
-                       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
-                              sizeof(struct lpfc_iocbq));
+                       }
                }
 
                lpfc_can_disctmo(vport);
@@ -578,59 +522,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (!login_mbox)
                goto out;
 
-       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
-       if (phba->nvmet_support && !defer_acc) {
-               link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-               if (!link_mbox)
-                       goto out;
-
-               /* As unique identifiers such as iotag would be overwritten
-                * with those from the cmdiocb, allocate separate temporary
-                * storage for the copy.
-                */
-               save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
-               if (!save_iocb)
-                       goto out;
-
-               /* Unreg RPI is required for SLI4. */
-               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
-               lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
-               link_mbox->vport = vport;
-               link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
-               if (!link_mbox->ctx_ndlp)
-                       goto out;
-
-               link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
-
-               if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
-                   (!(vport->fc_flag & FC_OFFLINE_MODE)))
-                       ndlp->nlp_flag |= NLP_UNREG_INP;
+       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+       if (!save_iocb)
+               goto out;
 
-               /* Save info from cmd IOCB used in rsp */
-               memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+       /* Save info from cmd IOCB to be used in rsp after all mboxes complete */
+       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+              sizeof(struct lpfc_iocbq));
 
-               /* Delay sending ACC till unreg RPI completes. */
-               defer_acc = 1;
-       } else if (phba->sli_rev == LPFC_SLI_REV4)
+       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+       if (phba->sli_rev == LPFC_SLI_REV4)
                lpfc_unreg_rpi(vport, ndlp);
 
+       /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
+        * always be deferring the ACC.
+        */
        rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
                            (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
        if (rc)
                goto out;
 
-       /* ACC PLOGI rsp command needs to execute first,
-        * queue this login_mbox command to be processed later.
-        */
        login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
-       /*
-        * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
-        * command issued in lpfc_cmpl_els_acc().
-        */
        login_mbox->vport = vport;
-       spin_lock_irq(&ndlp->lock);
-       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
-       spin_unlock_irq(&ndlp->lock);
 
        /*
         * If there is an outstanding PLOGI issued, abort it before
@@ -660,7 +573,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                 * to register, then unregister the RPI.
                 */
                spin_lock_irq(&ndlp->lock);
-               ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+               ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+                                  NLP_RCV_PLOGI);
                spin_unlock_irq(&ndlp->lock);
                stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
                stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
@@ -670,42 +584,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        mempool_free(login_mbox, phba->mbox_mem_pool);
                return 1;
        }
-       if (defer_acc) {
-               /* So the order here should be:
-                * SLI3 pt2pt
-                *   Issue CONFIG_LINK mbox
-                *   CONFIG_LINK cmpl
-                * SLI4 tgt
-                *   Issue UNREG RPI mbx
-                *   UNREG RPI cmpl
-                * Issue PLOGI ACC
-                * PLOGI ACC cmpl
-                * Issue REG_LOGIN mbox
-                */
 
-               /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
-               link_mbox->context3 = login_mbox;
-               login_mbox->context3 = save_iocb;
+       /* So the order here should be:
+        * SLI3 pt2pt
+        *   Issue CONFIG_LINK mbox
+        *   CONFIG_LINK cmpl
+        * SLI4 pt2pt
+        *   Issue REG_VFI mbox
+        *   REG_VFI cmpl
+        * SLI4
+        *   Issue UNREG RPI mbx
+        *   UNREG RPI cmpl
+        * Issue REG_RPI mbox
+        * REG RPI cmpl
+        * Issue PLOGI ACC
+        * PLOGI ACC cmpl
+        */
+       login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
+       login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+       login_mbox->context3 = save_iocb; /* For PLOGI ACC */
 
-               /* Start the ball rolling by issuing CONFIG_LINK here */
-               rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
-               if (rc == MBX_NOT_FINISHED)
-                       goto out;
-               return 1;
-       }
+       spin_lock_irq(&ndlp->lock);
+       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+       spin_unlock_irq(&ndlp->lock);
+
+       /* Start the ball rolling by issuing REG_LOGIN here */
+       rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED)
+               goto out;
+       lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
 
-       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
-       if (rc)
-               mempool_free(login_mbox, phba->mbox_mem_pool);
        return 1;
 out:
-       if (defer_acc)
-               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                               "4577 discovery failure: %p %p %p\n",
-                               save_iocb, link_mbox, login_mbox);
        kfree(save_iocb);
-       if (link_mbox)
-               mempool_free(link_mbox, phba->mbox_mem_pool);
        if (login_mbox)
                mempool_free(login_mbox, phba->mbox_mem_pool);
 
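Editor's note: the rework above collapses the two deferred-ACC paths into one. lpfc_rcv_plogi() now always issues REG_LOGIN first and lets the new lpfc_defer_plogi_acc completion transmit the ACC, carrying the saved PLOGI in context3. A minimal userspace sketch of that completion-chaining pattern follows; the names are illustrative, not the lpfc API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mbox {
        void (*cmpl)(struct mbox *m);
        char *saved_req;                /* stands in for save_iocb */
};

/* Runs when the REG_LOGIN-style command completes: only now is it
 * safe to build and transmit the saved response. */
static void defer_acc_cmpl(struct mbox *m)
{
        printf("ACC sent for %s\n", m->saved_req);
        free(m->saved_req);
        free(m);
}

int main(void)
{
        struct mbox *m = malloc(sizeof(*m));

        m->cmpl = defer_acc_cmpl;
        m->saved_req = strdup("PLOGI"); /* copy of the received request */
        m->cmpl(m);                     /* "firmware" completes the mailbox */
        return 0;
}

The point of the pattern is that the response depends on state the asynchronous command establishes, so the response data rides in the command's private context instead of being sent inline.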
@@ -913,9 +824,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                }
        } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
                ((ndlp->nlp_type & NLP_FCP_TARGET) ||
-               !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+               (ndlp->nlp_type & NLP_NVME_TARGET) ||
+               (vport->fc_flag & FC_PT2PT))) ||
                (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
-               /* Only try to re-login if this is NOT a Fabric Node */
+               /* Only try to re-login if this is NOT a Fabric Node
+                * AND the remote NPORT is a FCP/NVME Target or we
+                * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
+                * case for LOGO as a response to ADISC behavior.
+                */
                mod_timer(&ndlp->nlp_delayfunc,
                          jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(&ndlp->lock);
@@ -2569,6 +2485,16 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        return ndlp->nlp_state;
 }
 
+static uint32_t
+lpfc_device_rm_unmap_node(struct lpfc_vport *vport,
+                         struct lpfc_nodelist *ndlp,
+                         void *arg,
+                         uint32_t evt)
+{
+       lpfc_drop_node(vport, ndlp);
+       return NLP_STE_FREED_NODE;
+}
+
 static uint32_t
 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
                             struct lpfc_nodelist *ndlp,
@@ -3062,7 +2988,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
        lpfc_disc_illegal,              /* CMPL_LOGO       */
        lpfc_disc_illegal,              /* CMPL_ADISC      */
        lpfc_disc_illegal,              /* CMPL_REG_LOGIN  */
-       lpfc_disc_illegal,              /* DEVICE_RM       */
+       lpfc_device_rm_unmap_node,      /* DEVICE_RM       */
        lpfc_device_recov_unmap_node,   /* DEVICE_RECOVERY */
 
        lpfc_rcv_plogi_mapped_node,     /* RCV_PLOGI   MAPPED_NODE    */
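Editor's note: lpfc_disc_action[] is a flat function-pointer table indexed by state and event, which is why the hunk above only swaps one slot to give unmapped nodes a real DEVICE_RM handler. A compilable sketch of the same dispatch scheme; the states, events, and handlers here are invented for illustration.

#include <stdio.h>

enum { ST_UNMAPPED, ST_MAX };                 /* illustrative states  */
enum { EV_DEVICE_RM, EV_RECOVERY, EV_MAX };   /* illustrative events  */

typedef unsigned int (*disc_fn)(void *node);

static unsigned int handle_rm(void *node)    { puts("drop node"); return 0; }
static unsigned int handle_recov(void *node) { puts("recover");   return 1; }

/* Flat table indexed by state * EV_MAX + event, like lpfc_disc_action[] */
static const disc_fn actions[ST_MAX * EV_MAX] = {
        [ST_UNMAPPED * EV_MAX + EV_DEVICE_RM] = handle_rm,
        [ST_UNMAPPED * EV_MAX + EV_RECOVERY]  = handle_recov,
};

int main(void)
{
        return (int)actions[ST_UNMAPPED * EV_MAX + EV_DEVICE_RM](NULL);
}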
index 4d819e5..4d78ead 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -2002,7 +2002,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
 
 /**
  * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
- * @vport - the lpfc_vport instance requesting a localport.
+ * @vport: the lpfc_vport instance requesting a localport.
  *
  * This routine is invoked to create an nvme localport instance to bind
  * to the nvme_fc_transport.  It is called once during driver load
index bb2a4a0..c84da8e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
- * Fibre Channsel Host Bus Adapters.                               *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -3304,7 +3304,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
        bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
 
        /* Word 10 */
-       bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
        bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
               LPFC_WQE_LENLOC_WORD12);
index a4d6973..85f6a06 100644 (file)
@@ -1,8 +1,8 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.broadcom.com                                                *
@@ -132,6 +132,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
        }
 }
 
+#define LPFC_INVALID_REFTAG ((u32)-1)
+
 /**
  * lpfc_update_stats - Update statistical data for the command completion
  * @vport: The virtual port on which this call is executing.
@@ -734,7 +736,7 @@ lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 }
 
 /**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is being released.
  *
@@ -972,10 +974,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 #define BG_ERR_TGT     0x2
 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
 #define BG_ERR_SWAP    0x10
-/**
+/*
  * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
  * error injection
- **/
+ */
 #define BG_ERR_CHECK   0x20
 
 /**
@@ -1000,7 +1002,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        uint32_t op = scsi_get_prot_op(sc);
        uint32_t blksize;
        uint32_t numblks;
-       sector_t lba;
+       u32 lba;
        int rc = 0;
        int blockoff = 0;
 
@@ -1008,7 +1010,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                return 0;
 
        sgpe = scsi_prot_sglist(sc);
-       lba = scsi_get_lba(sc);
+       lba = t10_pi_ref_tag(sc->request);
+       if (lba == LPFC_INVALID_REFTAG)
+               return 0;
 
        /* First check if we need to match the LBA */
        if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
@@ -1016,11 +1020,11 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
 
                /* Make sure we have the right LBA if one is specified */
-               if ((phba->lpfc_injerr_lba < lba) ||
-                       (phba->lpfc_injerr_lba >= (lba + numblks)))
+               if (phba->lpfc_injerr_lba < (u64)lba ||
+                   (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
                        return 0;
                if (sgpe) {
-                       blockoff = phba->lpfc_injerr_lba - lba;
+                       blockoff = phba->lpfc_injerr_lba - (u64)lba;
                        numblks = sg_dma_len(sgpe) /
                                sizeof(struct scsi_dif_tuple);
                        if (numblks < blockoff)
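Editor's note: the conversions in this file replace the truncated scsi_get_lba() value with t10_pi_ref_tag(), which derives the 32-bit reference tag from the request's starting position in logical blocks and yields all-ones when the request carries no protection profile; that is why LPFC_INVALID_REFTAG is ((u32)-1) and is checked after every lookup. A hedged model of the computation, with assumed shift semantics:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT    9
#define INVALID_REFTAG  ((uint32_t)-1)

/* Hedged model of t10_pi_ref_tag(): the ref tag is the request's start
 * in logical blocks truncated to 32 bits; no-PI requests return the
 * all-ones sentinel that LPFC_INVALID_REFTAG matches. */
static uint32_t ref_tag(uint64_t pos_in_512b, unsigned int lb_shift, int has_pi)
{
        if (!has_pi)
                return INVALID_REFTAG;
        return (uint32_t)(pos_in_512b >> (lb_shift - SECTOR_SHIFT));
}

int main(void)
{
        /* 4 KiB logical blocks (shift 12): 512-byte sector 80 -> block 10 */
        printf("%u\n", ref_tag(80, 12, 1));
        printf("%u\n", ref_tag(80, 12, 0));     /* 4294967295 sentinel */
        return 0;
}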
@@ -1589,7 +1593,9 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                goto out;
 
        /* extract some info from the scsi command for pde*/
-       reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+       reftag = t10_pi_ref_tag(sc->request);
+       if (reftag == LPFC_INVALID_REFTAG)
+               goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1750,7 +1756,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        /* extract some info from the scsi command */
        blksize = lpfc_cmd_blksize(sc);
-       reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+       reftag = t10_pi_ref_tag(sc->request);
+       if (reftag == LPFC_INVALID_REFTAG)
+               goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1979,7 +1987,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                goto out;
 
        /* extract some info from the scsi command for pde*/
-       reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+       reftag = t10_pi_ref_tag(sc->request);
+       if (reftag == LPFC_INVALID_REFTAG)
+               goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2178,7 +2188,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        /* extract some info from the scsi command */
        blksize = lpfc_cmd_blksize(sc);
-       reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+       reftag = t10_pi_ref_tag(sc->request);
+       if (reftag == LPFC_INVALID_REFTAG)
+               goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2770,7 +2782,9 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
                        chk_guard = 1;
 
                src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-               start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+               start_ref_tag = t10_pi_ref_tag(cmd->request);
+               if (start_ref_tag == LPFC_INVALID_REFTAG)
+                       goto out;
                start_app_tag = src->app_tag;
                len = sgpe->length;
                while (src && protsegcnt) {
@@ -2861,8 +2875,8 @@ out:
                              SAM_STAT_CHECK_CONDITION;
                phba->bg_guard_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
-                               (unsigned long)scsi_get_lba(cmd),
+                               "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
+                               t10_pi_ref_tag(cmd->request),
                                sum, guard_tag);
 
        } else if (err_type == BGS_REFTAG_ERR_MASK) {
@@ -2873,8 +2887,8 @@ out:
 
                phba->bg_reftag_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
-                               (unsigned long)scsi_get_lba(cmd),
+                               "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
+                               t10_pi_ref_tag(cmd->request),
                                ref_tag, start_ref_tag);
 
        } else if (err_type == BGS_APPTAG_ERR_MASK) {
@@ -2885,8 +2899,8 @@ out:
 
                phba->bg_apptag_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
-                               (unsigned long)scsi_get_lba(cmd),
+                               "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
+                               t10_pi_ref_tag(cmd->request),
                                app_tag, start_app_tag);
        }
 }
@@ -3062,10 +3076,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
        if (lpfc_bgs_get_invalid_prof(bgstat)) {
                cmd->result = DID_ERROR << 16;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9072 BLKGRD: Invalid BG Profile in cmd"
-                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "9072 BLKGRD: Invalid BG Profile in cmd "
+                               "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               (unsigned long long)scsi_get_lba(cmd),
+                               t10_pi_ref_tag(cmd->request),
                                blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
@@ -3074,10 +3088,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
        if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
                cmd->result = DID_ERROR << 16;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9073 BLKGRD: Invalid BG PDIF Block in cmd"
-                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "9073 BLKGRD: Invalid BG PDIF Block in cmd "
+                               "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               (unsigned long long)scsi_get_lba(cmd),
+                               t10_pi_ref_tag(cmd->request),
                                blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
@@ -3092,10 +3106,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                              SAM_STAT_CHECK_CONDITION;
                phba->bg_guard_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9055 BLKGRD: Guard Tag error in cmd"
-                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "9055 BLKGRD: Guard Tag error in cmd "
+                               "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               (unsigned long long)scsi_get_lba(cmd),
+                               t10_pi_ref_tag(cmd->request),
                                blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
@@ -3109,10 +3123,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
 
                phba->bg_reftag_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9056 BLKGRD: Ref Tag error in cmd"
-                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "9056 BLKGRD: Ref Tag error in cmd "
+                               "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               (unsigned long long)scsi_get_lba(cmd),
+                               t10_pi_ref_tag(cmd->request),
                                blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
@@ -3126,10 +3140,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
 
                phba->bg_apptag_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9061 BLKGRD: App Tag error in cmd"
-                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "9061 BLKGRD: App Tag error in cmd "
+                               "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               (unsigned long long)scsi_get_lba(cmd),
+                               t10_pi_ref_tag(cmd->request),
                                blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
@@ -3170,10 +3184,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
        if (!ret) {
                /* No error was reported - problem in FW? */
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
-                               "9057 BLKGRD: Unknown error in cmd"
-                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "9057 BLKGRD: Unknown error in cmd "
+                               "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               (unsigned long long)scsi_get_lba(cmd),
+                               t10_pi_ref_tag(cmd->request),
                                blk_rq_sectors(cmd->request), bgstat, bghm);
 
                /* Calculate what type of error it was */
@@ -3685,7 +3699,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 /**
  * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
  * buffer
- * @phba: The Hba for which this call is being executed.
+ * @vport: Pointer to vport object.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
  * @tmo: Timeout value for IO
  *
@@ -3707,7 +3721,7 @@ lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
  * @phba: Pointer to hba context object.
  * @vport: Pointer to vport object.
  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
- * @rsp_iocb: Pointer to response iocb object which reported error.
+ * @fcpi_parm: FCP Initiator parameter.
  *
  * This function posts an event when there is a SCSI command reporting
  * error from the scsi device.
@@ -3822,10 +3836,10 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
 }
 
 /**
- * lpfc_handler_fcp_err - FCP response handler
+ * lpfc_handle_fcp_err - FCP response handler
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
- * @rsp_iocb: The response IOCB which contains FCP error.
+ * @fcpi_parm: FCP Initiator parameter.
  *
  * This routine is called to process response IOCB with status field
  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
@@ -4009,7 +4023,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
  * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
  * @phba: The hba for which this call is being executed.
  * @pwqeIn: The command WQE for the scsi cmnd.
- * @pwqeOut: The response WQE for the scsi cmnd.
+ * @wcqe: Pointer to driver response CQE object.
  *
  * This routine assigns scsi command result by looking into response WQE
  * status field appropriately. This routine handles QUEUE FULL condition as
@@ -4060,7 +4074,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 
        /* Sanity check on return of outstanding command */
        cmd = lpfc_cmd->pCmd;
-       if (!cmd || !phba) {
+       if (!cmd) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "9042 I/O completion: Not an active IO\n");
                spin_unlock(&lpfc_cmd->buf_lock);
@@ -4605,7 +4619,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 /**
  * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
- * @phba: Pointer to vport object for which I/O is executed
+ * @vport: Pointer to vport object.
  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
  * @tmo: timeout value for the IO
  *
@@ -4682,7 +4696,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
 
 /**
  * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
- * @phba: Pointer to vport object for which I/O is executed
+ * @vport: Pointer to vport object.
  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
  * @tmo: timeout value for the IO
  *
@@ -4939,7 +4953,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 }
 
 /**
- * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
+ * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
  * @phba: The Hba for which this call is being executed.
  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
  * @rspiocbq: Pointer to lpfc_iocbq data structure.
@@ -4998,7 +5012,7 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
                        break;
                default:
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "8347 Invalid device found: "
+                                       "8347 Incapable PCI reset device: "
                                        "0x%04x\n", ptr->device);
                        return -EBADSLT;
                }
@@ -5084,7 +5098,7 @@ buffer_done:
 }
 
 /**
- * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
+ * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
  * @phba: The Hba for which this call is being executed.
  *
  * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
@@ -5252,10 +5266,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                        lpfc_printf_vlog(vport,
                                         KERN_INFO, LOG_SCSI_CMD,
                                         "9033 BLKGRD: rcvd %s cmd:x%x "
-                                        "sector x%llx cnt %u pt %x\n",
+                                        "reftag x%x cnt %u pt %x\n",
                                         dif_op_str[scsi_get_prot_op(cmnd)],
                                         cmnd->cmnd[0],
-                                        (unsigned long long)scsi_get_lba(cmnd),
+                                        t10_pi_ref_tag(cmnd->request),
                                         blk_rq_sectors(cmnd->request),
                                         (cmnd->cmnd[1]>>5));
                }
@@ -5265,9 +5279,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                        lpfc_printf_vlog(vport,
                                         KERN_INFO, LOG_SCSI_CMD,
                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
-                                        "x%x sector x%llx cnt %u pt %x\n",
+                                        "x%x reftag x%x cnt %u pt %x\n",
                                         cmnd->cmnd[0],
-                                        (unsigned long long)scsi_get_lba(cmnd),
+                                        t10_pi_ref_tag(cmnd->request),
                                         blk_rq_sectors(cmnd->request),
                                         (cmnd->cmnd[1]>>5));
                }
index fa1a714..f6e1e36 100644 (file)
@@ -1,8 +1,8 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
- * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.broadcom.com                                                *
@@ -987,16 +987,10 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,
 {
        struct lpfc_nodelist *ndlp = NULL;
 
+       /* Lookup did to verify if did is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
 
-       /* The target DID could have been swapped (cable swap)
-        * we should use the ndlp from the findnode if it is
-        * available.
-        */
-       if ((!ndlp) && rrq->ndlp)
-               ndlp = rrq->ndlp;
-
        if (!ndlp)
                goto out;
 
@@ -1118,9 +1112,14 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
-       list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
-               if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
+       list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+               if (rrq->vport != vport)
+                       continue;
+
+               if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);
+
+       }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 
        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
@@ -1213,7 +1212,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
-       rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
@@ -1405,7 +1403,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
                        goto out;
                }
 
-               pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                        (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
@@ -1428,9 +1425,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
-
+                       pring = lpfc_phba_elsring(phba);
                        /* Check if TXQ queue needs to be serviced */
-                       if (!list_empty(&pring->txq))
+                       if (pring && (!list_empty(&pring->txq)))
                                lpfc_worker_wake_up(phba);
                }
        }
@@ -9635,7 +9632,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 }
 
 /**
- * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to command iocb.
  * @wqe: Pointer to the work queue entry.
@@ -10421,7 +10418,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
        return 0;
 }
 
-/**
+/*
  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
  *
 * This routine wraps the actual fcp i/o function for issuing WQE for sli-4
@@ -11593,7 +11590,7 @@ release_iocb:
  * which are aborted. The function frees memory resources used for
  * the aborted ELS commands.
  **/
-static void
+void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
 {
@@ -14170,7 +14167,7 @@ rearm_and_exit:
 }
 
 /**
- * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
+ * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
  * @cq: pointer to CQ to process
  *
  * This routine calls the cq processing routine with a handler specific
@@ -14744,7 +14741,7 @@ lpfc_sli4_hba_process_cq(struct work_struct *work)
 }
 
 /**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer
+ * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
  * @work: pointer to work element
  *
  * translates from the work handler and calls the fast-path handler.
@@ -17218,7 +17215,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli4_free_xri - Release an xri for reuse.
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
  * @phba: pointer to lpfc hba data structure.
  * @xri: xri to release.
  *
@@ -18938,7 +18935,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * __lpfc_sli4_free_rpi - Release an rpi for reuse.
  * @phba: pointer to lpfc hba data structure.
  * @rpi: rpi to free
  *
index fade044..bee74bd 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.8.0.7"
+#define LPFC_DRIVER_VERSION "12.8.0.8"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
                LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2020 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2021 Broadcom. All Rights " \
                "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
                "and/or its subsidiaries."
index ccf7b6c..da9a1f7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -190,7 +190,7 @@ lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
              ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
                return 1;
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+       lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
                        "1822 Invalid %s: %02x:%02x:%02x:%02x:"
                        "%02x:%02x:%02x:%02x\n",
                        name_type,
@@ -531,7 +531,7 @@ disable_vport(struct fc_vport *fc_vport)
        }
 
        lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
-       lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1826 Vport Disabled.\n");
        return VPORT_OK;
 }
@@ -579,7 +579,7 @@ enable_vport(struct fc_vport *fc_vport)
        }
 
 out:
-       lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1827 Vport Enabled.\n");
        return VPORT_OK;
 }
@@ -725,7 +725,7 @@ skip_logo:
        spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->port_list_lock);
-       lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1828 Vport Deleted.\n");
        scsi_host_put(shost);
        return VPORT_OK;
index 9e98977..ec9840d 100644 (file)
@@ -346,7 +346,7 @@ static void cmd_done(struct fsc_state *state, int result)
        struct scsi_cmnd *cmd;
 
        cmd = state->current_req;
-       if (cmd != 0) {
+       if (cmd) {
                cmd->result = result;
                (*cmd->scsi_done)(cmd);
                state->current_req = NULL;
@@ -467,12 +467,13 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
                dma_cmd_space = kmalloc_array(host->sg_tablesize + 2,
                                             sizeof(struct dbdma_cmd),
                                             GFP_KERNEL);
-               if (dma_cmd_space == 0) {
-                       printk(KERN_ERR "mac53c94: couldn't allocate dma "
-                              "command space for %pOF\n", node);
+       if (!dma_cmd_space) {
+               printk(KERN_ERR "mac53c94: couldn't allocate dma "
+                      "command space for %pOF\n", node);
                rc = -ENOMEM;
-                       goto out_free;
-               }
+               goto out_free;
+       }
+
        state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
        memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
               * sizeof(struct dbdma_cmd));
index d57e938..b1a2d35 100644 (file)
@@ -1427,7 +1427,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
 
 
 /**
- * megaraid_queue_command - generic queue entry point for all LLDs
+ * megaraid_queue_command_lck - generic queue entry point for all LLDs
  * @scp                : pointer to the scsi command to be executed
  * @done       : callback routine to be called after the cmd has be completed
  *
index 8df5344..abf7b40 100644 (file)
@@ -490,7 +490,7 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
 }
 
 /**
- * mraid_mm_attch_buf - Attach a free dma buffer for required size
+ * mraid_mm_attach_buf - Attach a free dma buffer for required size
  * @adp                : Adapter softstate
  * @kioc       : kioc that the buffer needs to be attached to
  * @xferlen    : required length for buffer
index 0f808d6..b5a765b 100644 (file)
@@ -2019,10 +2019,12 @@ union megasas_frame {
  * struct MR_PRIV_DEVICE - sdev private hostdata
  * @is_tm_capable: firmware managed tm_capable flag
  * @tm_busy: TM request is in progress
+ * @sdev_priv_busy: pending command per sdev
  */
 struct MR_PRIV_DEVICE {
        bool is_tm_capable;
        bool tm_busy;
+       atomic_t sdev_priv_busy;
        atomic_t r1_ldio_hint;
        u8 interface_type;
        u8 task_abort_tmo;
@@ -2212,6 +2214,7 @@ struct megasas_irq_context {
        struct irq_poll irqpoll;
        bool irq_poll_scheduled;
        bool irq_line_enable;
+       atomic_t   in_used;
 };
 
 struct MR_DRV_SYSTEM_INFO {
@@ -2446,6 +2449,7 @@ struct megasas_instance {
        bool support_pci_lane_margining;
        u8  low_latency_index_start;
        int perf_mode;
+       int iopoll_q_count;
 };
 
 struct MR_LD_VF_MAP {
@@ -2726,5 +2730,6 @@ void megasas_init_debugfs(void);
 void megasas_exit_debugfs(void);
 void megasas_setup_debugfs(struct megasas_instance *instance);
 void megasas_destroy_debugfs(struct megasas_instance *instance);
+int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
 
 #endif                         /*LSI_MEGARAID_SAS_H */
index 63a4f48..4d4e9db 100644 (file)
@@ -114,6 +114,15 @@ unsigned int enable_sdev_max_qd;
 module_param(enable_sdev_max_qd, int, 0444);
 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
 
+int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+               "This parameter is effective only if host_tagset_enable=1 &\n\t\t"
+               "It is not applicable for MFI_SERIES. &\n\t\t"
+               "Driver will work in latency mode. &\n\t\t"
+               "High iops queues are not allocated &\n\t\t"
+               );
+
 int host_tagset_enable = 1;
 module_param(host_tagset_enable, int, 0444);
 MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
@@ -207,6 +216,7 @@ static bool support_pci_lane_margining;
 static spinlock_t poll_aen_lock;
 
 extern struct dentry *megasas_debugfs_root;
+extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
 
 void
 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
@@ -475,7 +485,7 @@ megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
        return readl(&instance->reg_set->outbound_msg_0);
 }
 /**
- * megasas_clear_interrupt_xscale -    Check & clear interrupt
+ * megasas_clear_intr_xscale - Check & clear interrupt
  * @instance:  Adapter soft state
  */
 static int
@@ -658,7 +668,7 @@ megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
 }
 
 /**
- * megasas_clear_interrupt_ppc -       Check & clear interrupt
+ * megasas_clear_intr_ppc -    Check & clear interrupt
  * @instance:  Adapter soft state
  */
 static int
@@ -787,7 +797,7 @@ megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
 }
 
 /**
- * megasas_clear_interrupt_skinny -    Check & clear interrupt
+ * megasas_clear_intr_skinny - Check & clear interrupt
  * @instance:  Adapter soft state
  */
 static int
@@ -935,7 +945,7 @@ megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
 }
 
 /**
- * megasas_clear_interrupt_gen2 -      Check & clear interrupt
+ * megasas_clear_intr_gen2 -      Check & clear interrupt
  * @instance:  Adapter soft state
  */
 static int
@@ -3127,14 +3137,37 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 static int megasas_map_queues(struct Scsi_Host *shost)
 {
        struct megasas_instance *instance;
+       int qoff = 0, offset;
+       struct blk_mq_queue_map *map;
 
        instance = (struct megasas_instance *)shost->hostdata;
 
        if (shost->nr_hw_queues == 1)
                return 0;
 
-       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-                       instance->pdev, instance->low_latency_index_start);
+       offset = instance->low_latency_index_start;
+
+       /* Setup Default hctx */
+       map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+       map->nr_queues = instance->msix_vectors - offset;
+       map->queue_offset = 0;
+       blk_mq_pci_map_queues(map, instance->pdev, offset);
+       qoff += map->nr_queues;
+       offset += map->nr_queues;
+
+       /* Setup Poll hctx */
+       map = &shost->tag_set.map[HCTX_TYPE_POLL];
+       map->nr_queues = instance->iopoll_q_count;
+       if (map->nr_queues) {
+               /*
+                * The poll queue(s) doesn't have an IRQ (and hence IRQ
+                * affinity), so use the regular blk-mq cpu mapping
+                */
+               map->queue_offset = qoff;
+               blk_mq_map_queues(map);
+       }
+
+       return 0;
 }
 
 static void megasas_aen_polling(struct work_struct *work);
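Editor's note: the new map_queues splits the hardware contexts into an IRQ-driven default set, mapped to PCI MSI-x vectors, and a poll set that gets the generic blk-mq CPU mapping because it has no interrupt affinity. A small sketch of how the offsets work out; the values are illustrative only.

#include <stdio.h>

int main(void)
{
        /* Illustrative values */
        int msix_vectors = 12, low_latency_index_start = 1, iopoll_q_count = 4;
        int offset = low_latency_index_start, qoff = 0;

        int def_queues = msix_vectors - offset; /* IRQ-driven hctxs */
        printf("default map: %d queues at offset %d\n", def_queues, qoff);
        qoff += def_queues;
        printf("poll map:    %d queues at offset %d\n", iopoll_q_count, qoff);
        return 0;
}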
@@ -3446,6 +3479,7 @@ static struct scsi_host_template megasas_template = {
        .shost_attrs = megaraid_host_attrs,
        .bios_param = megasas_bios_param,
        .map_queues = megasas_map_queues,
+       .mq_poll = megasas_blk_mq_poll,
        .change_queue_depth = scsi_change_queue_depth,
        .max_segment_size = 0xffffffff,
 };
@@ -4884,6 +4918,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
 }
 
 /**
+ * megasas_host_device_list_query
  * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
  * dcmd.mbox              - reserved
  * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
@@ -5161,7 +5196,7 @@ void megasas_get_snapdump_properties(struct megasas_instance *instance)
 }
 
 /**
- * megasas_get_controller_info -       Returns FW's controller structure
+ * megasas_get_ctrl_info -     Returns FW's controller structure
  * @instance:                          Adapter soft state
  *
  * Issues an internal command (DCMD) to get the FW's controller structure.
@@ -5834,13 +5869,16 @@ __megasas_alloc_irq_vectors(struct megasas_instance *instance)
        irq_flags = PCI_IRQ_MSIX;
 
        if (instance->smp_affinity_enable)
-               irq_flags |= PCI_IRQ_AFFINITY;
+               irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
        else
                descp = NULL;
 
+       /* Do not allocate msix vectors for poll_queues.
+        * msix_vectors is always within a range of FW supported reply queue.
+        */
        i = pci_alloc_irq_vectors_affinity(instance->pdev,
                instance->low_latency_index_start,
-               instance->msix_vectors, irq_flags, descp);
+               instance->msix_vectors - instance->iopoll_q_count, irq_flags, descp);
 
        return i;
 }
@@ -5856,10 +5894,30 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
        int i;
        unsigned int num_msix_req;
 
+       instance->iopoll_q_count = 0;
+       if ((instance->adapter_type != MFI_SERIES) &&
+               poll_queues) {
+
+               instance->perf_mode = MR_LATENCY_PERF_MODE;
+               instance->low_latency_index_start = 1;
+
+               /* reserve for default and non-managed pre-vector. */
+               if (instance->msix_vectors > (poll_queues + 2))
+                       instance->iopoll_q_count = poll_queues;
+               else
+                       instance->iopoll_q_count = 0;
+
+               num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+               instance->msix_vectors = min(num_msix_req,
+                               instance->msix_vectors);
+
+       }
+
        i = __megasas_alloc_irq_vectors(instance);
 
-       if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
-           (i != instance->msix_vectors)) {
+       if (((instance->perf_mode == MR_BALANCED_PERF_MODE)
+               || instance->iopoll_q_count) &&
+           (i != (instance->msix_vectors - instance->iopoll_q_count))) {
                if (instance->msix_vectors)
                        pci_free_irq_vectors(instance->pdev);
                /* Disable Balanced IOPS mode and try realloc vectors */
@@ -5870,12 +5928,15 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
                instance->msix_vectors = min(num_msix_req,
                                instance->msix_vectors);
 
+               instance->iopoll_q_count = 0;
                i = __megasas_alloc_irq_vectors(instance);
 
        }
 
        dev_info(&instance->pdev->dev,
-               "requested/available msix %d/%d\n", instance->msix_vectors, i);
+               "requested/available msix %d/%d poll_queue %d\n",
+                       instance->msix_vectors - instance->iopoll_q_count,
+                       i, instance->iopoll_q_count);
 
        if (i > 0)
                instance->msix_vectors = i;
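Editor's note: a worked example of the vector accounting above, under assumed numbers. Poll queues are reserved only when the firmware exposes more than poll_queues + 2 reply queues, and the reserved queues are excluded from the MSI-x allocation since they never raise interrupts.

#include <stdio.h>

int main(void)
{
        /* Illustrative: FW exposes 16 reply queues, user asked for 4 */
        int msix_vectors = 16, poll_queues = 4;
        int iopoll_q_count = (msix_vectors > poll_queues + 2) ? poll_queues : 0;

        printf("IRQ vectors to allocate: %d\n", msix_vectors - iopoll_q_count);
        printf("polled reply queues:     %d\n", iopoll_q_count);
        return 0;
}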
@@ -6841,12 +6902,18 @@ static int megasas_io_attach(struct megasas_instance *instance)
                instance->smp_affinity_enable) {
                host->host_tagset = 1;
                host->nr_hw_queues = instance->msix_vectors -
-                       instance->low_latency_index_start;
+                       instance->low_latency_index_start + instance->iopoll_q_count;
+               if (instance->iopoll_q_count)
+                       host->nr_maps = 3;
+       } else {
+               instance->iopoll_q_count = 0;
        }
 
        dev_info(&instance->pdev->dev,
-               "Max firmware commands: %d shared with nr_hw_queues = %d\n",
-               instance->max_fw_cmds, host->nr_hw_queues);
+               "Max firmware commands: %d shared with default "
+               "hw_queues = %d poll_queues %d\n", instance->max_fw_cmds,
+               host->nr_hw_queues - instance->iopoll_q_count,
+               instance->iopoll_q_count);
        /*
         * Notify the mid-layer about the new controller
         */
@@ -8859,6 +8926,7 @@ static int __init megasas_init(void)
                msix_vectors = 1;
                rdpq_enable = 0;
                dual_qdepth_disable = 1;
+               poll_queues = 0;
        }
 
        /*
index 38fc946..2221175 100644 (file)
@@ -220,6 +220,40 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
        return 1;
 }
 
+static inline void
+megasas_sdev_busy_inc(struct megasas_instance *instance,
+                     struct scsi_cmnd *scmd)
+{
+       if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+               struct MR_PRIV_DEVICE *mr_device_priv_data =
+                       scmd->device->hostdata;
+               atomic_inc(&mr_device_priv_data->sdev_priv_busy);
+       }
+}
+
+static inline void
+megasas_sdev_busy_dec(struct megasas_instance *instance,
+                     struct scsi_cmnd *scmd)
+{
+       if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+               struct MR_PRIV_DEVICE *mr_device_priv_data =
+                       scmd->device->hostdata;
+               atomic_dec(&mr_device_priv_data->sdev_priv_busy);
+       }
+}
+
+static inline int
+megasas_sdev_busy_read(struct megasas_instance *instance,
+                      struct scsi_cmnd *scmd)
+{
+       if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
+               struct MR_PRIV_DEVICE *mr_device_priv_data =
+                       scmd->device->hostdata;
+               return atomic_read(&mr_device_priv_data->sdev_priv_busy);
+       }
+       return 0;
+}
+
 /**
  * megasas_get_cmd_fusion -    Get a command from the free pool
  * @instance:          Adapter soft state
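Editor's note: these helpers mirror the midlayer's per-device busy count inside the driver; the later hunk in megasas_get_msix_index() stops reading scmd->device->device_busy directly. The counter is bumped at submission and dropped at completion. A self-contained sketch of the idea:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sdev_priv_busy;       /* per-device outstanding commands */

static void submit_io(void)   { atomic_fetch_add(&sdev_priv_busy, 1); }
static void complete_io(void) { atomic_fetch_sub(&sdev_priv_busy, 1); }

int main(void)
{
        submit_io();
        submit_io();
        complete_io();
        printf("outstanding = %d\n", atomic_load(&sdev_priv_busy)); /* 1 */
        return 0;
}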
@@ -357,15 +391,9 @@ megasas_get_msix_index(struct megasas_instance *instance,
                       struct megasas_cmd_fusion *cmd,
                       u8 data_arms)
 {
-       int sdev_busy;
-
-       /* TBD - if sml remove device_busy in future, driver
-        * should track counter in internal structure.
-        */
-       sdev_busy = atomic_read(&scmd->device->device_busy);
-
        if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
-           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
+           (megasas_sdev_busy_read(instance, scmd) >
+            (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))) {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
                                        MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
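Editor's note: with the helper in place, the steering decision itself is unchanged: once a device has more than data_arms * MR_DEVICE_HIGH_IOPS_DEPTH commands in flight, requests are batched onto the low-latency/high-IOPS vectors. A worked example with assumed constants:

#include <stdio.h>

#define MR_DEVICE_HIGH_IOPS_DEPTH 8     /* assumed value for illustration */

int main(void)
{
        int data_arms = 2, sdev_busy = 17;

        /* Same predicate as megasas_get_msix_index() above */
        if (sdev_busy > data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)
                printf("steer to high-IOPS reply queues\n");
        else
                printf("steer via round-robin MSI-x index\n");
        return 0;
}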
@@ -685,6 +713,8 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
        fusion = instance->ctrl_context;
 
        count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+       count += instance->iopoll_q_count;
+
        fusion->reply_frames_desc_pool =
                        dma_pool_create("mr_reply", &instance->pdev->dev,
                                fusion->reply_alloc_sz * count, 16, 0);
@@ -779,6 +809,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
        }
 
        msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+       msix_count += instance->iopoll_q_count;
 
        fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
                                                         &instance->pdev->dev,
@@ -1129,7 +1160,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                        MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
        IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
        IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
-       IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+       IOCInitMessage->HostMSIxVectors = instance->msix_vectors + instance->iopoll_q_count;
        IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
 
        time = ktime_get_real();
@@ -1823,6 +1854,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
                 sizeof(union MPI2_SGE_IO_UNION))/16;
 
        count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+       count += instance->iopoll_q_count;
+
        for (i = 0 ; i < count; i++)
                fusion->last_reply_idx[i] = 0;
 
@@ -1835,6 +1868,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
                                MEGASAS_FUSION_IOCTL_CMDS);
        sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
 
+       for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++)
+               atomic_set(&fusion->busy_mq_poll[i], 0);
+
        if (megasas_alloc_ioc_init_frame(instance))
                return 1;
 
@@ -3390,6 +3426,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
         * Issue the command to the FW
         */
 
+       megasas_sdev_busy_inc(instance, scmd);
        megasas_fire_cmd_fusion(instance, req_desc);
 
        if (r1_cmd)
@@ -3450,6 +3487,7 @@ megasas_complete_r1_command(struct megasas_instance *instance,
                scmd_local->SCp.ptr = NULL;
                megasas_return_cmd_fusion(instance, cmd);
                scsi_dma_unmap(scmd_local);
+               megasas_sdev_busy_dec(instance, scmd_local);
                scmd_local->scsi_done(scmd_local);
        }
 }
@@ -3500,6 +3538,9 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
        if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                return IRQ_NONE;
 
+       if (irq_context && !atomic_add_unless(&irq_context->in_used, 1, 1))
+               return 0;
+
        num_completed = 0;
 
        while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
@@ -3550,6 +3591,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
                                scmd_local->SCp.ptr = NULL;
                                megasas_return_cmd_fusion(instance, cmd_fusion);
                                scsi_dma_unmap(scmd_local);
+                               megasas_sdev_busy_dec(instance, scmd_local);
                                scmd_local->scsi_done(scmd_local);
                        } else  /* Optimal VD - R1 FP command completion. */
                                megasas_complete_r1_command(instance, cmd_fusion);
@@ -3613,6 +3655,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
                                        irq_context->irq_line_enable = true;
                                        irq_poll_sched(&irq_context->irqpoll);
                                }
+                               atomic_dec(&irq_context->in_used);
                                return num_completed;
                        }
                }
@@ -3630,9 +3673,35 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
                                instance->reply_post_host_index_addr[0]);
                megasas_check_and_restore_queue_depth(instance);
        }
+
+       if (irq_context)
+               atomic_dec(&irq_context->in_used);
+
        return num_completed;
 }
 
+int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+       struct megasas_instance *instance;
+       int num_entries = 0;
+       struct fusion_context *fusion;
+
+       instance = (struct megasas_instance *)shost->hostdata;
+
+       fusion = instance->ctrl_context;
+
+       queue_num = queue_num + instance->low_latency_index_start;
+
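+       /*
+        * Serialize pollers on this reply queue: busy_mq_poll is the
+        * poll-queue counterpart of irq_context->in_used in the IRQ path.
+        */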
+       if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1))
+               return 0;
+
+       num_entries = complete_cmd_fusion(instance, queue_num, NULL);
+       atomic_dec(&fusion->busy_mq_poll[queue_num]);
+
+       return num_entries;
+}
+
 /**
  * megasas_enable_irq_poll() - enable irqpoll
  * @instance:                  Adapter soft state
@@ -4163,6 +4232,8 @@ void  megasas_reset_reply_desc(struct megasas_instance *instance)
 
        fusion = instance->ctrl_context;
        count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+       count += instance->iopoll_q_count;
+
        for (i = 0 ; i < count ; i++) {
                fusion->last_reply_idx[i] = 0;
                reply_desc = fusion->reply_frames_desc[i];
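
Context, not part of this diff: the polled queues only take effect once the
host template advertises them to blk-mq via a HCTX_TYPE_POLL queue map and
the mq_poll callback. A minimal sketch of that hookup, following the
megaraid_sas naming used above (the real wiring lives in
megaraid_sas_base.c and may differ in detail):

    static int megasas_map_queues(struct Scsi_Host *shost)
    {
    	struct megasas_instance *instance = shost_priv(shost);
    	int offset = instance->low_latency_index_start;
    	struct blk_mq_queue_map *map;

    	if (shost->nr_hw_queues == 1)
    		return 0;

    	/* Interrupt-driven hardware queues come first. */
    	map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
    	map->nr_queues = instance->msix_vectors - offset;
    	map->queue_offset = 0;
    	blk_mq_pci_map_queues(map, instance->pdev, offset);

    	/* Polled queues sit behind them and are never tied to an IRQ. */
    	map = &shost->tag_set.map[HCTX_TYPE_POLL];
    	map->nr_queues = instance->iopoll_q_count;
    	if (map->nr_queues) {
    		map->queue_offset =
    			shost->tag_set.map[HCTX_TYPE_DEFAULT].nr_queues;
    		blk_mq_map_queues(map);
    	}
    	return 0;
    }

With .map_queues and .mq_poll = megasas_blk_mq_poll set in the
scsi_host_template, I/O submitted with polling enabled (for example
io_uring with IORING_SETUP_IOPOLL) ends up in megasas_blk_mq_poll() above.
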
index 30de4b0..ce84f81 100644
@@ -1303,6 +1303,8 @@ struct fusion_context {
        u8 *sense;
        dma_addr_t sense_phys_addr;
 
+       atomic_t   busy_mq_poll[MAX_MSIX_QUEUES_FUSION];
+
        dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
        union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
        struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
index 43a3bf8..d00431f 100644
@@ -992,7 +992,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
  *one and check the value returned for GPIOCount at runtime.
  */
 #ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
-#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX    (1)
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX    (36)
 #endif
 
 typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
index ac0eef9..84c5075 100644
@@ -2905,23 +2905,22 @@ static int
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
        struct sysinfo s;
-       int dma_mask;
 
        if (ioc->is_mcpu_endpoint ||
            sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
            dma_get_required_mask(&pdev->dev) <= 32)
-               dma_mask = 32;
+               ioc->dma_mask = 32;
        /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
        else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
-               dma_mask = 63;
+               ioc->dma_mask = 63;
        else
-               dma_mask = 64;
+               ioc->dma_mask = 64;
 
-       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
-           dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
+           dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
                return -ENODEV;
 
-       if (dma_mask > 32) {
+       if (ioc->dma_mask > 32) {
                ioc->base_add_sg_single = &_base_add_sg_single_64;
                ioc->sge_size = sizeof(Mpi2SGESimple64_t);
        } else {
@@ -2931,7 +2930,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 
        si_meminfo(&s);
        ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-               dma_mask, convert_to_kb(s.totalram));
+               ioc->dma_mask, convert_to_kb(s.totalram));
 
        return 0;
 }
@@ -3678,8 +3677,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
         * IOs on the target device is >=8.
         */
 
-       if (atomic_read(&scmd->device->device_busy) >
-           MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
+       if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
                return base_mod64((
                    atomic64_add_return(1, &ioc->high_iops_outstanding) /
                    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
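
Context, not part of this diff: scsi_device_busy() is the core accessor
that replaces open-coded atomic_read()s of sdev->device_busy. Around this
kernel version it is approximately the following inline from
include/scsi/scsi_device.h (the backing counter has since changed, so
treat this as a sketch):

    static inline int scsi_device_busy(struct scsi_device *sdev)
    {
    	return atomic_read(&sdev->device_busy);
    }
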
@@ -4173,7 +4171,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }
 
 /**
- * _base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default_atomic - Default, primarily used for config pages
  * use Atomic Request Descriptor
  * @ioc: per adapter object
  * @smid: system request message index
@@ -5232,7 +5230,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
  * mpt3sas_free_enclosure_list - release memory
  * @ioc: per adapter object
  *
- * Free memory allocated during encloure add.
+ * Free memory allocated during enclosure add.
  */
 void
 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
@@ -5338,10 +5336,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                        dma_pool_free(ioc->pcie_sgl_dma_pool,
                                        ioc->pcie_sg_lookup[i].pcie_sgl,
                                        ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+                       ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
                }
                dma_pool_destroy(ioc->pcie_sgl_dma_pool);
        }
-
        if (ioc->config_page) {
                dexitprintk(ioc,
                            ioc_info(ioc, "config_page(0x%p): free\n",
@@ -5399,6 +5397,271 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
                return 0;
 }
 
+/**
+ * _base_reduce_hba_queue_depth - Retry with reduced queue depth
+ * @ioc: Adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static inline int
+_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
+{
+       int reduce_sz = 64;
+
+       if ((ioc->hba_queue_depth - reduce_sz) >
+           (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
+               ioc->hba_queue_depth -= reduce_sz;
+               return 0;
+       }
+       return -ENOMEM;
+}
+
+/**
+ * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
+ *                     for pcie sgl pools.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+       int i = 0, j = 0;
+       struct chain_tracker *ct;
+
+       ioc->pcie_sgl_dma_pool =
+           dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
+           ioc->page_size, 0);
+       if (!ioc->pcie_sgl_dma_pool) {
+               ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
+               return -ENOMEM;
+       }
+
+       ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
+       ioc->chains_per_prp_buffer =
+           min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
+       for (i = 0; i < ioc->scsiio_depth; i++) {
+               ioc->pcie_sg_lookup[i].pcie_sgl =
+                   dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+                   &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+               if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
+                       ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
+                       return -EAGAIN;
+               }
+
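+               /*
+                * A PRP page must not cross a 4 GB boundary: start and end
+                * must share the same upper 32 address bits.  On failure we
+                * retry under a 32-bit DMA mask, where this holds trivially.
+                */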
+               if (!mpt3sas_check_same_4gb_region(
+                   (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
+                       ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
+                           ioc->pcie_sg_lookup[i].pcie_sgl,
+                           (unsigned long long)
+                           ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+                       ioc->use_32bit_dma = true;
+                       return -EAGAIN;
+               }
+
+               for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
+                       ct = &ioc->chain_lookup[i].chains_per_smid[j];
+                       ct->chain_buffer =
+                           ioc->pcie_sg_lookup[i].pcie_sgl +
+                           (j * ioc->chain_segment_sz);
+                       ct->chain_buffer_dma =
+                           ioc->pcie_sg_lookup[i].pcie_sgl_dma +
+                           (j * ioc->chain_segment_sz);
+               }
+       }
+       dinitprintk(ioc, ioc_info(ioc,
+           "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+           ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+       dinitprintk(ioc, ioc_info(ioc,
+           "Number of chains can fit in a PRP page(%d)\n",
+           ioc->chains_per_prp_buffer));
+       return 0;
+}
+
+/**
+ * _base_allocate_chain_dma_pool - Allocating DMA'able memory
+ *                     for chain dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+       int i = 0, j = 0;
+       struct chain_tracker *ctr;
+
+       ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
+           ioc->chain_segment_sz, 16, 0);
+       if (!ioc->chain_dma_pool)
+               return -ENOMEM;
+
+       for (i = 0; i < ioc->scsiio_depth; i++) {
+               for (j = ioc->chains_per_prp_buffer;
+                   j < ioc->chains_needed_per_io; j++) {
+                       ctr = &ioc->chain_lookup[i].chains_per_smid[j];
+                       ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
+                           GFP_KERNEL, &ctr->chain_buffer_dma);
+                       if (!ctr->chain_buffer)
+                               return -EAGAIN;
+                       if (!mpt3sas_check_same_4gb_region((long)
+                           ctr->chain_buffer, ioc->chain_segment_sz)) {
+                               ioc_err(ioc,
+                                   "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
+                                   ctr->chain_buffer,
+                                   (unsigned long long)ctr->chain_buffer_dma);
+                               ioc->use_32bit_dma = true;
+                               return -EAGAIN;
+                       }
+               }
+       }
+       dinitprintk(ioc, ioc_info(ioc,
+           "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
+           ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
+           (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
+           ioc->chain_segment_sz))/1024));
+       return 0;
+}
+
+/**
+ * _base_allocate_sense_dma_pool - Allocating DMA'able memory
+ *                     for sense dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+       ioc->sense_dma_pool =
+           dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
+       if (!ioc->sense_dma_pool)
+               return -ENOMEM;
+       ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
+           GFP_KERNEL, &ioc->sense_dma);
+       if (!ioc->sense)
+               return -EAGAIN;
+       if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
+               dinitprintk(ioc, pr_err(
+                   "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
+                   ioc->sense, (unsigned long long) ioc->sense_dma));
+               ioc->use_32bit_dma = true;
+               return -EAGAIN;
+       }
+       ioc_info(ioc,
+           "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
+           ioc->sense, (unsigned long long)ioc->sense_dma,
+           ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
+       return 0;
+}
+
+/**
+ * _base_allocate_reply_pool - Allocating DMA'able memory
+ *                     for reply pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+       /* reply pool, 4 byte align */
+       ioc->reply_dma_pool = dma_pool_create("reply pool",
+           &ioc->pdev->dev, sz, 4, 0);
+       if (!ioc->reply_dma_pool)
+               return -ENOMEM;
+       ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+           &ioc->reply_dma);
+       if (!ioc->reply)
+               return -EAGAIN;
+       if (!mpt3sas_check_same_4gb_region((long)ioc->reply, sz)) {
+               dinitprintk(ioc, pr_err(
+                   "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
+                   ioc->reply, (unsigned long long) ioc->reply_dma));
+               ioc->use_32bit_dma = true;
+               return -EAGAIN;
+       }
+       ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+       ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+       ioc_info(ioc,
+           "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+           ioc->reply, (unsigned long long)ioc->reply_dma,
+           ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
+       return 0;
+}
+
+/**
+ * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
+ *                     for reply free dma pool.
+ * @ioc: Adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
+{
+       /* reply free queue, 16 byte align */
+       ioc->reply_free_dma_pool = dma_pool_create(
+           "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
+       if (!ioc->reply_free_dma_pool)
+               return -ENOMEM;
+       ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
+           GFP_KERNEL, &ioc->reply_free_dma);
+       if (!ioc->reply_free)
+               return -EAGAIN;
+       if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+               dinitprintk(ioc,
+                   pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
+                   ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
+               ioc->use_32bit_dma = true;
+               return -EAGAIN;
+       }
+       memset(ioc->reply_free, 0, sz);
+       dinitprintk(ioc, ioc_info(ioc,
+           "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+           ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+       dinitprintk(ioc, ioc_info(ioc,
+           "reply_free_dma (0x%llx)\n",
+           (unsigned long long)ioc->reply_free_dma));
+       return 0;
+}
+
+/**
+ * _base_allocate_reply_post_free_array - Allocating DMA'able memory
+ *                     for reply post free array.
+ * @ioc: Adapter object
+ * @reply_post_free_array_sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
+       u32 reply_post_free_array_sz)
+{
+       ioc->reply_post_free_array_dma_pool =
+           dma_pool_create("reply_post_free_array pool",
+           &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
+       if (!ioc->reply_post_free_array_dma_pool)
+               return -ENOMEM;
+       ioc->reply_post_free_array =
+           dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
+           GFP_KERNEL, &ioc->reply_post_free_array_dma);
+       if (!ioc->reply_post_free_array)
+               return -EAGAIN;
+       if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
+           reply_post_free_array_sz)) {
+               dinitprintk(ioc, pr_err(
+                   "Bad Reply Post Free Array! Array (0x%p) dma = (0x%llx)\n",
+                   ioc->reply_post_free_array,
+                   (unsigned long long) ioc->reply_post_free_array_dma));
+               ioc->use_32bit_dma = true;
+               return -EAGAIN;
+       }
+       return 0;
+}
+
 /**
  * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
  *                     for reply queues.
@@ -5492,13 +5755,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        u16 chains_needed_per_io;
        u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
        u32 retry_sz;
-       u32 rdpq_sz = 0;
+       u32 rdpq_sz = 0, sense_sz = 0;
        u16 max_request_credit, nvme_blocks_needed;
        unsigned short sg_tablesize;
        u16 sge_size;
-       int i, j;
-       int ret = 0;
-       struct chain_tracker *ct;
+       int i;
+       int ret = 0, rc = 0;
 
        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
@@ -5802,6 +6064,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
         * be required for NVMe PRP's, only each set of NVMe blocks will be
         * contiguous, so a new set is allocated for each possible I/O.
         */
+
        ioc->chains_per_prp_buffer = 0;
        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
                nvme_blocks_needed =
@@ -5816,190 +6079,67 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                        goto out;
                }
                sz = nvme_blocks_needed * ioc->page_size;
-               ioc->pcie_sgl_dma_pool =
-                       dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
-               if (!ioc->pcie_sgl_dma_pool) {
-                       ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
-                       goto out;
-               }
-
-               ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
-               ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
-                                               ioc->chains_needed_per_io);
-
-               for (i = 0; i < ioc->scsiio_depth; i++) {
-                       ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
-                               ioc->pcie_sgl_dma_pool, GFP_KERNEL,
-                               &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
-                       if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-                               ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
-                               goto out;
-                       }
-                       for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
-                               ct = &ioc->chain_lookup[i].chains_per_smid[j];
-                               ct->chain_buffer =
-                                   ioc->pcie_sg_lookup[i].pcie_sgl +
-                                   (j * ioc->chain_segment_sz);
-                               ct->chain_buffer_dma =
-                                   ioc->pcie_sg_lookup[i].pcie_sgl_dma +
-                                   (j * ioc->chain_segment_sz);
-                       }
-               }
-
-               dinitprintk(ioc,
-                           ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
-                                    ioc->scsiio_depth, sz,
-                                    (sz * ioc->scsiio_depth) / 1024));
-               dinitprintk(ioc,
-                           ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
-                                    ioc->chains_per_prp_buffer));
+               rc = _base_allocate_pcie_sgl_pool(ioc, sz);
+               if (rc == -ENOMEM)
+                       return -ENOMEM;
+               else if (rc == -EAGAIN)
+                       goto try_32bit_dma;
                total_sz += sz * ioc->scsiio_depth;
        }
 
-       ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
-           ioc->chain_segment_sz, 16, 0);
-       if (!ioc->chain_dma_pool) {
-               ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
-               goto out;
-       }
-       for (i = 0; i < ioc->scsiio_depth; i++) {
-               for (j = ioc->chains_per_prp_buffer;
-                               j < ioc->chains_needed_per_io; j++) {
-                       ct = &ioc->chain_lookup[i].chains_per_smid[j];
-                       ct->chain_buffer = dma_pool_alloc(
-                                       ioc->chain_dma_pool, GFP_KERNEL,
-                                       &ct->chain_buffer_dma);
-                       if (!ct->chain_buffer) {
-                               ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
-                               goto out;
-                       }
-               }
-               total_sz += ioc->chain_segment_sz;
-       }
-
+       rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
+       if (rc == -ENOMEM)
+               return -ENOMEM;
+       else if (rc == -EAGAIN)
+               goto try_32bit_dma;
+       total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
+               ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
        dinitprintk(ioc,
-                   ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-                            ioc->chain_depth, ioc->chain_segment_sz,
-                            (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
-
+           ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+           ioc->chain_depth, ioc->chain_segment_sz,
+           (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
        /* sense buffers, 4 byte align */
-       sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
-       ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
-                                             4, 0);
-       if (!ioc->sense_dma_pool) {
-               ioc_err(ioc, "sense pool: dma_pool_create failed\n");
-               goto out;
-       }
-       ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
-           &ioc->sense_dma);
-       if (!ioc->sense) {
-               ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
-               goto out;
-       }
-       /* sense buffer requires to be in same 4 gb region.
-        * Below function will check the same.
-        * In case of failure, new pci pool will be created with updated
-        * alignment. Older allocation and pool will be destroyed.
-        * Alignment will be used such a way that next allocation if
-        * success, will always meet same 4gb region requirement.
-        * Actual requirement is not alignment, but we need start and end of
-        * DMA address must have same upper 32 bit address.
-        */
-       if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
-               //Release Sense pool & Reallocate
-               dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
-               dma_pool_destroy(ioc->sense_dma_pool);
-               ioc->sense = NULL;
-
-               ioc->sense_dma_pool =
-                       dma_pool_create("sense pool", &ioc->pdev->dev, sz,
-                                               roundup_pow_of_two(sz), 0);
-               if (!ioc->sense_dma_pool) {
-                       ioc_err(ioc, "sense pool: pci_pool_create failed\n");
-                       goto out;
-               }
-               ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
-                               &ioc->sense_dma);
-               if (!ioc->sense) {
-                       ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
-                       goto out;
-               }
-       }
+       sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+       rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
+       if (rc == -ENOMEM)
+               return -ENOMEM;
+       else if (rc == -EAGAIN)
+               goto try_32bit_dma;
+       total_sz += sense_sz;
        ioc_info(ioc,
-           "sense pool(0x%p)- dma(0x%llx): depth(%d),"
-           "element_size(%d), pool_size(%d kB)\n",
+           "sense pool(0x%p) - dma(0x%llx): depth(%d), "
+           "element_size(%d), pool_size(%d kB)\n",
            ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
-           SCSI_SENSE_BUFFERSIZE, sz / 1024);
+           SCSI_SENSE_BUFFERSIZE, sense_sz / 1024);
-
-       total_sz += sz;
-
        /* reply pool, 4 byte align */
        sz = ioc->reply_free_queue_depth * ioc->reply_sz;
-       ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
-                                             4, 0);
-       if (!ioc->reply_dma_pool) {
-               ioc_err(ioc, "reply pool: dma_pool_create failed\n");
-               goto out;
-       }
-       ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
-           &ioc->reply_dma);
-       if (!ioc->reply) {
-               ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
-               goto out;
-       }
-       ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
-       ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-       dinitprintk(ioc,
-                   ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-                            ioc->reply, ioc->reply_free_queue_depth,
-                            ioc->reply_sz, sz / 1024));
-       dinitprintk(ioc,
-                   ioc_info(ioc, "reply_dma(0x%llx)\n",
-                            (unsigned long long)ioc->reply_dma));
+       rc = _base_allocate_reply_pool(ioc, sz);
+       if (rc == -ENOMEM)
+               return -ENOMEM;
+       else if (rc == -EAGAIN)
+               goto try_32bit_dma;
        total_sz += sz;
 
        /* reply free queue, 16 byte align */
        sz = ioc->reply_free_queue_depth * 4;
-       ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
-           &ioc->pdev->dev, sz, 16, 0);
-       if (!ioc->reply_free_dma_pool) {
-               ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
-               goto out;
-       }
-       ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
-           &ioc->reply_free_dma);
-       if (!ioc->reply_free) {
-               ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
-               goto out;
-       }
-       dinitprintk(ioc,
-                   ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-                            ioc->reply_free, ioc->reply_free_queue_depth,
-                            4, sz / 1024));
+       rc = _base_allocate_reply_free_dma_pool(ioc, sz);
+       if (rc == -ENOMEM)
+               return -ENOMEM;
+       else if (rc == -EAGAIN)
+               goto try_32bit_dma;
        dinitprintk(ioc,
                    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
                             (unsigned long long)ioc->reply_free_dma));
        total_sz += sz;
-
        if (ioc->rdpq_array_enable) {
                reply_post_free_array_sz = ioc->reply_queue_count *
                    sizeof(Mpi2IOCInitRDPQArrayEntry);
-               ioc->reply_post_free_array_dma_pool =
-                   dma_pool_create("reply_post_free_array pool",
-                   &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
-               if (!ioc->reply_post_free_array_dma_pool) {
-                       dinitprintk(ioc,
-                                   ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
-                       goto out;
-               }
-               ioc->reply_post_free_array =
-                   dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
-                   GFP_KERNEL, &ioc->reply_post_free_array_dma);
-               if (!ioc->reply_post_free_array) {
-                       dinitprintk(ioc,
-                                   ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
-                       goto out;
-               }
+               rc = _base_allocate_reply_post_free_array(ioc,
+                   reply_post_free_array_sz);
+               if (rc == -ENOMEM)
+                       return -ENOMEM;
+               else if (rc == -EAGAIN)
+                       goto try_32bit_dma;
        }
        ioc->config_page_sz = 512;
        ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
@@ -6022,6 +6162,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                 ioc->shost->sg_tablesize);
        return 0;
 
+try_32bit_dma:
+       _base_release_memory_pools(ioc);
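+       /*
+        * Two recovery paths: an allocation that straddled a 4 GB boundary
+        * retries under a 32-bit coherent DMA mask; a plain failure retries
+        * with a reduced HBA queue depth.  All pools are rebuilt either way.
+        */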
+       if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
+               /* Change dma coherent mask to 32 bit and reallocate */
+               if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+                       pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
+                           pci_name(ioc->pdev));
+                       return -ENODEV;
+               }
+       } else if (_base_reduce_hba_queue_depth(ioc) != 0)
+               return -ENOMEM;
+       goto retry_allocation;
+
  out:
        return -ENOMEM;
 }
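
Each pool helper above follows the same allocate-check-fallback shape;
schematically (paraphrasing the calls above, not a literal excerpt):

    rc = _base_allocate_<pool>(ioc, sz);    /* <pool>: sense, reply, ... */
    if (rc == -ENOMEM)          /* dma_pool_create() failed: fatal */
    	return -ENOMEM;
    if (rc == -EAGAIN)          /* alloc failed or pool straddled 4 GB */
    	goto try_32bit_dma;     /* release all pools, adjust, retry */
    total_sz += sz;
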
@@ -7252,6 +7405,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 
        ioc_info(ioc, "sending diag reset !!\n");
 
+       pci_cfg_access_lock(ioc->pdev);
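+       /*
+        * Config space stays locked for the entire diag reset; both the
+        * success and failure paths below unlock it.
+        */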
+
        drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
 
        count = 0;
@@ -7342,10 +7497,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
                goto out;
        }
 
+       pci_cfg_access_unlock(ioc->pdev);
        ioc_info(ioc, "diag reset: SUCCESS\n");
        return 0;
 
  out:
+       pci_cfg_access_unlock(ioc->pdev);
        ioc_err(ioc, "diag reset: FAILED\n");
        return -EFAULT;
 }
@@ -7682,6 +7839,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 
        ioc->rdpq_array_enable_assigned = 0;
        ioc->use_32bit_dma = false;
+       ioc->dma_mask = 64;
        if (ioc->is_aero_ioc)
                ioc->base_readl = &_base_readl_aero;
        else
index 315aee6..98558d9 100644
@@ -77,9 +77,9 @@
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION         "37.100.00.00"
+#define MPT3SAS_DRIVER_VERSION         "37.101.00.00"
 #define MPT3SAS_MAJOR_VERSION          37
-#define MPT3SAS_MINOR_VERSION          100
+#define MPT3SAS_MINOR_VERSION          101
 #define MPT3SAS_BUILD_VERSION          0
 #define MPT3SAS_RELEASE_VERSION        00
 
@@ -1371,6 +1371,7 @@ struct MPT3SAS_ADAPTER {
        u16             thresh_hold;
        u8              high_iops_queues;
        u32             drv_support_bitmap;
+       u32             dma_mask;
        bool            enable_sdev_max_qd;
        bool            use_32bit_dma;
 
index 8238843..55cd329 100644
@@ -1781,7 +1781,7 @@ mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt3sas_config_set_driver_trigger_pg0 - write driver trigger page 0
+ * _config_set_driver_trigger_pg0 - write driver trigger page 0
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -1915,7 +1915,7 @@ mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt3sas_config_set_driver_trigger_pg1 - write driver trigger page 1
+ * _config_set_driver_trigger_pg1 - write driver trigger page 1
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -2066,7 +2066,7 @@ mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt3sas_config_set_driver_trigger_pg2 - write driver trigger page 2
+ * _config_set_driver_trigger_pg2 - write driver trigger page 2
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -2226,7 +2226,7 @@ mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt3sas_config_set_driver_trigger_pg3 - write driver trigger page 3
+ * _config_set_driver_trigger_pg3 - write driver trigger page 3
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
@@ -2383,7 +2383,7 @@ mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt3sas_config_set_driver_trigger_pg4 - write driver trigger page 4
+ * _config_set_driver_trigger_pg4 - write driver trigger page 4
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
  * @config_page: contents of the config page
index 44f9a05..e7582fb 100644
@@ -454,7 +454,7 @@ out:
 }
 
 /**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
@@ -486,7 +486,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd.
+ * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
@@ -503,7 +503,7 @@ void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
@@ -2759,7 +2759,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 #ifdef CONFIG_COMPAT
 /**
- *ctl_ioctl_compat - main ioctl entry point (compat)
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
  * @file: ?
  * @cmd: ?
  * @arg: ?
@@ -2777,7 +2777,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
 }
 
 /**
- *ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
+ * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
  * @file: ?
  * @cmd: ?
  * @arg: ?
@@ -3045,7 +3045,7 @@ fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
 static DEVICE_ATTR_RO(fw_queue_depth);
 
 /**
- * sas_address_show - sas address
+ * host_sas_address_show - sas address
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3203,7 +3203,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
 {
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
-       Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
+       Mpi2IOUnitPage3_t io_unit_pg3;
        Mpi2ConfigReply_t mpi_reply;
        u16 backup_rail_monitor_status = 0;
        u16 ioc_status;
@@ -3220,17 +3220,10 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
        if (ioc->pci_error_recovery || ioc->remove_host)
                goto out;
 
-       /* allocate upto GPIOVal 36 entries */
-       sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
-       io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
-       if (!io_unit_pg3) {
-               rc = -ENOMEM;
-               ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
-                       __func__, sz);
-               goto out;
-       }
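+       /*
+        * Mpi2IOUnitPage3_t now has room for 36 GPIO values (see the
+        * MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX change above), so a stack copy
+        * replaces the old 36-entry kzalloc().
+        */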
+       sz = sizeof(io_unit_pg3);
+       memset(&io_unit_pg3, 0, sz);
 
-       if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
+       if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
            0) {
                ioc_err(ioc, "%s: failed reading iounit_pg3\n",
                        __func__);
@@ -3246,19 +3239,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
                goto out;
        }
 
-       if (io_unit_pg3->GPIOCount < 25) {
-               ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
-                       __func__, io_unit_pg3->GPIOCount);
+       if (io_unit_pg3.GPIOCount < 25) {
+               ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
+                       __func__, io_unit_pg3.GPIOCount);
                rc = -EINVAL;
                goto out;
        }
 
        /* BRM status is in bit zero of GPIOVal[24] */
-       backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
+       backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
        rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
 
  out:
-       kfree(io_unit_pg3);
        mutex_unlock(&ioc->pci_access_mutex);
        return rc;
 }
@@ -3669,7 +3661,7 @@ static DEVICE_ATTR_RW(diag_trigger_scsi);
 
 
 /**
- * diag_trigger_scsi_show - show the diag_trigger_mpi attribute
+ * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
  * @cdev: pointer to embedded class device
  * @attr: ?
  * @buf: the buffer returned
@@ -3928,7 +3920,7 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR_RO(sas_device_handle);
 
 /**
- * sas_ncq_io_prio_show - send prioritized io commands to device
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
  * @dev: pointer to embedded device
  * @attr: ?
  * @buf: the buffer returned
index 6aa6de7..d00aca3 100644
@@ -749,9 +749,10 @@ __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
+ * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
  *                             sas address from sas_device_list list
  * @ioc: per adapter object
+ * @sas_address: device sas address
  * @port: port number
  *
  * Search for _sas_device object corresponding to provided sas address,
@@ -3423,7 +3424,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
                MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
                tr_timeout, tr_method);
        /* Check for busy commands after reset */
-       if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+       if (r == SUCCESS && scsi_device_busy(scmd->device))
                r = FAILED;
  out:
        sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
@@ -4518,7 +4519,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * _scsih_check_for_pending_internal_cmds - check for pending internal messages
+ * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
  * @ioc: per adapter object
  * @smid: system request message index
  *
@@ -6174,10 +6175,10 @@ enum hba_port_matched_codes {
  * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
  *                                     from HBA port table
  * @ioc: per adapter object
- * @port_entry - hba port entry from temporary port table which needs to be
+ * @port_entry: hba port entry from temporary port table which needs to be
  *             searched for matched entry in the HBA port table
- * @matched_port_entry - save matched hba port entry here
- * @count - count of matched entries
+ * @matched_port_entry: save matched hba port entry here
+ * @count: count of matched entries
  *
  * return type of matched entry found.
  */
@@ -6483,6 +6484,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
                if (!vphy)
                        return NULL;
 
+               if (!port->vphys_mask)
+                       INIT_LIST_HEAD(&port->vphys_list);
+
                /*
                 * Enable bit corresponding to HBA phy number on its
                 * parent hba_port object's vphys_mask field.
@@ -6490,7 +6494,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
                port->vphys_mask |= (1 << phy_num);
                vphy->phy_mask |= (1 << phy_num);
 
-               INIT_LIST_HEAD(&port->vphys_list);
                list_add_tail(&vphy->list, &port->vphys_list);
 
                ioc_info(ioc,
@@ -6952,6 +6955,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * mpt3sas_expander_remove - removing expander object
  * @ioc: per adapter object
  * @sas_address: expander sas_address
+ * @port: hba port entry
  */
 void
 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -10219,8 +10223,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
        Mpi2ExpanderPage0_t expander_pg0;
        Mpi2SasDevicePage0_t sas_device_pg0;
        Mpi26PCIeDevicePage0_t pcie_device_pg0;
-       Mpi2RaidVolPage1_t volume_pg1;
-       Mpi2RaidVolPage0_t volume_pg0;
+       Mpi2RaidVolPage1_t *volume_pg1;
+       Mpi2RaidVolPage0_t *volume_pg0;
        Mpi2RaidPhysDiskPage0_t pd_pg0;
        Mpi2EventIrConfigElement_t element;
        Mpi2ConfigReply_t mpi_reply;
@@ -10235,6 +10239,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
        u8 retry_count;
        unsigned long flags;
 
+       /*
+        * The RAID volume pages are heap-allocated to keep this
+        * function's stack frame small.
+        */
+       volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
+       if (!volume_pg0)
+               return;
+
+       volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
+       if (!volume_pg1) {
+               kfree(volume_pg0);
+               return;
+       }
+
        ioc_info(ioc, "scan devices: start\n");
 
        _scsih_sas_host_refresh(ioc);
@@ -10344,7 +10358,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
        /* volumes */
        handle = 0xFFFF;
        while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
-           &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+           volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
@@ -10352,15 +10366,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                                 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
-               handle = le16_to_cpu(volume_pg1.DevHandle);
+               handle = le16_to_cpu(volume_pg1->DevHandle);
                spin_lock_irqsave(&ioc->raid_device_lock, flags);
                raid_device = _scsih_raid_device_find_by_wwid(ioc,
-                   le64_to_cpu(volume_pg1.WWID));
+                   le64_to_cpu(volume_pg1->WWID));
                spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
                if (raid_device)
                        continue;
                if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
-                   &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+                   volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
                     sizeof(Mpi2RaidVolPage0_t)))
                        continue;
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@@ -10370,17 +10384,17 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                                 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
-               if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
-                   volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
-                   volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+               if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+                   volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+                   volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
                        memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
                        element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
-                       element.VolDevHandle = volume_pg1.DevHandle;
+                       element.VolDevHandle = volume_pg1->DevHandle;
                        ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
-                                volume_pg1.DevHandle);
+                                volume_pg1->DevHandle);
                        _scsih_sas_volume_add(ioc, &element);
                        ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
-                                volume_pg1.DevHandle);
+                                volume_pg1->DevHandle);
                }
        }
 
@@ -10468,12 +10482,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
                         handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
        }
+
+       kfree(volume_pg0);
+       kfree(volume_pg1);
+
        ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
        ioc_info(ioc, "scan devices: complete\n");
 }
 
 /**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
@@ -10514,7 +10532,7 @@ mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
@@ -10802,7 +10820,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
                        pr_notice("cannot be powered and devices connected\n");
                        pr_notice("to this active cable will not be seen\n");
                        pr_notice("This active cable requires %d mW of power\n",
-                            ActiveCableEventData->ActiveCablePowerRequirement);
+                           le32_to_cpu(
+                           ActiveCableEventData->ActiveCablePowerRequirement));
                        break;
 
                case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
@@ -12281,7 +12300,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
 }
 
 /**
- * scsih__ncq_prio_supp - Check for NCQ command priority support
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
  * @sdev: scsi device struct
  *
  * This is called when a user indicates they would like to enable
index 6f47082..0681dae 100644
@@ -62,7 +62,7 @@
 
 /**
  * _transport_get_port_id_by_sas_phy - get zone's port id that Phy belong to
- * @phy - sas_phy object
+ * @phy: sas_phy object
  *
  * Return Port number
  */
@@ -339,10 +339,11 @@ struct rep_manu_reply {
 };
 
 /**
- * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
  * @ioc: per adapter object
  * @sas_address: expander sas address
  * @edev: the sas_expander_device object
+ * @port_id: Port ID number
  *
  * Fills in the sas_expander_device object when SMP port is created.
  *
@@ -671,7 +672,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
  * @ioc: per adapter object
  * @handle: handle of attached device
  * @sas_address: sas address of parent expander or sas host
- * @port: hba port entry
+ * @hba_port: hba port entry
  * Context: This function will acquire ioc->sas_node_lock.
  *
  * Adding new port object to the sas_node->sas_port_list.
index 327fdd5..8ff976c 100644
@@ -40,7 +40,7 @@
 #define mv_dprintk(format, arg...)     \
        printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
 #else
-#define mv_dprintk(format, arg...)
+#define mv_dprintk(format, arg...) no_printk(format, ## arg)
 #endif
 #define MV_MAX_U32                     0xffffffff
 
index 71b6a1f..9d57436 100644
@@ -66,9 +66,9 @@ static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
 static bool tag_is_empty(struct mvumi_tag *st)
 {
        if (st->top == 0)
-               return 1;
+               return true;
        else
-               return 0;
+               return false;
 }
 
 static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
@@ -182,7 +182,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
  * @mhba:              Adapter soft state
  * @scmd:              SCSI command from the mid-layer
  * @sgl_p:             SGL to be filled in
- * @sg_count           return the number of SG elements
+ * @sg_count:          return the number of SG elements
  *
  * If successful, this function returns 0. otherwise, it returns -1.
  */
@@ -1295,6 +1295,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba)
  * mvumi_complete_cmd -        Completes a command
  * @mhba:                      Adapter soft state
  * @cmd:                       Command to be completed
+ * @ob_frame:                  Command response
  */
 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
                                        struct mvumi_rsp_frame *ob_frame)
@@ -2076,8 +2077,8 @@ error:
 
 /**
  * mvumi_queue_command -       Queue entry point
+ * @shost:                     Scsi host to queue command on
  * @scmd:                      SCSI command to be queued
- * @done:                      Callback entry point
  */
 static int mvumi_queue_command(struct Scsi_Host *shost,
                                        struct scsi_cmnd *scmd)
index 3d8e91c..56767f8 100644
@@ -82,7 +82,7 @@ static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
        return NULL;
 }
 
-/**
+/*
  * myrb_create_mempools - allocates auxiliary data structures
  *
  * Return: true on success, false otherwise.
@@ -134,7 +134,7 @@ static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
        return true;
 }
 
-/**
+/*
  * myrb_destroy_mempools - tears down the memory pools for the controller
  */
 static void myrb_destroy_mempools(struct myrb_hba *cb)
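
Context for the /** -> /* conversions in this file: scripts/kernel-doc
treats every comment opening with /** as kernel-doc and warns (under W=1)
when it lacks the expected function name line and parameter descriptions.
Demoting these plain comments to /* silences those warnings without
touching the comment text.
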
@@ -146,7 +146,7 @@ static void myrb_destroy_mempools(struct myrb_hba *cb)
        dma_pool_destroy(cb->dcdb_pool);
 }
 
-/**
+/*
  * myrb_reset_cmd - reset command block
  */
 static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
@@ -157,7 +157,7 @@ static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
        cmd_blk->status = 0;
 }
 
-/**
+/*
  * myrb_qcmd - queues command block for execution
  */
 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
@@ -177,7 +177,7 @@ static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
        cb->next_cmd_mbox = next_mbox;
 }
 
-/**
+/*
  * myrb_exec_cmd - executes command block and waits for completion.
  *
  * Return: command status
@@ -198,7 +198,7 @@ static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
        return cmd_blk->status;
 }
 
-/**
+/*
  * myrb_exec_type3 - executes a type 3 command and waits for completion.
  *
  * Return: command status
@@ -220,7 +220,7 @@ static unsigned short myrb_exec_type3(struct myrb_hba *cb,
        return status;
 }
 
-/**
+/*
  * myrb_exec_type3D - executes a type 3D command and waits for completion.
  *
  * Return: command status
@@ -332,7 +332,7 @@ static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
                          ev_buf, ev_addr);
 }
 
-/**
+/*
  * myrb_get_errtable - retrieves the error table from the controller
  *
  * Executes a type 3 command and logs the error table from the controller.
@@ -377,7 +377,7 @@ static void myrb_get_errtable(struct myrb_hba *cb)
        }
 }
 
-/**
+/*
  * myrb_get_ldev_info - retrieves the logical device table from the controller
  *
  * Executes a type 3 command and updates the logical device table.
@@ -427,7 +427,7 @@ static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
        return status;
 }
 
-/**
+/*
  * myrb_get_rbld_progress - get rebuild progress information
  *
  * Executes a type 3 command and returns the rebuild progress
@@ -462,11 +462,10 @@ static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
        return status;
 }
 
-/**
+/*
  * myrb_update_rbld_progress - updates the rebuild status
  *
  * Updates the rebuild status for the attached logical devices.
- *
  */
 static void myrb_update_rbld_progress(struct myrb_hba *cb)
 {
@@ -523,7 +522,7 @@ static void myrb_update_rbld_progress(struct myrb_hba *cb)
        cb->last_rbld_status = status;
 }
 
-/**
+/*
  * myrb_get_cc_progress - retrieve the rebuild status
  *
  * Execute a type 3 Command and fetch the rebuild / consistency check
@@ -571,7 +570,7 @@ static void myrb_get_cc_progress(struct myrb_hba *cb)
                          rbld_buf, rbld_addr);
 }
 
-/**
+/*
  * myrb_bgi_control - updates background initialisation status
  *
  * Executes a type 3B command and updates the background initialisation status
@@ -660,7 +659,7 @@ static void myrb_bgi_control(struct myrb_hba *cb)
                          bgi, bgi_addr);
 }
 
-/**
+/*
  * myrb_hba_enquiry - updates the controller status
  *
  * Executes a DAC_V1_Enquiry command and updates the controller status.
@@ -772,7 +771,7 @@ static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
        return MYRB_STATUS_SUCCESS;
 }
 
-/**
+/*
  * myrb_set_pdev_state - sets the device state for a physical device
  *
  * Return: command status
@@ -796,7 +795,7 @@ static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
        return status;
 }
 
-/**
+/*
  * myrb_enable_mmio - enables the Memory Mailbox Interface
  *
  * PD and P controller types have no memory mailbox, but still need the
@@ -901,7 +900,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
        return true;
 }
 
-/**
+/*
  * myrb_get_hba_config - reads the configuration information
  *
  * Reads the configuration information from the controller and
@@ -1193,7 +1192,7 @@ out_free:
        return ret;
 }
 
-/**
+/*
  * myrb_unmap - unmaps controller structures
  */
 static void myrb_unmap(struct myrb_hba *cb)
@@ -1229,7 +1228,7 @@ static void myrb_unmap(struct myrb_hba *cb)
        }
 }
 
-/**
+/*
  * myrb_cleanup - cleanup controller structures
  */
 static void myrb_cleanup(struct myrb_hba *cb)
@@ -2243,7 +2242,7 @@ static struct scsi_host_template myrb_template = {
 
 /**
  * myrb_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
  */
 static int myrb_is_raid(struct device *dev)
 {
@@ -2254,7 +2253,7 @@ static int myrb_is_raid(struct device *dev)
 
 /**
  * myrb_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
  */
 static void myrb_get_resync(struct device *dev)
 {
@@ -2281,7 +2280,7 @@ static void myrb_get_resync(struct device *dev)
 
 /**
  * myrb_get_state - get raid volume status
- * @dev the device struct object
+ * @dev: the device struct object
  */
 static void myrb_get_state(struct device *dev)
 {
@@ -2480,7 +2479,7 @@ static void myrb_monitor(struct work_struct *work)
        queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
 }
 
-/**
+/*
  * myrb_err_status - reports controller BIOS messages
  *
  * Controller BIOS messages are passed through the Error Status Register
@@ -2810,7 +2809,7 @@ static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrb_privdata DAC960_LA_privdata = {
+static struct myrb_privdata DAC960_LA_privdata = {
        .hw_init =      DAC960_LA_hw_init,
        .irq_handler =  DAC960_LA_intr_handler,
        .mmio_size =    DAC960_LA_mmio_size,
@@ -3086,7 +3085,7 @@ static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrb_privdata DAC960_PG_privdata = {
+static struct myrb_privdata DAC960_PG_privdata = {
        .hw_init =      DAC960_PG_hw_init,
        .irq_handler =  DAC960_PG_intr_handler,
        .mmio_size =    DAC960_PG_mmio_size,
@@ -3289,7 +3288,7 @@ static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrb_privdata DAC960_PD_privdata = {
+static struct myrb_privdata DAC960_PD_privdata = {
        .hw_init =      DAC960_PD_hw_init,
        .irq_handler =  DAC960_PD_intr_handler,
        .mmio_size =    DAC960_PD_mmio_size,
@@ -3487,7 +3486,7 @@ static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrb_privdata DAC960_P_privdata = {
+static struct myrb_privdata DAC960_P_privdata = {
        .hw_init =      DAC960_P_hw_init,
        .irq_handler =  DAC960_P_intr_handler,
        .mmio_size =    DAC960_PD_mmio_size,
index 329fd02..d5ec1cd 100644
@@ -1190,7 +1190,6 @@ static ssize_t consistency_check_show(struct device *dev,
        struct myrs_hba *cs = shost_priv(sdev->host);
        struct myrs_ldev_info *ldev_info;
        unsigned short ldev_num;
-       unsigned char status;
 
        if (sdev->channel < cs->ctlr_info->physchan_present)
                return snprintf(buf, 32, "physical device - not checking\n");
@@ -1199,7 +1198,7 @@ static ssize_t consistency_check_show(struct device *dev,
        if (!ldev_info)
                return -ENXIO;
        ldev_num = ldev_info->ldev_num;
-       status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+       myrs_get_ldev_info(cs, ldev_num, ldev_info);
        if (ldev_info->cc_active)
                return snprintf(buf, 32, "checking block %zu of %zu\n",
                                (size_t)ldev_info->cc_lba,
@@ -1959,7 +1958,7 @@ static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
 
 /**
  * myrs_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * @dev: the device struct object
  */
 static int
 myrs_is_raid(struct device *dev)
@@ -1972,7 +1971,7 @@ myrs_is_raid(struct device *dev)
 
 /**
  * myrs_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * @dev: the device struct object
  */
 static void
 myrs_get_resync(struct device *dev)
@@ -1981,14 +1980,13 @@ myrs_get_resync(struct device *dev)
        struct myrs_hba *cs = shost_priv(sdev->host);
        struct myrs_ldev_info *ldev_info = sdev->hostdata;
        u64 percent_complete = 0;
-       u8 status;
 
        if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
                return;
        if (ldev_info->rbld_active) {
                unsigned short ldev_num = ldev_info->ldev_num;
 
-               status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+               myrs_get_ldev_info(cs, ldev_num, ldev_info);
                percent_complete = ldev_info->rbld_lba * 100;
                do_div(percent_complete, ldev_info->cfg_devsize);
        }
@@ -1997,7 +1995,7 @@ myrs_get_resync(struct device *dev)
 
 /**
  * myrs_get_state - get raid volume status
- * @dev the device struct object
+ * @dev: the device struct object
  */
 static void
 myrs_get_state(struct device *dev)
@@ -2658,7 +2656,7 @@ static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrs_privdata DAC960_GEM_privdata = {
+static struct myrs_privdata DAC960_GEM_privdata = {
        .hw_init =              DAC960_GEM_hw_init,
        .irq_handler =          DAC960_GEM_intr_handler,
        .mmio_size =            DAC960_GEM_mmio_size,
@@ -2908,7 +2906,7 @@ static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrs_privdata DAC960_BA_privdata = {
+static struct myrs_privdata DAC960_BA_privdata = {
        .hw_init =              DAC960_BA_hw_init,
        .irq_handler =          DAC960_BA_intr_handler,
        .mmio_size =            DAC960_BA_mmio_size,
@@ -3158,7 +3156,7 @@ static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-struct myrs_privdata DAC960_LP_privdata = {
+static struct myrs_privdata DAC960_LP_privdata = {
        .hw_init =              DAC960_LP_hw_init,
        .irq_handler =          DAC960_LP_intr_handler,
        .mmio_size =            DAC960_LP_mmio_size,
index e44b1a0..134bbd2 100644
@@ -309,6 +309,7 @@ static struct scsi_host_template nsp32_template = {
 
 #define NSP32_DEBUG_BUF_LEN            100
 
+__printf(4, 5)
 static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
 {
        va_list args;
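
The new __printf(4, 5) annotation marks nsp32_message()'s fourth argument as a printf-style format string consumed by the varargs starting at position five, so the compiler can check format/argument agreement; that is what surfaces the 0x%lx-versus-u32 mismatches corrected later in this file. A standalone sketch of the mechanism (log_msg and the local macro are illustrative, not driver code):

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

__printf(2, 3)
static void log_msg(int level, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "<%d> ", level);
	vfprintf(stderr, fmt, args);	/* log_msg(1, "%s", 42) now warns */
	va_end(args);
}

int main(void)
{
	log_msg(1, "size=0x%x\n", 0x10000u);	/* checked at compile time */
	return 0;
}
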
@@ -580,7 +581,6 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
        int             status;
        unsigned short  command = 0;
        unsigned int    msgout  = 0;
-       unsigned short  execph;
        int             i;
 
        nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
@@ -604,7 +604,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
        /*
         * clear execph
         */
-       execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);
+       nsp32_read2(base, SCSI_EXECUTE_PHASE);
 
        /*
         * clear FIFO counter to set CDBs
@@ -876,7 +876,7 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
 
                        if (le32_to_cpu(sgt[i].len) > 0x10000) {
                                nsp32_msg(KERN_ERR,
-                                       "can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
+                                       "can't transfer over 64KB at a time, size=0x%x", le32_to_cpu(sgt[i].len));
                                return FALSE;
                        }
                        nsp32_dbg(NSP32_DEBUG_SGLIST,
@@ -1780,8 +1780,6 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
 {
        nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
        unsigned int base   = SCpnt->device->host->io_port;
-       //unsigned short command;
-       long new_sgtp;
        int i;
        
        nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
@@ -1795,14 +1793,6 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
                nsp32_build_nop(SCpnt);
        }
 
-       /*
-        * Set SGTP ADDR current entry for restarting AUTOSCSI, 
-        * because SGTP is incremented next point.
-        * There is few statement in the specification...
-        */
-       new_sgtp = data->cur_lunt->sglun_paddr + 
-                  (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
-
        /*
         * send messages
         */
@@ -2219,17 +2209,12 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
 {
        nsp32_hw_data   *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
        nsp32_target     *target     = data->cur_target;
-       nsp32_sync_table *synct;
        unsigned char     get_period = data->msginbuf[3];
        unsigned char     get_offset = data->msginbuf[4];
        int               entry;
-       int               syncnum;
 
        nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
 
-       synct   = data->synct;
-       syncnum = data->syncnum;
-
        /*
         * If this initiator sent the SDTR message, then the target responds
         * SDTR; set the initiator's SYNCREG and ACKWIDTH from the SDTR parameters.
@@ -2731,7 +2716,7 @@ static int nsp32_detect(struct pci_dev *pdev)
        res = request_region(host->io_port, host->n_io_port, "nsp32");
        if (res == NULL) {
                nsp32_msg(KERN_ERR, 
-                         "I/O region 0x%lx+0x%lx is already used",
+                         "I/O region 0x%x+0x%x is already used",
                          data->BaseAddress, data->NumAddress);
                goto free_irq;
         }
@@ -2837,8 +2822,8 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
 static void nsp32_do_bus_reset(nsp32_hw_data *data)
 {
        unsigned int   base = data->BaseAddress;
-       unsigned short intrdat;
        int i;
+       unsigned short __maybe_unused intrdat;
 
        nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");
 
@@ -2908,7 +2893,8 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
 {
        int vendor = data->pci_devid->vendor;
        int device = data->pci_devid->device;
-       int ret, val, i;
+       int ret, i;
+       int __maybe_unused val;
 
        /*
         * EEPROM checking.
@@ -3278,7 +3264,8 @@ static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct Scsi_Host *host = pci_get_drvdata(pdev);
 
-       nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host);
+       nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state.event=%x, slot=%s, host=0x%p",
+                 pdev, state.event, pci_name(pdev), host);
 
        pci_save_state     (pdev);
        pci_disable_device (pdev);
index 12035ba..1921e69 100644
@@ -299,7 +299,7 @@ static DEVICE_ATTR(sas_spec_support, S_IRUGO,
                   pm8001_ctl_sas_spec_support_show, NULL);
 
 /**
- * pm8001_ctl_sas_address_show - sas address
+ * pm8001_ctl_host_sas_address_show - sas address
  * @cdev: pointer to embedded class device
  * @attr: device attribute (unused)
  * @buf: the buffer returned
@@ -518,7 +518,7 @@ static ssize_t event_log_size_show(struct device *cdev,
 }
 static DEVICE_ATTR_RO(event_log_size);
 /**
- * pm8001_ctl_aap_log_show - IOP event log
+ * pm8001_ctl_iop_log_show - IOP event log
  * @cdev: pointer to embedded class device
  * @attr: device attribute (unused)
  * @buf: the buffer returned
index 31e5455..d048455 100644
@@ -1175,7 +1175,7 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 
 #ifndef PM8001_USE_MSIX
 /**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
  */
 static void
@@ -1248,7 +1248,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 }
 
 /**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
  * @vec: unused
  */
@@ -3219,7 +3219,7 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
 }
 
 /**
- * asd_get_attached_sas_addr -- extract/generate attached SAS address
+ * pm8001_get_attached_sas_addr - extract/generate attached SAS address
  * @phy: pointer to asd_phy
  * @sas_addr: pointer to buffer where the SAS address is to be written
  *
@@ -3546,7 +3546,7 @@ int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * fw_flash_update_resp - Response from FW for flash update command.
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  */
index bd626ef..0f7b3ff 100644
@@ -184,7 +184,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 #ifdef PM8001_USE_TASKLET
 
 /**
- * tasklet for 64 msi-x interrupt handler
+ * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler
  * @opaque: the passed general host adapter struct
  * Note: pm8001_tasklet is common for pm8001 & pm80xx
  */
@@ -864,7 +864,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
 }
 
 /**
- * pm8001_set_phy_settings_ven_117c_12Gb : Configure ATTO 12Gb PHY settings
+ * pm8001_set_phy_settings_ven_117c_12G() : Configure ATTO 12Gb PHY settings
  * @pm8001_ha : our adapter
  */
 static
@@ -963,6 +963,7 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
 {
        u32 i = 0, j = 0;
        int flag = 0, rc = 0;
+       int nr_irqs = pm8001_ha->number_of_intr;
 
        if (pm8001_ha->chip_id != chip_8001)
                flag &= ~IRQF_SHARED;
@@ -971,7 +972,10 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
                   "pci_enable_msix request number of intr %d\n",
                   pm8001_ha->number_of_intr);
 
-       for (i = 0; i < pm8001_ha->number_of_intr; i++) {
+       if (nr_irqs > ARRAY_SIZE(pm8001_ha->intr_drvname))
+               nr_irqs = ARRAY_SIZE(pm8001_ha->intr_drvname);
+
+       for (i = 0; i < nr_irqs; i++) {
                snprintf(pm8001_ha->intr_drvname[i],
                        sizeof(pm8001_ha->intr_drvname[0]),
                        "%s-%d", pm8001_ha->name, i);
index a98d449..6f4753d 100644
@@ -738,7 +738,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                if (pm8001_ha->chip_id != chip_8001) {
                        pm8001_dev->setds_completion = &completion_setstate;
                        PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-                               pm8001_dev, 0x01);
+                               pm8001_dev, DS_OPERATIONAL);
                        wait_for_completion(&completion_setstate);
                }
                res = -TMF_RESP_FUNC_FAILED;
@@ -981,6 +981,7 @@ void pm8001_open_reject_retry(
 }
 
 /**
+ * pm8001_I_T_nexus_reset()
  * Standard mandates link reset for ATA (type 0) and hard reset for
  * SSP (type 1), only for RECOVERY
   * @dev: the device structure for the device to reset.
@@ -1110,7 +1111,7 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
                sas_put_local_phy(phy);
                pm8001_dev->setds_completion = &completion_setstate;
                rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-                       pm8001_dev, 0x01);
+                       pm8001_dev, DS_OPERATIONAL);
                wait_for_completion(&completion_setstate);
        } else {
                tmf_task.tmf = TMF_LU_RESET;
@@ -1229,7 +1230,7 @@ int pm8001_abort_task(struct sas_task *task)
                        /* 1. Set Device state as Recovery */
                        pm8001_dev->setds_completion = &completion;
                        PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-                               pm8001_dev, 0x03);
+                               pm8001_dev, DS_IN_RECOVERY);
                        wait_for_completion(&completion);
 
                        /* 2. Send Phy Control Hard Reset */
@@ -1300,7 +1301,7 @@ int pm8001_abort_task(struct sas_task *task)
                        reinit_completion(&completion);
                        pm8001_dev->setds_completion = &completion;
                        PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-                               pm8001_dev, 0x01);
+                               pm8001_dev, DS_OPERATIONAL);
                        wait_for_completion(&completion);
                } else {
                        rc = pm8001_exec_internal_task_abort(pm8001_ha,
index 039ed91..9ae9f1e 100644
@@ -281,7 +281,6 @@ struct pm8001_prd {
  * CCB(Command Control Block)
  */
 struct pm8001_ccb_info {
-       struct list_head        entry;
        struct sas_task         *task;
        u32                     n_elem;
        u32                     ccb_tag;
index 8431556..74ed072 100644
@@ -1420,7 +1420,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
+ * pm80xx_chip_init - the main init function that initialize whole PM8001 chip.
  * @pm8001_ha: our hba card information
  */
 static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
@@ -1574,7 +1574,7 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
  * the FW register status to the originated status.
  * @pm8001_ha: our hba card information
  */
@@ -1703,7 +1703,7 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
  */
 static void
@@ -1714,7 +1714,7 @@ pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
+ * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
  */
 static void
@@ -1724,7 +1724,7 @@ pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
+ * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
  * @vec: interrupt number to enable
  */
@@ -1743,7 +1743,7 @@ pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 }
 
 /**
- * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt
+ * pm80xx_chip_interrupt_disable - disable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
  * @vec: interrupt number to disable
  */
@@ -4183,7 +4183,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag,
 }
 
 /**
- * pm8001_chip_smp_req - send a SMP task to FW
+ * pm80xx_chip_smp_req - send a SMP task to FW
  * @pm8001_ha: our hba card information.
  * @ccb: the ccb information this request used.
  */
@@ -4766,7 +4766,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
 }
 
 /**
- * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND
+ * pm80xx_chip_phy_stop_req - start phy via PHY_STOP COMMAND
  * @pm8001_ha: our hba card information.
  * @phy_id: the phy id which we wanted to start up.
  */
@@ -4898,7 +4898,7 @@ static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_isr - PM8001 isr handler.
+ * pm80xx_chip_isr - PM8001 isr handler.
  * @pm8001_ha: our hba card information.
  * @vec: irq number.
  */
index 834556e..bffd9a9 100644
@@ -443,15 +443,14 @@ static void pmcraid_disable_interrupts(
  * pmcraid_enable_interrupts - Enables specified interrupts
  *
  * @pinstance: pointer to per adapter instance structure
- * @intr: interrupts to enable
+ * @intrs: interrupts to enable
  *
  * Return Value
  *      None
  */
 static void pmcraid_enable_interrupts(
        struct pmcraid_instance *pinstance,
-       u32 intrs
-)
+       u32 intrs)
 {
        u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
        u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
@@ -533,15 +532,13 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
                pinstance->ioa_unit_check = 1;
 }
 
+static void pmcraid_ioa_reset(struct pmcraid_cmd *);
 /**
  * pmcraid_bist_done - completion function for PCI BIST
- * @cmd: pointer to reset command
+ * @t: pointer to reset command
  * Return Value
  *     none
  */
-
-static void pmcraid_ioa_reset(struct pmcraid_cmd *);
-
 static void pmcraid_bist_done(struct timer_list *t)
 {
        struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
@@ -595,7 +592,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
 
 /**
  * pmcraid_reset_alert_done - completion routine for reset_alert
- * @cmd: pointer to command block used in reset sequence
+ * @t: pointer to command block used in reset sequence
  * Return value
  *  None
  */
@@ -626,16 +623,16 @@ static void pmcraid_reset_alert_done(struct timer_list *t)
        }
 }
 
+static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
 /**
  * pmcraid_reset_alert - alerts IOA for a possible reset
- * @cmd : command block to be used for reset sequence.
+ * @cmd: command block to be used for reset sequence.
  *
  * Return Value
  *     returns 0 if pci config-space is accessible and RESET_DOORBELL is
  *     successfully written to IOA. Returns non-zero in case pci_config_space
  *     is not accessible
  */
-static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
 static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
 {
        struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -676,7 +673,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
 /**
  * pmcraid_timeout_handler -  Timeout handler for internally generated ops
  *
- * @cmd : pointer to command structure, that got timedout
+ * @t: pointer to command structure that timed out
  *
  * This function blocks host requests and initiates an adapter reset.
  *
@@ -844,7 +841,7 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
 }
 
 /**
- * pmcraid_fire_command - sends an IOA command to adapter
+ * _pmcraid_fire_command - sends an IOA command to adapter
  *
  * This function adds the given block into pending command list
  * and returns without waiting
@@ -961,6 +958,7 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
                         pmcraid_timeout_handler);
 }
 
+static void pmcraid_querycfg(struct pmcraid_cmd *);
 /**
  * pmcraid_get_fwversion_done - completion function for get_fwversion
  *
@@ -969,8 +967,6 @@ static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
  * Return Value
  *     none
  */
-static void pmcraid_querycfg(struct pmcraid_cmd *);
-
 static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
 {
        struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -1382,10 +1378,9 @@ static void pmcraid_netlink_release(void)
        genl_unregister_family(&pmcraid_event_family);
 }
 
-/**
+/*
  * pmcraid_notify_aen - sends event msg to user space application
  * @pinstance: pointer to adapter instance structure
- * @type: HCAM type
  *
  * Return value:
  *     0 if success, error value in case of any failure.
@@ -1393,8 +1388,7 @@ static void pmcraid_netlink_release(void)
 static int pmcraid_notify_aen(
        struct pmcraid_instance *pinstance,
        struct pmcraid_aen_msg  *aen_msg,
-       u32    data_size
-)
+       u32    data_size)
 {
        struct sk_buff *skb;
        void *msg_header;
@@ -1771,6 +1765,8 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
        }
 }
 
+static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
 /**
  * pmcraid_process_ldn - op done function for an LDN
  * @cmd: pointer to command block
@@ -1778,9 +1774,6 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
  * Return value
  *   none
  */
-static void pmcraid_initiate_reset(struct pmcraid_instance *);
-static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
-
 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
 {
        struct pmcraid_instance *pinstance = cmd->drv_inst;
@@ -1878,14 +1871,14 @@ static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
        pmcraid_cancel_ldn(cmd);
 }
 
+static void pmcraid_reinit_buffers(struct pmcraid_instance *);
+
 /**
  * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
  * @pinstance: pointer to adapter instance structure
  * Return Value
  *  1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
  */
-static void pmcraid_reinit_buffers(struct pmcraid_instance *);
-
 static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
 {
        u32 intrs;
@@ -2687,6 +2680,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
  * pmcraid_reset_device - device reset handler functions
  *
  * @scsi_cmd: scsi command struct
+ * @timeout: command timeout
  * @modifier: reset modifier indicating the reset sequence to be performed
  *
  * This function issues a device reset to the affected device.
@@ -2699,8 +2693,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
 static int pmcraid_reset_device(
        struct scsi_cmnd *scsi_cmd,
        unsigned long timeout,
-       u8 modifier
-)
+       u8 modifier)
 {
        struct pmcraid_cmd *cmd;
        struct pmcraid_instance *pinstance;
@@ -3008,7 +3001,7 @@ static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
 }
 
 /**
- * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
+ * pmcraid_eh_device_reset_handler - bus/target/device reset handler callbacks
  *
  * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
  *
@@ -3307,7 +3300,7 @@ static int pmcraid_copy_sglist(
 }
 
 /**
- * pmcraid_queuecommand - Queue a mid-layer request
+ * pmcraid_queuecommand_lck - Queue a mid-layer request
  * @scsi_cmd: scsi command struct
  * @done: done function
  *
@@ -3430,7 +3423,7 @@ static int pmcraid_queuecommand_lck(
 
 static DEF_SCSI_QCMD(pmcraid_queuecommand)
 
-/**
+/*
  * pmcraid_open - char node "open" entry, allowed only to users with admin access
  */
 static int pmcraid_chr_open(struct inode *inode, struct file *filep)
@@ -3447,7 +3440,7 @@ static int pmcraid_chr_open(struct inode *inode, struct file *filep)
        return 0;
 }
 
-/**
+/*
  * pmcraid_fasync - Async notifier registration from applications
  *
  * This function adds the calling process to a driver global queue. When an
@@ -3559,7 +3552,8 @@ static void pmcraid_release_passthrough_ioadls(
  * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
  *
  * @pinstance: pointer to adapter instance structure
- * @cmd: ioctl code
+ * @ioctl_cmd: ioctl code
+ * @buflen: unused
  * @arg: pointer to pmcraid_passthrough_buffer user buffer
  *
  * Return value
@@ -3894,7 +3888,7 @@ static int pmcraid_check_ioctl_buffer(
        return 0;
 }
 
-/**
+/*
  *  pmcraid_ioctl - char node ioctl entry point
  */
 static long pmcraid_chr_ioctl(
@@ -3963,7 +3957,7 @@ static long pmcraid_chr_ioctl(
        return retval;
 }
 
-/**
+/*
  * File operations structure for management interface
  */
 static const struct file_operations pmcraid_fops = {
@@ -3981,6 +3975,7 @@ static const struct file_operations pmcraid_fops = {
 /**
  * pmcraid_show_log_level - Display adapter's error logging level
  * @dev: class device struct
+ * @attr: unused
  * @buf: buffer
  *
  * Return value:
@@ -4000,6 +3995,7 @@ static ssize_t pmcraid_show_log_level(
 /**
  * pmcraid_store_log_level - Change the adapter's error logging level
  * @dev: class device struct
+ * @attr: unused
  * @buf: buffer
  * @count: not used
  *
@@ -4042,6 +4038,7 @@ static struct device_attribute pmcraid_log_level_attr = {
 /**
  * pmcraid_show_drv_version - Display driver version
  * @dev: class device struct
+ * @attr: unused
  * @buf: buffer
  *
  * Return value:
@@ -4066,8 +4063,9 @@ static struct device_attribute pmcraid_driver_version_attr = {
 };
 
 /**
- * pmcraid_show_io_adapter_id - Display driver assigned adapter id
+ * pmcraid_show_adapter_id - Display driver assigned adapter id
  * @dev: class device struct
+ * @attr: unused
  * @buf: buffer
  *
  * Return value:
@@ -4589,7 +4587,7 @@ pmcraid_release_control_blocks(
 
 /**
  * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
- * @pinstance - pointer to per adapter instance structure
+ * @pinstance: pointer to per adapter instance structure
  *
  * Allocates memory for command blocks using kernel slab allocator.
  *
@@ -5134,7 +5132,7 @@ static void pmcraid_shutdown(struct pci_dev *pdev)
 }
 
 
-/**
+/*
  * pmcraid_get_minor - returns unused minor number from minor number bitmap
  */
 static unsigned short pmcraid_get_minor(void)
@@ -5146,7 +5144,7 @@ static unsigned short pmcraid_get_minor(void)
        return minor;
 }
 
-/**
+/*
  * pmcraid_release_minor - releases given minor back to minor number bitmap
  */
 static void pmcraid_release_minor(unsigned short minor)
index 88a592d..0583b07 100644
@@ -11,8 +11,6 @@
 #include <scsi/fc/fc_fip.h>
 #include <scsi/fc/fc_fc2.h>
 #include <scsi/scsi_tcq.h>
-#include <linux/version.h>
-
 
 /* qedf_hsi.h needs to before included any qed includes */
 #include "qedf_hsi.h"
index 2386bfb..f4d8112 100644
@@ -10,7 +10,6 @@
 #include <linux/kernel.h>
 #include <linux/compiler.h>
 #include <linux/string.h>
-#include <linux/version.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <scsi/scsi_transport.h>
index 69c5b5e..2455d14 100644
@@ -276,10 +276,8 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
        }
 
        udev = kzalloc(sizeof(*udev), GFP_KERNEL);
-       if (!udev) {
-               rc = -ENOMEM;
+       if (!udev)
                goto err_udev;
-       }
 
        udev->uio_dev = -1;
 
index 46de254..8f35174 100644
@@ -633,13 +633,13 @@ static int qla1280_read_nvram(struct scsi_qla_host *ha)
         * to be read a word (two bytes) at a time.
         *
         * The net result of this would be that the word (and
-        * doubleword) quantites in the firmware would be correct, but
+        * doubleword) quantities in the firmware would be correct, but
         * the bytes would be pairwise reversed.  Since most of the
-        * firmware quantites are, in fact, bytes, we do an extra
+        * firmware quantities are, in fact, bytes, we do an extra
         * le16_to_cpu() in the firmware read routine.
         *
         * The upshot of all this is that the bytes in the firmware
-        * are in the correct places, but the 16 and 32 bit quantites
+        * are in the correct places, but the 16 and 32 bit quantities
         * are still in little endian format.  We fix that up below by
         * doing extra reverses on them */
        nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
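
To make the comment concrete: when a 16-bit access path hands back each word with its two bytes in the opposite order from the stored byte stream, 16-bit values read correctly but byte-sized fields land in the swapped slot, and one extra per-word swap (what le16_to_cpu() amounts to on a big-endian host) restores them. A standalone model, assuming a little-endian build machine (none of this is driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model a word-at-a-time NVRAM read that byte-swaps each pair. */
static uint16_t nvram_read_word(const uint8_t *nvram, unsigned int word)
{
	return (uint16_t)(nvram[2 * word] << 8 | nvram[2 * word + 1]);
}

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)(x << 8 | x >> 8);
}

int main(void)
{
	const uint8_t nvram[4] = { 'I', 'S', 'P', '!' };
	uint8_t image[4];

	for (unsigned int i = 0; i < 2; i++) {
		uint16_t w = swab16(nvram_read_word(nvram, i)); /* the extra swap */

		memcpy(&image[2 * i], &w, sizeof(w));
	}
	/* Without swab16() this prints "SI!P"; with it, "ISP!". */
	printf("%.4s\n", image);
	return 0;
}
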
@@ -687,7 +687,7 @@ qla1280_info(struct Scsi_Host *host)
  * The mid-level driver tries to ensure that queuecommand never gets invoked
  * concurrently with itself or the interrupt handler (although the
  * interrupt handler may call this routine as part of request-completion
- * handling).   Unfortunely, it sometimes calls the scheduler in interrupt
+ * handling).   Unfortunately, it sometimes calls the scheduler in interrupt
  * context which is a big NO! NO!.
  **************************************************************************/
 static int
@@ -3054,7 +3054,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 
        /* Check for empty slot in outstanding command list. */
        for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
-                    (ha->outstanding_cmds[cnt] != 0); cnt++) ;
+            ha->outstanding_cmds[cnt]; cnt++);
 
        if (cnt >= MAX_OUTSTANDING_COMMANDS) {
                status = SCSI_MLQUEUE_HOST_BUSY;
index 63391c9..3aa9869 100644
@@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
        vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
 
        if (IS_FWI2_CAPABLE(ha)) {
+               int rval;
+
                stats = dma_alloc_coherent(&ha->pdev->dev,
                    sizeof(*stats), &stats_dma, GFP_KERNEL);
                if (!stats) {
@@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
                }
 
                /* reset firmware statistics */
-               qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+               rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+               if (rval != QLA_SUCCESS)
+                       ql_log(ql_log_warn, vha, 0x70de,
+                              "Resetting ISP statistics failed: rval = %d\n",
+                              rval);
 
                dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
                    stats, stats_dma);
index bee8cf9..aef2f7c 100644
@@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
        struct bsg_job *bsg_job = sp->u.bsg_job;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 
+       sp->free(sp);
+
        bsg_reply->result = res;
        bsg_job_done(bsg_job, bsg_reply->result,
                       bsg_reply->reply_payload_rcv_len);
-       sp->free(sp);
 }
 
 void qla2x00_bsg_sp_free(srb_t *sp)
@@ -2583,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
        }
 
        data = kzalloc(response_len, GFP_KERNEL);
+       if (!data) {
+               kfree(req_data);
+               return -ENOMEM;
+       }
 
        ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
                                    data, response_len);
index 144a893..f2d0559 100644
@@ -113,8 +113,13 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
        uint32_t stat;
        ulong i, j, timer = 6000000;
        int rval = QLA_FUNCTION_FAILED;
+       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+       if (qla_pci_disconnected(vha, reg))
+               return rval;
+
        for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
                if (i + dwords > ram_dwords)
                        dwords = ram_dwords - i;
@@ -138,6 +143,9 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
                while (timer--) {
                        udelay(5);
 
+                       if (qla_pci_disconnected(vha, reg))
+                               return rval;
+
                        stat = rd_reg_dword(&reg->host_status);
                        /* Check for pending interrupts. */
                        if (!(stat & HSRX_RISC_INT))
@@ -192,9 +200,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
        uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
        uint32_t stat;
        ulong i, j, timer = 6000000;
+       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
+       if (qla_pci_disconnected(vha, reg))
+               return rval;
+
        for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
                if (i + dwords > ram_dwords)
                        dwords = ram_dwords - i;
@@ -216,8 +228,10 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
                ha->flags.mbox_int = 0;
                while (timer--) {
                        udelay(5);
-                       stat = rd_reg_dword(&reg->host_status);
+                       if (qla_pci_disconnected(vha, reg))
+                               return rval;
 
+                       stat = rd_reg_dword(&reg->host_status);
                        /* Check for pending interrupts. */
                        if (!(stat & HSRX_RISC_INT))
                                continue;
index 2e59e75..9eb708e 100644
@@ -308,7 +308,7 @@ struct qla2xxx_fw_dump {
 };
 
 #define QL_MSGHDR "qla2xxx"
-#define QL_DBG_DEFAULT1_MASK    0x1e400000
+#define QL_DBG_DEFAULT1_MASK    0x1e600000
 
 #define ql_log_fatal           0 /* display fatal errors */
 #define ql_log_warn            1 /* display critical errors */
index 49b42b4..def4d99 100644
@@ -396,6 +396,7 @@ typedef union {
        } b;
 } port_id_t;
 #define INVALID_PORT_ID        0xFFFFFF
+#define ISP_REG16_DISCONNECT 0xFFFF
 
 static inline le_id_t be_id_to_le(be_id_t id)
 {
@@ -1527,7 +1528,7 @@ struct init_sf_cb {
         * BIT_12 = Remote Write Optimization (1 - Enabled, 0 - Disabled)
         * BIT 11-0 = Reserved
         */
-       uint16_t flags;
+       __le16  flags;
        uint8_t reserved1[32];
        uint16_t discard_OHRB_timeout_value;
        uint16_t remote_write_opt_queue_num;
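
Retyping flags as __le16 records the firmware's byte order in the type itself, so sparse (make C=1) flags any assignment that skips the conversion helper. A kernel-style sketch of the effect (__le16, u16 and cpu_to_le16() are the real kernel types and helpers; the struct and setter below are illustrative):

struct sf_cb_example {
	__le16 flags;			/* firmware expects little endian */
};

static void sf_set_flags(struct sf_cb_example *sf, u16 host_flags)
{
	sf->flags = cpu_to_le16(host_flags);	/* correct on any host */
	/* sf->flags = host_flags; would now draw a sparse type warning */
}
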
@@ -3815,7 +3816,7 @@ struct qlt_hw_data {
        __le32 __iomem *atio_q_in;
        __le32 __iomem *atio_q_out;
 
-       struct qla_tgt_func_tmpl *tgt_ops;
+       const struct qla_tgt_func_tmpl *tgt_ops;
        struct qla_tgt_vp_map *tgt_vp_map;
 
        int saved_set;
@@ -3857,6 +3858,13 @@ struct qla_hw_data_stat {
        u32 num_mpi_reset;
 };
 
+/* refer to pcie_do_recovery reference */
+typedef enum {
+       QLA_PCI_RESUME,
+       QLA_PCI_ERR_DETECTED,
+       QLA_PCI_MMIO_ENABLED,
+       QLA_PCI_SLOT_RESET,
+} pci_error_state_t;
 /*
  * Qlogic host adapter specific data structure.
 */
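
Reading the enum against the standard struct pci_error_handlers callbacks (the callback names are the PCI core's real API; the pairing below is the natural interpretation of the comment, not quoted from the patch):

/*
 *   .error_detected  ->  QLA_PCI_ERR_DETECTED   (link reported down)
 *   .mmio_enabled    ->  QLA_PCI_MMIO_ENABLED   (register access restored)
 *   .slot_reset      ->  QLA_PCI_SLOT_RESET     (after slot/link reset)
 *   .resume          ->  QLA_PCI_RESUME         (traffic may restart)
 */
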
@@ -4607,6 +4615,7 @@ struct qla_hw_data {
 #define DEFAULT_ZIO_THRESHOLD 5
 
        struct qla_hw_data_stat stat;
+       pci_error_state_t pci_error_state;
 };
 
 struct active_regions {
@@ -4727,7 +4736,7 @@ typedef struct scsi_qla_host {
 #define FX00_CRITEMP_RECOVERY  25
 #define FX00_HOST_INFO_RESEND  26
 #define QPAIR_ONLINE_CHECK_NEEDED      27
-#define SET_NVME_ZIO_THRESHOLD_NEEDED  28
+#define DO_EEH_RECOVERY                28
 #define DETECT_SFP_CHANGE      29
 #define N2N_LOGIN_NEEDED       30
 #define IOCB_WORK_ACTIVE       31
index 6486f97..fae5cae 100644
@@ -224,6 +224,7 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla_eeh_work(struct work_struct *);
 extern void qla2x00_sp_compl(srb_t *sp, int);
 extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
 extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
@@ -235,6 +236,8 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
 void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
                               struct purex_item *pkt);
+void qla_pci_set_eeh_busy(struct scsi_qla_host *);
+void qla_schedule_eeh_work(struct scsi_qla_host *);
 
 /*
  * Global Functions in qla_mid.c source file.
index 517d358..5b6e04a 100644
@@ -1247,7 +1247,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
 }
 
 /**
- * qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
  * @vha: HA context
  *
  * This command uses the old Execute SNS Command mailbox routine.
@@ -1479,7 +1479,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
 }
 
 /**
- * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
+ * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
  * @p: CT request buffer
  * @cmd: GS command
  * @rsp_size: response size in bytes
@@ -1582,7 +1582,7 @@ qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
 }
 
 /**
- * qla2x00_hba_attributes()  perform HBA attributes registration
+ * qla2x00_hba_attributes() perform HBA attributes registration
  * @vha: HA context
  * @entries: number of entries to use
  * @callopt: Option to issue extended or standard FDMI
@@ -1837,7 +1837,7 @@ done:
 }
 
 /**
- * qla2x00_port_attributes()  perform Port attributes registration
+ * qla2x00_port_attributes() perform Port attributes registration
  * @vha: HA context
  * @entries: number of entries to use
  * @callopt: Option to issue extended or standard FDMI
@@ -2272,7 +2272,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
 }
 
 /**
- * qla2x00_fdmi_rprt()  perform RPRT registration
+ * qla2x00_fdmi_rprt() perform RPRT registration
  * @vha: HA context
  * @callopt: Option to issue extended or standard FDMI
  *           command parameter
@@ -3443,6 +3443,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                                if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
                                        fcport->scan_state = QLA_FCPORT_SCAN;
+                                       if (fcport->loop_id == FC_NO_LOOP_ID)
+                                               fcport->logout_on_delete = 0;
+                                       else
+                                               fcport->logout_on_delete = 1;
                                }
                        }
                        goto login_logout;
index f01f071..9c5782e 100644
@@ -718,6 +718,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                ql_dbg(ql_dbg_disc, vha, 0x20e0,
                    "%s %8phC login gen changed\n",
                    __func__, fcport->port_name);
+               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                return;
        }
 
@@ -2766,6 +2767,49 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
        return qla81xx_write_mpi_register(vha, mb);
 }
 
+static int
+qla_chk_risc_recovery(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       __le16 __iomem *mbptr = &reg->mailbox0;
+       int i;
+       u16 mb[32];
+       int rc = QLA_SUCCESS;
+
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               return rc;
+
+       /* this check is only valid after RISC reset */
+       mb[0] = rd_reg_word(mbptr);
+       mbptr++;
+       if (mb[0] == 0xf) {
+               rc = QLA_FUNCTION_FAILED;
+
+               for (i = 1; i < 32; i++) {
+                       mb[i] = rd_reg_word(mbptr);
+                       mbptr++;
+               }
+
+               ql_log(ql_log_warn, vha, 0x1015,
+                      "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+                      mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
+               ql_log(ql_log_warn, vha, 0x1015,
+                      "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+                      mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
+                      mb[15]);
+               ql_log(ql_log_warn, vha, 0x1015,
+                      "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+                      mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
+                      mb[23]);
+               ql_log(ql_log_warn, vha, 0x1015,
+                      "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
+                      mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
+                      mb[31]);
+       }
+       return rc;
+}
+
 /**
  * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
  * @vha: HA context
@@ -2782,6 +2826,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
        uint16_t wd;
        static int abts_cnt; /* ISP abort retry counts */
        int rval = QLA_SUCCESS;
+       int print = 1;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
@@ -2870,17 +2915,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
        rd_reg_dword(&reg->hccr);
 
        wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+       mdelay(10);
        rd_reg_dword(&reg->hccr);
 
-       rd_reg_word(&reg->mailbox0);
-       for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
-           rval == QLA_SUCCESS; cnt--) {
+       wd = rd_reg_word(&reg->mailbox0);
+       for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
                barrier();
-               if (cnt)
-                       udelay(5);
-               else
+               if (cnt) {
+                       mdelay(1);
+                       if (print && qla_chk_risc_recovery(vha))
+                               print = 0;
+
+                       wd = rd_reg_word(&reg->mailbox0);
+               } else {
                        rval = QLA_FUNCTION_TIMEOUT;
+
+                       ql_log(ql_log_warn, vha, 0x015e,
+                              "RISC reset timeout\n");
+               }
        }
+
        if (rval == QLA_SUCCESS)
                set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
 
@@ -5512,13 +5566,14 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
        if (fcport->port_type & FCT_NVME_DISCOVERY)
                rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
 
+       fc_remote_port_rolechg(rport, rport_ids.roles);
+
        ql_dbg(ql_dbg_disc, vha, 0x20ee,
-           "%s %8phN. rport %p is %s mode\n",
-           __func__, fcport->port_name, rport,
+           "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
+           __func__, fcport->port_name, vha->host_no,
+           rport->scsi_target_id, rport,
            (fcport->port_type == FCT_TARGET) ? "tgt" :
            ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
-
-       fc_remote_port_rolechg(rport, rport_ids.roles);
 }
 
 /*
@@ -6877,22 +6932,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-       if (!ha->flags.eeh_busy) {
-               /* Make sure for ISP 82XX IO DMA is complete */
-               if (IS_P3P_TYPE(ha)) {
-                       qla82xx_chip_reset_cleanup(vha);
-                       ql_log(ql_log_info, vha, 0x00b4,
-                           "Done chip reset cleanup.\n");
-
-                       /* Done waiting for pending commands.
-                        * Reset the online flag.
-                        */
-                       vha->flags.online = 0;
-               }
+       /* Make sure for ISP 82XX IO DMA is complete */
+       if (IS_P3P_TYPE(ha)) {
+               qla82xx_chip_reset_cleanup(vha);
+               ql_log(ql_log_info, vha, 0x00b4,
+                      "Done chip reset cleanup.\n");
 
-               /* Requeue all commands in outstanding command list. */
-               qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+               /* Done waiting for pending commands. Reset online flag */
+               vha->flags.online = 0;
        }
+
+       /* Requeue all commands in outstanding command list. */
+       qla2x00_abort_all_cmds(vha, DID_RESET << 16);
        /* memory barrier */
        wmb();
 }
@@ -6923,6 +6974,12 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                if (vha->hw->flags.port_isolated)
                        return status;
 
+               if (qla2x00_isp_reg_stat(ha)) {
+                       ql_log(ql_log_info, vha, 0x803f,
+                              "ISP Abort - ISP reg disconnect, exiting.\n");
+                       return status;
+               }
+
                if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
                        ha->flags.chip_reset_done = 1;
                        vha->flags.online = 1;
@@ -6962,8 +7019,18 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 
                ha->isp_ops->get_flash_version(vha, req->ring);
 
+               if (qla2x00_isp_reg_stat(ha)) {
+                       ql_log(ql_log_info, vha, 0x803f,
+                              "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
+                       return status;
+               }
                ha->isp_ops->nvram_config(vha);
 
+               if (qla2x00_isp_reg_stat(ha)) {
+                       ql_log(ql_log_info, vha, 0x803f,
+                              "ISP Abort - ISP reg disconnect post nvram config, exiting.\n");
+                       return status;
+               }
                if (!qla2x00_restart_isp(vha)) {
                        clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
 
index e80e41b..82937c6 100644
@@ -432,3 +432,49 @@ qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
        }
        iores->res_type = RESOURCE_NONE;
 }
+
+#define ISP_REG_DISCONNECT 0xffffffffU
+/**************************************************************************
+ * qla2x00_isp_reg_stat
+ *
+ * Description:
+ *        Read the host status register of ISP before aborting the command.
+ *
+ * Input:
+ *       ha = pointer to host adapter structure.
+ *
+ *
+ * Returns:
+ *       Either true or false.
+ *
+ * Note: Returns true if there is a register disconnect.
+ **************************************************************************/
+static inline
+uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
+{
+       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+
+       if (IS_P3P_TYPE(ha))
+               return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
+       else
+               return ((rd_reg_dword(&reg->host_status)) ==
+                       ISP_REG_DISCONNECT);
+}
+
+static inline
+bool qla_pci_disconnected(struct scsi_qla_host *vha,
+                         struct device_reg_24xx __iomem *reg)
+{
+       uint32_t stat;
+       bool ret = false;
+
+       stat = rd_reg_dword(&reg->host_status);
+       if (stat == 0xffffffff) {
+               ql_log(ql_log_info, vha, 0x8041,
+                      "detected PCI disconnect.\n");
+               qla_schedule_eeh_work(vha);
+               ret = true;
+       }
+       return ret;
+}
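
Intended usage, as in the dump routines earlier in this series: check the helper right after touching host_status and bail out, since a true return means the EEH recovery work has already been scheduled. A hypothetical caller:

	if (qla_pci_disconnected(vha, reg))
		return QLA_FUNCTION_FAILED;	/* EEH recovery underway */
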
index 8b41cba..38b5bdd 100644
@@ -491,7 +491,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 }
 
 /**
- * qla2x00_marker() - Send a marker IOCB to the firmware.
+ * __qla2x00_marker() - Send a marker IOCB to the firmware.
  * @vha: HA context
  * @qpair: queue pair pointer
  * @loop_id: loop ID
@@ -1600,12 +1600,14 @@ qla24xx_start_scsi(srb_t *sp)
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
+       struct rsp_que *rsp;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
 
        /* Setup device pointers. */
        req = vha->req;
+       rsp = req->rsp;
 
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
@@ -1643,8 +1645,14 @@ qla24xx_start_scsi(srb_t *sp)
                goto queuing_error;
 
        if (req->cnt < (req_cnt + 2)) {
-               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-                   rd_reg_dword_relaxed(req->req_q_out);
+               if (IS_SHADOW_REG_CAPABLE(ha)) {
+                       cnt = *req->out_ptr;
+               } else {
+                       cnt = rd_reg_dword_relaxed(req->req_q_out);
+                       if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+                               goto queuing_error;
+               }
+
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -1707,6 +1715,11 @@ qla24xx_start_scsi(srb_t *sp)
        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);
 
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;
 
@@ -1835,8 +1848,13 @@ qla24xx_dif_start_scsi(srb_t *sp)
                goto queuing_error;
 
        if (req->cnt < (req_cnt + 2)) {
-               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-                   rd_reg_dword_relaxed(req->req_q_out);
+               if (IS_SHADOW_REG_CAPABLE(ha)) {
+                       cnt = *req->out_ptr;
+               } else {
+                       cnt = rd_reg_dword_relaxed(req->req_q_out);
+                       if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+                               goto queuing_error;
+               }
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -1897,6 +1915,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);
 
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        return QLA_SUCCESS;
@@ -1910,6 +1933,7 @@ queuing_error:
 
        qla_put_iocbs(sp->qpair, &sp->iores);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        return QLA_FUNCTION_FAILED;
 }
 
@@ -1931,6 +1955,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
+       struct rsp_que *rsp;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
@@ -1941,6 +1966,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
 
        /* Setup qpair pointers */
        req = qpair->req;
+       rsp = qpair->rsp;
 
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
@@ -1977,8 +2003,14 @@ qla2xxx_start_scsi_mq(srb_t *sp)
                goto queuing_error;
 
        if (req->cnt < (req_cnt + 2)) {
-               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-                   rd_reg_dword_relaxed(req->req_q_out);
+               if (IS_SHADOW_REG_CAPABLE(ha)) {
+                       cnt = *req->out_ptr;
+               } else {
+                       cnt = rd_reg_dword_relaxed(req->req_q_out);
+                       if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+                               goto queuing_error;
+               }
+
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -2041,6 +2073,11 @@ qla2xxx_start_scsi_mq(srb_t *sp)
        /* Set chip new ring index. */
        wrt_reg_dword(req->req_q_in, req->ring_index);
 
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
        return QLA_SUCCESS;
 
@@ -2184,8 +2221,14 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
                goto queuing_error;
 
        if (req->cnt < (req_cnt + 2)) {
-               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-                   rd_reg_dword_relaxed(req->req_q_out);
+               if (IS_SHADOW_REG_CAPABLE(ha)) {
+                       cnt = *req->out_ptr;
+               } else {
+                       cnt = rd_reg_dword_relaxed(req->req_q_out);
+                       if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+                               goto queuing_error;
+               }
+
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -2262,6 +2305,7 @@ queuing_error:
 
        qla_put_iocbs(sp->qpair, &sp->iores);
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
        return QLA_FUNCTION_FAILED;
 }
 
@@ -2306,6 +2350,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
                        cnt = qla2x00_debounce_register(
                            ISP_REQ_Q_OUT(ha, &reg->isp));
 
+               if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
+                       qla_schedule_eeh_work(vha);
+                       return NULL;
+               }
+
                if  (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -2379,7 +2428,8 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
                                cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
                if (sp->vha->flags.nvme2_enabled) {
                        /* Set service parameter BIT_7 for NVME CONF support */
-                       logio->io_parameter[0] |= NVME_PRLI_SP_CONF;
+                       logio->io_parameter[0] |=
+                               cpu_to_le32(NVME_PRLI_SP_CONF);
                        /* Set service parameter BIT_8 for SLER support */
                        logio->io_parameter[0] |=
                                cpu_to_le32(NVME_PRLI_SP_SLER);
@@ -3720,6 +3770,9 @@ qla2x00_start_sp(srb_t *sp)
        void *pkt;
        unsigned long flags;
 
+       if (vha->hw->flags.eeh_busy)
+               return -EIO;
+
        spin_lock_irqsave(qp->qp_lock_ptr, flags);
        pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
        if (!pkt) {
@@ -3937,8 +3990,14 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 
        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
-               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-                   rd_reg_dword_relaxed(req->req_q_out);
+               if (IS_SHADOW_REG_CAPABLE(ha)) {
+                       cnt = *req->out_ptr;
+               } else {
+                       cnt = rd_reg_dword_relaxed(req->req_q_out);
+                       if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+                               goto queuing_error;
+               }
+
                if  (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -3977,5 +4036,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
        qla2x00_start_iocbs(vha, req);
 queuing_error:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        return rval;
 }
index 5e18837..11d6e0d 100644
@@ -270,12 +270,7 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
                if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
                    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
                    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
-                       /*
-                        * Schedule this (only once) on the default system
-                        * workqueue so that all the adapter workqueues and the
-                        * DPC thread can be shutdown cleanly.
-                        */
-                       schedule_work(&vha->hw->board_disable);
+                       qla_schedule_eeh_work(vha);
                }
                return true;
        } else
@@ -1657,8 +1652,6 @@ global_port_update:
        case MBA_TEMPERATURE_ALERT:
                ql_dbg(ql_dbg_async, vha, 0x505e,
                    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
-               if (mb[1] == 0x12)
-                       schedule_work(&ha->board_disable);
                break;
 
        case MBA_TRANS_INSERT:
@@ -3440,7 +3433,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
                return;
 
        abt = &sp->u.iocb_cmd;
-       abt->u.abt.comp_status = le16_to_cpu(pkt->comp_status);
+       abt->u.abt.comp_status = pkt->comp_status;
        orig_sp = sp->cmd_sp;
        /* Need to pass original sp */
        if (orig_sp)
index 06c9996..0bcd8af 100644 (file)
@@ -102,7 +102,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
        int             rval, i;
        unsigned long    flags = 0;
        device_reg_t *reg;
-       uint8_t         abort_active;
+       uint8_t         abort_active, eeh_delay;
        uint8_t         io_lock_on;
        uint16_t        command = 0;
        uint16_t        *iptr;
@@ -136,7 +136,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                    "PCI error, exiting.\n");
                return QLA_FUNCTION_TIMEOUT;
        }
-
+       eeh_delay = 0;
        reg = ha->iobase;
        io_lock_on = base_vha->flags.init_done;
 
@@ -159,10 +159,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
        }
 
        /* check if ISP abort is active and return cmd with timeout */
-       if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
-           test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
-           test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
-           !is_rom_cmd(mcp->mb[0])) {
+       if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+             test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+             test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+             !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
                ql_log(ql_log_info, vha, 0x1005,
                    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
                    mcp->mb[0]);
@@ -185,7 +185,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
        atomic_dec(&ha->num_pend_mbx_stage1);
-       if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+       if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+           ha->flags.eeh_busy) {
+               ql_log(ql_log_warn, vha, 0xd035,
+                      "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+                      ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
                rval = QLA_ABORTED;
                goto premature_exit;
        }
@@ -265,6 +269,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
                    mcp->tov * HZ)) {
                        if (chip_reset != ha->chip_reset) {
+                               eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
                                spin_lock_irqsave(&ha->hardware_lock, flags);
                                ha->flags.mbox_busy = 0;
                                spin_unlock_irqrestore(&ha->hardware_lock,
@@ -282,6 +288,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
                } else if (ha->flags.purge_mbox ||
                    chip_reset != ha->chip_reset) {
+                       eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
                        spin_lock_irqsave(&ha->hardware_lock, flags);
                        ha->flags.mbox_busy = 0;
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -323,6 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                while (!ha->flags.mbox_int) {
                        if (ha->flags.purge_mbox ||
                            chip_reset != ha->chip_reset) {
+                               eeh_delay = ha->flags.eeh_busy ? 1 : 0;
+
                                spin_lock_irqsave(&ha->hardware_lock, flags);
                                ha->flags.mbox_busy = 0;
                                spin_unlock_irqrestore(&ha->hardware_lock,
@@ -531,7 +541,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                                /* Allow next mbx cmd to come in. */
                                complete(&ha->mbx_cmd_comp);
-                               if (ha->isp_ops->abort_isp(vha)) {
+                               if (ha->isp_ops->abort_isp(vha) &&
+                                   !ha->flags.eeh_busy) {
                                        /* Failed. retry later. */
                                        set_bit(ISP_ABORT_NEEDED,
                                            &vha->dpc_flags);
@@ -584,6 +595,17 @@ mbx_done:
                ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
        }
 
+       i = 500;
+       while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
+               /*
+                * The caller of this mailbox encountered a PCI error.
+                * Hold the thread until the PCIe link reset completes to
+                * make sure the caller does not unmap DMA while recovery
+                * is in progress.
+                */
+               msleep(1);
+               i--;
+       }
        return rval;
 }
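
The 500 x 1 ms wait above polls ha->pci_error_state until recovery has at least reached the slot-reset stage. The state names used across this series imply the following progression; only the relative order matters for the `<` comparison, and the explicit values here are an assumption for illustration (the real enum lives in the driver headers, not in this diff):

        /* Assumed ordering of the PCI error-recovery states referenced in
         * this series; literal values are illustrative.
         */
        enum {
                QLA_PCI_NORMAL = 0,     /* no PCI error outstanding */
                QLA_PCI_ERR_DETECTED,   /* qla2xxx_pci_error_detected() ran */
                QLA_PCI_MMIO_ENABLED,   /* qla2xxx_pci_mmio_enabled() ran */
                QLA_PCI_SLOT_RESET,     /* qla2xxx_pci_slot_reset() ran */
                QLA_PCI_RESUME,         /* qla2xxx_pci_resume() ran */
        };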
 
index ca73066..6e920da 100644 (file)
@@ -516,7 +516,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
 }
 
 /**
- * qlafx00_warm_reset() - Perform warm reset of iSA(CPUs being reset on SOC).
+ * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SoC).
  * @vha: HA context
  *
  */
@@ -2860,7 +2860,7 @@ qlafx00_async_event(scsi_qla_host_t *vha)
 }
 
 /**
- * qlafx00x_mbx_completion() - Process mailbox command completions.
+ * qlafx00_mbx_completion() - Process mailbox command completions.
  * @vha: SCSI driver HA context
  * @mb0: value to be written into mailbox register 0
  */
@@ -3266,8 +3266,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        fx_iocb.req_xfrcnt =
                            cpu_to_le16(fxio->u.fxiocb.req_len);
                        put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
-                                          &fx_iocb.dseg_rq.address);
-                       fx_iocb.dseg_rq.length =
+                                          &fx_iocb.dseg_rq[0].address);
+                       fx_iocb.dseg_rq[0].length =
                            cpu_to_le32(fxio->u.fxiocb.req_len);
                }
 
@@ -3276,8 +3276,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        fx_iocb.rsp_xfrcnt =
                            cpu_to_le16(fxio->u.fxiocb.rsp_len);
                        put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
-                                          &fx_iocb.dseg_rsp.address);
-                       fx_iocb.dseg_rsp.length =
+                                          &fx_iocb.dseg_rsp[0].address);
+                       fx_iocb.dseg_rsp[0].length =
                            cpu_to_le32(fxio->u.fxiocb.rsp_len);
                }
 
@@ -3314,7 +3314,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                            cpu_to_le16(bsg_job->request_payload.sg_cnt);
                        tot_dsds =
                            bsg_job->request_payload.sg_cnt;
-                       cur_dsd = &fx_iocb.dseg_rq;
+                       cur_dsd = &fx_iocb.dseg_rq[0];
                        avail_dsds = 1;
                        for_each_sg(bsg_job->request_payload.sg_list, sg,
                            tot_dsds, index) {
@@ -3369,7 +3369,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        fx_iocb.rsp_dsdcnt =
                           cpu_to_le16(bsg_job->reply_payload.sg_cnt);
                        tot_dsds = bsg_job->reply_payload.sg_cnt;
-                       cur_dsd = &fx_iocb.dseg_rsp;
+                       cur_dsd = &fx_iocb.dseg_rsp[0];
                        avail_dsds = 1;
 
                        for_each_sg(bsg_job->reply_payload.sg_list, sg,
index 73be834..4f63aff 100644 (file)
@@ -176,8 +176,12 @@ struct fxdisc_entry_fx00 {
        uint8_t flags;
        uint8_t reserved_1;
 
-       struct dsd64 dseg_rq;
-       struct dsd64 dseg_rsp;
+       /*
+        * Use an array size of 1 below to prevent Coverity from
+        * complaining about the append_dsd64() calls for the two
+        * arrays below.
+        */
+       struct dsd64 dseg_rq[1];
+       struct dsd64 dseg_rsp[1];
 
        __le32 dataword;
        __le32 adapid;
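
Turning the two embedded descriptors into one-element arrays keeps the binary layout identical while letting the fill code index them like any other DSD list. A sketch of the access pattern implied by the qlafx00_fxdisc_iocb() hunks above (the helper name is hypothetical; struct dsd64 and put_unaligned_le64() are as used in this file):

        /* Hypothetical helper: fill a single DSD entry the way
         * qlafx00_fxdisc_iocb() now addresses dseg_rq[0] / dseg_rsp[0].
         */
        static void qlafx00_fill_dsd(struct dsd64 *dsd, u64 dma_handle, u32 len)
        {
                put_unaligned_le64(dma_handle, &dsd[0].address);
                dsd[0].length = cpu_to_le32(len);
        }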
index 0237588..0cacb66 100644 (file)
@@ -398,8 +398,13 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
        }
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
-               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-                   rd_reg_dword_relaxed(req->req_q_out);
+               if (IS_SHADOW_REG_CAPABLE(ha)) {
+                       cnt = *req->out_ptr;
+               } else {
+                       cnt = rd_reg_dword_relaxed(req->req_q_out);
+                       if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+                               goto queuing_error;
+               }
 
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
@@ -536,6 +541,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
 
 queuing_error:
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
        return rval;
 }
 
index 68a16c9..5ceecc9 100644 (file)
@@ -2028,7 +2028,7 @@ exit_error:
 }
 
 /**
- * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * qla8044_check_temp - Check the ISP82XX temperature.
  * @vha: adapter block pointer.
  *
  * Note: The caller should not hold the idc lock.
@@ -2226,19 +2226,16 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
                if (opcode & QLA82XX_DBG_OPCODE_WR) {
                        qla8044_wr_reg_indirect(vha, crb_addr,
                            crb_entry->value_1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_WR;
                }
 
                if (opcode & QLA82XX_DBG_OPCODE_RW) {
                        qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
                        qla8044_wr_reg_indirect(vha, crb_addr, read_value);
-                       opcode &= ~QLA82XX_DBG_OPCODE_RW;
                }
 
                if (opcode & QLA82XX_DBG_OPCODE_AND) {
                        qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
                        read_value &= crb_entry->value_2;
-                       opcode &= ~QLA82XX_DBG_OPCODE_AND;
                        if (opcode & QLA82XX_DBG_OPCODE_OR) {
                                read_value |= crb_entry->value_3;
                                opcode &= ~QLA82XX_DBG_OPCODE_OR;
@@ -2249,7 +2246,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
                        qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
                        read_value |= crb_entry->value_3;
                        qla8044_wr_reg_indirect(vha, crb_addr, read_value);
-                       opcode &= ~QLA82XX_DBG_OPCODE_OR;
                }
                if (opcode & QLA82XX_DBG_OPCODE_POLL) {
                        poll_time = crb_entry->crb_strd.poll_timeout;
@@ -2269,7 +2265,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
                                            crb_addr, &read_value);
                                }
                        } while (1);
-                       opcode &= ~QLA82XX_DBG_OPCODE_POLL;
                }
 
                if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
@@ -2283,7 +2278,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
                        qla8044_rd_reg_indirect(vha, addr, &read_value);
                        index = crb_entry->crb_ctrl.state_index_v;
                        tmplt_hdr->saved_state_array[index] = read_value;
-                       opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
                }
 
                if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
@@ -2303,7 +2297,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
                        }
 
                        qla8044_wr_reg_indirect(vha, addr, read_value);
-                       opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
                }
 
                if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
@@ -2316,7 +2309,6 @@ qla8044_minidump_process_control(struct scsi_qla_host *vha,
                        read_value |= crb_entry->value_3;
                        read_value += crb_entry->value_1;
                        tmplt_hdr->saved_state_array[index] = read_value;
-                       opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
                }
                crb_addr += crb_entry->crb_strd.addr_stride;
        }
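
The deleted opcode &= ~FLAG statements were dead stores: every handler tests the original opcode independently, so clearing a bit after acting on it changes nothing. The one clear that survives (for QLA82XX_DBG_OPCODE_OR inside the AND branch) is still live, because OPCODE_OR is re-tested afterwards. A toy model of the dispatch, with made-up flag values rather than the real encodings:

        /* Toy model only: mirrors the control flow of
         * qla8044_minidump_process_control() with illustrative flags.
         */
        #define OP_WR   0x01
        #define OP_AND  0x04
        #define OP_OR   0x08

        static unsigned int handle_control(unsigned int opcode, unsigned int val)
        {
                if (opcode & OP_WR)
                        val = 0;                /* no clear needed: never re-tested */

                if (opcode & OP_AND) {
                        val &= 0xff;
                        if (opcode & OP_OR) {
                                val |= 0x100;
                                opcode &= ~OP_OR; /* live: OP_OR is tested again below */
                        }
                }

                if (opcode & OP_OR)             /* would double-apply without the clear */
                        val |= 0x100;

                return val;
        }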
index 0743925..12959e3 100644 (file)
@@ -971,6 +971,13 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
                goto qc24_fail_command;
        }
 
+       if (!qpair->online) {
+               ql_dbg(ql_dbg_io, vha, 0x3077,
+                      "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
+               cmd->result = DID_NO_CONNECT << 16;
+               goto qc24_fail_command;
+       }
+
        if (!fcport || fcport->deleted) {
                cmd->result = DID_IMM_RETRY << 16;
                goto qc24_fail_command;
@@ -1013,8 +1020,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
                    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
-               if (rval == QLA_INTERFACE_ERROR)
-                       goto qc24_free_sp_fail_command;
                goto qc24_host_busy_free_sp;
        }
 
@@ -1026,11 +1031,6 @@ qc24_host_busy_free_sp:
 qc24_target_busy:
        return SCSI_MLQUEUE_TARGET_BUSY;
 
-qc24_free_sp_fail_command:
-       sp->free(sp);
-       CMD_SP(cmd) = NULL;
-       qla2xxx_rel_qpair_sp(sp->qpair, sp);
-
 qc24_fail_command:
        cmd->scsi_done(cmd);
 
@@ -1207,35 +1207,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
        return return_status;
 }
 
-#define ISP_REG_DISCONNECT 0xffffffffU
-/**************************************************************************
-* qla2x00_isp_reg_stat
-*
-* Description:
-*      Read the host status register of ISP before aborting the command.
-*
-* Input:
-*      ha = pointer to host adapter structure.
-*
-*
-* Returns:
-*      Either true or false.
-*
-* Note:        Return true if there is register disconnect.
-**************************************************************************/
-static inline
-uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
-{
-       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-       struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-
-       if (IS_P3P_TYPE(ha))
-               return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
-       else
-               return ((rd_reg_dword(&reg->host_status)) ==
-                       ISP_REG_DISCONNECT);
-}
-
 /**************************************************************************
 * qla2xxx_eh_abort
 *
@@ -1269,6 +1240,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x8042,
                    "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
                return FAILED;
        }
 
@@ -1462,6 +1434,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x803e,
                    "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
                return FAILED;
        }
 
@@ -1478,6 +1451,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x803f,
                    "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
                return FAILED;
        }
 
@@ -1513,6 +1487,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x8040,
                    "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
                return FAILED;
        }
 
@@ -1590,7 +1565,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x8041,
                    "PCI/Register disconnect, exiting.\n");
-               schedule_work(&ha->board_disable);
+               qla_pci_set_eeh_busy(vha);
                return SUCCESS;
        }
 
@@ -4238,11 +4213,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 
        /* Get consistent memory allocated for Special Features-CB. */
        if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
-               ha->sf_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+               ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
                                                &ha->sf_init_cb_dma);
                if (!ha->sf_init_cb)
                        goto fail_sf_init_cb;
-               memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
                           "sf_init_cb=%p.\n", ha->sf_init_cb);
        }
@@ -6676,6 +6650,9 @@ qla2x00_do_dpc(void *data)
 
                schedule();
 
+               if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
+                       qla_pci_set_eeh_busy(base_vha);
+
                if (!base_vha->flags.init_done || ha->flags.mbox_busy)
                        goto end_loop;
 
@@ -6969,28 +6946,23 @@ intr_on_check:
                        mutex_unlock(&ha->mq_lock);
                }
 
-               if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
-                   &base_vha->dpc_flags)) {
+               if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+                                      &base_vha->dpc_flags)) {
+                       u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
+
+                       if (threshold > ha->orig_fw_xcb_count)
+                               threshold = ha->orig_fw_xcb_count;
+
                        ql_log(ql_log_info, base_vha, 0xffffff,
-                               "nvme: SET ZIO Activity exchange threshold to %d.\n",
-                                               ha->nvme_last_rptd_aen);
-                       if (qla27xx_set_zio_threshold(base_vha,
-                           ha->nvme_last_rptd_aen)) {
+                              "SET ZIO Activity exchange threshold to %d.\n",
+                              threshold);
+                       if (qla27xx_set_zio_threshold(base_vha, threshold)) {
                                ql_log(ql_log_info, base_vha, 0xffffff,
-                                   "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
-                                   ha->nvme_last_rptd_aen);
+                                      "Unable to SET ZIO Activity exchange threshold to %d.\n",
+                                      threshold);
                        }
                }
 
-               if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
-                   &base_vha->dpc_flags)) {
-                       ql_log(ql_log_info, base_vha, 0xffffff,
-                           "SET ZIO Activity exchange threshold to %d.\n",
-                           ha->last_zio_threshold);
-                       qla27xx_set_zio_threshold(base_vha,
-                           ha->last_zio_threshold);
-               }
-
                if (!IS_QLAFX00(ha))
                        qla2x00_do_dpc_all_vps(base_vha);
 
@@ -7218,14 +7190,13 @@ qla2x00_timer(struct timer_list *t)
        index = atomic_read(&ha->nvme_active_aen_cnt);
        if (!vha->vp_idx &&
            (index != ha->nvme_last_rptd_aen) &&
-           (index >= DEFAULT_ZIO_THRESHOLD) &&
            ha->zio_mode == QLA_ZIO_MODE_6 &&
            !ha->flags.host_shutting_down) {
+               ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
                ql_log(ql_log_info, vha, 0x3002,
                    "nvme: Sched: Set ZIO exchange threshold to %d.\n",
                    ha->nvme_last_rptd_aen);
-               ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
-               set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+               set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
                start_dpc++;
        }
 
@@ -7398,6 +7369,8 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
        int i;
        unsigned long flags;
 
+       ql_dbg(ql_dbg_aer, vha, 0x9000,
+              "%s\n", __func__);
        ha->chip_reset++;
 
        ha->base_qpair->chip_reset = ha->chip_reset;
@@ -7407,28 +7380,16 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
                            ha->base_qpair->chip_reset;
        }
 
-       /* purge MBox commands */
-       if (atomic_read(&ha->num_pend_mbx_stage3)) {
-               clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
-               complete(&ha->mbx_intr_comp);
-       }
-
-       i = 0;
-
-       while (atomic_read(&ha->num_pend_mbx_stage3) ||
-           atomic_read(&ha->num_pend_mbx_stage2) ||
-           atomic_read(&ha->num_pend_mbx_stage1)) {
-               msleep(20);
-               i++;
-               if (i > 50)
-                       break;
-       }
-
-       ha->flags.purge_mbox = 0;
+       /*
+        * Purging the mailbox might take a while; the slot reset /
+        * chip reset will take care of the purge.
+        */
 
        mutex_lock(&ha->mq_lock);
+       ha->base_qpair->online = 0;
        list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
                qpair->online = 0;
+       wmb();
        mutex_unlock(&ha->mq_lock);
 
        qla2x00_mark_all_devices_lost(vha);
@@ -7465,14 +7426,17 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(pdev);
        struct qla_hw_data *ha = vha->hw;
+       pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
 
-       ql_dbg(ql_dbg_aer, vha, 0x9000,
-           "PCI error detected, state %x.\n", state);
+       ql_log(ql_log_warn, vha, 0x9000,
+              "PCI error detected, state %x.\n", state);
+       ha->pci_error_state = QLA_PCI_ERR_DETECTED;
 
        if (!atomic_read(&pdev->enable_cnt)) {
                ql_log(ql_log_info, vha, 0xffff,
                        "PCI device is disabled,state %x\n", state);
-               return PCI_ERS_RESULT_NEED_RESET;
+               ret = PCI_ERS_RESULT_NEED_RESET;
+               goto out;
        }
 
        switch (state) {
@@ -7482,11 +7446,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                        set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
                        qla2xxx_wake_dpc(vha);
                }
-               return PCI_ERS_RESULT_CAN_RECOVER;
+               ret = PCI_ERS_RESULT_CAN_RECOVER;
+               break;
        case pci_channel_io_frozen:
-               ha->flags.eeh_busy = 1;
-               qla_pci_error_cleanup(vha);
-               return PCI_ERS_RESULT_NEED_RESET;
+               qla_pci_set_eeh_busy(vha);
+               ret = PCI_ERS_RESULT_NEED_RESET;
+               break;
        case pci_channel_io_perm_failure:
                ha->flags.pci_channel_io_perm_failure = 1;
                qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
@@ -7494,9 +7459,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                        set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
                        qla2xxx_wake_dpc(vha);
                }
-               return PCI_ERS_RESULT_DISCONNECT;
+               ret = PCI_ERS_RESULT_DISCONNECT;
        }
-       return PCI_ERS_RESULT_NEED_RESET;
+out:
+       ql_dbg(ql_dbg_aer, vha, 0x600d,
+              "PCI error detected returning [%x].\n", ret);
+       return ret;
 }
 
 static pci_ers_result_t
@@ -7510,6 +7478,10 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
        struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 
+       ql_log(ql_log_warn, base_vha, 0x9000,
+              "mmio enabled\n");
+
+       ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
        if (IS_QLA82XX(ha))
                return PCI_ERS_RESULT_RECOVERED;
 
@@ -7533,10 +7505,11 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
                ql_log(ql_log_info, base_vha, 0x9003,
                    "RISC paused -- mmio_enabled, Dumping firmware.\n");
                qla2xxx_dump_fw(base_vha);
-
-               return PCI_ERS_RESULT_NEED_RESET;
-       } else
-               return PCI_ERS_RESULT_RECOVERED;
+       }
+       /* set PCI_ERS_RESULT_NEED_RESET to trigger a call to qla2xxx_pci_slot_reset() */
+       ql_dbg(ql_dbg_aer, base_vha, 0x600d,
+              "mmio enabled returning.\n");
+       return PCI_ERS_RESULT_NEED_RESET;
 }
 
 static pci_ers_result_t
@@ -7548,9 +7521,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
        int rc;
        struct qla_qpair *qpair = NULL;
 
-       ql_dbg(ql_dbg_aer, base_vha, 0x9004,
-           "Slot Reset.\n");
+       ql_log(ql_log_warn, base_vha, 0x9004,
+              "Slot Reset.\n");
 
+       ha->pci_error_state = QLA_PCI_SLOT_RESET;
        /* Workaround: qla2xxx driver which access hardware earlier
         * needs error state to be pci_channel_io_online.
         * Otherwise mailbox command timesout.
@@ -7584,16 +7558,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
                qpair->online = 1;
        mutex_unlock(&ha->mq_lock);
 
+       ha->flags.eeh_busy = 0;
        base_vha->flags.online = 1;
        set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-       if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
-               ret =  PCI_ERS_RESULT_RECOVERED;
+       ha->isp_ops->abort_isp(base_vha);
        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 
+       if (qla2x00_isp_reg_stat(ha)) {
+               ha->flags.eeh_busy = 1;
+               qla_pci_error_cleanup(base_vha);
+               ql_log(ql_log_warn, base_vha, 0x9005,
+                      "Device unable to recover from PCI error.\n");
+       } else {
+               ret =  PCI_ERS_RESULT_RECOVERED;
+       }
 
 exit_slot_reset:
        ql_dbg(ql_dbg_aer, base_vha, 0x900e,
-           "slot_reset return %x.\n", ret);
+           "Slot Reset returning %x.\n", ret);
 
        return ret;
 }
@@ -7605,16 +7587,55 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
        struct qla_hw_data *ha = base_vha->hw;
        int ret;
 
-       ql_dbg(ql_dbg_aer, base_vha, 0x900f,
-           "pci_resume.\n");
+       ql_log(ql_log_warn, base_vha, 0x900f,
+              "Pci Resume.\n");
 
-       ha->flags.eeh_busy = 0;
 
        ret = qla2x00_wait_for_hba_online(base_vha);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x9002,
                    "The device failed to resume I/O from slot/link_reset.\n");
        }
+       ha->pci_error_state = QLA_PCI_RESUME;
+       ql_dbg(ql_dbg_aer, base_vha, 0x600d,
+              "Pci Resume returning.\n");
+}
+
+void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       bool do_cleanup = false;
+       unsigned long flags;
+
+       if (ha->flags.eeh_busy)
+               return;
+
+       spin_lock_irqsave(&base_vha->work_lock, flags);
+       if (!ha->flags.eeh_busy) {
+               ha->flags.eeh_busy = 1;
+               do_cleanup = true;
+       }
+       spin_unlock_irqrestore(&base_vha->work_lock, flags);
+
+       if (do_cleanup)
+               qla_pci_error_cleanup(base_vha);
+}
+
+/*
+ * This routine schedules a task to pause I/O from interrupt context
+ * when the caller sees a PCIe error event (register reads return all
+ * 0xf's).
+ */
+void qla_schedule_eeh_work(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+       if (ha->flags.eeh_busy)
+               return;
+
+       set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
+       qla2xxx_wake_dpc(base_vha);
 }
 
 static void
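
qla_pci_set_eeh_busy() above is a double-checked flag set: the unlocked test is a fast path, and the locked re-test guarantees exactly one caller performs the cleanup. The idiom in isolation (a sketch; lock and flag names follow the function above):

        /* Sketch of the check/lock/re-check idiom: returns true for the
         * single winner that must go on to call qla_pci_error_cleanup().
         */
        static bool eeh_busy_set_once(struct qla_hw_data *ha, spinlock_t *lock)
        {
                unsigned long flags;
                bool won = false;

                if (ha->flags.eeh_busy)         /* unlocked fast path */
                        return false;

                spin_lock_irqsave(lock, flags);
                if (!ha->flags.eeh_busy) {
                        ha->flags.eeh_busy = 1;
                        won = true;             /* exactly one caller gets here */
                }
                spin_unlock_irqrestore(lock, flags);

                return won;
        }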
index f771fab..060c892 100644 (file)
@@ -2621,10 +2621,11 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
 }
 
 static int
-qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
+qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, __le32 *buf,
     uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf)
 {
-       uint32_t *p, check_sum = 0;
+       uint32_t check_sum = 0;
+       __le32 *p;
        int i;
 
        p = buf + buf_size_without_sfub;
@@ -2790,8 +2791,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
                        goto done;
                }
 
-               rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords,
-                       buf_size_without_sfub, (uint8_t *)sfub);
+               rval = qla28xx_extract_sfub_and_verify(vha, (__le32 *)dwptr,
+                       dwords, buf_size_without_sfub, (uint8_t *)sfub);
 
                if (rval != QLA_SUCCESS)
                        goto done;
index 480e7d2..b2008fb 100644 (file)
@@ -1029,7 +1029,12 @@ void qlt_free_session_done(struct work_struct *work)
                        }
                        msleep(100);
                        cnt++;
-                       if (cnt > 200)
+                       /*
+                        * The driver timeout is set to 22 seconds; bump the
+                        * count so the loop runs long enough for the logout to
+                        * complete before advancing. Otherwise a straddling
+                        * logout can interfere with the re-login attempt.
+                        */
+                       if (cnt > 230)
                                break;
                }
 
@@ -6459,7 +6464,7 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
 }
 
 /**
- * qla_tgt_lport_register - register lport with external module
+ * qlt_lport_register - register lport with external module
  *
  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
  * @phys_wwpn: physical port WWPN
@@ -6535,7 +6540,7 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
 EXPORT_SYMBOL(qlt_lport_register);
 
 /**
- * qla_tgt_lport_deregister - Degister lport
+ * qlt_lport_deregister - Deregister lport
  *
  * @vha:  Registered scsi_qla_host pointer
  */
index 72c6484..da11829 100644 (file)
@@ -6,9 +6,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.02.00.105-k"
+#define QLA2XXX_VERSION      "10.02.00.106-k"
 
 #define QLA_DRIVER_MAJOR_VER   10
 #define QLA_DRIVER_MINOR_VER   2
 #define QLA_DRIVER_PATCH_VER   0
-#define QLA_DRIVER_BETA_VER    105
+#define QLA_DRIVER_BETA_VER    106
index 8b4890c..03de1bc 100644 (file)
@@ -451,7 +451,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
        struct se_portal_group *se_tpg;
        struct tcm_qla2xxx_tpg *tpg;
 #endif
-       int target_flags = TARGET_SCF_ACK_KREF;
+       int rc, target_flags = TARGET_SCF_ACK_KREF;
        unsigned long flags;
 
        if (bidi)
@@ -486,9 +486,18 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
        list_add_tail(&cmd->sess_cmd_list, &sess->sess_cmd_list);
        spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
 
-       return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
-                                cmd->unpacked_lun, data_length, fcp_task_attr,
-                                data_dir, target_flags);
+       rc = target_init_cmd(se_cmd, se_sess, &cmd->sense_buffer[0],
+                            cmd->unpacked_lun, data_length, fcp_task_attr,
+                            data_dir, target_flags);
+       if (rc)
+               return rc;
+
+       if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+                              GFP_KERNEL))
+               return 0;
+
+       target_submit(se_cmd);
+       return 0;
 }
 
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
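
The single target_submit_cmd() call becomes a three-stage sequence: target_init_cmd() can fail before LIO owns the command (the caller keeps error handling), a failing target_submit_prep() means LIO has already completed the command (hence the bare return 0), and target_submit() hands it off for execution. A hedged sketch of that contract, with a hypothetical wrapper name; the target_*() calls and their return-value semantics are exactly as used in the hunk above:

        static int submit_to_lio(struct se_cmd *se_cmd, struct se_session *se_sess,
                                 unsigned char *cdb, unsigned char *sense, u64 lun,
                                 u32 data_length, int task_attr, int data_dir,
                                 int flags)
        {
                int rc;

                rc = target_init_cmd(se_cmd, se_sess, sense, lun, data_length,
                                     task_attr, data_dir, flags);
                if (rc)
                        return rc;      /* nothing queued yet; caller handles it */

                if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
                                       GFP_KERNEL))
                        return 0;       /* prep failed: LIO completed the command */

                target_submit(se_cmd); /* hand off for backend execution */
                return 0;
        }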
@@ -1569,7 +1578,7 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
 /*
  * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
  */
-static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .find_cmd_by_tag        = tcm_qla2xxx_find_cmd_by_tag,
        .handle_cmd             = tcm_qla2xxx_handle_cmd,
        .handle_data            = tcm_qla2xxx_handle_data,
index 17b719a..187d78a 100644 (file)
@@ -44,7 +44,7 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
 }
 
 /**
- * qla4xxx_is_intr_poll_mode  Are we allowed to poll for interrupts?
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
  * @ha: Pointer to host adapter structure.
  * returns: 1=polling mode, 0=non-polling mode
  **/
@@ -933,7 +933,7 @@ int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
 }
 
 /**
- * qla4xxx_set_fwddb_entry - sets a ddb entry.
+ * qla4xxx_set_ddb_entry - sets a ddb entry.
  * @ha: Pointer to host adapter structure.
  * @fw_ddb_index: Firmware's device database index
  * @fw_ddb_entry_dma: dma address of ddb entry
index 7bd9a4a..867730e 100644 (file)
@@ -618,7 +618,7 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
 }
 
 /**
- * qla4xxx_create chap_list - Create CHAP list from FLASH
+ * qla4xxx_create_chap_list - Create CHAP list from FLASH
  * @ha: pointer to adapter structure
  *
  * Read flash and make a list of CHAP entries, during login when a CHAP entry
@@ -6961,7 +6961,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
        if (is_reset == RESET_ADAPTER) {
                iscsi_block_session(cls_sess);
                /* Use the relogin path to discover new devices
-                *  by short-circuting the logic of setting
+                *  by short-circuiting the logic of setting
                 *  timer to relogin - instead set the flags
                 *  to initiate login right away.
                 */
@@ -9633,7 +9633,7 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 }
 
 /**
- * qla4xxx_pci_mmio_enabled() gets called if
+ * qla4xxx_pci_mmio_enabled() gets called if
  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
  * and read/write to the device still works.
  * @pdev: PCI device pointer
index 24619c3..e9e2f0e 100644 (file)
@@ -214,6 +214,15 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
        scsi_io_completion(cmd, good_bytes);
 }
 
+
+/*
+ * 1024 is big enough to saturate any fast SCSI LUN for now
+ */
+int scsi_device_max_queue_depth(struct scsi_device *sdev)
+{
+       return max_t(int, sdev->host->can_queue, 1024);
+}
+
 /**
  * scsi_change_queue_depth - change a device's queue depth
  * @sdev: SCSI Device in question
@@ -223,6 +232,8 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
  */
 int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
 {
+       depth = min_t(int, depth, scsi_device_max_queue_depth(sdev));
+
        if (depth > 0) {
                sdev->queue_depth = depth;
                wmb();
@@ -231,6 +242,8 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
        if (sdev->request_queue)
                blk_set_queue_depth(sdev->request_queue, depth);
 
+       sbitmap_resize(&sdev->budget_map, sdev->queue_depth);
+
        return sdev->queue_depth;
 }
 EXPORT_SYMBOL(scsi_change_queue_depth);
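
With the clamp in place an LLD can no longer push queue_depth past the budget map's capacity; an oversized request is silently reduced. A hypothetical caller (the function name and the 4096 value are illustrative):

        /* Hypothetical caller: the requested depth is clamped to
         * scsi_device_max_queue_depth(), i.e. max(host->can_queue, 1024),
         * before the device's budget sbitmap is resized.
         */
        static int example_slave_configure(struct scsi_device *sdev)
        {
                int granted = scsi_change_queue_depth(sdev, 4096);

                /* granted is at most max(sdev->host->can_queue, 1024) */
                return granted > 0 ? 0 : -ENXIO;
        }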
index 3cdeaeb..70165be 100644 (file)
@@ -322,17 +322,19 @@ struct sdeb_store_info {
        container_of(d, struct sdebug_host_info, dev)
 
 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
-                     SDEB_DEFER_WQ = 2};
+                     SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
 
 struct sdebug_defer {
        struct hrtimer hrt;
        struct execute_work ew;
+       ktime_t cmpl_ts;        /* time since boot to complete this cmd */
        int sqa_idx;    /* index of sdebug_queue array */
        int qc_idx;     /* index of sdebug_queued_cmd array within sqa_idx */
        int hc_idx;     /* hostwide tag index */
        int issuing_cpu;
        bool init_hrt;
        bool init_wq;
+       bool init_poll;
        bool aborted;   /* true when blk_abort_request() already called */
        enum sdeb_defer_type defer_t;
 };
@@ -357,6 +359,7 @@ static atomic_t sdebug_completions;  /* count of deferred completions */
 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
 static atomic_t sdebug_a_tsf;       /* 'almost task set full' counter */
 static atomic_t sdeb_inject_pending;
+static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
 
 struct opcode_info_t {
        u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff */
@@ -829,6 +832,7 @@ static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
 
 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
+static int poll_queues; /* iouring iopoll interface. */
 static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
 
 static DEFINE_RWLOCK(atomic_rw);
@@ -4729,7 +4733,6 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
        struct scsi_cmnd *scp;
        struct sdebug_dev_info *devip;
 
-       sd_dp->defer_t = SDEB_DEFER_NONE;
        if (unlikely(aborted))
                sd_dp->aborted = false;
        qc_idx = sd_dp->qc_idx;
@@ -4744,6 +4747,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
                return;
        }
        spin_lock_irqsave(&sqp->qc_lock, iflags);
+       sd_dp->defer_t = SDEB_DEFER_NONE;
        sqcp = &sqp->qc_arr[qc_idx];
        scp = sqcp->a_cmnd;
        if (unlikely(scp == NULL)) {
@@ -5363,6 +5367,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 {
        bool new_sd_dp;
        bool inject = false;
+       bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
        int k, num_in_q, qdepth;
        unsigned long iflags;
        u64 ns_from_boot = 0;
@@ -5432,6 +5437,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
        cmnd->host_scribble = (unsigned char *)sqcp;
        sd_dp = sqcp->sd_dp;
        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+
        if (!sd_dp) {
                sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
                if (!sd_dp) {
@@ -5448,7 +5454,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
        if (sdebug_host_max_queue)
                sd_dp->hc_idx = get_tag(cmnd);
 
-       if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
+       if (hipri)
                ns_from_boot = ktime_get_boottime_ns();
 
        /* one of the resp_*() response functions is called here */
@@ -5508,40 +5514,66 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
                                kt -= d;
                        }
                }
-               if (!sd_dp->init_hrt) {
-                       sd_dp->init_hrt = true;
-                       sqcp->sd_dp = sd_dp;
-                       hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
-                                    HRTIMER_MODE_REL_PINNED);
-                       sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
-                       sd_dp->sqa_idx = sqp - sdebug_q_arr;
-                       sd_dp->qc_idx = k;
+               if (hipri) {
+                       sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
+                       spin_lock_irqsave(&sqp->qc_lock, iflags);
+                       if (!sd_dp->init_poll) {
+                               sd_dp->init_poll = true;
+                               sqcp->sd_dp = sd_dp;
+                               sd_dp->sqa_idx = sqp - sdebug_q_arr;
+                               sd_dp->qc_idx = k;
+                       }
+                       sd_dp->defer_t = SDEB_DEFER_POLL;
+                       spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+               } else {
+                       if (!sd_dp->init_hrt) {
+                               sd_dp->init_hrt = true;
+                               sqcp->sd_dp = sd_dp;
+                               hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+                                            HRTIMER_MODE_REL_PINNED);
+                               sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+                               sd_dp->sqa_idx = sqp - sdebug_q_arr;
+                               sd_dp->qc_idx = k;
+                       }
+                       sd_dp->defer_t = SDEB_DEFER_HRT;
+                       /* schedule the invocation of scsi_done() for a later time */
+                       hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
                }
                if (sdebug_statistics)
                        sd_dp->issuing_cpu = raw_smp_processor_id();
-               sd_dp->defer_t = SDEB_DEFER_HRT;
-               /* schedule the invocation of scsi_done() for a later time */
-               hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
        } else {        /* jdelay < 0, use work queue */
-               if (!sd_dp->init_wq) {
-                       sd_dp->init_wq = true;
-                       sqcp->sd_dp = sd_dp;
-                       sd_dp->sqa_idx = sqp - sdebug_q_arr;
-                       sd_dp->qc_idx = k;
-                       INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-               }
-               if (sdebug_statistics)
-                       sd_dp->issuing_cpu = raw_smp_processor_id();
-               sd_dp->defer_t = SDEB_DEFER_WQ;
                if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
                             atomic_read(&sdeb_inject_pending)))
                        sd_dp->aborted = true;
-               schedule_work(&sd_dp->ew.work);
-               if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
-                            atomic_read(&sdeb_inject_pending))) {
+               if (hipri) {
+                       sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
+                       spin_lock_irqsave(&sqp->qc_lock, iflags);
+                       if (!sd_dp->init_poll) {
+                               sd_dp->init_poll = true;
+                               sqcp->sd_dp = sd_dp;
+                               sd_dp->sqa_idx = sqp - sdebug_q_arr;
+                               sd_dp->qc_idx = k;
+                       }
+                       sd_dp->defer_t = SDEB_DEFER_POLL;
+                       spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+               } else {
+                       if (!sd_dp->init_wq) {
+                               sd_dp->init_wq = true;
+                               sqcp->sd_dp = sd_dp;
+                               sd_dp->sqa_idx = sqp - sdebug_q_arr;
+                               sd_dp->qc_idx = k;
+                               INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+                       }
+                       sd_dp->defer_t = SDEB_DEFER_WQ;
+                       schedule_work(&sd_dp->ew.work);
+               }
+               if (sdebug_statistics)
+                       sd_dp->issuing_cpu = raw_smp_processor_id();
+               if (unlikely(sd_dp->aborted)) {
                        sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
                        blk_abort_request(cmnd->request);
                        atomic_set(&sdeb_inject_pending, 0);
+                       sd_dp->aborted = false;
                }
        }
        if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
@@ -5615,6 +5647,7 @@ module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
+module_param_named(poll_queues, poll_queues, int, S_IRUGO);
 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
@@ -5677,6 +5710,7 @@ MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent
 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
@@ -5768,11 +5802,12 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
                   dix_reads, dix_writes, dif_errors);
        seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
                   sdebug_statistics);
-       seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+       seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
                   atomic_read(&sdebug_cmnd_count),
                   atomic_read(&sdebug_completions),
                   "miss_cpus", atomic_read(&sdebug_miss_cpus),
-                  atomic_read(&sdebug_a_tsf));
+                  atomic_read(&sdebug_a_tsf),
+                  atomic_read(&sdeb_mq_poll_count));
 
        seq_printf(m, "submit_queues=%d\n", submit_queues);
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
@@ -7202,6 +7237,121 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        return check_condition_result;
 }
 
+static int sdebug_map_queues(struct Scsi_Host *shost)
+{
+       int i, qoff;
+
+       if (shost->nr_hw_queues == 1)
+               return 0;
+
+       for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+               struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+               map->nr_queues  = 0;
+
+               if (i == HCTX_TYPE_DEFAULT)
+                       map->nr_queues = submit_queues - poll_queues;
+               else if (i == HCTX_TYPE_POLL)
+                       map->nr_queues = poll_queues;
+
+               if (!map->nr_queues) {
+                       BUG_ON(i == HCTX_TYPE_DEFAULT);
+                       continue;
+               }
+
+               map->queue_offset = qoff;
+               blk_mq_map_queues(map);
+
+               qoff += map->nr_queues;
+       }
+
+       return 0;
+}
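
A worked example of the split performed above, runnable in userspace (the HCTX types stand in numerically): with submit_queues=4 and poll_queues=1, DEFAULT gets three queues at offset 0 and POLL one queue at offset 3, while READ gets nr_queues=0 and is skipped.

        #include <stdio.h>

        /* Mirrors the nr_queues/queue_offset bookkeeping in
         * sdebug_map_queues() with illustrative values.
         */
        int main(void)
        {
                int submit_queues = 4, poll_queues = 1, qoff = 0;
                int nr[3] = { submit_queues - poll_queues, 0, poll_queues };
                const char *name[3] = { "DEFAULT", "READ", "POLL" };

                for (int i = 0; i < 3; i++) {
                        if (!nr[i]) {
                                printf("HCTX_TYPE_%s: skipped\n", name[i]);
                                continue;
                        }
                        printf("HCTX_TYPE_%s: nr_queues=%d queue_offset=%d\n",
                               name[i], nr[i], qoff);
                        qoff += nr[i];
                }
                return 0;
        }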
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+       bool first;
+       bool retiring = false;
+       int num_entries = 0;
+       unsigned int qc_idx = 0;
+       unsigned long iflags;
+       ktime_t kt_from_boot = ktime_get_boottime();
+       struct sdebug_queue *sqp;
+       struct sdebug_queued_cmd *sqcp;
+       struct scsi_cmnd *scp;
+       struct sdebug_dev_info *devip;
+       struct sdebug_defer *sd_dp;
+
+       sqp = sdebug_q_arr + queue_num;
+       spin_lock_irqsave(&sqp->qc_lock, iflags);
+
+       for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
+               if (first) {
+                       qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+                       first = false;
+               } else {
+                       qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
+               }
+               if (unlikely(qc_idx >= sdebug_max_queue))
+                       break;
+
+               sqcp = &sqp->qc_arr[qc_idx];
+               sd_dp = sqcp->sd_dp;
+               if (unlikely(!sd_dp))
+                       continue;
+               scp = sqcp->a_cmnd;
+               if (unlikely(scp == NULL)) {
+                       pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
+                              queue_num, qc_idx, __func__);
+                       break;
+               }
+               if (sd_dp->defer_t == SDEB_DEFER_POLL) {
+                       if (kt_from_boot < sd_dp->cmpl_ts)
+                               continue;
+
+               } else          /* ignore non-REQ_HIPRI requests */
+                       continue;
+               devip = (struct sdebug_dev_info *)scp->device->hostdata;
+               if (likely(devip))
+                       atomic_dec(&devip->num_in_q);
+               else
+                       pr_err("devip=NULL from %s\n", __func__);
+               if (unlikely(atomic_read(&retired_max_queue) > 0))
+                       retiring = true;
+
+               sqcp->a_cmnd = NULL;
+               if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+                       pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
+                               sqp, queue_num, qc_idx, __func__);
+                       break;
+               }
+               if (unlikely(retiring)) {       /* user has reduced max_queue */
+                       int k, retval;
+
+                       retval = atomic_read(&retired_max_queue);
+                       if (qc_idx >= retval) {
+                               pr_err("index %d too large\n", retval);
+                               break;
+                       }
+                       k = find_last_bit(sqp->in_use_bm, retval);
+                       if ((k < sdebug_max_queue) || (k == retval))
+                               atomic_set(&retired_max_queue, 0);
+                       else
+                               atomic_set(&retired_max_queue, k + 1);
+               }
+               sd_dp->defer_t = SDEB_DEFER_NONE;
+               spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+               scp->scsi_done(scp); /* callback to mid level */
+               spin_lock_irqsave(&sqp->qc_lock, iflags);
+               num_entries++;
+       }
+       spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+       if (num_entries > 0)
+               atomic_add(num_entries, &sdeb_mq_poll_count);
+       return num_entries;
+}
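
Each polled command carries an absolute completion deadline (cmpl_ts, set in schedule_resp() above), so delay emulation keeps working without a timer: the poll loop reaps only entries whose deadline has passed. The reap-if-due test in isolation (a sketch; ktime_t values compare directly as signed 64-bit nanoseconds, matching the comparison above):

        static bool sdeb_cmd_due(const struct sdebug_defer *sd_dp, ktime_t now)
        {
                if (sd_dp->defer_t != SDEB_DEFER_POLL)
                        return false;           /* not a polled (REQ_HIPRI) command */
                return now >= sd_dp->cmpl_ts;   /* emulated latency has elapsed */
        }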
+
 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
                                   struct scsi_cmnd *scp)
 {
@@ -7381,6 +7531,8 @@ static struct scsi_host_template sdebug_driver_template = {
        .ioctl =                scsi_debug_ioctl,
        .queuecommand =         scsi_debug_queuecommand,
        .change_queue_depth =   sdebug_change_qdepth,
+       .map_queues =           sdebug_map_queues,
+       .mq_poll =              sdebug_blk_mq_poll,
        .eh_abort_handler =     scsi_debug_abort,
        .eh_device_reset_handler = scsi_debug_device_reset,
        .eh_target_reset_handler = scsi_debug_target_reset,
@@ -7428,6 +7580,25 @@ static int sdebug_driver_probe(struct device *dev)
        if (sdebug_host_max_queue)
                hpnt->host_tagset = 1;
 
+       /* poll queues are possible for nr_hw_queues > 1 */
+       if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
+               pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
+                        my_name, poll_queues, hpnt->nr_hw_queues);
+               poll_queues = 0;
+       }
+
+       /*
+        * Poll queues don't need interrupts, but we need at least one
+        * I/O queue left over for non-polled I/O.
+        * If that condition is not met, trim poll_queues to 1 (just for
+        * simplicity).
+        */
+       if (poll_queues >= submit_queues) {
+               pr_warn("%s: trim poll_queues to 1\n", my_name);
+               poll_queues = 1;
+       }
+       if (poll_queues)
+               hpnt->nr_maps = 3;
+
        sdbg_host->shost = hpnt;
        *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
        if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
index 6f41e4b..7b56e00 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * SCSI device handler infrastruture.
+ * SCSI device handler infrastructure.
  *
  * Copyright IBM Corporation, 2007
  *      Authors:
index 7d52a11..4bcd744 100644 (file)
@@ -328,7 +328,8 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);
 
-       atomic_dec(&sdev->device_busy);
+       sbitmap_put(&sdev->budget_map, cmd->budget_token);
+       cmd->budget_token = -1;
 }
 
 static void scsi_kick_queue(struct request_queue *q)
@@ -384,7 +385,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 
 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
 {
-       if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+       if (scsi_device_busy(sdev) >= sdev->queue_depth)
                return true;
        if (atomic_read(&sdev->device_blocked) > 0)
                return true;
@@ -1143,6 +1144,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
        unsigned long jiffies_at_alloc;
        int retries, to_clear;
        bool in_flight;
+       int budget_token = cmd->budget_token;
 
        if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
                flags |= SCMD_INITIALIZED;
@@ -1171,6 +1173,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
        cmd->retries = retries;
        if (in_flight)
                __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+       cmd->budget_token = budget_token;
 
 }
 
@@ -1254,19 +1257,20 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
 }
 
 /*
- * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
- * return 0.
- *
- * Called with the queue_lock held.
+ * scsi_dev_queue_ready: if we can send requests to sdev, assign a
+ * budget token and return it; otherwise return -1.
  */
 static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
 {
-       unsigned int busy;
+       int token;
 
-       busy = atomic_inc_return(&sdev->device_busy) - 1;
+       token = sbitmap_get(&sdev->budget_map);
        if (atomic_read(&sdev->device_blocked)) {
-               if (busy)
+               if (token < 0)
+                       goto out;
+
+               if (scsi_device_busy(sdev) > 1)
                        goto out_dec;
 
                /*
@@ -1278,13 +1282,12 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
                                   "unblocking device at zero depth\n"));
        }
 
-       if (busy >= sdev->queue_depth)
-               goto out_dec;
-
-       return 1;
+       return token;
 out_dec:
-       atomic_dec(&sdev->device_busy);
-       return 0;
+       if (token >= 0)
+               sbitmap_put(&sdev->budget_map, token);
+out:
+       return -1;
 }
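
The token handed out here travels with the command as cmd->budget_token and is returned to the per-device sbitmap at completion time (see scsi_device_unbusy() above). Stripped to its essentials, with error paths elided (sketch only):

        /* Minimal sketch of the budget-token lifecycle: one bit in
         * sdev->budget_map per in-flight command, capacity == queue_depth.
         */
        static int issue_one(struct scsi_device *sdev, struct scsi_cmnd *cmd)
        {
                int token = sbitmap_get(&sdev->budget_map);

                if (token < 0)
                        return -EBUSY;          /* device already at queue_depth */

                cmd->budget_token = token;      /* carried with the request */
                /* ... dispatch to the LLD ... */
                return 0;
        }

        static void complete_one(struct scsi_device *sdev, struct scsi_cmnd *cmd)
        {
                sbitmap_put(&sdev->budget_map, cmd->budget_token);
                cmd->budget_token = -1;         /* poison against a double put */
        }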
 
 /*
@@ -1605,19 +1608,20 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
        blk_mq_complete_request(cmd->request);
 }
 
-static void scsi_mq_put_budget(struct request_queue *q)
+static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
 {
        struct scsi_device *sdev = q->queuedata;
 
-       atomic_dec(&sdev->device_busy);
+       sbitmap_put(&sdev->budget_map, budget_token);
 }
 
-static bool scsi_mq_get_budget(struct request_queue *q)
+static int scsi_mq_get_budget(struct request_queue *q)
 {
        struct scsi_device *sdev = q->queuedata;
+       int token = scsi_dev_queue_ready(q, sdev);
 
-       if (scsi_dev_queue_ready(q, sdev))
-               return true;
+       if (token >= 0)
+               return token;
 
        atomic_inc(&sdev->restarts);
 
@@ -1636,10 +1640,24 @@ static bool scsi_mq_get_budget(struct request_queue *q)
         * the .restarts flag, and the request queue will be run for handling
         * this request, see scsi_end_request().
         */
-       if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+       if (unlikely(scsi_device_busy(sdev) == 0 &&
                                !scsi_device_blocked(sdev)))
                blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
-       return false;
+       return -1;
+}
+
+static void scsi_mq_set_rq_budget_token(struct request *req, int token)
+{
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+       cmd->budget_token = token;
+}
+
+static int scsi_mq_get_rq_budget_token(struct request *req)
+{
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+
+       return cmd->budget_token;
 }
 
 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
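
Because budget is now a token rather than a yes/no answer, blk-mq must carry
the token from .get_budget() to the request and back; the two new
.set_rq_budget_token/.get_rq_budget_token callbacks do exactly that, stashing
the value in the scsi_cmnd. A simplified sketch of the dispatch-side flow
(the real logic lives in blk-mq; this only shows the shape of it):

	int token = ops->get_budget(q);		/* e.g. scsi_mq_get_budget() */

	if (token < 0)
		return false;			/* no budget, retry later */

	ops->set_rq_budget_token(rq, token);	/* remember it in the request */

	/*
	 * ->queue_rq() runs next.  If dispatch fails, the budget is
	 * returned using the token recovered from the request:
	 *
	 *	ops->put_budget(q, ops->get_rq_budget_token(rq));
	 */
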
@@ -1653,6 +1671,8 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t ret;
        int reason;
 
+       WARN_ON_ONCE(cmd->budget_token < 0);
+
        /*
         * If the device is not in running state we will reject some or all
         * commands.
@@ -1704,7 +1724,8 @@ out_dec_target_busy:
        if (scsi_target(sdev)->can_queue > 0)
                atomic_dec(&scsi_target(sdev)->target_busy);
 out_put_budget:
-       scsi_mq_put_budget(q);
+       scsi_mq_put_budget(q, cmd->budget_token);
+       cmd->budget_token = -1;
        switch (ret) {
        case BLK_STS_OK:
                break;
@@ -1789,6 +1810,26 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
                               cmd->sense_buffer);
 }
 
+
+static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
+{
+       struct Scsi_Host *shost = hctx->driver_data;
+
+       if (shost->hostt->mq_poll)
+               return shost->hostt->mq_poll(shost, hctx->queue_num);
+
+       return 0;
+}
+
+static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                         unsigned int hctx_idx)
+{
+       struct Scsi_Host *shost = data;
+
+       hctx->driver_data = shost;
+       return 0;
+}
+
 static int scsi_map_queues(struct blk_mq_tag_set *set)
 {
        struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
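
scsi_init_hctx() caches the Scsi_Host in hctx->driver_data once per hardware
context, which lets scsi_mq_poll() here and the reworked scsi_commit_rqs()
below skip the queue->queuedata->host pointer chase on every call. The new
->mq_poll() host-template callback is optional; a hypothetical low-level
driver would implement it roughly as follows (the completion helpers are
invented for illustration):

static int example_lld_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct example_hba *hba = shost_priv(shost);
	int found = 0;

	/* Reap completions for this queue without waiting for an IRQ. */
	while (example_reap_one_completion(hba, queue_num))
		found++;

	return found;	/* number of requests completed */
}
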
@@ -1856,14 +1897,16 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
        .cleanup_rq     = scsi_cleanup_rq,
        .busy           = scsi_mq_lld_busy,
        .map_queues     = scsi_map_queues,
+       .init_hctx      = scsi_init_hctx,
+       .poll           = scsi_mq_poll,
+       .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+       .get_rq_budget_token = scsi_mq_get_rq_budget_token,
 };
 
 
 static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
 {
-       struct request_queue *q = hctx->queue;
-       struct scsi_device *sdev = q->queuedata;
-       struct Scsi_Host *shost = sdev->host;
+       struct Scsi_Host *shost = hctx->driver_data;
 
        shost->hostt->commit_rqs(shost, hctx->queue_num);
 }
@@ -1884,6 +1927,10 @@ static const struct blk_mq_ops scsi_mq_ops = {
        .cleanup_rq     = scsi_cleanup_rq,
        .busy           = scsi_mq_lld_busy,
        .map_queues     = scsi_map_queues,
+       .init_hctx      = scsi_init_hctx,
+       .poll           = scsi_mq_poll,
+       .set_rq_budget_token = scsi_mq_set_rq_budget_token,
+       .get_rq_budget_token = scsi_mq_get_rq_budget_token,
 };
 
 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -1916,6 +1963,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
        else
                tag_set->ops = &scsi_mq_ops_no_commit;
        tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+       tag_set->nr_maps = shost->nr_maps ? : 1;
        tag_set->queue_depth = shost->can_queue;
        tag_set->cmd_size = cmd_size;
        tag_set->numa_node = NUMA_NO_NODE;
index 180636d..ed240f0 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/device.h>
 #include <linux/async.h>
 #include <scsi/scsi_device.h>
+#include <linux/sbitmap.h>
 
 struct request_queue;
 struct request;
@@ -96,8 +97,6 @@ extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
 extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
 extern void scsi_exit_queue(void);
 extern void scsi_evt_thread(struct work_struct *work);
-struct request_queue;
-struct request;
 
 /* scsi_proc.c */
 #ifdef CONFIG_SCSI_PROC_FS
@@ -182,6 +181,8 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
 static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
 #endif
 
+extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
+
 /* 
  * internal scsi timeout functions: for use by mid-layer and transport
  * classes.
index 9af50e6..9f1b7f3 100644 (file)
@@ -215,6 +215,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
                                           u64 lun, void *hostdata)
 {
+       unsigned int depth;
        struct scsi_device *sdev;
        int display_failure_msg = 1, ret;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -276,8 +277,25 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
        WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
        sdev->request_queue->queuedata = sdev;
 
-       scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
-                                       sdev->host->cmd_per_lun : 1);
+       depth = sdev->host->cmd_per_lun ?: 1;
+
+       /*
+        * Use .can_queue as budget map's depth because we have to
+        * support adjusting queue depth from sysfs. Meanwhile, use the
+        * default device queue depth to figure out the sbitmap shift,
+        * since that is the queue depth used most of the time.
+        */
+       if (sbitmap_init_node(&sdev->budget_map,
+                               scsi_device_max_queue_depth(sdev),
+                               sbitmap_calculate_shift(depth),
+                               GFP_KERNEL, sdev->request_queue->node,
+                               false, true)) {
+               put_device(&starget->dev);
+               kfree(sdev);
+               goto out;
+       }
+
+       scsi_change_queue_depth(sdev, depth);
 
        scsi_sysfs_device_initialize(sdev);
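
Note the deliberate split between depth and shift above: the map is sized for
the largest depth sysfs may ever request (scsi_device_max_queue_depth()),
while sbitmap_calculate_shift(depth) lays out the bits-per-word granularity
for the cmd_per_lun depth the device normally runs at. Condensed into a
hypothetical helper mirroring that call:

static int example_init_budget_map(struct sbitmap *map, unsigned int max_depth,
				   unsigned int typical_depth, int node)
{
	/* Size for the worst case, lay out words for the common case. */
	return sbitmap_init_node(map, max_depth,
				 sbitmap_calculate_shift(typical_depth),
				 GFP_KERNEL, node,
				 false,	/* no round-robin allocation */
				 true);	/* use per-CPU alloc hints */
}
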
 
@@ -979,6 +997,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
                scsi_attach_vpd(sdev);
 
        sdev->max_queue_depth = sdev->queue_depth;
+       WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
        sdev->sdev_bflags = *bflags;
 
        /*
index b6378c8..6d2a282 100644 (file)
@@ -477,6 +477,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
        /* NULL queue means the device can't be used */
        sdev->request_queue = NULL;
 
+       sbitmap_free(&sdev->budget_map);
+
        mutex_lock(&sdev->inquiry_mutex);
        vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
                                       lockdep_is_held(&sdev->inquiry_mutex));
@@ -670,7 +672,7 @@ sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
-       return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+       return snprintf(buf, 20, "%d\n", scsi_device_busy(sdev));
 }
 static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
 
@@ -1458,7 +1460,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
 
        /*
         * Paired with the kref_get() in scsi_sysfs_initialize().  We have
-        * remoed sysfs visibility from the device, so make the target
+        * removed sysfs visibility from the device, so make the target
         * invisible if this was the last device underneath it.
         */
        scsi_target_reap(scsi_target(sdev));
index ed0b1bb..91c34ee 100644 (file)
@@ -1519,7 +1519,7 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 }
 
 /**
- *     sd_ioctl - process an ioctl
+ *     sd_ioctl_common - process an ioctl
  *     @bdev: target block device
  *     @mode: FMODE_* mask
  *     @cmd: ioctl command number
index 994f1b8..e45d8d9 100644 (file)
@@ -134,7 +134,7 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
 }
 
 /**
- * Allocate a buffer for report zones reply.
+ * sd_zbc_alloc_report_buffer() - Allocate a buffer for report zones reply.
  * @sdkp: The target disk
  * @nr_zones: Maximum number of zones to report
  * @buflen: Size of the buffer allocated
index 4383d93..737cea9 100644 (file)
@@ -2503,7 +2503,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
                              scsidp->id, scsidp->lun, (int) scsidp->type,
                              1,
                              (int) scsidp->queue_depth,
-                             (int) atomic_read(&scsidp->device_busy),
+                             (int) scsi_device_busy(scsidp),
                              (int) scsi_device_online(scsidp));
        }
        read_unlock_irqrestore(&sg_index_lock, iflags);
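
Every former direct reader of the device_busy counter (the sysfs attribute,
/proc/scsi/sg above, and the busy checks in scsi_lib.c) now goes through a
scsi_device_busy() accessor. With the budget map in place, the natural
implementation is a popcount over the sbitmap; a simplified rendering of such
an accessor (assuming sbitmap_weight() semantics):

static inline int example_scsi_device_busy(struct scsi_device *sdev)
{
	/* One set bit per command currently holding a budget token. */
	return sbitmap_weight(&sdev->budget_map);
}

This trades a cheap atomic read for a bitmap scan, which is acceptable since
these are all slow paths, while the hot dispatch path no longer touches a
shared counter at all.
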
index 2230261..e519df6 100644 (file)
@@ -213,21 +213,19 @@ static struct eisa_driver sim710_eisa_driver = {
 
 static int __init sim710_init(void)
 {
-       int err = -ENODEV;
-
 #ifdef MODULE
        if (sim710)
                param_setup(sim710);
 #endif
 
 #ifdef CONFIG_EISA
-       err = eisa_driver_register(&sim710_eisa_driver);
+       /*
+        * FIXME: We'd really like to return -ENODEV if no devices have actually
+        * been found.  However, eisa_driver_register() only reports problems
+        * with kobject_register(), so simply return success for now.
+        */
+       eisa_driver_register(&sim710_eisa_driver);
 #endif
-       /* FIXME: what we'd really like to return here is -ENODEV if
-        * no devices have actually been found.  Instead, the err
-        * above actually only reports problems with kobject_register,
-        * so for the moment return success */
-
        return 0;
 }
 
index 3e54590..d7dac55 100644 (file)
@@ -79,7 +79,8 @@ struct pqi_ctrl_registers {
        __le32  sis_ctrl_to_host_doorbell_clear;        /* A0h */
        u8      reserved4[0xb0 - (0xa0 + sizeof(__le32))];
        __le32  sis_driver_scratch;                     /* B0h */
-       u8      reserved5[0xbc - (0xb0 + sizeof(__le32))];
+       __le32  sis_product_identifier;                 /* B4h */
+       u8      reserved5[0xbc - (0xb4 + sizeof(__le32))];
        __le32  sis_firmware_status;                    /* BCh */
        u8      reserved6[0x1000 - (0xbc + sizeof(__le32))];
        __le32  sis_mailbox[8];                         /* 1000h */
@@ -128,10 +129,13 @@ struct pqi_iu_header {
        __le16  iu_length;      /* in bytes - does not include the length */
                                /* of this header */
        __le16  response_queue_id;      /* specifies the OQ where the */
-                                       /*   response IU is to be delivered */
-       u8      work_area[2];   /* reserved for driver use */
+                                       /* response IU is to be delivered */
+       u16     driver_flags;   /* reserved for driver use */
 };
 
+/* manifest constants for pqi_iu_header.driver_flags */
+#define PQI_DRIVER_NONBLOCKABLE_REQUEST                0x1
+
 /*
  * According to the PQI spec, the IU header is only the first 4 bytes of our
  * pqi_iu_header structure.
@@ -256,6 +260,7 @@ struct pqi_device_capability {
 };
 
 #define PQI_MAX_EMBEDDED_SG_DESCRIPTORS                4
+#define PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS    3
 
 struct pqi_raid_path_request {
        struct pqi_iu_header header;
@@ -279,8 +284,7 @@ struct pqi_raid_path_request {
        u8      cdb[16];
        u8      reserved6[12];
        __le32  timeout;
-       struct pqi_sg_descriptor
-               sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+       struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
 };
 
 struct pqi_aio_path_request {
@@ -307,8 +311,73 @@ struct pqi_aio_path_request {
        u8      cdb_length;
        u8      lun_number[8];
        u8      reserved4[4];
-       struct pqi_sg_descriptor
-               sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+       struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+#define PQI_RAID1_NVME_XFER_LIMIT      (32 * 1024)     /* 32 KiB */
+
+struct pqi_aio_r1_path_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       __le16  volume_id;      /* ID of the RAID volume */
+       __le32  it_nexus_1;     /* IT nexus of the 1st drive in the RAID volume */
+       __le32  it_nexus_2;     /* IT nexus of the 2nd drive in the RAID volume */
+       __le32  it_nexus_3;     /* IT nexus of the 3rd drive in the RAID volume */
+       __le32  data_length;    /* total bytes to read/write */
+       u8      data_direction : 2;
+       u8      partial : 1;
+       u8      memory_type : 1;
+       u8      fence : 1;
+       u8      encryption_enable : 1;
+       u8      reserved : 2;
+       u8      task_attribute : 3;
+       u8      command_priority : 4;
+       u8      reserved2 : 1;
+       __le16  data_encryption_key_index;
+       u8      cdb[16];
+       __le16  error_index;
+       u8      num_sg_descriptors;
+       u8      cdb_length;
+       u8      num_drives;     /* number of drives in the RAID volume (2 or 3) */
+       u8      reserved3[3];
+       __le32  encrypt_tweak_lower;
+       __le32  encrypt_tweak_upper;
+       struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+#define PQI_DEFAULT_MAX_WRITE_RAID_5_6                 (8 * 1024U)
+#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA    (~0U)
+#define PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME                (32 * 1024U)
+
+struct pqi_aio_r56_path_request {
+       struct pqi_iu_header header;
+       __le16  request_id;
+       __le16  volume_id;              /* ID of the RAID volume */
+       __le32  data_it_nexus;          /* IT nexus for the data drive */
+       __le32  p_parity_it_nexus;      /* IT nexus for the P parity drive */
+       __le32  q_parity_it_nexus;      /* IT nexus for the Q parity drive */
+       __le32  data_length;            /* total bytes to read/write */
+       u8      data_direction : 2;
+       u8      partial : 1;
+       u8      mem_type : 1;           /* 0 = PCIe, 1 = DDR */
+       u8      fence : 1;
+       u8      encryption_enable : 1;
+       u8      reserved : 2;
+       u8      task_attribute : 3;
+       u8      command_priority : 4;
+       u8      reserved1 : 1;
+       __le16  data_encryption_key_index;
+       u8      cdb[16];
+       __le16  error_index;
+       u8      num_sg_descriptors;
+       u8      cdb_length;
+       u8      xor_multiplier;
+       u8      reserved2[3];
+       __le32  encrypt_tweak_lower;
+       __le32  encrypt_tweak_upper;
+       __le64  row;                    /* row = logical LBA/blocks per row */
+       u8      reserved3[8];
+       struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS];
 };
 
 struct pqi_io_response {
@@ -353,7 +422,7 @@ struct pqi_event_config {
 
 #define PQI_EVENT_OFA_MEMORY_ALLOCATION        0x0
 #define PQI_EVENT_OFA_QUIESCE          0x1
-#define PQI_EVENT_OFA_CANCELLED                0x2
+#define PQI_EVENT_OFA_CANCELED         0x2
 
 struct pqi_event_response {
        struct pqi_iu_header header;
@@ -442,10 +511,6 @@ struct pqi_vendor_general_response {
 #define PQI_OFA_SIGNATURE              "OFA_QRM"
 #define PQI_OFA_MAX_SG_DESCRIPTORS     64
 
-#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
-       (offsetof(struct pqi_ofa_memory, sg_descriptor) + \
-       (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
-
 struct pqi_ofa_memory {
        __le64  signature;      /* "OFA_QRM" */
        __le16  version;        /* version of this struct (1 = 1st version) */
@@ -453,7 +518,7 @@ struct pqi_ofa_memory {
        __le32  bytes_allocated;        /* total allocated memory in bytes */
        __le16  num_memory_descriptors;
        u8      reserved1[2];
-       struct pqi_sg_descriptor sg_descriptor[1];
+       struct pqi_sg_descriptor sg_descriptor[PQI_OFA_MAX_SG_DESCRIPTORS];
 };
 
 struct pqi_aio_error_info {
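
Sizing sg_descriptor[] to PQI_OFA_MAX_SG_DESCRIPTORS (instead of a 1-element
array paired with the PQI_OFA_MEMORY_DESCRIPTOR_LENGTH macro removed above)
lets sizeof(struct pqi_ofa_memory) describe the whole host buffer by itself.
The old macro's arithmetic effectively becomes a property of the type, which
an illustrative compile-time check (not driver code) can express as:

	BUILD_BUG_ON(sizeof(struct pqi_ofa_memory) <
		     offsetof(struct pqi_ofa_memory, sg_descriptor) +
		     PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor));
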
@@ -483,6 +548,9 @@ struct pqi_raid_error_info {
 #define PQI_REQUEST_IU_TASK_MANAGEMENT                 0x13
 #define PQI_REQUEST_IU_RAID_PATH_IO                    0x14
 #define PQI_REQUEST_IU_AIO_PATH_IO                     0x15
+#define PQI_REQUEST_IU_AIO_PATH_RAID5_IO               0x18
+#define PQI_REQUEST_IU_AIO_PATH_RAID6_IO               0x19
+#define PQI_REQUEST_IU_AIO_PATH_RAID1_IO               0x1A
 #define PQI_REQUEST_IU_GENERAL_ADMIN                   0x60
 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG      0x72
 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG         0x73
@@ -585,6 +653,7 @@ struct pqi_raid_error_info {
 /* these values are defined by the PQI spec */
 #define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE       255
 #define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE 65535
+
 #define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT      64
 #define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT     16
 #define PQI_ADMIN_INDEX_ALIGNMENT              64
@@ -654,7 +723,7 @@ struct pqi_admin_queues_aligned {
 struct pqi_admin_queues {
        void            *iq_element_array;
        void            *oq_element_array;
-       pqi_index_t     *iq_ci;
+       pqi_index_t __iomem *iq_ci;
        pqi_index_t __iomem *oq_pi;
        dma_addr_t      iq_element_array_bus_addr;
        dma_addr_t      oq_element_array_bus_addr;
@@ -679,8 +748,8 @@ struct pqi_queue_group {
        dma_addr_t      oq_element_array_bus_addr;
        __le32 __iomem  *iq_pi[2];
        pqi_index_t     iq_pi_copy[2];
-       pqi_index_t __iomem     *iq_ci[2];
-       pqi_index_t __iomem     *oq_pi;
+       pqi_index_t __iomem *iq_ci[2];
+       pqi_index_t __iomem *oq_pi;
        dma_addr_t      iq_ci_bus_addr[2];
        dma_addr_t      oq_pi_bus_addr;
        __le32 __iomem  *oq_ci;
@@ -693,7 +762,7 @@ struct pqi_event_queue {
        u16             oq_id;
        u16             int_msg_num;
        void            *oq_element_array;
-       pqi_index_t __iomem     *oq_pi;
+       pqi_index_t __iomem *oq_pi;
        dma_addr_t      oq_element_array_bus_addr;
        dma_addr_t      oq_pi_bus_addr;
        __le32 __iomem  *oq_ci;
@@ -759,13 +828,29 @@ struct pqi_config_table_firmware_features {
        u8      features_supported[];
 /*     u8      features_requested_by_host[]; */
 /*     u8      features_enabled[]; */
-};
-
-#define PQI_FIRMWARE_FEATURE_OFA                       0
-#define PQI_FIRMWARE_FEATURE_SMP                       1
-#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE      11
-#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT           13
-#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT            14
+/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */
+/*     __le16  firmware_max_known_feature; */
+/*     __le16  host_max_known_feature; */
+};
+
+#define PQI_FIRMWARE_FEATURE_OFA                               0
+#define PQI_FIRMWARE_FEATURE_SMP                               1
+#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE                 2
+#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS                        3
+#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS                        4
+#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS                        5
+#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS                        6
+#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS               7
+#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS               8
+#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS               9
+#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS               10
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE              11
+#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN                   12
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT                   13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT                    14
+#define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME     15
+#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN    16
+#define PQI_FIRMWARE_FEATURE_MAXIMUM                           16
 
 struct pqi_config_table_debug {
        struct pqi_config_table_section_header header;
@@ -906,8 +991,65 @@ struct raid_map {
 
 #pragma pack()
 
+struct pqi_scsi_dev_raid_map_data {
+       bool    is_write;
+       u8      raid_level;
+       u32     map_index;
+       u64     first_block;
+       u64     last_block;
+       u32     data_length;
+       u32     block_cnt;
+       u32     blocks_per_row;
+       u64     first_row;
+       u64     last_row;
+       u32     first_row_offset;
+       u32     last_row_offset;
+       u32     first_column;
+       u32     last_column;
+       u64     r5or6_first_row;
+       u64     r5or6_last_row;
+       u32     r5or6_first_row_offset;
+       u32     r5or6_last_row_offset;
+       u32     r5or6_first_column;
+       u32     r5or6_last_column;
+       u16     data_disks_per_row;
+       u32     total_disks_per_row;
+       u16     layout_map_count;
+       u32     stripesize;
+       u16     strip_size;
+       u32     first_group;
+       u32     last_group;
+       u32     map_row;
+       u32     aio_handle;
+       u64     disk_block;
+       u32     disk_block_cnt;
+       u8      cdb[16];
+       u8      cdb_length;
+
+       /* RAID 1 specific */
+#define NUM_RAID1_MAP_ENTRIES  3
+       u32     num_it_nexus_entries;
+       u32     it_nexus[NUM_RAID1_MAP_ENTRIES];
+
+       /* RAID 5 / RAID 6 specific */
+       u32     p_parity_it_nexus;      /* aio_handle */
+       u32     q_parity_it_nexus;      /* aio_handle */
+       u8      xor_mult;
+       u64     row;
+       u64     stripe_lba;
+       u32     p_index;
+       u32     q_index;
+};
+
 #define RAID_CTLR_LUNID                "\0\0\0\0\0\0\0\0"
 
+#define NUM_STREAMS_PER_LUN    8
+
+struct pqi_stream_data {
+       u64     next_lba;
+       u32     last_accessed;
+};
+
 struct pqi_scsi_dev {
        int     devtype;                /* as reported by INQUIRY command */
        u8      device_type;            /* as reported by */
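
The new pqi_stream_data array (NUM_STREAMS_PER_LUN slots of next_lba and
last_accessed per logical volume) gives the driver what it needs to recognize
sequential write streams and keep them on the RAID path, where the
controller's full-stripe write handling beats per-request bypass. A sketch of
the detection idea only (field names come from the struct above; the driver's
actual scoring logic differs):

static bool example_is_sequential_write(struct pqi_stream_data *streams,
					u64 first_lba, u32 block_cnt)
{
	unsigned int i;

	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
		if (streams[i].next_lba == first_lba) {
			/* Extends a tracked stream: call it sequential. */
			streams[i].next_lba = first_lba + block_cnt;
			streams[i].last_accessed = (u32)jiffies;
			return true;
		}
	}

	/* Otherwise claim the least-recently-used slot for a new stream. */
	return false;
}
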
@@ -929,7 +1071,6 @@ struct pqi_scsi_dev {
        u8      volume_offline : 1;
        u8      rescan : 1;
        bool    aio_enabled;            /* only valid for physical disks */
-       bool    in_reset;
        bool    in_remove;
        bool    device_offline;
        u8      vendor[8];              /* bytes 8-15 of inquiry data */
@@ -948,11 +1089,12 @@ struct pqi_scsi_dev {
        u8      phy_connected_dev_type;
        u8      box[8];
        u16     phys_connector[8];
+       u8      phy_id;
        bool    raid_bypass_configured; /* RAID bypass configured */
        bool    raid_bypass_enabled;    /* RAID bypass enabled */
-       int     offload_to_mirror;      /* Send next RAID bypass request */
-                                       /* to mirror drive. */
+       u32     next_bypass_group;
        struct raid_map *raid_map;      /* RAID bypass map */
+       u32     max_transfer_encrypted;
 
        struct pqi_sas_port *sas_port;
        struct scsi_device *sdev;
@@ -962,8 +1104,10 @@ struct pqi_scsi_dev {
        struct list_head add_list_entry;
        struct list_head delete_list_entry;
 
+       struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
        atomic_t scsi_cmds_outstanding;
        atomic_t raid_bypass_cnt;
+       u8      page_83_identifier[16];
 };
 
 /* VPD inquiry pages */
@@ -1069,10 +1213,8 @@ struct pqi_io_request {
 struct pqi_event {
        bool    pending;
        u8      event_type;
-       __le16  event_id;
-       __le32  additional_event_id;
-       __le32  ofa_bytes_requested;
-       __le16  ofa_cancel_reason;
+       u16     event_id;
+       u32     additional_event_id;
 };
 
 #define PQI_RESERVED_IO_SLOTS_LUN_RESET                        1
@@ -1082,13 +1224,20 @@ struct pqi_event {
        (PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
        PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
 
+#define PQI_CTRL_PRODUCT_ID_GEN1       0
+#define PQI_CTRL_PRODUCT_ID_GEN2       7
+#define PQI_CTRL_PRODUCT_REVISION_A    0
+#define PQI_CTRL_PRODUCT_REVISION_B    1
+
 struct pqi_ctrl_info {
        unsigned int    ctrl_id;
        struct pci_dev  *pci_dev;
-       char            firmware_version[11];
+       char            firmware_version[32];
        char            serial_number[17];
        char            model[17];
        char            vendor[9];
+       u8              product_id;
+       u8              product_revision;
        void __iomem    *iomem_base;
        struct pqi_ctrl_registers __iomem *registers;
        struct pqi_device_registers __iomem *pqi_registers;
@@ -1118,6 +1267,7 @@ struct pqi_ctrl_info {
        u16             max_inbound_iu_length_per_firmware;
        u16             max_inbound_iu_length;
        unsigned int    max_sg_per_iu;
+       unsigned int    max_sg_per_r56_iu;
        void            *admin_queue_memory_base;
        u32             admin_queue_memory_length;
        dma_addr_t      admin_queue_memory_base_dma_handle;
@@ -1136,19 +1286,29 @@ struct pqi_ctrl_info {
 
        struct mutex    scan_mutex;
        struct mutex    lun_reset_mutex;
-       struct mutex    ofa_mutex; /* serialize ofa */
        bool            controller_online;
        bool            block_requests;
-       bool            block_device_reset;
-       bool            in_ofa;
-       bool            in_shutdown;
+       bool            scan_blocked;
        u8              inbound_spanning_supported : 1;
        u8              outbound_spanning_supported : 1;
        u8              pqi_mode_enabled : 1;
        u8              pqi_reset_quiesce_supported : 1;
        u8              soft_reset_handshake_supported : 1;
-       u8              raid_iu_timeout_supported: 1;
-       u8              tmf_iu_timeout_supported: 1;
+       u8              raid_iu_timeout_supported : 1;
+       u8              tmf_iu_timeout_supported : 1;
+       u8              unique_wwid_in_report_phys_lun_supported : 1;
+       u8              enable_r1_writes : 1;
+       u8              enable_r5_writes : 1;
+       u8              enable_r6_writes : 1;
+       u8              lv_drive_type_mix_valid : 1;
+       u8              enable_stream_detection : 1;
+
+       u8              ciss_report_log_flags;
+       u32             max_transfer_encrypted_sas_sata;
+       u32             max_transfer_encrypted_nvme;
+       u32             max_write_raid_5_6;
+       u32             max_write_raid_1_10_2drive;
+       u32             max_write_raid_1_10_3drive;
 
        struct list_head scsi_device_list;
        spinlock_t      scsi_device_list_lock;
@@ -1178,14 +1338,14 @@ struct pqi_ctrl_info {
        atomic_t        num_blocked_threads;
        wait_queue_head_t block_requests_wait;
 
-       struct list_head raid_bypass_retry_list;
-       spinlock_t      raid_bypass_retry_list_lock;
-       struct work_struct raid_bypass_retry_work;
-
+       struct mutex    ofa_mutex;
        struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
        dma_addr_t      pqi_ofa_mem_dma_handle;
        void            **pqi_ofa_chunk_virt_addr;
-       atomic_t        sync_cmds_outstanding;
+       struct work_struct ofa_memory_alloc_work;
+       struct work_struct ofa_quiesce_work;
+       u32             ofa_bytes_requested;
+       u16             ofa_cancel_reason;
 };
 
 enum pqi_ctrl_mode {
@@ -1209,6 +1369,7 @@ enum pqi_ctrl_mode {
 #define BMIC_IDENTIFY_PHYSICAL_DEVICE          0x15
 #define BMIC_READ                              0x26
 #define BMIC_WRITE                             0x27
+#define BMIC_SENSE_FEATURE                     0x61
 #define BMIC_SENSE_CONTROLLER_PARAMETERS       0x64
 #define BMIC_SENSE_SUBSYSTEM_INFORMATION       0x66
 #define BMIC_CSMI_PASSTHRU                     0x68
@@ -1228,6 +1389,19 @@ enum pqi_ctrl_mode {
        (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
        CISS_GET_LEVEL_2_TARGET((lunid)))
 
+#define LV_GET_DRIVE_TYPE_MIX(lunid)           ((lunid)[6])
+
+#define LV_DRIVE_TYPE_MIX_UNKNOWN              0
+#define LV_DRIVE_TYPE_MIX_NO_RESTRICTION       1
+#define LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY         2
+#define LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY                3
+#define LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY 4
+#define LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY         5
+#define LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY                6
+#define LV_DRIVE_TYPE_MIX_SAS_ONLY             7
+#define LV_DRIVE_TYPE_MIX_SATA_ONLY            8
+#define LV_DRIVE_TYPE_MIX_NVME_ONLY            9
+
 #define NO_TIMEOUT             ((unsigned long) -1)
 
 #pragma pack(1)
@@ -1235,7 +1409,7 @@ enum pqi_ctrl_mode {
 struct bmic_identify_controller {
        u8      configured_logical_drive_count;
        __le32  configuration_signature;
-       u8      firmware_version[4];
+       u8      firmware_version_short[4];
        u8      reserved[145];
        __le16  extended_logical_unit_count;
        u8      reserved1[34];
@@ -1243,11 +1417,17 @@ struct bmic_identify_controller {
        u8      reserved2[8];
        u8      vendor_id[8];
        u8      product_id[16];
-       u8      reserved3[68];
+       u8      reserved3[62];
+       __le32  extra_controller_flags;
+       u8      reserved4[2];
        u8      controller_mode;
-       u8      reserved4[32];
+       u8      spare_part_number[32];
+       u8      firmware_version_long[32];
 };
 
+/* constants for extra_controller_flags field of bmic_identify_controller */
+#define BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED    0x20000000
+
 struct bmic_sense_subsystem_info {
        u8      reserved[44];
        u8      ctrl_serial_number[16];
@@ -1341,6 +1521,34 @@ struct bmic_identify_physical_device {
        u8      padding_to_multiple_of_512[9];
 };
 
+#define BMIC_SENSE_FEATURE_IO_PAGE             0x8
+#define BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE 0x2
+
+struct bmic_sense_feature_buffer_header {
+       u8      page_code;
+       u8      subpage_code;
+       __le16  buffer_length;
+};
+
+struct bmic_sense_feature_page_header {
+       u8      page_code;
+       u8      subpage_code;
+       __le16  page_length;
+};
+
+struct bmic_sense_feature_io_page_aio_subpage {
+       struct bmic_sense_feature_page_header header;
+       u8      firmware_read_support;
+       u8      driver_read_support;
+       u8      firmware_write_support;
+       u8      driver_write_support;
+       __le16  max_transfer_encrypted_sas_sata;
+       __le16  max_transfer_encrypted_nvme;
+       __le16  max_write_raid_5_6;
+       __le16  max_write_raid_1_10_2drive;
+       __le16  max_write_raid_1_10_3drive;
+};
+
 struct bmic_smp_request {
        u8      frame_type;
        u8      function;
@@ -1408,16 +1616,6 @@ struct bmic_diag_options {
 
 #pragma pack()
 
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
-       atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
-       atomic_dec(&ctrl_info->num_busy_threads);
-}
-
 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
 {
        void *hostdata = shost_priv(shost);
index c53f456..3b0f281 100644 (file)
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION         "1.2.16-012"
-#define DRIVER_MAJOR           1
-#define DRIVER_MINOR           2
-#define DRIVER_RELEASE         16
-#define DRIVER_REVISION                12
+#define DRIVER_VERSION         "2.1.8-045"
+#define DRIVER_MAJOR           2
+#define DRIVER_MINOR           1
+#define DRIVER_RELEASE         8
+#define DRIVER_REVISION                45
 
 #define DRIVER_NAME            "Microsemi PQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -45,6 +45,9 @@
 
 #define PQI_EXTRA_SGL_MEMORY   (12 * sizeof(struct pqi_sg_descriptor))
 
+#define PQI_POST_RESET_DELAY_SECS                      5
+#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS     10
+
 MODULE_AUTHOR("Microsemi");
 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
        DRIVER_VERSION);
@@ -54,7 +57,6 @@ MODULE_LICENSE("GPL");
 
 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ctrl_offline_worker(struct work_struct *work);
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
 static void pqi_scan_start(struct Scsi_Host *shost);
 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
@@ -62,20 +64,27 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_io_request *io_request);
 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
-       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+       struct pqi_raid_error_info *error_info);
 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
        unsigned int cdb_length, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd);
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd);
 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
-       u32 bytes_requested);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device, unsigned long timeout_secs);
+       struct pqi_scsi_dev *device, unsigned long timeout_msecs);
 
 /* for flags argument to pqi_submit_raid_request_synchronous() */
 #define PQI_SYNC_FLAGS_INTERRUPTABLE   0x1
@@ -148,14 +157,12 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
 static int pqi_expose_ld_first;
 module_param_named(expose_ld_first,
        pqi_expose_ld_first, int, 0644);
-MODULE_PARM_DESC(expose_ld_first,
-       "Expose logical drives before physical drives.");
+MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
 
 static int pqi_hide_vsep;
 module_param_named(hide_vsep,
        pqi_hide_vsep, int, 0644);
-MODULE_PARM_DESC(hide_vsep,
-       "Hide the virtual SEP for direct attached drives.");
+MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
 
 static char *raid_levels[] = {
        "RAID-0",
@@ -163,8 +170,8 @@ static char *raid_levels[] = {
        "RAID-1(1+0)",
        "RAID-5",
        "RAID-5+1",
-       "RAID-ADG",
-       "RAID-1(ADM)",
+       "RAID-6",
+       "RAID-1(Triple)",
 };
 
 static char *pqi_raid_level_to_string(u8 raid_level)
@@ -181,8 +188,8 @@ static char *pqi_raid_level_to_string(u8 raid_level)
 #define SA_RAID_5              3       /* also used for RAID 50 */
 #define SA_RAID_51             4
 #define SA_RAID_6              5       /* also used for RAID 60 */
-#define SA_RAID_ADM            6       /* also used for RAID 1+0 ADM */
-#define SA_RAID_MAX            SA_RAID_ADM
+#define SA_RAID_TRIPLE         6       /* also used for RAID 1+0 Triple */
+#define SA_RAID_MAX            SA_RAID_TRIPLE
 #define SA_RAID_UNKNOWN                0xff
 
 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
@@ -228,8 +235,7 @@ static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
        return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
 }
 
-static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
-       struct pqi_ctrl_info *ctrl_info)
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
 {
        return sis_read_driver_scratch(ctrl_info);
 }
@@ -240,14 +246,66 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
        sis_write_driver_scratch(ctrl_info, mode);
 }
 
+static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
+{
+       ctrl_info->scan_blocked = true;
+       mutex_lock(&ctrl_info->scan_mutex);
+}
+
+static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
+{
+       ctrl_info->scan_blocked = false;
+       mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+       return ctrl_info->scan_blocked;
+}
+
 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
 {
-       ctrl_info->block_device_reset = true;
+       mutex_lock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+       mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
+{
+       struct Scsi_Host *shost;
+       unsigned int num_loops;
+       int msecs_sleep;
+
+       shost = ctrl_info->scsi_host;
+
+       scsi_block_requests(shost);
+
+       num_loops = 0;
+       msecs_sleep = 20;
+       while (scsi_host_busy(shost)) {
+               num_loops++;
+               if (num_loops == 10)
+                       msecs_sleep = 500;
+               msleep(msecs_sleep);
+       }
+}
+
+static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
+{
+       scsi_unblock_requests(ctrl_info->scsi_host);
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+       atomic_inc(&ctrl_info->num_busy_threads);
 }
 
-static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
 {
-       return ctrl_info->block_device_reset;
+       atomic_dec(&ctrl_info->num_busy_threads);
 }
 
 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
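
pqi_scsi_block_requests() above pairs scsi_block_requests() with a drain
loop: it polls scsi_host_busy() until all in-flight commands complete,
sleeping 20 ms per iteration at first and backing off to 500 ms after ten
loops. The block-then-drain idiom in isolation (a minimal sketch under the
same assumptions):

static void example_block_and_drain(struct Scsi_Host *shost)
{
	unsigned int loops = 0;
	int msecs = 20;

	scsi_block_requests(shost);	/* stop new commands */

	while (scsi_host_busy(shost)) {	/* wait out in-flight ones */
		if (++loops == 10)
			msecs = 500;	/* back off once it drags on */
		msleep(msecs);
	}
}
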
@@ -258,51 +316,53 @@ static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
 {
        ctrl_info->block_requests = true;
-       scsi_block_requests(ctrl_info->scsi_host);
 }
 
 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
 {
        ctrl_info->block_requests = false;
        wake_up_all(&ctrl_info->block_requests_wait);
-       pqi_retry_raid_bypass_requests(ctrl_info);
-       scsi_unblock_requests(ctrl_info->scsi_host);
 }
 
-static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
-       unsigned long timeout_msecs)
+static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
 {
-       unsigned long remaining_msecs;
-
        if (!pqi_ctrl_blocked(ctrl_info))
-               return timeout_msecs;
+               return;
 
        atomic_inc(&ctrl_info->num_blocked_threads);
-
-       if (timeout_msecs == NO_TIMEOUT) {
-               wait_event(ctrl_info->block_requests_wait,
-                       !pqi_ctrl_blocked(ctrl_info));
-               remaining_msecs = timeout_msecs;
-       } else {
-               unsigned long remaining_jiffies;
-
-               remaining_jiffies =
-                       wait_event_timeout(ctrl_info->block_requests_wait,
-                               !pqi_ctrl_blocked(ctrl_info),
-                               msecs_to_jiffies(timeout_msecs));
-               remaining_msecs = jiffies_to_msecs(remaining_jiffies);
-       }
-
+       wait_event(ctrl_info->block_requests_wait,
+               !pqi_ctrl_blocked(ctrl_info));
        atomic_dec(&ctrl_info->num_blocked_threads);
-
-       return remaining_msecs;
 }
 
+#define PQI_QUIESCE_WARNING_TIMEOUT_SECS               10
+
 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
 {
+       unsigned long start_jiffies;
+       unsigned long warning_timeout;
+       bool displayed_warning;
+
+       displayed_warning = false;
+       start_jiffies = jiffies;
+       warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+
        while (atomic_read(&ctrl_info->num_busy_threads) >
-               atomic_read(&ctrl_info->num_blocked_threads))
+               atomic_read(&ctrl_info->num_blocked_threads)) {
+               if (time_after(jiffies, warning_timeout)) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "waiting %u seconds for driver activity to quiesce\n",
+                               jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+                       displayed_warning = true;
+                       warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+               }
                usleep_range(1000, 2000);
+       }
+
+       if (displayed_warning)
+               dev_warn(&ctrl_info->pci_dev->dev,
+                       "driver activity quiesced after waiting for %u seconds\n",
+                       jiffies_to_msecs(jiffies - start_jiffies) / 1000);
 }
 
 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
@@ -310,34 +370,25 @@ static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
        return device->device_offline;
 }
 
-static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
-{
-       device->in_reset = true;
-}
-
-static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
-{
-       device->in_reset = false;
-}
-
-static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
 {
-       return device->in_reset;
+       mutex_lock(&ctrl_info->ofa_mutex);
 }
 
-static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
 {
-       ctrl_info->in_ofa = true;
+       mutex_unlock(&ctrl_info->ofa_mutex);
 }
 
-static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
 {
-       ctrl_info->in_ofa = false;
+       mutex_lock(&ctrl_info->ofa_mutex);
+       mutex_unlock(&ctrl_info->ofa_mutex);
 }
 
-static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
 {
-       return ctrl_info->in_ofa;
+       return mutex_is_locked(&ctrl_info->ofa_mutex);
 }
 
 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
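
The OFA bookkeeping above collapses several flags into the ofa_mutex itself:
"OFA in progress" is now simply "mutex held" (mutex_is_locked()), and
pqi_wait_until_ofa_finished() synchronizes with an in-flight OFA by taking
and immediately dropping the lock. That lock-as-barrier idiom in isolation:

static DEFINE_MUTEX(example_ofa_mutex);

static void example_wait_for_ofa(void)
{
	/* Blocks until whoever started OFA releases the mutex ... */
	mutex_lock(&example_ofa_mutex);
	/* ... then lets the next waiter (or a new OFA) proceed. */
	mutex_unlock(&example_ofa_mutex);
}
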
@@ -350,23 +401,27 @@ static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
        return device->in_remove;
 }
 
-static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+static inline int pqi_event_type_to_event_index(unsigned int event_type)
 {
-       ctrl_info->in_shutdown = true;
+       int index;
+
+       for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+               if (event_type == pqi_supported_event_types[index])
+                       return index;
+
+       return -1;
 }
 
-static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_is_supported_event(unsigned int event_type)
 {
-       return ctrl_info->in_shutdown;
+       return pqi_event_type_to_event_index(event_type) != -1;
 }
 
-static inline void pqi_schedule_rescan_worker_with_delay(
-       struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
+       unsigned long delay)
 {
        if (pqi_ctrl_offline(ctrl_info))
                return;
-       if (pqi_ctrl_in_ofa(ctrl_info))
-               return;
 
        schedule_delayed_work(&ctrl_info->rescan_work, delay);
 }
@@ -378,8 +433,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
 
 #define PQI_RESCAN_WORK_DELAY  (10 * PQI_HZ)
 
-static inline void pqi_schedule_rescan_worker_delayed(
-       struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
 {
        pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
 }
@@ -404,22 +458,15 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
 
 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
 {
-       if (!ctrl_info->soft_reset_status)
-               return 0;
-
        return readb(ctrl_info->soft_reset_status);
 }
 
-static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
-       u8 clear)
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
 {
        u8 status;
 
-       if (!ctrl_info->soft_reset_status)
-               return;
-
        status = pqi_read_soft_reset_status(ctrl_info);
-       status &= ~clear;
+       status &= ~PQI_SOFT_RESET_ABORT;
        writeb(status, ctrl_info->soft_reset_status);
 }
 
@@ -498,7 +545,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
                if (cmd == CISS_REPORT_PHYS)
                        cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
                else
-                       cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+                       cdb[1] = ctrl_info->ciss_report_log_flags;
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case CISS_GET_RAID_MAP:
@@ -508,6 +555,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case SA_FLUSH_CACHE:
+               request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = BMIC_FLUSH_CACHE;
@@ -519,6 +567,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
        case BMIC_IDENTIFY_CONTROLLER:
        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
        case BMIC_SENSE_SUBSYSTEM_INFORMATION:
+       case BMIC_SENSE_FEATURE:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
@@ -601,20 +650,18 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
 
 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
-       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+       struct pqi_raid_error_info *error_info)
 {
        int rc;
        struct pqi_raid_path_request request;
        enum dma_data_direction dir;
 
-       rc = pqi_build_raid_path_request(ctrl_info, &request,
-               cmd, scsi3addr, buffer,
-               buffer_length, vpd_page, &dir);
+       rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
+               buffer, buffer_length, vpd_page, &dir);
        if (rc)
                return rc;
 
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-               error_info, timeout_msecs);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
 
        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
@@ -627,7 +674,7 @@ static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
        u8 cmd, void *buffer, size_t buffer_length)
 {
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
-               buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+               buffer, buffer_length, 0, NULL);
 }
 
 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
@@ -635,7 +682,7 @@ static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_error_info *error_info)
 {
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
-               buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+               buffer, buffer_length, 0, error_info);
 }
 
 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
@@ -657,7 +704,7 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 {
        return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
-               buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+               buffer, buffer_length, vpd_page, NULL);
 }
 
 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
@@ -679,11 +726,107 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        request.cdb[2] = (u8)bmic_device_index;
        request.cdb[9] = (u8)(bmic_device_index >> 8);
 
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
+
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
+       return rc;
+}
+
+static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
+{
+       u32 bytes;
+
+       bytes = get_unaligned_le16(limit);
+       if (bytes == 0)
+               bytes = ~0;
+       else
+               bytes *= 1024;
+
+       return bytes;
+}
+
+#pragma pack(1)
+
+struct bmic_sense_feature_buffer {
+       struct bmic_sense_feature_buffer_header header;
+       struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
+};
+
+#pragma pack()
+
+#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH      \
+       offsetofend(struct bmic_sense_feature_buffer, \
+               aio_subpage.max_write_raid_1_10_3drive)
+
+#define MINIMUM_AIO_SUBPAGE_LENGTH     \
+       (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
+               max_write_raid_1_10_3drive) - \
+               sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
+
+static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
+{
+       int rc;
+       enum dma_data_direction dir;
+       struct pqi_raid_path_request request;
+       struct bmic_sense_feature_buffer *buffer;
+
+       buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
+               buffer, sizeof(*buffer), 0, &dir);
+       if (rc)
+               goto error;
+
+       request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
+       request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
+
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 
        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
+       if (rc)
+               goto error;
+
+       if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
+               buffer->header.subpage_code !=
+                       BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+               get_unaligned_le16(&buffer->header.buffer_length) <
+                       MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
+               buffer->aio_subpage.header.page_code !=
+                       BMIC_SENSE_FEATURE_IO_PAGE ||
+               buffer->aio_subpage.header.subpage_code !=
+                       BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+               get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
+                       MINIMUM_AIO_SUBPAGE_LENGTH) {
+               goto error;
+       }
+
+       ctrl_info->max_transfer_encrypted_sas_sata =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
+
+       ctrl_info->max_transfer_encrypted_nvme =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_transfer_encrypted_nvme);
+
+       ctrl_info->max_write_raid_5_6 =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_write_raid_5_6);
+
+       ctrl_info->max_write_raid_1_10_2drive =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_write_raid_1_10_2drive);
+
+       ctrl_info->max_write_raid_1_10_3drive =
+               pqi_aio_limit_to_bytes(
+                       &buffer->aio_subpage.max_write_raid_1_10_3drive);
+
+error:
+       kfree(buffer);
+
        return rc;
 }
 
@@ -693,13 +836,6 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
        int rc;
        struct bmic_flush_cache *flush_cache;
 
-       /*
-        * Don't bother trying to flush the cache if the controller is
-        * locked up.
-        */
-       if (pqi_ctrl_offline(ctrl_info))
-               return -ENXIO;
-
        flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
        if (!flush_cache)
                return -ENOMEM;
@@ -878,9 +1014,6 @@ static void pqi_update_time_worker(struct work_struct *work)
        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
                update_time_work);
 
-       if (pqi_ctrl_offline(ctrl_info))
-               return;
-
        rc = pqi_write_current_time_to_host_wellness(ctrl_info);
        if (rc)
                dev_warn(&ctrl_info->pci_dev->dev,
@@ -890,27 +1023,23 @@ static void pqi_update_time_worker(struct work_struct *work)
                PQI_UPDATE_TIME_WORK_INTERVAL);
 }
 
-static inline void pqi_schedule_update_time_worker(
-       struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
 {
        schedule_delayed_work(&ctrl_info->update_time_work, 0);
 }
 
-static inline void pqi_cancel_update_time_worker(
-       struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
 {
        cancel_delayed_work_sync(&ctrl_info->update_time_work);
 }
 
-static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
-       void *buffer, size_t buffer_length)
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
+       size_t buffer_length)
 {
-       return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
-               buffer_length);
+       return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
 }
 
-static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
-       void **buffer)
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
 {
        int rc;
        size_t lun_list_length;
@@ -925,8 +1054,7 @@ static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
                goto out;
        }
 
-       rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
-               sizeof(*report_lun_header));
+       rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
        if (rc)
                goto out;
 
@@ -950,8 +1078,8 @@ again:
        if (rc)
                goto out;
 
-       new_lun_list_length = get_unaligned_be32(
-               &((struct report_lun_header *)lun_data)->list_length);
+       new_lun_list_length =
+               get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
 
        if (new_lun_list_length > lun_list_length) {
                lun_list_length = new_lun_list_length;
@@ -972,15 +1100,12 @@ out:
        return rc;
 }
 
-static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
-       void **buffer)
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
 {
-       return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
-               buffer);
+       return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
 }
 
-static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
-       void **buffer)
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
 {
        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
 }
@@ -1137,9 +1262,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
                        err_msg = "invalid RAID-1 map";
                        goto bad_raid_map;
                }
-       } else if (device->raid_level == SA_RAID_ADM) {
+       } else if (device->raid_level == SA_RAID_TRIPLE) {
                if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
-                       err_msg = "invalid RAID-1(ADM) map";
+                       err_msg = "invalid RAID-1(Triple) map";
                        goto bad_raid_map;
                }
        } else if ((device->raid_level == SA_RAID_5 ||
@@ -1178,9 +1303,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
                return -ENOMEM;
 
        rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
-               device->scsi3addr, raid_map, sizeof(*raid_map),
-               0, NULL, NO_TIMEOUT);
-
+               device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
        if (rc)
                goto error;
 
@@ -1195,15 +1318,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
                        return -ENOMEM;
 
                rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
-                       device->scsi3addr, raid_map, raid_map_size,
-                       0, NULL, NO_TIMEOUT);
+                       device->scsi3addr, raid_map, raid_map_size, 0, NULL);
                if (rc)
                        goto error;
 
                if (get_unaligned_le32(&raid_map->structure_size)
                        != raid_map_size) {
                        dev_warn(&ctrl_info->pci_dev->dev,
-                               "Requested %d bytes, received %d bytes",
+                               "requested %u bytes, received %u bytes\n",
                                raid_map_size,
                                get_unaligned_le32(&raid_map->structure_size));
                        goto error;
@@ -1224,6 +1346,39 @@ error:
        return rc;
 }
 
+static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device)
+{
+       if (!ctrl_info->lv_drive_type_mix_valid) {
+               device->max_transfer_encrypted = ~0;
+               return;
+       }
+
+       switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
+       case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
+       case LV_DRIVE_TYPE_MIX_SAS_ONLY:
+       case LV_DRIVE_TYPE_MIX_SATA_ONLY:
+               device->max_transfer_encrypted =
+                       ctrl_info->max_transfer_encrypted_sas_sata;
+               break;
+       case LV_DRIVE_TYPE_MIX_NVME_ONLY:
+               device->max_transfer_encrypted =
+                       ctrl_info->max_transfer_encrypted_nvme;
+               break;
+       case LV_DRIVE_TYPE_MIX_UNKNOWN:
+       case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
+       default:
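+               /* Unknown or unrestricted mix: use the more restrictive limit. */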
+               device->max_transfer_encrypted =
+                       min(ctrl_info->max_transfer_encrypted_sas_sata,
+                               ctrl_info->max_transfer_encrypted_nvme);
+               break;
+       }
+}
+
 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
 {
@@ -1249,8 +1404,12 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
                (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
        if (device->raid_bypass_configured &&
                (bypass_status & RAID_BYPASS_ENABLED) &&
-               pqi_get_raid_map(ctrl_info, device) == 0)
+               pqi_get_raid_map(ctrl_info, device) == 0) {
                device->raid_bypass_enabled = true;
+               if (get_unaligned_le16(&device->raid_map->flags) &
+                       RAID_MAP_ENCRYPTION_ENABLED)
+                       pqi_set_max_transfer_encrypted(ctrl_info, device);
+       }
 
 out:
        kfree(buffer);
@@ -1298,6 +1457,8 @@ no_buffer:
        device->volume_offline = volume_offline;
 }
 
+#define PQI_DEVICE_PHY_MAP_SUPPORTED   0x10
+
 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device,
        struct bmic_identify_physical_device *id_phys)
@@ -1334,6 +1495,16 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
                sizeof(device->phys_connector));
        device->bay = id_phys->phys_bay_in_box;
 
+       memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
+               sizeof(device->page_83_identifier));
+
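+       /*
+        * Record which controller PHY the device sits behind when the
+        * firmware supplies a PHY map; 0xFF means no PHY mapping is
+        * available.
+        */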
+       if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+               id_phys->phy_count)
+               device->phy_id =
+                       id_phys->phy_to_phy_map[device->active_path_index];
+       else
+               device->phy_id = 0xFF;
+
        return 0;
 }
 
@@ -1521,16 +1692,16 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
        return rc;
 }
 
-#define PQI_PENDING_IO_TIMEOUT_SECS    20
+#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS     (20 * 1000)
 
-static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device)
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
 {
        int rc;
 
        pqi_device_remove_start(device);
 
-       rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
+       rc = pqi_device_wait_for_pending_io(ctrl_info, device,
+               PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
@@ -1558,8 +1729,7 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
        return NULL;
 }
 
-static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
-       struct pqi_scsi_dev *dev2)
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
 {
        if (dev1->is_physical_device != dev2->is_physical_device)
                return false;
@@ -1567,8 +1737,7 @@ static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
        if (dev1->is_physical_device)
                return dev1->wwid == dev2->wwid;
 
-       return memcmp(dev1->volume_id, dev2->volume_id,
-               sizeof(dev1->volume_id)) == 0;
+       return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
 }
 
 enum pqi_find_result {
@@ -1613,7 +1782,7 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
        ssize_t count;
        char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
 
-       count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+       count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
                "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
 
        if (device->target_lun_valid)
@@ -1671,7 +1840,6 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
        struct pqi_scsi_dev *new_device)
 {
-       existing_device->devtype = new_device->devtype;
        existing_device->device_type = new_device->device_type;
        existing_device->bus = new_device->bus;
        if (new_device->target_lun_valid) {
@@ -1703,17 +1871,17 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
        existing_device->aio_handle = new_device->aio_handle;
        existing_device->volume_status = new_device->volume_status;
        existing_device->active_path_index = new_device->active_path_index;
+       existing_device->phy_id = new_device->phy_id;
        existing_device->path_map = new_device->path_map;
        existing_device->bay = new_device->bay;
        existing_device->box_index = new_device->box_index;
        existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
-       existing_device->phy_connected_dev_type =
-               new_device->phy_connected_dev_type;
+       existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
        memcpy(existing_device->box, new_device->box,
                sizeof(existing_device->box));
        memcpy(existing_device->phys_connector, new_device->phys_connector,
                sizeof(existing_device->phys_connector));
-       existing_device->offload_to_mirror = 0;
+       existing_device->next_bypass_group = 0;
        kfree(existing_device->raid_map);
        existing_device->raid_map = new_device->raid_map;
        existing_device->raid_bypass_configured =
@@ -1844,8 +2012,18 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-       if (pqi_ctrl_in_ofa(ctrl_info))
-               pqi_ctrl_ofa_done(ctrl_info);
+       /*
+        * If OFA is in progress and there are devices that need to be deleted,
+        * allow any pending reset operations to continue and unblock any SCSI
+        * requests before removal.
+        */
+       if (pqi_ofa_in_progress(ctrl_info)) {
+               list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
+                       if (pqi_is_device_added(device))
+                               pqi_device_remove_start(device);
+               pqi_ctrl_unblock_device_reset(ctrl_info);
+               pqi_scsi_unblock_requests(ctrl_info);
+       }
 
        /* Remove all devices that have gone away. */
        list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
@@ -1867,15 +2045,10 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
         * Notify the SCSI ML if the queue depth of any existing device has
         * changed.
         */
-       list_for_each_entry(device, &ctrl_info->scsi_device_list,
-               scsi_device_list_entry) {
-               if (device->sdev) {
-                       if (device->queue_depth !=
-                               device->advertised_queue_depth) {
-                               device->advertised_queue_depth = device->queue_depth;
-                               scsi_change_queue_depth(device->sdev,
-                                       device->advertised_queue_depth);
-                       }
+       list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+               if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
+                       device->advertised_queue_depth = device->queue_depth;
+                       scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
                        if (device->rescan) {
                                scsi_rescan_device(&device->sdev->sdev_gendev);
                                device->rescan = false;
@@ -1911,7 +2084,7 @@ static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
         */
        if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
                !pqi_is_hba_lunid(device->scsi3addr))
-               return false;
+               return false;
 
        return true;
 }
@@ -1944,8 +2117,17 @@ static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
 
 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
 {
-       return !device->is_physical_device ||
-               !pqi_skip_device(device->scsi3addr);
+       return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
+}
+
+static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+{
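+       /*
+        * Use the WWID from the extended physical LUN entry when the
+        * controller guarantees unique WWIDs or the device has a SAS
+        * address; otherwise fall back to the page 0x83 identifier.
+        */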
+       if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
+               pqi_is_device_with_sas_address(device))
+               device->wwid = phys_lun_ext_entry->wwid;
+       else
+               device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier));
 }
 
 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -2009,17 +2191,18 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
                        for (i = num_physicals - 1; i >= 0; i--) {
                                phys_lun_ext_entry =
                                                &physdev_list->lun_entries[i];
-                               if (CISS_GET_DRIVE_NUMBER(
-                                       phys_lun_ext_entry->lunid) ==
-                                               PQI_VSEP_CISS_BTL) {
-                                       pqi_mask_device(
-                                               phys_lun_ext_entry->lunid);
+                               if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
+                                       pqi_mask_device(phys_lun_ext_entry->lunid);
                                        break;
                                }
                        }
                }
        }
 
+       if (num_logicals &&
+               (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
+               ctrl_info->lv_drive_type_mix_valid = true;
+
        num_new_devices = num_physicals + num_logicals;
 
        new_device_list = kmalloc_array(num_new_devices,
@@ -2099,8 +2282,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
                        if (device->is_physical_device)
                                dev_warn(&ctrl_info->pci_dev->dev,
                                        "obtaining device info failed, skipping physical device %016llx\n",
-                                       get_unaligned_be64(
-                                               &phys_lun_ext_entry->wwid));
+                                       get_unaligned_be64(&phys_lun_ext_entry->wwid));
                        else
                                dev_warn(&ctrl_info->pci_dev->dev,
                                        "obtaining device info failed, skipping logical device %08x%08x\n",
@@ -2113,13 +2295,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
                pqi_assign_bus_target_lun(device);
 
                if (device->is_physical_device) {
-                       device->wwid = phys_lun_ext_entry->wwid;
+                       pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry);
                        if ((phys_lun_ext_entry->device_flags &
                                CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
                                phys_lun_ext_entry->aio_handle) {
-                               device->aio_enabled = true;
-                               device->aio_handle =
-                                       phys_lun_ext_entry->aio_handle;
+                               device->aio_enabled = true;
+                               device->aio_handle = phys_lun_ext_entry->aio_handle;
                        }
                } else {
                        memcpy(device->volume_id, log_lun_ext_entry->volume_id,
@@ -2153,21 +2335,27 @@ out:
 
 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
-       int rc = 0;
+       int rc;
+       int mutex_acquired;
 
        if (pqi_ctrl_offline(ctrl_info))
                return -ENXIO;
 
-       if (!mutex_trylock(&ctrl_info->scan_mutex)) {
+       mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+
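+       /*
+        * If another scan already holds the mutex: fail while scanning is
+        * blocked, otherwise defer to the delayed rescan worker.
+        */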
+       if (!mutex_acquired) {
+               if (pqi_ctrl_scan_blocked(ctrl_info))
+                       return -EBUSY;
                pqi_schedule_rescan_worker_delayed(ctrl_info);
-               rc = -EINPROGRESS;
-       } else {
-               rc = pqi_update_scsi_devices(ctrl_info);
-               if (rc)
-                       pqi_schedule_rescan_worker_delayed(ctrl_info);
-               mutex_unlock(&ctrl_info->scan_mutex);
+               return -EINPROGRESS;
        }
 
+       rc = pqi_update_scsi_devices(ctrl_info);
+       if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
+               pqi_schedule_rescan_worker_delayed(ctrl_info);
+
+       mutex_unlock(&ctrl_info->scan_mutex);
+
        return rc;
 }
 
@@ -2176,8 +2364,6 @@ static void pqi_scan_start(struct Scsi_Host *shost)
        struct pqi_ctrl_info *ctrl_info;
 
        ctrl_info = shost_to_hba(shost);
-       if (pqi_ctrl_in_ofa(ctrl_info))
-               return;
 
        pqi_scan_scsi_devices(ctrl_info);
 }
@@ -2194,27 +2380,8 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
        return !mutex_is_locked(&ctrl_info->scan_mutex);
 }
 
-static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
-{
-       mutex_lock(&ctrl_info->scan_mutex);
-       mutex_unlock(&ctrl_info->scan_mutex);
-}
-
-static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
-{
-       mutex_lock(&ctrl_info->lun_reset_mutex);
-       mutex_unlock(&ctrl_info->lun_reset_mutex);
-}
-
-static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
-{
-       mutex_lock(&ctrl_info->ofa_mutex);
-       mutex_unlock(&ctrl_info->ofa_mutex);
-}
-
-static inline void pqi_set_encryption_info(
-       struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
-       u64 first_block)
+static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
+       struct raid_map *raid_map, u64 first_block)
 {
        u32 volume_blk_size;
 
@@ -2237,332 +2404,415 @@ static inline void pqi_set_encryption_info(
  * Attempt to perform RAID bypass mapping for a logical volume I/O.
  */
 
+static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+{
+       bool is_supported = true;
+
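+       /*
+        * RAID 0 requests always qualify; for the other levels, writes
+        * qualify only if the matching enable_rX_writes flag is set and
+        * the request fits the per-level write size limit.
+        */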
+       switch (rmd->raid_level) {
+       case SA_RAID_0:
+               break;
+       case SA_RAID_1:
+               if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
+                       is_supported = false;
+               break;
+       case SA_RAID_TRIPLE:
+               if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
+                       is_supported = false;
+               break;
+       case SA_RAID_5:
+               if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_5_6))
+                       is_supported = false;
+               break;
+       case SA_RAID_6:
+               if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
+                       rmd->data_length > ctrl_info->max_write_raid_5_6))
+                       is_supported = false;
+               break;
+       default:
+               is_supported = false;
+               break;
+       }
+
+       return is_supported;
+}
+
 #define PQI_RAID_BYPASS_INELIGIBLE     1
 
-static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
-       struct pqi_queue_group *queue_group)
+static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
+       struct pqi_scsi_dev_raid_map_data *rmd)
 {
-       struct raid_map *raid_map;
-       bool is_write = false;
-       u32 map_index;
-       u64 first_block;
-       u64 last_block;
-       u32 block_cnt;
-       u32 blocks_per_row;
-       u64 first_row;
-       u64 last_row;
-       u32 first_row_offset;
-       u32 last_row_offset;
-       u32 first_column;
-       u32 last_column;
-       u64 r0_first_row;
-       u64 r0_last_row;
-       u32 r5or6_blocks_per_row;
-       u64 r5or6_first_row;
-       u64 r5or6_last_row;
-       u32 r5or6_first_row_offset;
-       u32 r5or6_last_row_offset;
-       u32 r5or6_first_column;
-       u32 r5or6_last_column;
-       u16 data_disks_per_row;
-       u32 total_disks_per_row;
-       u16 layout_map_count;
-       u32 stripesize;
-       u16 strip_size;
-       u32 first_group;
-       u32 last_group;
-       u32 current_group;
-       u32 map_row;
-       u32 aio_handle;
-       u64 disk_block;
-       u32 disk_block_cnt;
-       u8 cdb[16];
-       u8 cdb_length;
-       int offload_to_mirror;
-       struct pqi_encryption_info *encryption_info_ptr;
-       struct pqi_encryption_info encryption_info;
-#if BITS_PER_LONG == 32
-       u64 tmpdiv;
-#endif
-
        /* Check for valid opcode, get LBA and block count. */
        switch (scmd->cmnd[0]) {
        case WRITE_6:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_6:
-               first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+               rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
                        (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
-               block_cnt = (u32)scmd->cmnd[4];
-               if (block_cnt == 0)
-                       block_cnt = 256;
+               rmd->block_cnt = (u32)scmd->cmnd[4];
+               if (rmd->block_cnt == 0)
+                       rmd->block_cnt = 256;
                break;
        case WRITE_10:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_10:
-               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
-               block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+               rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+               rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
                break;
        case WRITE_12:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_12:
-               first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
-               block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+               rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+               rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
                break;
        case WRITE_16:
-               is_write = true;
+               rmd->is_write = true;
                fallthrough;
        case READ_16:
-               first_block = get_unaligned_be64(&scmd->cmnd[2]);
-               block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+               rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
+               rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
                break;
        default:
                /* Process via normal I/O path. */
                return PQI_RAID_BYPASS_INELIGIBLE;
        }
 
-       /* Check for write to non-RAID-0. */
-       if (is_write && device->raid_level != SA_RAID_0)
-               return PQI_RAID_BYPASS_INELIGIBLE;
+       put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
 
-       if (unlikely(block_cnt == 0))
-               return PQI_RAID_BYPASS_INELIGIBLE;
+       return 0;
+}
 
-       last_block = first_block + block_cnt - 1;
-       raid_map = device->raid_map;
+static int pqi_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
+{
+#if BITS_PER_LONG == 32
+       u64 tmpdiv;
+#endif
+
+       rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
 
        /* Check for invalid block or wraparound. */
-       if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
-               last_block < first_block)
+       if (rmd->last_block >=
+               get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+               rmd->last_block < rmd->first_block)
                return PQI_RAID_BYPASS_INELIGIBLE;
 
-       data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
-       strip_size = get_unaligned_le16(&raid_map->strip_size);
-       layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+       rmd->data_disks_per_row =
+               get_unaligned_le16(&raid_map->data_disks_per_row);
+       rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
+       rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
 
        /* Calculate stripe information for the request. */
-       blocks_per_row = data_disks_per_row * strip_size;
+       rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
 #if BITS_PER_LONG == 32
-       tmpdiv = first_block;
-       do_div(tmpdiv, blocks_per_row);
-       first_row = tmpdiv;
-       tmpdiv = last_block;
-       do_div(tmpdiv, blocks_per_row);
-       last_row = tmpdiv;
-       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
-       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
-       tmpdiv = first_row_offset;
-       do_div(tmpdiv, strip_size);
-       first_column = tmpdiv;
-       tmpdiv = last_row_offset;
-       do_div(tmpdiv, strip_size);
-       last_column = tmpdiv;
+       tmpdiv = rmd->first_block;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->first_row = tmpdiv;
+       tmpdiv = rmd->last_block;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->last_row = tmpdiv;
+       rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
+       rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
+       tmpdiv = rmd->first_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->first_column = tmpdiv;
+       tmpdiv = rmd->last_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->last_column = tmpdiv;
 #else
-       first_row = first_block / blocks_per_row;
-       last_row = last_block / blocks_per_row;
-       first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
-       last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
-       first_column = first_row_offset / strip_size;
-       last_column = last_row_offset / strip_size;
+       rmd->first_row = rmd->first_block / rmd->blocks_per_row;
+       rmd->last_row = rmd->last_block / rmd->blocks_per_row;
+       rmd->first_row_offset = (u32)(rmd->first_block -
+               (rmd->first_row * rmd->blocks_per_row));
+       rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
+               rmd->blocks_per_row));
+       rmd->first_column = rmd->first_row_offset / rmd->strip_size;
+       rmd->last_column = rmd->last_row_offset / rmd->strip_size;
 #endif
 
        /* If this isn't a single row/column then give to the controller. */
-       if (first_row != last_row || first_column != last_column)
+       if (rmd->first_row != rmd->last_row ||
+               rmd->first_column != rmd->last_column)
                return PQI_RAID_BYPASS_INELIGIBLE;
 
        /* Proceeding with driver mapping. */
-       total_disks_per_row = data_disks_per_row +
+       rmd->total_disks_per_row = rmd->data_disks_per_row +
                get_unaligned_le16(&raid_map->metadata_disks_per_row);
-       map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+       rmd->map_row = ((u32)(rmd->first_row >>
+               raid_map->parity_rotation_shift)) %
                get_unaligned_le16(&raid_map->row_cnt);
-       map_index = (map_row * total_disks_per_row) + first_column;
+       rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
+               rmd->first_column;
 
-       /* RAID 1 */
-       if (device->raid_level == SA_RAID_1) {
-               if (device->offload_to_mirror)
-                       map_index += data_disks_per_row;
-               device->offload_to_mirror = !device->offload_to_mirror;
-       } else if (device->raid_level == SA_RAID_ADM) {
-               /* RAID ADM */
-               /*
-                * Handles N-way mirrors  (R1-ADM) and R10 with # of drives
-                * divisible by 3.
-                */
-               offload_to_mirror = device->offload_to_mirror;
-               if (offload_to_mirror == 0)  {
-                       /* use physical disk in the first mirrored group. */
-                       map_index %= data_disks_per_row;
-               } else {
-                       do {
-                               /*
-                                * Determine mirror group that map_index
-                                * indicates.
-                                */
-                               current_group = map_index / data_disks_per_row;
-
-                               if (offload_to_mirror != current_group) {
-                                       if (current_group <
-                                               layout_map_count - 1) {
-                                               /*
-                                                * Select raid index from
-                                                * next group.
-                                                */
-                                               map_index += data_disks_per_row;
-                                               current_group++;
-                                       } else {
-                                               /*
-                                                * Select raid index from first
-                                                * group.
-                                                */
-                                               map_index %= data_disks_per_row;
-                                               current_group = 0;
-                                       }
-                               }
-                       } while (offload_to_mirror != current_group);
-               }
+       return 0;
+}
 
-               /* Set mirror group to use next time. */
-               offload_to_mirror =
-                       (offload_to_mirror >= layout_map_count - 1) ?
-                               0 : offload_to_mirror + 1;
-               device->offload_to_mirror = offload_to_mirror;
-               /*
-                * Avoid direct use of device->offload_to_mirror within this
-                * function since multiple threads might simultaneously
-                * increment it beyond the range of device->layout_map_count -1.
-                */
-       } else if ((device->raid_level == SA_RAID_5 ||
-               device->raid_level == SA_RAID_6) && layout_map_count > 1) {
-               /* RAID 50/60 */
-               /* Verify first and last block are in same RAID group */
-               r5or6_blocks_per_row = strip_size * data_disks_per_row;
-               stripesize = r5or6_blocks_per_row * layout_map_count;
+static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
+       struct raid_map *raid_map)
+{
+#if BITS_PER_LONG == 32
+       u64 tmpdiv;
+#endif
+       /* RAID 50/60 */
+       /* Verify first and last block are in same RAID group. */
+       rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
 #if BITS_PER_LONG == 32
-               tmpdiv = first_block;
-               first_group = do_div(tmpdiv, stripesize);
-               tmpdiv = first_group;
-               do_div(tmpdiv, r5or6_blocks_per_row);
-               first_group = tmpdiv;
-               tmpdiv = last_block;
-               last_group = do_div(tmpdiv, stripesize);
-               tmpdiv = last_group;
-               do_div(tmpdiv, r5or6_blocks_per_row);
-               last_group = tmpdiv;
+       tmpdiv = rmd->first_block;
+       rmd->first_group = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->first_group;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->first_group = tmpdiv;
+       tmpdiv = rmd->last_block;
+       rmd->last_group = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->last_group;
+       do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->last_group = tmpdiv;
 #else
-               first_group = (first_block % stripesize) / r5or6_blocks_per_row;
-               last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+       rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
+       rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
 #endif
-               if (first_group != last_group)
-                       return PQI_RAID_BYPASS_INELIGIBLE;
+       if (rmd->first_group != rmd->last_group)
+               return PQI_RAID_BYPASS_INELIGIBLE;
 
-               /* Verify request is in a single row of RAID 5/6 */
+       /* Verify request is in a single row of RAID 5/6. */
 #if BITS_PER_LONG == 32
-               tmpdiv = first_block;
-               do_div(tmpdiv, stripesize);
-               first_row = r5or6_first_row = r0_first_row = tmpdiv;
-               tmpdiv = last_block;
-               do_div(tmpdiv, stripesize);
-               r5or6_last_row = r0_last_row = tmpdiv;
+       tmpdiv = rmd->first_block;
+       do_div(tmpdiv, rmd->stripesize);
+       rmd->first_row = tmpdiv;
+       rmd->r5or6_first_row = tmpdiv;
+       tmpdiv = rmd->last_block;
+       do_div(tmpdiv, rmd->stripesize);
+       rmd->r5or6_last_row = tmpdiv;
 #else
-               first_row = r5or6_first_row = r0_first_row =
-                       first_block / stripesize;
-               r5or6_last_row = r0_last_row = last_block / stripesize;
+       rmd->first_row = rmd->r5or6_first_row =
+               rmd->first_block / rmd->stripesize;
+       rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
 #endif
-               if (r5or6_first_row != r5or6_last_row)
-                       return PQI_RAID_BYPASS_INELIGIBLE;
+       if (rmd->r5or6_first_row != rmd->r5or6_last_row)
+               return PQI_RAID_BYPASS_INELIGIBLE;
 
-               /* Verify request is in a single column */
+       /* Verify request is in a single column. */
 #if BITS_PER_LONG == 32
-               tmpdiv = first_block;
-               first_row_offset = do_div(tmpdiv, stripesize);
-               tmpdiv = first_row_offset;
-               first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
-               r5or6_first_row_offset = first_row_offset;
-               tmpdiv = last_block;
-               r5or6_last_row_offset = do_div(tmpdiv, stripesize);
-               tmpdiv = r5or6_last_row_offset;
-               r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
-               tmpdiv = r5or6_first_row_offset;
-               do_div(tmpdiv, strip_size);
-               first_column = r5or6_first_column = tmpdiv;
-               tmpdiv = r5or6_last_row_offset;
-               do_div(tmpdiv, strip_size);
-               r5or6_last_column = tmpdiv;
+       tmpdiv = rmd->first_block;
+       rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->first_row_offset;
+       rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
+       rmd->r5or6_first_row_offset = rmd->first_row_offset;
+       tmpdiv = rmd->last_block;
+       rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
+       tmpdiv = rmd->r5or6_last_row_offset;
+       rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
+       tmpdiv = rmd->r5or6_first_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->first_column = rmd->r5or6_first_column = tmpdiv;
+       tmpdiv = rmd->r5or6_last_row_offset;
+       do_div(tmpdiv, rmd->strip_size);
+       rmd->r5or6_last_column = tmpdiv;
 #else
-               first_row_offset = r5or6_first_row_offset =
-                       (u32)((first_block % stripesize) %
-                       r5or6_blocks_per_row);
-
-               r5or6_last_row_offset =
-                       (u32)((last_block % stripesize) %
-                       r5or6_blocks_per_row);
-
-               first_column = r5or6_first_row_offset / strip_size;
-               r5or6_first_column = first_column;
-               r5or6_last_column = r5or6_last_row_offset / strip_size;
+       rmd->first_row_offset = rmd->r5or6_first_row_offset =
+               (u32)((rmd->first_block % rmd->stripesize) %
+               rmd->blocks_per_row);
+
+       rmd->r5or6_last_row_offset =
+               (u32)((rmd->last_block % rmd->stripesize) %
+               rmd->blocks_per_row);
+
+       rmd->first_column =
+               rmd->r5or6_first_row_offset / rmd->strip_size;
+       rmd->r5or6_first_column = rmd->first_column;
+       rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
 #endif
-               if (r5or6_first_column != r5or6_last_column)
+       if (rmd->r5or6_first_column != rmd->r5or6_last_column)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       /* Request is eligible. */
+       rmd->map_row =
+               ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
+               get_unaligned_le16(&raid_map->row_cnt);
+
+       rmd->map_index = (rmd->first_group *
+               (get_unaligned_le16(&raid_map->row_cnt) *
+               rmd->total_disks_per_row)) +
+               (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
+
+       if (rmd->is_write) {
+               u32 index;
+
+               /*
+                * p_parity_it_nexus and q_parity_it_nexus hold the AIO
+                * handles of the parity drives, taken from the device's
+                * raid_map.
+                *
+                * A device's RAID map is bounded by the number of RAID
+                * disks squared; the map size is validated during device
+                * initialization.
+                */
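+               /*
+                * Round map_index up to the end of its row, then step back
+                * over the row's metadata drives to land on the row's first
+                * parity entry.
+                */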
+               index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
+               index *= rmd->total_disks_per_row;
+               index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
+
+               rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
+               if (rmd->raid_level == SA_RAID_6) {
+                       rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
+                       rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
+               }
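+               /* blocks_per_row is used as a divisor below; reject a malformed map. */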
+               if (rmd->blocks_per_row == 0)
                        return PQI_RAID_BYPASS_INELIGIBLE;
+#if BITS_PER_LONG == 32
+               tmpdiv = rmd->first_block;
+               do_div(tmpdiv, rmd->blocks_per_row);
+               rmd->row = tmpdiv;
+#else
+               rmd->row = rmd->first_block / rmd->blocks_per_row;
+#endif
+       }
+
+       return 0;
+}
+
+static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
+{
+       /* Build the new CDB for the physical disk I/O. */
+       if (rmd->disk_block > 0xffffffff) {
+               rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
+               rmd->cdb[1] = 0;
+               put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
+               put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
+               rmd->cdb[14] = 0;
+               rmd->cdb[15] = 0;
+               rmd->cdb_length = 16;
+       } else {
+               rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
+               rmd->cdb[1] = 0;
+               put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
+               rmd->cdb[6] = 0;
+               put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
+               rmd->cdb[9] = 0;
+               rmd->cdb_length = 10;
+       }
+}
+
+static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+{
+       u32 index;
+       u32 group;
+
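+       /*
+        * Reduce map_index to its member offset within the first mirror
+        * group, then collect the AIO handle for the corresponding member
+        * of each mirror group.
+        */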
+       group = rmd->map_index / rmd->data_disks_per_row;
+
+       index = rmd->map_index - (group * rmd->data_disks_per_row);
+       rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
+       index += rmd->data_disks_per_row;
+       rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
+       if (rmd->layout_map_count > 2) {
+               index += rmd->data_disks_per_row;
+               rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
+       }
+
+       rmd->num_it_nexus_entries = rmd->layout_map_count;
+}
+
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+       struct pqi_queue_group *queue_group)
+{
+       int rc;
+       struct raid_map *raid_map;
+       u32 group;
+       u32 next_bypass_group;
+       struct pqi_encryption_info *encryption_info_ptr;
+       struct pqi_encryption_info encryption_info;
+       struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+
+       rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+       if (rc)
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       rmd.raid_level = device->raid_level;
+
+       if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       if (unlikely(rmd.block_cnt == 0))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       raid_map = device->raid_map;
 
-               /* Request is eligible */
-               map_row =
-                       ((u32)(first_row >> raid_map->parity_rotation_shift)) %
-                       get_unaligned_le16(&raid_map->row_cnt);
+       rc = pqi_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
+       if (rc)
+               return PQI_RAID_BYPASS_INELIGIBLE;
 
-               map_index = (first_group *
-                       (get_unaligned_le16(&raid_map->row_cnt) *
-                       total_disks_per_row)) +
-                       (map_row * total_disks_per_row) + first_column;
+       if (device->raid_level == SA_RAID_1 ||
+               device->raid_level == SA_RAID_TRIPLE) {
+               if (rmd.is_write) {
+                       pqi_calc_aio_r1_nexus(raid_map, &rmd);
+               } else {
+                       group = device->next_bypass_group;
+                       next_bypass_group = group + 1;
+                       if (next_bypass_group >= rmd.layout_map_count)
+                               next_bypass_group = 0;
+                       device->next_bypass_group = next_bypass_group;
+                       rmd.map_index += group * rmd.data_disks_per_row;
+               }
+       } else if ((device->raid_level == SA_RAID_5 ||
+               device->raid_level == SA_RAID_6) &&
+               (rmd.layout_map_count > 1 || rmd.is_write)) {
+               rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
+               if (rc)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
        }
 
-       aio_handle = raid_map->disk_data[map_index].aio_handle;
-       disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
-               first_row * strip_size +
-               (first_row_offset - first_column * strip_size);
-       disk_block_cnt = block_cnt;
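+       /* Guard against indexing past the RAID map's disk_data[] array. */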
+       if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
+               return PQI_RAID_BYPASS_INELIGIBLE;
+
+       rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
+       rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+               rmd.first_row * rmd.strip_size +
+               (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
+       rmd.disk_block_cnt = rmd.block_cnt;
 
        /* Handle differing logical/physical block sizes. */
        if (raid_map->phys_blk_shift) {
-               disk_block <<= raid_map->phys_blk_shift;
-               disk_block_cnt <<= raid_map->phys_blk_shift;
+               rmd.disk_block <<= raid_map->phys_blk_shift;
+               rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
        }
 
-       if (unlikely(disk_block_cnt > 0xffff))
+       if (unlikely(rmd.disk_block_cnt > 0xffff))
                return PQI_RAID_BYPASS_INELIGIBLE;
 
-       /* Build the new CDB for the physical disk I/O. */
-       if (disk_block > 0xffffffff) {
-               cdb[0] = is_write ? WRITE_16 : READ_16;
-               cdb[1] = 0;
-               put_unaligned_be64(disk_block, &cdb[2]);
-               put_unaligned_be32(disk_block_cnt, &cdb[10]);
-               cdb[14] = 0;
-               cdb[15] = 0;
-               cdb_length = 16;
-       } else {
-               cdb[0] = is_write ? WRITE_10 : READ_10;
-               cdb[1] = 0;
-               put_unaligned_be32((u32)disk_block, &cdb[2]);
-               cdb[6] = 0;
-               put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
-               cdb[9] = 0;
-               cdb_length = 10;
-       }
-
-       if (get_unaligned_le16(&raid_map->flags) &
-               RAID_MAP_ENCRYPTION_ENABLED) {
-               pqi_set_encryption_info(&encryption_info, raid_map,
-                       first_block);
+       pqi_set_aio_cdb(&rmd);
+
+       if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
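+               /*
+                * Encrypted requests larger than the device's limit must
+                * take the normal (non-bypass) RAID path.
+                */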
+               if (rmd.data_length > device->max_transfer_encrypted)
+                       return PQI_RAID_BYPASS_INELIGIBLE;
+               pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
                encryption_info_ptr = &encryption_info;
        } else {
                encryption_info_ptr = NULL;
        }
 
-       return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
-               cdb, cdb_length, queue_group, encryption_info_ptr, true);
+       if (rmd.is_write) {
+               switch (device->raid_level) {
+               case SA_RAID_1:
+               case SA_RAID_TRIPLE:
+                       return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
+                               encryption_info_ptr, device, &rmd);
+               case SA_RAID_5:
+               case SA_RAID_6:
+                       return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
+                               encryption_info_ptr, device, &rmd);
+               }
+       }
+
+       return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+               rmd.cdb, rmd.cdb_length, queue_group,
+               encryption_info_ptr, true);
 }
 
 #define PQI_STATUS_IDLE                0x0
@@ -2859,7 +3109,7 @@ static void pqi_process_io_error(unsigned int iu_type,
        }
 }
 
-static int pqi_interpret_task_management_response(
+static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
        struct pqi_task_management_response *response)
 {
        int rc;
@@ -2877,6 +3127,10 @@ static int pqi_interpret_task_management_response(
                break;
        }
 
+       if (rc)
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
+
        return rc;
 }
 
@@ -2942,13 +3196,11 @@ static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue
                case PQI_RESPONSE_IU_VENDOR_GENERAL:
                        io_request->status =
                                get_unaligned_le16(
-                               &((struct pqi_vendor_general_response *)
-                                       response)->status);
+                               &((struct pqi_vendor_general_response *)response)->status);
                        break;
                case PQI_RESPONSE_IU_TASK_MANAGEMENT:
-                       io_request->status =
-                               pqi_interpret_task_management_response(
-                                       (void *)response);
+                       io_request->status = pqi_interpret_task_management_response(ctrl_info,
+                               (void *)response);
                        break;
                case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
                        pqi_aio_path_disabled(io_request);
@@ -3056,8 +3308,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
        put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
                &request.header.iu_length);
        request.event_type = event->event_type;
-       request.event_id = event->event_id;
-       request.additional_event_id = event->additional_event_id;
+       put_unaligned_le16(event->event_id, &request.event_id);
+       put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
 
        pqi_send_event_ack(ctrl_info, &request, sizeof(request));
 }
@@ -3068,8 +3320,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
        struct pqi_ctrl_info *ctrl_info)
 {
-       unsigned long timeout;
        u8 status;
+       unsigned long timeout;
 
        timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
@@ -3081,120 +3333,170 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
                if (status & PQI_SOFT_RESET_ABORT)
                        return RESET_ABORT;
 
+               if (!sis_is_firmware_running(ctrl_info))
+                       return RESET_NORESPONSE;
+
                if (time_after(jiffies, timeout)) {
-                       dev_err(&ctrl_info->pci_dev->dev,
+                       dev_warn(&ctrl_info->pci_dev->dev,
                                "timed out waiting for soft reset status\n");
                        return RESET_TIMEDOUT;
                }
 
-               if (!sis_is_firmware_running(ctrl_info))
-                       return RESET_NORESPONSE;
-
                ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
        }
 }
 
-static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
-       enum pqi_soft_reset_status reset_status)
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
 {
        int rc;
+       unsigned int delay_secs;
+       enum pqi_soft_reset_status reset_status;
+
+       if (ctrl_info->soft_reset_handshake_supported)
+               reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
+       else
+               reset_status = RESET_INITIATE_FIRMWARE;
+
+       delay_secs = PQI_POST_RESET_DELAY_SECS;
 
        switch (reset_status) {
-       case RESET_INITIATE_DRIVER:
        case RESET_TIMEDOUT:
+               delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
+               fallthrough;
+       case RESET_INITIATE_DRIVER:
                dev_info(&ctrl_info->pci_dev->dev,
-                       "resetting controller %u\n", ctrl_info->ctrl_id);
+                               "Online Firmware Activation: resetting controller\n");
                sis_soft_reset(ctrl_info);
                fallthrough;
        case RESET_INITIATE_FIRMWARE:
-               rc = pqi_ofa_ctrl_restart(ctrl_info);
+               ctrl_info->pqi_mode_enabled = false;
+               pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+               rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
                pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
                dev_info(&ctrl_info->pci_dev->dev,
-                       "Online Firmware Activation for controller %u: %s\n",
-                       ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+                               "Online Firmware Activation: %s\n",
+                               rc == 0 ? "SUCCESS" : "FAILED");
                break;
        case RESET_ABORT:
-               pqi_ofa_ctrl_unquiesce(ctrl_info);
                dev_info(&ctrl_info->pci_dev->dev,
-                       "Online Firmware Activation for controller %u: %s\n",
-                       ctrl_info->ctrl_id, "ABORTED");
+                               "Online Firmware Activation ABORTED\n");
+               if (ctrl_info->soft_reset_handshake_supported)
+                       pqi_clear_soft_reset_status(ctrl_info);
+               pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
+               pqi_ofa_ctrl_unquiesce(ctrl_info);
                break;
        case RESET_NORESPONSE:
+               fallthrough;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "unexpected Online Firmware Activation reset status: 0x%x\n",
+                       reset_status);
                pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
+               pqi_ofa_ctrl_unquiesce(ctrl_info);
                pqi_take_ctrl_offline(ctrl_info);
                break;
        }
 }
 
-static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_event *event)
+static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
 {
-       u16 event_id;
-       enum pqi_soft_reset_status status;
+       struct pqi_ctrl_info *ctrl_info;
 
-       event_id = get_unaligned_le16(&event->event_id);
+       ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
 
-       mutex_lock(&ctrl_info->ofa_mutex);
+       pqi_ctrl_ofa_start(ctrl_info);
+       pqi_ofa_setup_host_buffer(ctrl_info);
+       pqi_ofa_host_memory_update(ctrl_info);
+}
 
-       if (event_id == PQI_EVENT_OFA_QUIESCE) {
-               dev_info(&ctrl_info->pci_dev->dev,
-                       "Received Online Firmware Activation quiesce event for controller %u\n",
-                       ctrl_info->ctrl_id);
-               pqi_ofa_ctrl_quiesce(ctrl_info);
-               pqi_acknowledge_event(ctrl_info, event);
-               if (ctrl_info->soft_reset_handshake_supported) {
-                       status = pqi_poll_for_soft_reset_status(ctrl_info);
-                       pqi_process_soft_reset(ctrl_info, status);
-               } else {
-                       pqi_process_soft_reset(ctrl_info,
-                                       RESET_INITIATE_FIRMWARE);
-               }
+static void pqi_ofa_quiesce_worker(struct work_struct *work)
+{
+       struct pqi_ctrl_info *ctrl_info;
+       struct pqi_event *event;
 
-       } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
-               pqi_acknowledge_event(ctrl_info, event);
-               pqi_ofa_setup_host_buffer(ctrl_info,
-                       le32_to_cpu(event->ofa_bytes_requested));
-               pqi_ofa_host_memory_update(ctrl_info);
-       } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
-               pqi_ofa_free_host_buffer(ctrl_info);
-               pqi_acknowledge_event(ctrl_info, event);
+       ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
+
+       event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
+
+       pqi_ofa_ctrl_quiesce(ctrl_info);
+       pqi_acknowledge_event(ctrl_info, event);
+       pqi_process_soft_reset(ctrl_info);
+}
+
+static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_event *event)
+{
+       bool ack_event;
+
+       ack_event = true;
+
+       switch (event->event_id) {
+       case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+               dev_info(&ctrl_info->pci_dev->dev,
+                       "received Online Firmware Activation memory allocation request\n");
+               schedule_work(&ctrl_info->ofa_memory_alloc_work);
+               break;
+       case PQI_EVENT_OFA_QUIESCE:
+               dev_info(&ctrl_info->pci_dev->dev,
+                       "received Online Firmware Activation quiesce request\n");
+               schedule_work(&ctrl_info->ofa_quiesce_work);
+               ack_event = false;
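+               /* The quiesce worker acks the event once quiescing completes. */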
+               break;
+       case PQI_EVENT_OFA_CANCELED:
                dev_info(&ctrl_info->pci_dev->dev,
-                       "Online Firmware Activation(%u) cancel reason : %u\n",
-                       ctrl_info->ctrl_id, event->ofa_cancel_reason);
+                       "received Online Firmware Activation cancel request: reason: %u\n",
+                       ctrl_info->ofa_cancel_reason);
+               pqi_ofa_free_host_buffer(ctrl_info);
+               pqi_ctrl_ofa_done(ctrl_info);
+               break;
+       default:
+               dev_err(&ctrl_info->pci_dev->dev,
+                       "received unknown Online Firmware Activation request: event ID: %u\n",
+                       event->event_id);
+               break;
        }
 
-       mutex_unlock(&ctrl_info->ofa_mutex);
+       return ack_event;
 }
 
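+/*
+ * Drains all pending events, acknowledging each one unless the OFA
+ * handler defers the acknowledgement, and schedules a rescan only if a
+ * non-OFA event was seen.
+ */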
 static void pqi_event_worker(struct work_struct *work)
 {
        unsigned int i;
+       bool rescan_needed;
        struct pqi_ctrl_info *ctrl_info;
        struct pqi_event *event;
+       bool ack_event;
 
        ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
 
        pqi_ctrl_busy(ctrl_info);
-       pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+       pqi_wait_if_ctrl_blocked(ctrl_info);
        if (pqi_ctrl_offline(ctrl_info))
                goto out;
 
-       pqi_schedule_rescan_worker_delayed(ctrl_info);
-
+       rescan_needed = false;
        event = ctrl_info->events;
        for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
                if (event->pending) {
                        event->pending = false;
                        if (event->event_type == PQI_EVENT_TYPE_OFA) {
-                               pqi_ctrl_unbusy(ctrl_info);
-                               pqi_ofa_process_event(ctrl_info, event);
-                               return;
+                               ack_event = pqi_ofa_process_event(ctrl_info, event);
+                       } else {
+                               ack_event = true;
+                               rescan_needed = true;
                        }
-                       pqi_acknowledge_event(ctrl_info, event);
+                       if (ack_event)
+                               pqi_acknowledge_event(ctrl_info, event);
                }
                event++;
        }
 
+       if (rescan_needed)
+               pqi_schedule_rescan_worker_delayed(ctrl_info);
+
 out:
        pqi_ctrl_unbusy(ctrl_info);
 }
@@ -3205,8 +3507,7 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
 {
        int num_interrupts;
        u32 heartbeat_count;
-       struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
-                                                    heartbeat_timer);
+       struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
 
        pqi_check_ctrl_health(ctrl_info);
        if (pqi_ctrl_offline(ctrl_info))
@@ -3252,37 +3553,18 @@ static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
        del_timer_sync(&ctrl_info->heartbeat_timer);
 }
 
-static inline int pqi_event_type_to_event_index(unsigned int event_type)
-{
-       int index;
-
-       for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
-               if (event_type == pqi_supported_event_types[index])
-                       return index;
-
-       return -1;
-}
-
-static inline bool pqi_is_supported_event(unsigned int event_type)
-{
-       return pqi_event_type_to_event_index(event_type) != -1;
-}
-
-static void pqi_ofa_capture_event_payload(struct pqi_event *event,
-       struct pqi_event_response *response)
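+/*
+ * Copies the OFA event payload (bytes requested or cancel reason) into
+ * ctrl_info for the event worker to consume later.
+ */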
+static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_event *event, struct pqi_event_response *response)
 {
-       u16 event_id;
-
-       event_id = get_unaligned_le16(&event->event_id);
-
-       if (event->event_type == PQI_EVENT_TYPE_OFA) {
-               if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
-                       event->ofa_bytes_requested =
-                       response->data.ofa_memory_allocation.bytes_requested;
-               } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
-                       event->ofa_cancel_reason =
-                       response->data.ofa_cancelled.reason;
-               }
+       switch (event->event_id) {
+       case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+               ctrl_info->ofa_bytes_requested =
+                       get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
+               break;
+       case PQI_EVENT_OFA_CANCELED:
+               ctrl_info->ofa_cancel_reason =
+                       get_unaligned_le16(&response->data.ofa_cancelled.reason);
+               break;
        }
 }
 
@@ -3316,17 +3598,17 @@ static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
                num_events++;
                response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
 
-               event_index =
-                       pqi_event_type_to_event_index(response->event_type);
+               event_index = pqi_event_type_to_event_index(response->event_type);
 
                if (event_index >= 0 && response->request_acknowledge) {
                        event = &ctrl_info->events[event_index];
                        event->pending = true;
                        event->event_type = response->event_type;
-                       event->event_id = response->event_id;
-                       event->additional_event_id = response->additional_event_id;
+                       event->event_id = get_unaligned_le16(&response->event_id);
+                       event->additional_event_id =
+                               get_unaligned_le32(&response->additional_event_id);
                        if (event->event_type == PQI_EVENT_TYPE_OFA)
-                               pqi_ofa_capture_event_payload(event, response);
+                               pqi_ofa_capture_event_payload(ctrl_info, event, response);
                }
 
                oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3343,8 +3625,7 @@ static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 
 #define PQI_LEGACY_INTX_MASK   0x1
 
-static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
-       bool enable_intx)
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
 {
        u32 intx_mask;
        struct pqi_device_registers __iomem *pqi_registers;
@@ -3421,8 +3702,7 @@ static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
                valid_irq = true;
                break;
        case IRQ_MODE_INTX:
-               intx_status =
-                       readl(&ctrl_info->pqi_registers->legacy_intx_status);
+               intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
                if (intx_status & PQI_LEGACY_INTX_PENDING)
                        valid_irq = true;
                else
@@ -3743,7 +4023,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
                &admin_queues_aligned->iq_element_array;
        admin_queues->oq_element_array =
                &admin_queues_aligned->oq_element_array;
-       admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+       admin_queues->iq_ci =
+               (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
        admin_queues->oq_pi =
                (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
 
@@ -3757,8 +4038,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
                ctrl_info->admin_queue_memory_base);
        admin_queues->iq_ci_bus_addr =
                ctrl_info->admin_queue_memory_base_dma_handle +
-               ((void *)admin_queues->iq_ci -
-               ctrl_info->admin_queue_memory_base);
+               ((void __iomem *)admin_queues->iq_ci -
+               (void __iomem *)ctrl_info->admin_queue_memory_base);
        admin_queues->oq_pi_bus_addr =
                ctrl_info->admin_queue_memory_base_dma_handle +
                ((void __iomem *)admin_queues->oq_pi -
@@ -3794,6 +4075,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
                (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
                (admin_queues->int_msg_num << 16);
        writel(reg, &pqi_registers->admin_iq_num_elements);
+
        writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
                &pqi_registers->function_and_status_code);
 
@@ -4021,59 +4303,40 @@ static int pqi_process_raid_io_error_synchronous(
        return rc;
 }
 
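+/*
+ * Requests not marked PQI_DRIVER_NONBLOCKABLE_REQUEST may be held while
+ * the controller is blocked, e.g. during OFA.
+ */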
+static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
+{
+       return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
+}
+
 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
-       struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+       struct pqi_raid_error_info *error_info)
 {
        int rc = 0;
        struct pqi_io_request *io_request;
-       unsigned long start_jiffies;
-       unsigned long msecs_blocked;
        size_t iu_length;
        DECLARE_COMPLETION_ONSTACK(wait);
 
-       /*
-        * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
-        * are mutually exclusive.
-        */
-
        if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
                if (down_interruptible(&ctrl_info->sync_request_sem))
                        return -ERESTARTSYS;
        } else {
-               if (timeout_msecs == NO_TIMEOUT) {
-                       down(&ctrl_info->sync_request_sem);
-               } else {
-                       start_jiffies = jiffies;
-                       if (down_timeout(&ctrl_info->sync_request_sem,
-                               msecs_to_jiffies(timeout_msecs)))
-                               return -ETIMEDOUT;
-                       msecs_blocked =
-                               jiffies_to_msecs(jiffies - start_jiffies);
-                       if (msecs_blocked >= timeout_msecs) {
-                               rc = -ETIMEDOUT;
-                               goto out;
-                       }
-                       timeout_msecs -= msecs_blocked;
-               }
+               down(&ctrl_info->sync_request_sem);
        }
 
        pqi_ctrl_busy(ctrl_info);
-       timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
-       if (timeout_msecs == 0) {
-               pqi_ctrl_unbusy(ctrl_info);
-               rc = -ETIMEDOUT;
-               goto out;
-       }
+       /*
+        * Wait for other admin queue updates such as:
+        * config table changes, OFA memory updates, ...
+        */
+       if (pqi_is_blockable_request(request))
+               pqi_wait_if_ctrl_blocked(ctrl_info);
 
        if (pqi_ctrl_offline(ctrl_info)) {
-               pqi_ctrl_unbusy(ctrl_info);
                rc = -ENXIO;
                goto out;
        }
 
-       atomic_inc(&ctrl_info->sync_cmds_outstanding);
-
        io_request = pqi_alloc_io_request(ctrl_info);
 
        put_unaligned_le16(io_request->index,
@@ -4090,38 +4353,24 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        io_request->io_complete_callback = pqi_raid_synchronous_complete;
        io_request->context = &wait;
 
-       pqi_start_io(ctrl_info,
-               &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+       pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
                io_request);
 
-       pqi_ctrl_unbusy(ctrl_info);
-
-       if (timeout_msecs == NO_TIMEOUT) {
-               pqi_wait_for_completion_io(ctrl_info, &wait);
-       } else {
-               if (!wait_for_completion_io_timeout(&wait,
-                       msecs_to_jiffies(timeout_msecs))) {
-                       dev_warn(&ctrl_info->pci_dev->dev,
-                               "command timed out\n");
-                       rc = -ETIMEDOUT;
-               }
-       }
+       pqi_wait_for_completion_io(ctrl_info, &wait);
 
        if (error_info) {
                if (io_request->error_info)
-                       memcpy(error_info, io_request->error_info,
-                               sizeof(*error_info));
+                       memcpy(error_info, io_request->error_info, sizeof(*error_info));
                else
                        memset(error_info, 0, sizeof(*error_info));
        } else if (rc == 0 && io_request->error_info) {
-               rc = pqi_process_raid_io_error_synchronous(
-                       io_request->error_info);
+               rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
        }
 
        pqi_free_io_request(io_request);
 
-       atomic_dec(&ctrl_info->sync_cmds_outstanding);
 out:
+       pqi_ctrl_unbusy(ctrl_info);
        up(&ctrl_info->sync_request_sem);
 
        return rc;
@@ -4158,8 +4407,7 @@ static int pqi_submit_admin_request_synchronous(
        rc = pqi_poll_for_admin_response(ctrl_info, response);
 
        if (rc == 0)
-               rc = pqi_validate_admin_response(response,
-                       request->function_code);
+               rc = pqi_validate_admin_response(response, request->function_code);
 
        return rc;
 }
@@ -4193,8 +4441,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
        if (rc)
                goto out;
 
-       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
-               &response);
+       rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
 
        pqi_pci_unmap(ctrl_info->pci_dev,
                &request.data.report_device_capability.sg_descriptor, 1,
@@ -4529,8 +4776,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
        if (rc)
                goto out;
 
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 
        pqi_pci_unmap(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors, 1,
@@ -4543,7 +4789,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
                event_descriptor = &event_config->descriptors[i];
                if (enable_events &&
                        pqi_is_supported_event(event_descriptor->event_type))
-                       put_unaligned_le16(ctrl_info->event_queue.oq_id,
+                       put_unaligned_le16(ctrl_info->event_queue.oq_id,
                                        &event_descriptor->oq_id);
                else
                        put_unaligned_le16(0, &event_descriptor->oq_id);
@@ -4565,8 +4811,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
        if (rc)
                goto out;
 
-       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-               NULL, NO_TIMEOUT);
+       rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 
        pqi_pci_unmap(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors, 1,
@@ -4618,7 +4863,6 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
 
 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
 {
-
        ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
                                     ctrl_info->error_buffer_length,
                                     &ctrl_info->error_buffer_dma_handle,
@@ -4638,9 +4882,8 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
        struct device *dev;
        struct pqi_io_request *io_request;
 
-       ctrl_info->io_request_pool =
-               kcalloc(ctrl_info->max_io_slots,
-                       sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+       ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
+               sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
 
        if (!ctrl_info->io_request_pool) {
                dev_err(&ctrl_info->pci_dev->dev,
@@ -4653,8 +4896,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
        io_request = ctrl_info->io_request_pool;
 
        for (i = 0; i < ctrl_info->max_io_slots; i++) {
-               io_request->iu =
-                       kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+               io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
 
                if (!io_request->iu) {
                        dev_err(&ctrl_info->pci_dev->dev,
@@ -4674,8 +4916,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
 
                io_request->index = i;
                io_request->sg_chain_buffer = sg_chain_buffer;
-               io_request->sg_chain_buffer_dma_handle =
-                       sg_chain_buffer_dma_handle;
+               io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
                io_request++;
        }
 
@@ -4782,10 +5023,16 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
                PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
                sizeof(struct pqi_sg_descriptor)) +
                PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+
+       ctrl_info->max_sg_per_r56_iu =
+               ((ctrl_info->max_inbound_iu_length -
+               PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+               sizeof(struct pqi_sg_descriptor)) +
+               PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
 }
 
-static inline void pqi_set_sg_descriptor(
-       struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
+       struct scatterlist *sg)
 {
        u64 address = (u64)sg_dma_address(sg);
        unsigned int length = sg_dma_len(sg);
@@ -4795,16 +5042,52 @@ static inline void pqi_set_sg_descriptor(
        put_unaligned_le32(0, &sg_descriptor->flags);
 }
 
+static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
+       struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
+       int max_sg_per_iu, bool *chained)
+{
+       int i;
+       unsigned int num_sg_in_iu;
+
+       *chained = false;
+       i = 0;
+       num_sg_in_iu = 0;
+       max_sg_per_iu--;        /* Subtract 1 to leave room for chain marker. */
+
+       while (1) {
+               pqi_set_sg_descriptor(sg_descriptor, sg);
+               if (!*chained)
+                       num_sg_in_iu++;
+               i++;
+               if (i == sg_count)
+                       break;
+               sg_descriptor++;
+               if (i == max_sg_per_iu) {
+                       put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
+                               &sg_descriptor->address);
+                       put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
+                               &sg_descriptor->length);
+                       put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
+                       *chained = true;
+                       num_sg_in_iu++;
+                       sg_descriptor = io_request->sg_chain_buffer;
+               }
+               sg = sg_next(sg);
+       }
+
+       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+       return num_sg_in_iu;
+}
+
 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
        struct pqi_io_request *io_request)
 {
-       int i;
        u16 iu_length;
        int sg_count;
        bool chained;
        unsigned int num_sg_in_iu;
-       unsigned int max_sg_per_iu;
        struct scatterlist *sg;
        struct pqi_sg_descriptor *sg_descriptor;
 
@@ -4820,36 +5103,10 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
 
        sg = scsi_sglist(scmd);
        sg_descriptor = request->sg_descriptors;
-       max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
-       chained = false;
-       num_sg_in_iu = 0;
-       i = 0;
 
-       while (1) {
-               pqi_set_sg_descriptor(sg_descriptor, sg);
-               if (!chained)
-                       num_sg_in_iu++;
-               i++;
-               if (i == sg_count)
-                       break;
-               sg_descriptor++;
-               if (i == max_sg_per_iu) {
-                       put_unaligned_le64(
-                               (u64)io_request->sg_chain_buffer_dma_handle,
-                               &sg_descriptor->address);
-                       put_unaligned_le32((sg_count - num_sg_in_iu)
-                               * sizeof(*sg_descriptor),
-                               &sg_descriptor->length);
-                       put_unaligned_le32(CISS_SG_CHAIN,
-                               &sg_descriptor->flags);
-                       chained = true;
-                       num_sg_in_iu++;
-                       sg_descriptor = io_request->sg_chain_buffer;
-               }
-               sg = sg_next(sg);
-       }
+       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+               ctrl_info->max_sg_per_iu, &chained);
 
-       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
        request->partial = chained;
        iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
 
@@ -4859,16 +5116,14 @@ out:
        return 0;
 }
 
-static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
        struct pqi_io_request *io_request)
 {
-       int i;
        u16 iu_length;
        int sg_count;
        bool chained;
        unsigned int num_sg_in_iu;
-       unsigned int max_sg_per_iu;
        struct scatterlist *sg;
        struct pqi_sg_descriptor *sg_descriptor;
 
@@ -4876,7 +5131,7 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
        if (sg_count < 0)
                return sg_count;
 
-       iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+       iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
                PQI_REQUEST_HEADER_LENGTH;
        num_sg_in_iu = 0;
 
@@ -4885,35 +5140,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
 
        sg = scsi_sglist(scmd);
        sg_descriptor = request->sg_descriptors;
-       max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
-       chained = false;
-       i = 0;
 
-       while (1) {
-               pqi_set_sg_descriptor(sg_descriptor, sg);
-               if (!chained)
-                       num_sg_in_iu++;
-               i++;
-               if (i == sg_count)
-                       break;
-               sg_descriptor++;
-               if (i == max_sg_per_iu) {
-                       put_unaligned_le64(
-                               (u64)io_request->sg_chain_buffer_dma_handle,
-                               &sg_descriptor->address);
-                       put_unaligned_le32((sg_count - num_sg_in_iu)
-                               * sizeof(*sg_descriptor),
-                               &sg_descriptor->length);
-                       put_unaligned_le32(CISS_SG_CHAIN,
-                               &sg_descriptor->flags);
-                       chained = true;
-                       num_sg_in_iu++;
-                       sg_descriptor = io_request->sg_chain_buffer;
-               }
-               sg = sg_next(sg);
-       }
+       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+               ctrl_info->max_sg_per_iu, &chained);
 
-       put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
        request->partial = chained;
        iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
 
@@ -4924,7 +5154,81 @@ out:
        return 0;
 }
 
-static void pqi_raid_io_complete(struct pqi_io_request *io_request,
+static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
+{
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+
+       iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       num_sg_in_iu = 0;
+
+       if (sg_count != 0) {
+               sg = scsi_sglist(scmd);
+               sg_descriptor = request->sg_descriptors;
+
+               num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+                       ctrl_info->max_sg_per_r56_iu, &chained);
+
+               request->partial = chained;
+               iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+       }
+
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+       request->num_sg_descriptors = num_sg_in_iu;
+
+       return 0;
+}
+
+static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
+       struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+       struct pqi_io_request *io_request)
+{
+       u16 iu_length;
+       int sg_count;
+       bool chained;
+       unsigned int num_sg_in_iu;
+       struct scatterlist *sg;
+       struct pqi_sg_descriptor *sg_descriptor;
+
+       sg_count = scsi_dma_map(scmd);
+       if (sg_count < 0)
+               return sg_count;
+
+       iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+               PQI_REQUEST_HEADER_LENGTH;
+       num_sg_in_iu = 0;
+
+       if (sg_count == 0)
+               goto out;
+
+       sg = scsi_sglist(scmd);
+       sg_descriptor = request->sg_descriptors;
+
+       num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+               ctrl_info->max_sg_per_iu, &chained);
+
+       request->partial = chained;
+       iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+       put_unaligned_le16(iu_length, &request->header.iu_length);
+       request->num_sg_descriptors = num_sg_in_iu;
+
+       return 0;
+}
+
+static void pqi_raid_io_complete(struct pqi_io_request *io_request,
        void *context)
 {
        struct scsi_cmnd *scmd;
@@ -4948,16 +5252,14 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
        io_request->scmd = scmd;
 
        request = io_request->iu;
-       memset(request, 0,
-               offsetof(struct pqi_raid_path_request, sg_descriptors));
+       memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
 
        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
        put_unaligned_le16(io_request->index, &request->request_id);
        request->error_index = request->request_id;
-       memcpy(request->lun_number, device->scsi3addr,
-               sizeof(request->lun_number));
+       memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
 
        cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
        memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -4967,30 +5269,20 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
        case 10:
        case 12:
        case 16:
-               /* No bytes in the Additional CDB bytes field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_0;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
                break;
        case 20:
-               /* 4 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_4;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
                break;
        case 24:
-               /* 8 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_8;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
                break;
        case 28:
-               /* 12 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_12;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
                break;
        case 32:
        default:
-               /* 16 bytes in the Additional cdb field */
-               request->additional_cdb_bytes_usage =
-                       SOP_ADDITIONAL_CDB_BYTES_16;
+               request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
                break;
        }
 
@@ -5037,12 +5329,6 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
                device, scmd, queue_group);
 }
 
-static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
-{
-       if (!pqi_ctrl_blocked(ctrl_info))
-               schedule_work(&ctrl_info->raid_bypass_retry_work);
-}
-
 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
 {
        struct scsi_cmnd *scmd;
@@ -5059,7 +5345,7 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
                return false;
 
        device = scmd->device->hostdata;
-       if (pqi_device_offline(device))
+       if (pqi_device_offline(device) || pqi_device_in_remove(device))
                return false;
 
        ctrl_info = shost_to_hba(scmd->device->host);
@@ -5069,132 +5355,6 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
        return true;
 }
 
-static inline void pqi_add_to_raid_bypass_retry_list(
-       struct pqi_ctrl_info *ctrl_info,
-       struct pqi_io_request *io_request, bool at_head)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       if (at_head)
-               list_add(&io_request->request_list_entry,
-                       &ctrl_info->raid_bypass_retry_list);
-       else
-               list_add_tail(&io_request->request_list_entry,
-                       &ctrl_info->raid_bypass_retry_list);
-       spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
-static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
-       void *context)
-{
-       struct scsi_cmnd *scmd;
-
-       scmd = io_request->scmd;
-       pqi_free_io_request(io_request);
-       pqi_scsi_done(scmd);
-}
-
-static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
-{
-       struct scsi_cmnd *scmd;
-       struct pqi_ctrl_info *ctrl_info;
-
-       io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
-       scmd = io_request->scmd;
-       scmd->result = 0;
-       ctrl_info = shost_to_hba(scmd->device->host);
-
-       pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
-       pqi_schedule_bypass_retry(ctrl_info);
-}
-
-static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
-{
-       struct scsi_cmnd *scmd;
-       struct pqi_scsi_dev *device;
-       struct pqi_ctrl_info *ctrl_info;
-       struct pqi_queue_group *queue_group;
-
-       scmd = io_request->scmd;
-       device = scmd->device->hostdata;
-       if (pqi_device_in_reset(device)) {
-               pqi_free_io_request(io_request);
-               set_host_byte(scmd, DID_RESET);
-               pqi_scsi_done(scmd);
-               return 0;
-       }
-
-       ctrl_info = shost_to_hba(scmd->device->host);
-       queue_group = io_request->queue_group;
-
-       pqi_reinit_io_request(io_request);
-
-       return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
-               device, scmd, queue_group);
-}
-
-static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
-       struct pqi_ctrl_info *ctrl_info)
-{
-       unsigned long flags;
-       struct pqi_io_request *io_request;
-
-       spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       io_request = list_first_entry_or_null(
-               &ctrl_info->raid_bypass_retry_list,
-               struct pqi_io_request, request_list_entry);
-       if (io_request)
-               list_del(&io_request->request_list_entry);
-       spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-
-       return io_request;
-}
-
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
-{
-       int rc;
-       struct pqi_io_request *io_request;
-
-       pqi_ctrl_busy(ctrl_info);
-
-       while (1) {
-               if (pqi_ctrl_blocked(ctrl_info))
-                       break;
-               io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
-               if (!io_request)
-                       break;
-               rc = pqi_retry_raid_bypass(io_request);
-               if (rc) {
-                       pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
-                               true);
-                       pqi_schedule_bypass_retry(ctrl_info);
-                       break;
-               }
-       }
-
-       pqi_ctrl_unbusy(ctrl_info);
-}
-
-static void pqi_raid_bypass_retry_worker(struct work_struct *work)
-{
-       struct pqi_ctrl_info *ctrl_info;
-
-       ctrl_info = container_of(work, struct pqi_ctrl_info,
-               raid_bypass_retry_work);
-       pqi_retry_raid_bypass_requests(ctrl_info);
-}
-
-static void pqi_clear_all_queued_raid_bypass_retries(
-       struct pqi_ctrl_info *ctrl_info)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
-       INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
-       spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
        void *context)
 {
@@ -5202,12 +5362,11 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
 
        scmd = io_request->scmd;
        scsi_dma_unmap(scmd);
-       if (io_request->status == -EAGAIN)
+       if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
                set_host_byte(scmd, DID_IMM_RETRY);
-       else if (pqi_raid_bypass_retry_needed(io_request)) {
-               pqi_queue_raid_bypass_retry(io_request);
-               return;
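+               /*
+                * Marking the command makes pqi_is_bypass_eligible_request()
+                * send the retry down the RAID path.
+                */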
+               scmd->SCp.this_residual++;
        }
+
        pqi_free_io_request(io_request);
        pqi_scsi_done(scmd);
 }
@@ -5235,8 +5394,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        io_request->raid_bypass = raid_bypass;
 
        request = io_request->iu;
-       memset(request, 0,
-               offsetof(struct pqi_raid_path_request, sg_descriptors));
+       memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
 
        request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
        put_unaligned_le32(aio_handle, &request->nexus_id);
@@ -5290,6 +5448,129 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        return 0;
 }
 
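+/*
+ * Builds and submits an AIO RAID 1 write IU. The request carries the IT
+ * nexus of every mirror member so the controller can replicate the
+ * write in firmware.
+ */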
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+{
+       int rc;
+       struct pqi_io_request *io_request;
+       struct pqi_aio_r1_path_request *r1_request;
+
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_aio_io_complete;
+       io_request->scmd = scmd;
+       io_request->raid_bypass = true;
+
+       r1_request = io_request->iu;
+       memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
+
+       r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
+       put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
+       r1_request->num_drives = rmd->num_it_nexus_entries;
+       put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
+       put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
+       if (rmd->num_it_nexus_entries == 3)
+               put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
+
+       put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
+       r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       put_unaligned_le16(io_request->index, &r1_request->request_id);
+       r1_request->error_index = r1_request->request_id;
+       if (rmd->cdb_length > sizeof(r1_request->cdb))
+               rmd->cdb_length = sizeof(r1_request->cdb);
+       r1_request->cdb_length = rmd->cdb_length;
+       memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
+
+       /*
+        * The direction is always write; SOP_READ_FLAG means the
+        * controller reads the data from host memory.
+        */
+       r1_request->data_direction = SOP_READ_FLAG;
+
+       if (encryption_info) {
+               r1_request->encryption_enable = true;
+               put_unaligned_le16(encryption_info->data_encryption_key_index,
+                               &r1_request->data_encryption_key_index);
+               put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+                               &r1_request->encrypt_tweak_lower);
+               put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+                               &r1_request->encrypt_tweak_upper);
+       }
+
+       rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
+       if (rc) {
+               pqi_free_io_request(io_request);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+       return 0;
+}
+
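+/*
+ * Builds and submits an AIO RAID 5/6 write IU, including the data and
+ * parity IT nexuses, the row number, and (for RAID 6) the XOR
+ * multiplier needed for parity updates.
+ */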
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+       struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+       struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+       struct pqi_scsi_dev_raid_map_data *rmd)
+{
+       int rc;
+       struct pqi_io_request *io_request;
+       struct pqi_aio_r56_path_request *r56_request;
+
+       io_request = pqi_alloc_io_request(ctrl_info);
+       io_request->io_complete_callback = pqi_aio_io_complete;
+       io_request->scmd = scmd;
+       io_request->raid_bypass = true;
+
+       r56_request = io_request->iu;
+       memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
+
+       if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
+               r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
+       else
+               r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
+
+       put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
+       put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
+       put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
+       if (rmd->raid_level == SA_RAID_6) {
+               put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
+               r56_request->xor_multiplier = rmd->xor_mult;
+       }
+       put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
+       r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+       put_unaligned_le64(rmd->row, &r56_request->row);
+
+       put_unaligned_le16(io_request->index, &r56_request->request_id);
+       r56_request->error_index = r56_request->request_id;
+
+       if (rmd->cdb_length > sizeof(r56_request->cdb))
+               rmd->cdb_length = sizeof(r56_request->cdb);
+       r56_request->cdb_length = rmd->cdb_length;
+       memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
+
+       /*
+        * The direction is always write; SOP_READ_FLAG means the
+        * controller reads the data from host memory.
+        */
+       r56_request->data_direction = SOP_READ_FLAG;
+
+       if (encryption_info) {
+               r56_request->encryption_enable = true;
+               put_unaligned_le16(encryption_info->data_encryption_key_index,
+                               &r56_request->data_encryption_key_index);
+               put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+                               &r56_request->encrypt_tweak_lower);
+               put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+                               &r56_request->encrypt_tweak_upper);
+       }
+
+       rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
+       if (rc) {
+               pqi_free_io_request(io_request);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+       return 0;
+}
+
 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd)
 {
@@ -5302,6 +5583,14 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
        return hw_queue;
 }
 
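+/*
+ * RAID bypass is attempted only for normal file-system I/O: passthrough
+ * requests, and commands already retried once (SCp.this_residual != 0),
+ * take the normal RAID path.
+ */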
+static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
+{
+       if (blk_rq_is_passthrough(scmd->request))
+               return false;
+
+       return scmd->SCp.this_residual == 0;
+}
+
 /*
  * This function gets called just before we hand the completed SCSI request
  * back to the SML.
@@ -5325,8 +5614,82 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
        atomic_dec(&device->scsi_cmds_outstanding);
 }
 
-static int pqi_scsi_queue_command(struct Scsi_Host *shost,
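+/*
+ * Detects sequential write streams to RAID 5/6 volumes. Returns true
+ * when the write should skip the AIO bypass, either because it extends
+ * a detected stream or because the controller lacks RAID 5/6 AIO write
+ * support.
+ */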
+static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd)
+{
+       u32 oldest_jiffies;
+       u8 lru_index;
+       int i;
+       int rc;
+       struct pqi_scsi_dev *device;
+       struct pqi_stream_data *pqi_stream_data;
+       struct pqi_scsi_dev_raid_map_data rmd;
+
+       if (!ctrl_info->enable_stream_detection)
+               return false;
+
+       rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+       if (rc)
+               return false;
+
+       /* Check writes only. */
+       if (!rmd.is_write)
+               return false;
+
+       device = scmd->device->hostdata;
+
+       /* Check for RAID 5/6 streams. */
+       if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
+               return false;
+
+       /*
+        * If the controller does not support AIO RAID 5/6 writes, the
+        * request must be sent down the non-AIO path.
+        */
+       if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
+               (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
+               return true;
+
+       lru_index = 0;
+       oldest_jiffies = UINT_MAX;
+       for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+               pqi_stream_data = &device->stream_data[i];
+               /*
+                * Check whether this request is adjacent to, or falls
+                * within, the previous request.
+                */
+               if ((pqi_stream_data->next_lba &&
+                       rmd.first_block >= pqi_stream_data->next_lba) &&
+                       rmd.first_block <= pqi_stream_data->next_lba +
+                               rmd.block_cnt) {
+                       pqi_stream_data->next_lba = rmd.first_block +
+                               rmd.block_cnt;
+                       pqi_stream_data->last_accessed = jiffies;
+                       return true;
+               }
+
+               /* Unused entry. */
+               if (pqi_stream_data->last_accessed == 0) {
+                       lru_index = i;
+                       break;
+               }
+
+               /* Find entry with oldest last accessed time. */
+               if (pqi_stream_data->last_accessed <= oldest_jiffies) {
+                       oldest_jiffies = pqi_stream_data->last_accessed;
+                       lru_index = i;
+               }
+       }
+
+       /* Set LRU entry. */
+       pqi_stream_data = &device->stream_data[lru_index];
+       pqi_stream_data->last_accessed = jiffies;
+       pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
+
+       return false;
+}
+
+static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
        int rc;
        struct pqi_ctrl_info *ctrl_info;
@@ -5336,7 +5699,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
        bool raid_bypassed;
 
        device = scmd->device->hostdata;
-       ctrl_info = shost_to_hba(shost);
 
        if (!device) {
                set_host_byte(scmd, DID_NO_CONNECT);
@@ -5346,15 +5708,15 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
 
        atomic_inc(&device->scsi_cmds_outstanding);
 
+       ctrl_info = shost_to_hba(shost);
+
        if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
                set_host_byte(scmd, DID_NO_CONNECT);
                pqi_scsi_done(scmd);
                return 0;
        }
 
-       pqi_ctrl_busy(ctrl_info);
-       if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
-           pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
+       if (pqi_ctrl_blocked(ctrl_info)) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
@@ -5371,9 +5733,9 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
        if (pqi_is_logical_device(device)) {
                raid_bypassed = false;
                if (device->raid_bypass_enabled &&
-                       !blk_rq_is_passthrough(scmd->request)) {
-                       rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
-                               scmd, queue_group);
+                       pqi_is_bypass_eligible_request(scmd) &&
+                       !pqi_is_parity_write_stream(ctrl_info, scmd)) {
+                       rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
                        if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
                                raid_bypassed = true;
                                atomic_inc(&device->raid_bypass_cnt);
@@ -5389,7 +5751,6 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
        }
 
 out:
-       pqi_ctrl_unbusy(ctrl_info);
        if (rc)
                atomic_dec(&device->scsi_cmds_outstanding);
 
@@ -5479,6 +5840,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
                        list_for_each_entry_safe(io_request, next,
                                &queue_group->request_list[path],
                                request_list_entry) {
+
                                scmd = io_request->scmd;
                                if (!scmd)
                                        continue;
@@ -5489,6 +5851,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
 
                                list_del(&io_request->request_list_entry);
                                set_host_byte(scmd, DID_RESET);
+                               pqi_free_io_request(io_request);
+                               scsi_dma_unmap(scmd);
                                pqi_scsi_done(scmd);
                        }
 
@@ -5498,102 +5862,37 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
        }
 }
 
-static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
-{
-       unsigned int i;
-       unsigned int path;
-       struct pqi_queue_group *queue_group;
-       unsigned long flags;
-       struct pqi_io_request *io_request;
-       struct pqi_io_request *next;
-       struct scsi_cmnd *scmd;
-
-       for (i = 0; i < ctrl_info->num_queue_groups; i++) {
-               queue_group = &ctrl_info->queue_groups[i];
-
-               for (path = 0; path < 2; path++) {
-                       spin_lock_irqsave(&queue_group->submit_lock[path],
-                                               flags);
-
-                       list_for_each_entry_safe(io_request, next,
-                               &queue_group->request_list[path],
-                               request_list_entry) {
-
-                               scmd = io_request->scmd;
-                               if (!scmd)
-                                       continue;
-
-                               list_del(&io_request->request_list_entry);
-                               set_host_byte(scmd, DID_RESET);
-                               pqi_scsi_done(scmd);
-                       }
-
-                       spin_unlock_irqrestore(
-                               &queue_group->submit_lock[path], flags);
-               }
-       }
-}
+#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS    10
 
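+/*
+ * Polls until all commands outstanding against @device complete,
+ * logging a warning every PQI_PENDING_IO_WARNING_TIMEOUT_SECS and
+ * returning -ETIMEDOUT once @timeout_msecs has elapsed.
+ */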
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device, unsigned long timeout_secs)
+       struct pqi_scsi_dev *device, unsigned long timeout_msecs)
 {
-       unsigned long timeout;
+       int cmds_outstanding;
+       unsigned long start_jiffies;
+       unsigned long warning_timeout;
+       unsigned long msecs_waiting;
 
-       timeout = (timeout_secs * PQI_HZ) + jiffies;
+       start_jiffies = jiffies;
+       warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
 
-       while (atomic_read(&device->scsi_cmds_outstanding)) {
+       while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
                pqi_check_ctrl_health(ctrl_info);
                if (pqi_ctrl_offline(ctrl_info))
                        return -ENXIO;
-               if (timeout_secs != NO_TIMEOUT) {
-                       if (time_after(jiffies, timeout)) {
-                               dev_err(&ctrl_info->pci_dev->dev,
-                                       "timed out waiting for pending IO\n");
-                               return -ETIMEDOUT;
-                       }
-               }
-               usleep_range(1000, 2000);
-       }
-
-       return 0;
-}
-
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-       unsigned long timeout_secs)
-{
-       bool io_pending;
-       unsigned long flags;
-       unsigned long timeout;
-       struct pqi_scsi_dev *device;
-
-       timeout = (timeout_secs * PQI_HZ) + jiffies;
-       while (1) {
-               io_pending = false;
-
-               spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-               list_for_each_entry(device, &ctrl_info->scsi_device_list,
-                       scsi_device_list_entry) {
-                       if (atomic_read(&device->scsi_cmds_outstanding)) {
-                               io_pending = true;
-                               break;
-                       }
+               msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
+               if (msecs_waiting > timeout_msecs) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
+                               ctrl_info->scsi_host->host_no, device->bus, device->target,
+                               device->lun, msecs_waiting / 1000, cmds_outstanding);
+                       return -ETIMEDOUT;
                }
-               spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
-                                       flags);
-
-               if (!io_pending)
-                       break;
-
-               pqi_check_ctrl_health(ctrl_info);
-               if (pqi_ctrl_offline(ctrl_info))
-                       return -ENXIO;
-
-               if (timeout_secs != NO_TIMEOUT) {
-                       if (time_after(jiffies, timeout)) {
-                               dev_err(&ctrl_info->pci_dev->dev,
-                                       "timed out waiting for pending IO\n");
-                               return -ETIMEDOUT;
-                       }
+               if (time_after(jiffies, warning_timeout)) {
+                       dev_warn(&ctrl_info->pci_dev->dev,
+                               "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
+                               ctrl_info->scsi_host->host_no, device->bus, device->target,
+                               device->lun, msecs_waiting / 1000, cmds_outstanding);
+                       warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
                }
                usleep_range(1000, 2000);
        }
@@ -5601,18 +5900,6 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
        return 0;
 }
 
-static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
-{
-       while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
-               pqi_check_ctrl_health(ctrl_info);
-               if (pqi_ctrl_offline(ctrl_info))
-                       return -ENXIO;
-               usleep_range(1000, 2000);
-       }
-
-       return 0;
-}
-
 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
        void *context)
 {
@@ -5621,13 +5908,15 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
        complete(waiting);
 }
 
-#define PQI_LUN_RESET_TIMEOUT_SECS             30
 #define PQI_LUN_RESET_POLL_COMPLETION_SECS     10
 
 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device, struct completion *wait)
 {
        int rc;
+       unsigned int wait_secs;
+
+       wait_secs = 0;
 
        while (1) {
                if (wait_for_completion_io_timeout(wait,
@@ -5641,13 +5930,21 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
                        rc = -ENXIO;
                        break;
                }
+
+               wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
+
+               dev_warn(&ctrl_info->pci_dev->dev,
+                       "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n",
+                       ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun,
+                       wait_secs);
        }
 
        return rc;
 }
 
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device)
+#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS    30
+
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
 {
        int rc;
        struct pqi_io_request *io_request;
@@ -5669,11 +5966,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
                sizeof(request->lun_number));
        request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
        if (ctrl_info->tmf_iu_timeout_supported)
-               put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
-                                       &request->timeout);
+               put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
 
-       pqi_start_io(ctrl_info,
-               &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+       pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
                io_request);
 
        rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
@@ -5685,31 +5980,33 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
        return rc;
 }
 
-/* Performs a reset at the LUN level. */
+#define PQI_LUN_RESET_RETRIES                          3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS             (10 * 1000)
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS         (10 * 60 * 1000)
+#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS  (2 * 60 * 1000)
 
-#define PQI_LUN_RESET_RETRIES                  3
-#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS     10000
-#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS  120
-
-static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
-       struct pqi_scsi_dev *device)
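+/*
+ * Issues up to 1 + PQI_LUN_RESET_RETRIES LUN resets, then waits for
+ * outstanding I/O to drain; a shorter drain timeout applies when the
+ * reset itself failed.
+ */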
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
 {
-       int rc;
+       int reset_rc;
+       int wait_rc;
        unsigned int retries;
-       unsigned long timeout_secs;
+       unsigned long timeout_msecs;
 
        for (retries = 0;;) {
-               rc = pqi_lun_reset(ctrl_info, device);
-               if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+               reset_rc = pqi_lun_reset(ctrl_info, device);
+               if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
                        break;
                msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
        }
 
-       timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
+       timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
+               PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
 
-       rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
+       wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+       if (wait_rc && reset_rc == 0)
+               reset_rc = wait_rc;
 
-       return rc == 0 ? SUCCESS : FAILED;
+       return reset_rc == 0 ? SUCCESS : FAILED;
 }
 
 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
@@ -5717,23 +6014,15 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
 {
        int rc;
 
-       mutex_lock(&ctrl_info->lun_reset_mutex);
-
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
        pqi_fail_io_queued_for_device(ctrl_info, device);
        rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
-       pqi_device_reset_start(device);
-       pqi_ctrl_unblock_requests(ctrl_info);
-
        if (rc)
                rc = FAILED;
        else
-               rc = _pqi_device_reset(ctrl_info, device);
-
-       pqi_device_reset_done(device);
-
-       mutex_unlock(&ctrl_info->lun_reset_mutex);
+               rc = pqi_lun_reset_with_retries(ctrl_info, device);
+       pqi_ctrl_unblock_requests(ctrl_info);
 
        return rc;
 }
@@ -5749,29 +6038,25 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
        ctrl_info = shost_to_hba(shost);
        device = scmd->device->hostdata;
 
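+       /* Allow only one device reset to be in progress at a time. */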
+       mutex_lock(&ctrl_info->lun_reset_mutex);
+
        dev_err(&ctrl_info->pci_dev->dev,
                "resetting scsi %d:%d:%d:%d\n",
                shost->host_no, device->bus, device->target, device->lun);
 
        pqi_check_ctrl_health(ctrl_info);
-       if (pqi_ctrl_offline(ctrl_info) ||
-               pqi_device_reset_blocked(ctrl_info)) {
+       if (pqi_ctrl_offline(ctrl_info))
                rc = FAILED;
-               goto out;
-       }
-
-       pqi_wait_until_ofa_finished(ctrl_info);
-
-       atomic_inc(&ctrl_info->sync_cmds_outstanding);
-       rc = pqi_device_reset(ctrl_info, device);
-       atomic_dec(&ctrl_info->sync_cmds_outstanding);
+       else
+               rc = pqi_device_reset(ctrl_info, device);
 
-out:
        dev_err(&ctrl_info->pci_dev->dev,
                "reset of scsi %d:%d:%d:%d: %s\n",
                shost->host_no, device->bus, device->target, device->lun,
                rc == SUCCESS ? "SUCCESS" : "FAILED");
 
+       mutex_unlock(&ctrl_info->lun_reset_mutex);
+
        return rc;
 }
 
@@ -5809,10 +6094,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
                        scsi_change_queue_depth(sdev,
                                device->advertised_queue_depth);
                }
-               if (pqi_is_logical_device(device))
+               if (pqi_is_logical_device(device)) {
                        pqi_disable_write_same(sdev);
-               else
+               } else {
                        sdev->allow_restart = 1;
+                       if (device->device_type == SA_DEVICE_TYPE_NVME)
+                               pqi_disable_write_same(sdev);
+               }
        }
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5986,6 +6274,8 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 
        if (pqi_ctrl_offline(ctrl_info))
                return -ENXIO;
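+       /* Reject passthrough requests while OFA has the controller quiesced. */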
+       if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
+               return -EBUSY;
        if (!arg)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
@@ -6070,7 +6360,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
                put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+               PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
 
        if (iocommand.buf_size > 0)
                pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
@@ -6122,9 +6412,6 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
 
        ctrl_info = shost_to_hba(sdev->host);
 
-       if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
-               return -EBUSY;
-
        switch (cmd) {
        case CCISS_DEREGDISK:
        case CCISS_REGNEWDISK:
@@ -6157,14 +6444,13 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
 
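+       /* scnprintf() returns the number of bytes actually written, as sysfs expects. */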
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
 }
 
 static ssize_t pqi_driver_version_show(struct device *dev,
        struct device_attribute *attr, char *buffer)
 {
-       return snprintf(buffer, PAGE_SIZE, "%s\n",
-                       DRIVER_VERSION BUILD_TIMESTAMP);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
 }
 
 static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6176,7 +6462,7 @@ static ssize_t pqi_serial_number_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
 
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
 }
 
 static ssize_t pqi_model_show(struct device *dev,
@@ -6188,7 +6474,7 @@ static ssize_t pqi_model_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
 
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
 }
 
 static ssize_t pqi_vendor_show(struct device *dev,
@@ -6200,7 +6486,7 @@ static ssize_t pqi_vendor_show(struct device *dev,
        shost = class_to_shost(dev);
        ctrl_info = shost_to_hba(shost);
 
-       return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
 }
 
 static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -6253,14 +6539,103 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
        return -EINVAL;
 }
 
+static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+       return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_stream_detection);
+}
+
+static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       u8 set_stream_detection = 0;
+
+       if (kstrtou8(buffer, 0, &set_stream_detection))
+               return -EINVAL;
+
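+       /* Normalize: any nonzero value enables stream detection. */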
+       if (set_stream_detection > 0)
+               set_stream_detection = 1;
+
+       ctrl_info->enable_stream_detection = set_stream_detection;
+
+       return count;
+}
+
+static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+       return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
+}
+
+static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       u8 set_r5_writes = 0;
+
+       if (kstrtou8(buffer, 0, &set_r5_writes))
+               return -EINVAL;
+
+       if (set_r5_writes > 0)
+               set_r5_writes = 1;
+
+       ctrl_info->enable_r5_writes = set_r5_writes;
+
+       return count;
+}
+
+static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
+       struct device_attribute *attr, char *buffer)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+       return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
+}
+
+static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
+       struct device_attribute *attr, const char *buffer, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+       u8 set_r6_writes = 0;
+
+       if (kstrtou8(buffer, 0, &set_r6_writes))
+               return -EINVAL;
+
+       if (set_r6_writes > 0)
+               set_r6_writes = 1;
+
+       ctrl_info->enable_r6_writes = set_r6_writes;
+
+       return count;
+}
+
 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
-static DEVICE_ATTR(lockup_action, 0644,
-       pqi_lockup_action_show, pqi_lockup_action_store);
+static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
+       pqi_lockup_action_store);
+static DEVICE_ATTR(enable_stream_detection, 0644,
+       pqi_host_enable_stream_detection_show,
+       pqi_host_enable_stream_detection_store);
+static DEVICE_ATTR(enable_r5_writes, 0644,
+       pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
+static DEVICE_ATTR(enable_r6_writes, 0644,
+       pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
 
 static struct device_attribute *pqi_shost_attrs[] = {
        &dev_attr_driver_version,
@@ -6270,6 +6645,9 @@ static struct device_attribute *pqi_shost_attrs[] = {
        &dev_attr_vendor,
        &dev_attr_rescan,
        &dev_attr_lockup_action,
+       &dev_attr_enable_stream_detection,
+       &dev_attr_enable_r5_writes,
+       &dev_attr_enable_r6_writes,
        NULL
 };
 
@@ -6302,8 +6680,9 @@ static ssize_t pqi_unique_id_show(struct device *dev,
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-       return snprintf(buffer, PAGE_SIZE,
-               "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+       return scnprintf(buffer, PAGE_SIZE,
+               "%02X%02X%02X%02X%02X%02X%02X%02X"
+               "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                unique_id[0], unique_id[1], unique_id[2], unique_id[3],
                unique_id[4], unique_id[5], unique_id[6], unique_id[7],
                unique_id[8], unique_id[9], unique_id[10], unique_id[11],
@@ -6334,7 +6713,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-       return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+       return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
 }
 
 #define MAX_PATHS      8
@@ -6446,7 +6825,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-       return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+       return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
 }
 
 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
@@ -6504,7 +6883,7 @@ static ssize_t pqi_raid_level_show(struct device *dev,
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-       return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
 }
 
 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
@@ -6531,7 +6910,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
 
        spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
-       return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+       return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
 }
 
 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
@@ -6578,9 +6957,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
 
        shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
        if (!shost) {
-               dev_err(&ctrl_info->pci_dev->dev,
-                       "scsi_host_alloc failed for controller %u\n",
-                       ctrl_info->ctrl_id);
+               dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
                return -ENOMEM;
        }
 
@@ -6599,21 +6976,18 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
        shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
        shost->unique_id = shost->irq;
        shost->nr_hw_queues = ctrl_info->num_queue_groups;
+       shost->host_tagset = 1;
        shost->hostdata[0] = (unsigned long)ctrl_info;
 
        rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
        if (rc) {
-               dev_err(&ctrl_info->pci_dev->dev,
-                       "scsi_add_host failed for controller %u\n",
-                       ctrl_info->ctrl_id);
+               dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
                goto free_host;
        }
 
        rc = pqi_add_sas_host(shost, ctrl_info);
        if (rc) {
-               dev_err(&ctrl_info->pci_dev->dev,
-                       "add SAS host failed for controller %u\n",
-                       ctrl_info->ctrl_id);
+               dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
                goto remove_host;
        }
 
@@ -6683,8 +7057,7 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
                rc = sis_pqi_reset_quiesce(ctrl_info);
                if (rc) {
                        dev_err(&ctrl_info->pci_dev->dev,
-                               "PQI reset failed during quiesce with error %d\n",
-                               rc);
+                               "PQI reset failed during quiesce with error %d\n", rc);
                        return rc;
                }
        }
@@ -6739,13 +7112,24 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
        if (rc)
                goto out;
 
-       memcpy(ctrl_info->firmware_version, identify->firmware_version,
-               sizeof(identify->firmware_version));
-       ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
-       snprintf(ctrl_info->firmware_version +
-               strlen(ctrl_info->firmware_version),
-               sizeof(ctrl_info->firmware_version),
-               "-%u", get_unaligned_le16(&identify->firmware_build_number));
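+       /*
+        * Prefer the long firmware version string when the firmware provides
+        * it; otherwise build one from the short version and build number.
+        */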
+       if (get_unaligned_le32(&identify->extra_controller_flags) &
+               BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
+               memcpy(ctrl_info->firmware_version,
+                       identify->firmware_version_long,
+                       sizeof(identify->firmware_version_long));
+       } else {
+               memcpy(ctrl_info->firmware_version,
+                       identify->firmware_version_short,
+                       sizeof(identify->firmware_version_short));
+               ctrl_info->firmware_version[sizeof(identify->firmware_version_short)] = '\0';
+               snprintf(ctrl_info->firmware_version +
+                       strlen(ctrl_info->firmware_version),
+                       sizeof(ctrl_info->firmware_version) -
+                       sizeof(identify->firmware_version_short),
+                       "-%u",
+                       get_unaligned_le16(&identify->firmware_build_number));
+       }
 
        memcpy(ctrl_info->model, identify->product_id,
                sizeof(identify->product_id));
@@ -6832,8 +7216,7 @@ static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
        put_unaligned_le16(last_section,
                &request.data.config_table_update.last_section);
 
-       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 }
 
 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
@@ -6842,6 +7225,7 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
 {
        void *features_requested;
        void __iomem *features_requested_iomem_addr;
+       void __iomem *host_max_known_feature_iomem_addr;
 
        features_requested = firmware_features->features_supported +
                le16_to_cpu(firmware_features->num_elements);
@@ -6852,6 +7236,16 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
        memcpy_toio(features_requested_iomem_addr, features_requested,
                le16_to_cpu(firmware_features->num_elements));
 
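+       /* Report the highest feature bit the driver knows about to the firmware. */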
+       if (pqi_is_firmware_feature_supported(firmware_features,
+               PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
+               host_max_known_feature_iomem_addr =
+                       features_requested_iomem_addr +
+                       (le16_to_cpu(firmware_features->num_elements) * 2) +
+                       sizeof(__le16);
+               writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
+                       host_max_known_feature_iomem_addr);
+       }
+
        return pqi_config_table_update(ctrl_info,
                PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
                PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
@@ -6889,16 +7283,28 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
        struct pqi_firmware_feature *firmware_feature)
 {
        switch (firmware_feature->feature_bit) {
+       case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+               ctrl_info->enable_r1_writes = firmware_feature->enabled;
+               break;
+       case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+               ctrl_info->enable_r5_writes = firmware_feature->enabled;
+               break;
+       case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+               ctrl_info->enable_r6_writes = firmware_feature->enabled;
+               break;
        case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
                ctrl_info->soft_reset_handshake_supported =
-                       firmware_feature->enabled;
+                       firmware_feature->enabled &&
+                       pqi_read_soft_reset_status(ctrl_info);
                break;
        case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
-               ctrl_info->raid_iu_timeout_supported =
-                       firmware_feature->enabled;
+               ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
                break;
        case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
-               ctrl_info->tmf_iu_timeout_supported =
+               ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
+               break;
+       case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
+               ctrl_info->unique_wwid_in_report_phys_lun_supported =
                        firmware_feature->enabled;
                break;
        }
@@ -6917,15 +7323,60 @@ static DEFINE_MUTEX(pqi_firmware_features_mutex);
 
 static struct pqi_firmware_feature pqi_firmware_features[] = {
        {
-               .feature_name = "Online Firmware Activation",
-               .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+               .feature_name = "Online Firmware Activation",
+               .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "Serial Management Protocol",
+               .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "Maximum Known Feature",
+               .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 0 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
                .feature_status = pqi_firmware_feature_status,
        },
        {
-               .feature_name = "Serial Management Protocol",
-               .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+               .feature_name = "RAID 1 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 5 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
                .feature_status = pqi_firmware_feature_status,
        },
+       {
+               .feature_name = "RAID 6 Read Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 0 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "RAID 1 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
+       {
+               .feature_name = "RAID 5 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
+       {
+               .feature_name = "RAID 6 Write Bypass",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
        {
                .feature_name = "New Soft Reset Handshake",
                .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
@@ -6941,6 +7392,16 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
                .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
                .feature_status = pqi_ctrl_update_feature_flags,
        },
+       {
+               .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
+               .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
+               .feature_status = pqi_firmware_feature_status,
+       },
+       {
+               .feature_name = "Unique WWID in Report Physical LUN",
+               .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
+               .feature_status = pqi_ctrl_update_feature_flags,
+       },
 };
 
 static void pqi_process_firmware_features(
@@ -6999,7 +7460,7 @@ static void pqi_process_firmware_features(
                if (pqi_is_firmware_feature_enabled(firmware_features,
                        firmware_features_iomem_addr,
                        pqi_firmware_features[i].feature_bit)) {
                        pqi_firmware_features[i].enabled = true;
                }
                pqi_firmware_feature_update(ctrl_info,
                        &pqi_firmware_features[i]);
@@ -7025,14 +7486,34 @@ static void pqi_process_firmware_features_section(
        mutex_unlock(&pqi_firmware_features_mutex);
 }
 
+/*
+ * Reset all controller settings that are initialized while processing the
+ * PQI Configuration Table so that stale values do not survive a re-init.
+ */
+
+static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
+{
+       ctrl_info->heartbeat_counter = NULL;
+       ctrl_info->soft_reset_status = NULL;
+       ctrl_info->soft_reset_handshake_supported = false;
+       ctrl_info->enable_r1_writes = false;
+       ctrl_info->enable_r5_writes = false;
+       ctrl_info->enable_r6_writes = false;
+       ctrl_info->raid_iu_timeout_supported = false;
+       ctrl_info->tmf_iu_timeout_supported = false;
+       ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
+}
+
 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
 {
        u32 table_length;
        u32 section_offset;
+       bool firmware_feature_section_present;
        void __iomem *table_iomem_addr;
        struct pqi_config_table *config_table;
        struct pqi_config_table_section_header *section;
        struct pqi_config_table_section_info section_info;
+       struct pqi_config_table_section_info feature_section_info;
 
        table_length = ctrl_info->config_table_length;
        if (table_length == 0)
@@ -7049,25 +7530,24 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
         * Copy the config table contents from I/O memory space into the
         * temporary buffer.
         */
-       table_iomem_addr = ctrl_info->iomem_base +
-               ctrl_info->config_table_offset;
+       table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
        memcpy_fromio(config_table, table_iomem_addr, table_length);
 
+       firmware_feature_section_present = false;
        section_info.ctrl_info = ctrl_info;
-       section_offset =
-               get_unaligned_le32(&config_table->first_section_offset);
+       section_offset = get_unaligned_le32(&config_table->first_section_offset);
 
        while (section_offset) {
                section = (void *)config_table + section_offset;
 
                section_info.section = section;
                section_info.section_offset = section_offset;
-               section_info.section_iomem_addr =
-                       table_iomem_addr + section_offset;
+               section_info.section_iomem_addr = table_iomem_addr + section_offset;
 
                switch (get_unaligned_le16(&section->section_id)) {
                case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
-                       pqi_process_firmware_features_section(&section_info);
+                       firmware_feature_section_present = true;
+                       feature_section_info = section_info;
                        break;
                case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
                        if (pqi_disable_heartbeat)
@@ -7077,8 +7557,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
                                ctrl_info->heartbeat_counter =
                                        table_iomem_addr +
                                        section_offset +
-                                       offsetof(
-                                       struct pqi_config_table_heartbeat,
+                                       offsetof(struct pqi_config_table_heartbeat,
                                                heartbeat_counter);
                        break;
                case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
@@ -7086,14 +7565,21 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
                                table_iomem_addr +
                                section_offset +
                                offsetof(struct pqi_config_table_soft_reset,
-                                               soft_reset_status);
+                                       soft_reset_status);
                        break;
                }
 
-               section_offset =
-                       get_unaligned_le16(&section->next_section_offset);
+               section_offset = get_unaligned_le16(&section->next_section_offset);
        }
 
+       /*
+        * Process the firmware feature section last so that the feature bit
+        * callbacks can take into account the settings configured by the
+        * other sections.
+        */
+       if (firmware_feature_section_present)
+               pqi_process_firmware_features_section(&feature_section_info);
+
        kfree(config_table);
 
        return 0;
@@ -7141,15 +7627,14 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
        return pqi_revert_to_sis_mode(ctrl_info);
 }
 
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY     5000
-
 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 {
        int rc;
+       u32 product_id;
 
        if (reset_devices) {
                sis_soft_reset(ctrl_info);
-               msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+               msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
        } else {
                rc = pqi_force_sis_mode(ctrl_info);
                if (rc)
@@ -7182,15 +7667,19 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
                return rc;
        }
 
+       product_id = sis_get_product_id(ctrl_info);
+       ctrl_info->product_id = (u8)product_id;
+       ctrl_info->product_revision = (u8)(product_id >> 8);
+
        if (reset_devices) {
                if (ctrl_info->max_outstanding_requests >
                        PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
                        ctrl_info->max_outstanding_requests =
                                        PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
        } else {
                if (ctrl_info->max_outstanding_requests >
                        PQI_MAX_OUTSTANDING_REQUESTS)
                        ctrl_info->max_outstanding_requests =
                                        PQI_MAX_OUTSTANDING_REQUESTS;
        }
 
@@ -7295,6 +7784,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 
        pqi_start_heartbeat_timer(ctrl_info);
 
+       if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+               rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+               if (rc) { /* Supported features not returned correctly. */
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "error obtaining advanced RAID bypass configuration\n");
+                       return rc;
+               }
+               ctrl_info->ciss_report_log_flags |=
+                       CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+       }
+
        rc = pqi_enable_events(ctrl_info);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
@@ -7444,12 +7944,25 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
        ctrl_info->controller_online = true;
        pqi_ctrl_unblock_requests(ctrl_info);
 
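+       /* Forget config-table-derived settings; they are re-read below. */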
+       pqi_ctrl_reset_config(ctrl_info);
+
        rc = pqi_process_config_table(ctrl_info);
        if (rc)
                return rc;
 
        pqi_start_heartbeat_timer(ctrl_info);
 
+       if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+               rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+               if (rc) {
+                       dev_err(&ctrl_info->pci_dev->dev,
+                               "error obtaining advanced RAID bypass configuration\n");
+                       return rc;
+               }
+               ctrl_info->ciss_report_log_flags |=
+                       CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+       }
+
        rc = pqi_enable_events(ctrl_info);
        if (rc) {
                dev_err(&ctrl_info->pci_dev->dev,
@@ -7478,15 +7991,15 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
                return rc;
        }
 
-       pqi_schedule_update_time_worker(ctrl_info);
+       if (pqi_ofa_in_progress(ctrl_info))
+               pqi_ctrl_unblock_scan(ctrl_info);
 
        pqi_scan_scsi_devices(ctrl_info);
 
        return 0;
 }
 
-static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
-       u16 timeout)
+static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
 {
        int rc;
 
@@ -7592,7 +8105,6 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
 
        INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
        atomic_set(&ctrl_info->num_interrupts, 0);
-       atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
 
        INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
        INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7600,19 +8112,26 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
        timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
        INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
 
+       INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
+       INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
+
        sema_init(&ctrl_info->sync_request_sem,
                PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
        init_waitqueue_head(&ctrl_info->block_requests_wait);
 
-       INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
-       spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
-       INIT_WORK(&ctrl_info->raid_bypass_retry_work,
-               pqi_raid_bypass_retry_worker);
-
        ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
        ctrl_info->irq_mode = IRQ_MODE_NONE;
        ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
 
+       ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+       ctrl_info->max_transfer_encrypted_sas_sata =
+               PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
+       ctrl_info->max_transfer_encrypted_nvme =
+               PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
+       ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
+       ctrl_info->max_write_raid_1_10_2drive = ~0;
+       ctrl_info->max_write_raid_1_10_3drive = ~0;
+
        return ctrl_info;
 }
 
@@ -7664,81 +8183,57 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
 
 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
 {
-       pqi_cancel_update_time_worker(ctrl_info);
-       pqi_cancel_rescan_worker(ctrl_info);
-       pqi_wait_until_lun_reset_finished(ctrl_info);
-       pqi_wait_until_scan_finished(ctrl_info);
-       pqi_ctrl_ofa_start(ctrl_info);
+       pqi_ctrl_block_scan(ctrl_info);
+       pqi_scsi_block_requests(ctrl_info);
+       pqi_ctrl_block_device_reset(ctrl_info);
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
-       pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
-       pqi_fail_io_queued_for_all_devices(ctrl_info);
-       pqi_wait_until_inbound_queues_empty(ctrl_info);
        pqi_stop_heartbeat_timer(ctrl_info);
-       ctrl_info->pqi_mode_enabled = false;
-       pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
 }
 
 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
 {
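+       /* Undo the pqi_ofa_ctrl_quiesce() steps in reverse order. */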
-       pqi_ofa_free_host_buffer(ctrl_info);
-       ctrl_info->pqi_mode_enabled = true;
-       pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
-       ctrl_info->controller_online = true;
-       pqi_ctrl_unblock_requests(ctrl_info);
        pqi_start_heartbeat_timer(ctrl_info);
-       pqi_schedule_update_time_worker(ctrl_info);
-       pqi_clear_soft_reset_status(ctrl_info,
-               PQI_SOFT_RESET_ABORT);
-       pqi_scan_scsi_devices(ctrl_info);
+       pqi_ctrl_unblock_requests(ctrl_info);
+       pqi_ctrl_unblock_device_reset(ctrl_info);
+       pqi_scsi_unblock_requests(ctrl_info);
+       pqi_ctrl_unblock_scan(ctrl_info);
 }
 
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
-       u32 total_size, u32 chunk_size)
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
 {
-       u32 sg_count;
-       u32 size;
        int i;
-       struct pqi_sg_descriptor *mem_descriptor = NULL;
+       u32 sg_count;
        struct device *dev;
        struct pqi_ofa_memory *ofap;
-
-       dev = &ctrl_info->pci_dev->dev;
-
-       sg_count = (total_size + chunk_size - 1);
-       sg_count /= chunk_size;
+       struct pqi_sg_descriptor *mem_descriptor;
+       dma_addr_t dma_handle;
 
        ofap = ctrl_info->pqi_ofa_mem_virt_addr;
 
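+       /* Split the host buffer into at most PQI_OFA_MAX_SG_DESCRIPTORS DMA-coherent chunks. */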
-       if (sg_count*chunk_size < total_size)
+       sg_count = DIV_ROUND_UP(total_size, chunk_size);
+       if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
                goto out;
 
-       ctrl_info->pqi_ofa_chunk_virt_addr =
-                               kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+       ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
        if (!ctrl_info->pqi_ofa_chunk_virt_addr)
                goto out;
 
-       for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
-               dma_addr_t dma_handle;
+       dev = &ctrl_info->pci_dev->dev;
 
+       for (i = 0; i < sg_count; i++) {
                ctrl_info->pqi_ofa_chunk_virt_addr[i] =
-                       dma_alloc_coherent(dev, chunk_size, &dma_handle,
-                                          GFP_KERNEL);
-
+                       dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
                if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
-                       break;
-
+                       goto out_free_chunks;
                mem_descriptor = &ofap->sg_descriptor[i];
-               put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
-               put_unaligned_le32 (chunk_size, &mem_descriptor->length);
+               put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+               put_unaligned_le32(chunk_size, &mem_descriptor->length);
        }
 
-       if (!size || size < total_size)
-               goto out_free_chunks;
-
        put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
        put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
-       put_unaligned_le32(size, &ofap->bytes_allocated);
+       put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
 
        return 0;
 
@@ -7746,82 +8241,87 @@ out_free_chunks:
        while (--i >= 0) {
                mem_descriptor = &ofap->sg_descriptor[i];
                dma_free_coherent(dev, chunk_size,
-                               ctrl_info->pqi_ofa_chunk_virt_addr[i],
-                               get_unaligned_le64(&mem_descriptor->address));
+                       ctrl_info->pqi_ofa_chunk_virt_addr[i],
+                       get_unaligned_le64(&mem_descriptor->address));
        }
        kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
 
 out:
-       put_unaligned_le32 (0, &ofap->bytes_allocated);
        return -ENOMEM;
 }
 
 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
 {
        u32 total_size;
+       u32 chunk_size;
        u32 min_chunk_size;
-       u32 chunk_sz;
 
-       total_size = le32_to_cpu(
-                       ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
-       min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+       if (ctrl_info->ofa_bytes_requested == 0)
+               return 0;
+
+       total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
+       min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
+       min_chunk_size = PAGE_ALIGN(min_chunk_size);
 
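+       /* Halve the chunk size (keeping it page-aligned) until an allocation succeeds. */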
-       for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
-               if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
+       for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
+               if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
                        return 0;
+               chunk_size /= 2;
+               chunk_size = PAGE_ALIGN(chunk_size);
+       }
 
        return -ENOMEM;
 }
 
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
-       u32 bytes_requested)
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
 {
-       struct pqi_ofa_memory *pqi_ofa_memory;
        struct device *dev;
+       struct pqi_ofa_memory *ofap;
 
        dev = &ctrl_info->pci_dev->dev;
-       pqi_ofa_memory = dma_alloc_coherent(dev,
-                                           PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
-                                           &ctrl_info->pqi_ofa_mem_dma_handle,
-                                           GFP_KERNEL);
 
-       if (!pqi_ofa_memory)
+       ofap = dma_alloc_coherent(dev, sizeof(*ofap),
+               &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
+       if (!ofap)
                return;
 
-       put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
-       memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
-                                       sizeof(pqi_ofa_memory->signature));
-       pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
-
-       ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+       ctrl_info->pqi_ofa_mem_virt_addr = ofap;
 
        if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
-               dev_err(dev, "Failed to allocate host buffer of size = %u",
-                       bytes_requested);
+               dev_err(dev,
+                       "failed to allocate host buffer for Online Firmware Activation\n");
+               dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
+               ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+               return;
        }
 
-       return;
+       put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
+       memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
 }
 
 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
 {
-       int i;
-       struct pqi_sg_descriptor *mem_descriptor;
+       unsigned int i;
+       struct device *dev;
        struct pqi_ofa_memory *ofap;
+       struct pqi_sg_descriptor *mem_descriptor;
+       unsigned int num_memory_descriptors;
 
        ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
        if (!ofap)
                return;
 
-       if (!ofap->bytes_allocated)
+       dev = &ctrl_info->pci_dev->dev;
+
+       if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
                goto out;
 
        mem_descriptor = ofap->sg_descriptor;
+       num_memory_descriptors = get_unaligned_le16(&ofap->num_memory_descriptors);
 
-       for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
-               i++) {
-               dma_free_coherent(&ctrl_info->pci_dev->dev,
+       for (i = 0; i < num_memory_descriptors; i++) {
+               dma_free_coherent(dev,
                        get_unaligned_le32(&mem_descriptor[i].length),
                        ctrl_info->pqi_ofa_chunk_virt_addr[i],
                        get_unaligned_le64(&mem_descriptor[i].address));
@@ -7829,47 +8329,45 @@ static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
        kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
 
 out:
-       dma_free_coherent(&ctrl_info->pci_dev->dev,
-                       PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
-                       ctrl_info->pqi_ofa_mem_dma_handle);
+       dma_free_coherent(dev, sizeof(*ofap), ofap,
+               ctrl_info->pqi_ofa_mem_dma_handle);
        ctrl_info->pqi_ofa_mem_virt_addr = NULL;
 }
 
 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
 {
+       u32 buffer_length;
        struct pqi_vendor_general_request request;
-       size_t size;
        struct pqi_ofa_memory *ofap;
 
        memset(&request, 0, sizeof(request));
 
-       ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
        request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
        put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
                &request.header.iu_length);
        put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
                &request.function_code);
 
+       ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
        if (ofap) {
-               size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+               buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
                        get_unaligned_le16(&ofap->num_memory_descriptors) *
                        sizeof(struct pqi_sg_descriptor);
 
                put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
                        &request.data.ofa_memory_allocation.buffer_address);
-               put_unaligned_le32(size,
+               put_unaligned_le32(buffer_length,
                        &request.data.ofa_memory_allocation.buffer_length);
-
        }
 
-       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-               0, NULL, NO_TIMEOUT);
+       return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
 }
 
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
 {
-       msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+       ssleep(delay_secs);
+
        return pqi_ctrl_init_resume(ctrl_info);
 }
 
@@ -7927,7 +8425,6 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
        pqi_cancel_update_time_worker(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
        pqi_fail_all_outstanding_requests(ctrl_info);
-       pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
        pqi_ctrl_unblock_requests(ctrl_info);
 }
 
@@ -8060,24 +8557,12 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
                return;
        }
 
-       pqi_disable_events(ctrl_info);
        pqi_wait_until_ofa_finished(ctrl_info);
-       pqi_cancel_update_time_worker(ctrl_info);
-       pqi_cancel_rescan_worker(ctrl_info);
-       pqi_cancel_event_worker(ctrl_info);
-
-       pqi_ctrl_shutdown_start(ctrl_info);
-       pqi_ctrl_wait_until_quiesced(ctrl_info);
-
-       rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
-       if (rc) {
-               dev_err(&pci_dev->dev,
-                       "wait for pending I/O failed\n");
-               return;
-       }
 
+       pqi_scsi_block_requests(ctrl_info);
        pqi_ctrl_block_device_reset(ctrl_info);
-       pqi_wait_until_lun_reset_finished(ctrl_info);
+       pqi_ctrl_block_requests(ctrl_info);
+       pqi_ctrl_wait_until_quiesced(ctrl_info);
 
        /*
         * Write all data in the controller's battery-backed cache to
@@ -8088,15 +8573,6 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
                dev_err(&pci_dev->dev,
                        "unable to flush controller cache\n");
 
-       pqi_ctrl_block_requests(ctrl_info);
-
-       rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
-       if (rc) {
-               dev_err(&pci_dev->dev,
-                       "wait for pending sync cmds failed\n");
-               return;
-       }
-
        pqi_crash_if_pending_command(ctrl_info);
        pqi_reset(ctrl_info);
 }
@@ -8131,19 +8607,18 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
 
        ctrl_info = pci_get_drvdata(pci_dev);
 
-       pqi_disable_events(ctrl_info);
-       pqi_cancel_update_time_worker(ctrl_info);
-       pqi_cancel_rescan_worker(ctrl_info);
-       pqi_wait_until_scan_finished(ctrl_info);
-       pqi_wait_until_lun_reset_finished(ctrl_info);
        pqi_wait_until_ofa_finished(ctrl_info);
-       pqi_flush_cache(ctrl_info, SUSPEND);
+
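+       /* Block scans, SCSI requests, and device resets before flushing the cache. */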
+       pqi_ctrl_block_scan(ctrl_info);
+       pqi_scsi_block_requests(ctrl_info);
+       pqi_ctrl_block_device_reset(ctrl_info);
        pqi_ctrl_block_requests(ctrl_info);
        pqi_ctrl_wait_until_quiesced(ctrl_info);
-       pqi_wait_until_inbound_queues_empty(ctrl_info);
-       pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+       pqi_flush_cache(ctrl_info, SUSPEND);
        pqi_stop_heartbeat_timer(ctrl_info);
 
+       pqi_crash_if_pending_command(ctrl_info);
+
        if (state.event == PM_EVENT_FREEZE)
                return 0;
 
@@ -8176,14 +8651,21 @@ static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
                                pci_dev->irq, rc);
                        return rc;
                }
-               pqi_start_heartbeat_timer(ctrl_info);
+               pqi_ctrl_unblock_device_reset(ctrl_info);
                pqi_ctrl_unblock_requests(ctrl_info);
+               pqi_scsi_unblock_requests(ctrl_info);
+               pqi_ctrl_unblock_scan(ctrl_info);
                return 0;
        }
 
        pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
 
+       pqi_ctrl_unblock_device_reset(ctrl_info);
+       pqi_ctrl_unblock_requests(ctrl_info);
+       pqi_scsi_unblock_requests(ctrl_info);
+       pqi_ctrl_unblock_scan(ctrl_info);
+
        return pqi_ctrl_init_resume(ctrl_info);
 }
 
@@ -8217,6 +8699,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x152d, 0x8a37)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x8460)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x193d, 0x1104)
@@ -8289,6 +8775,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x1bd4, 0x004f)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0051)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0052)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0053)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1bd4, 0x0054)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x19e5, 0xd227)
@@ -8449,6 +8951,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_ADAPTEC2, 0x1380)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1400)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1402)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1410)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1411)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1412)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1420)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1430)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1440)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1441)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1450)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1452)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1460)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1461)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1462)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1470)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1471)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1472)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1490)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x1491)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_ADVANTECH, 0x8312)
@@ -8513,6 +9131,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_HP, 0x1001)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              PCI_VENDOR_ID_HP, 0x1002)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_HP, 0x1100)
@@ -8521,6 +9143,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_HP, 0x1101)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x0294)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x02db)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x02dc)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1590, 0x032e)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x1d8d, 0x0800)
@@ -8602,6 +9240,8 @@ static void __attribute__((unused)) verify_structures(void)
                sis_ctrl_to_host_doorbell_clear) != 0xa0);
        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
                sis_driver_scratch) != 0xb0);
+       BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+               sis_product_identifier) != 0xb4);
        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
                sis_firmware_status) != 0xbc);
        BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
@@ -8616,7 +9256,7 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
                response_queue_id) != 0x4);
        BUILD_BUG_ON(offsetof(struct pqi_iu_header,
-               work_area) != 0x6);
+               driver_flags) != 0x6);
        BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
 
        BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
@@ -8714,7 +9354,7 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
@@ -8770,7 +9410,7 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
                header.iu_length) != 2);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
@@ -8794,7 +9434,7 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
@@ -8823,7 +9463,7 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                header.response_queue_id) != 4);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
-               header.work_area) != 6);
+               header.driver_flags) != 6);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
                request_id) != 8);
        BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
@@ -8998,13 +9638,23 @@ static void __attribute__((unused)) verify_structures(void)
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                configuration_signature) != 1);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
-               firmware_version) != 5);
+               firmware_version_short) != 5);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                extended_logical_unit_count) != 154);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                firmware_build_number) != 190);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               vendor_id) != 200);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               product_id) != 208);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               extra_controller_flags) != 286);
        BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
                controller_mode) != 292);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               spare_part_number) != 293);
+       BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+               firmware_version_long) != 325);
 
        BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
                phys_bay_in_box) != 115);
@@ -9022,6 +9672,45 @@ static void __attribute__((unused)) verify_structures(void)
                current_queue_depth_limit) != 1796);
        BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
 
+       BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+               page_code) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+               subpage_code) != 1);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+               buffer_length) != 2);
+
+       BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+               page_code) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+               subpage_code) != 1);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+               page_length) != 2);
+
+       BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
+               != 18);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               header) != 0);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               firmware_read_support) != 4);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               driver_read_support) != 5);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               firmware_write_support) != 6);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               driver_write_support) != 7);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_transfer_encrypted_sas_sata) != 8);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_transfer_encrypted_nvme) != 10);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_write_raid_5_6) != 12);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_write_raid_1_10_2drive) != 14);
+       BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+               max_write_raid_1_10_3drive) != 16);
+
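/*
 * A minimal userspace sketch, not taken from this patch: the
 * BUILD_BUG_ON()/offsetof() pairs above pin the wire layout of the new
 * BMIC sense-feature structures at compile time. The same checks can be
 * expressed with C11 _Static_assert; the struct below is an assumed
 * reconstruction from the asserted offsets, not the driver's definition.
 */
#include <stddef.h>
#include <stdint.h>

struct bmic_sense_feature_buffer_header {
	uint8_t  page_code;	/* asserted offset 0 */
	uint8_t  subpage_code;	/* asserted offset 1 */
	uint16_t buffer_length;	/* asserted offset 2 */
};

_Static_assert(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) == 2, "must match controller firmware layout");
_Static_assert(sizeof(struct bmic_sense_feature_buffer_header) == 4,
		"header occupies exactly 4 bytes on the wire");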
        BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
        BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
        BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
index c9b00b3..dd9b784 100644
@@ -65,8 +65,8 @@ static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy)
        memset(identify, 0, sizeof(*identify));
        identify->sas_address = pqi_sas_port->sas_address;
        identify->device_type = SAS_END_DEVICE;
-       identify->initiator_port_protocols = SAS_PROTOCOL_STP;
-       identify->target_port_protocols = SAS_PROTOCOL_STP;
+       identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
+       identify->target_port_protocols = SAS_PROTOCOL_ALL;
        phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
        phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
        phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -92,14 +92,25 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
 
        identify = &rphy->identify;
        identify->sas_address = pqi_sas_port->sas_address;
+       identify->phy_identifier = pqi_sas_port->device->phy_id;
+
+       identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
+       identify->target_port_protocols = SAS_PROTOCOL_STP;
 
-       if (pqi_sas_port->device &&
-               pqi_sas_port->device->is_expander_smp_device) {
-               identify->initiator_port_protocols = SAS_PROTOCOL_SMP;
-               identify->target_port_protocols = SAS_PROTOCOL_SMP;
-       } else {
-               identify->initiator_port_protocols = SAS_PROTOCOL_STP;
-               identify->target_port_protocols = SAS_PROTOCOL_STP;
+       if (pqi_sas_port->device) {
+               switch (pqi_sas_port->device->device_type) {
+               case SA_DEVICE_TYPE_SAS:
+               case SA_DEVICE_TYPE_SES:
+               case SA_DEVICE_TYPE_NVME:
+                       identify->target_port_protocols = SAS_PROTOCOL_SSP;
+                       break;
+               case SA_DEVICE_TYPE_EXPANDER_SMP:
+                       identify->target_port_protocols = SAS_PROTOCOL_SMP;
+                       break;
+               case SA_DEVICE_TYPE_SATA:
+               default:
+                       break;
+               }
        }
 
        return sas_rphy_add(rphy);
@@ -107,8 +118,7 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
 
 static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port)
 {
-       if (pqi_sas_port->device &&
-               pqi_sas_port->device->is_expander_smp_device)
+       if (pqi_sas_port->device && pqi_sas_port->device->is_expander_smp_device)
                return sas_expander_alloc(pqi_sas_port->port,
                                SAS_FANOUT_EXPANDER_DEVICE);
 
@@ -161,7 +171,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
 
        list_for_each_entry_safe(pqi_sas_phy, next,
                &pqi_sas_port->phy_list_head, phy_list_entry)
-               pqi_free_sas_phy(pqi_sas_phy);
+                       pqi_free_sas_phy(pqi_sas_phy);
 
        sas_port_delete(pqi_sas_port->port);
        list_del(&pqi_sas_port->port_list_entry);
@@ -191,7 +201,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
 
        list_for_each_entry_safe(pqi_sas_port, next,
                &pqi_sas_node->port_list_head, port_list_entry)
-               pqi_free_sas_port(pqi_sas_port);
+                       pqi_free_sas_port(pqi_sas_port);
 
        kfree(pqi_sas_node);
 }
@@ -498,7 +508,7 @@ static unsigned int pqi_build_sas_smp_handler_reply(
 
        job->reply_len = le16_to_cpu(error_info->sense_data_length);
        memcpy(job->reply, error_info->data,
-                       le16_to_cpu(error_info->sense_data_length));
+               le16_to_cpu(error_info->sense_data_length));
 
        return job->reply_payload.payload_len -
                get_unaligned_le32(&error_info->data_in_transferred);
@@ -547,6 +557,7 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
                goto out;
 
        reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
+
 out:
        bsg_job_done(job, rc, reslen);
 }
index 26ea6b9..c954620 100644
@@ -71,7 +71,7 @@ struct sis_base_struct {
                                                /* error response data */
        __le32  error_buffer_element_length;    /* length of each PQI error */
                                                /* response buffer element */
-                                               /*   in bytes */
+                                               /* in bytes */
        __le32  error_buffer_num_elements;      /* total number of PQI error */
                                                /* response buffers available */
 };
@@ -146,7 +146,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
 bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info)
 {
        return readl(&ctrl_info->registers->sis_firmware_status) &
-                               SIS_CTRL_KERNEL_UP;
+               SIS_CTRL_KERNEL_UP;
+}
+
+u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info)
+{
+       return readl(&ctrl_info->registers->sis_product_identifier);
 }
 
 /* used for passing command parameters/results when issuing SIS commands */
index 878d34c..12cd2ab 100644
@@ -27,5 +27,6 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
 
 #endif /* _SMARTPQI_SIS_H */
index 6bc5453..e6718a7 100644
@@ -366,10 +366,14 @@ static u32 max_outstanding_req_per_channel;
 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
 
 static int storvsc_vcpus_per_sub_channel = 4;
+static unsigned int storvsc_max_hw_queues;
 
 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
 MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
 
+module_param(storvsc_max_hw_queues, uint, 0644);
+MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");
+
 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
 MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
 
@@ -1688,9 +1692,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
        int i;
        struct scatterlist *sgl;
-       unsigned int sg_count = 0;
+       unsigned int sg_count;
        struct vmscsi_request *vm_srb;
-       struct scatterlist *cur_sgl;
        struct vmbus_packet_mpb_array  *payload;
        u32 payload_sz;
        u32 length;
@@ -1769,8 +1772,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        payload_sz = sizeof(cmd_request->mpb);
 
        if (sg_count) {
-               unsigned int hvpgoff = 0;
-               unsigned long offset_in_hvpg = sgl->offset & ~HV_HYP_PAGE_MASK;
+               unsigned int hvpgoff, hvpfns_to_add;
+               unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
                unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
                u64 hvpfn;
 
@@ -1783,51 +1786,34 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                                return SCSI_MLQUEUE_DEVICE_BUSY;
                }
 
-               /*
-                * sgl is a list of PAGEs, and payload->range.pfn_array
-                * expects the page number in the unit of HV_HYP_PAGE_SIZE (the
-                * page size that Hyper-V uses, so here we need to divide PAGEs
-                * into HV_HYP_PAGE in case that PAGE_SIZE > HV_HYP_PAGE_SIZE.
-                * Besides, payload->range.offset should be the offset in one
-                * HV_HYP_PAGE.
-                */
                payload->range.len = length;
                payload->range.offset = offset_in_hvpg;
-               hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
 
-               cur_sgl = sgl;
-               for (i = 0; i < hvpg_count; i++) {
+
+               for (i = 0; sgl != NULL; sgl = sg_next(sgl)) {
                        /*
-                        * 'i' is the index of hv pages in the payload and
-                        * 'hvpgoff' is the offset (in hv pages) of the first
-                        * hv page in the the first page. The relationship
-                        * between the sum of 'i' and 'hvpgoff' and the offset
-                        * (in hv pages) in a payload page ('hvpgoff_in_page')
-                        * is as follow:
-                        *
-                        * |------------------ PAGE -------------------|
-                        * |   NR_HV_HYP_PAGES_IN_PAGE hvpgs in total  |
-                        * |hvpg|hvpg| ...              |hvpg|... |hvpg|
-                        * ^         ^                                 ^                 ^
-                        * +-hvpgoff-+                                 +-hvpgoff_in_page-+
-                        *           ^                                                   |
-                        *           +--------------------- i ---------------------------+
+                        * Init values for the current sgl entry. hvpgoff
+                        * and hvpfns_to_add are in units of Hyper-V size
+                        * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE
+                        * case also handles values of sgl->offset that are
+                        * larger than PAGE_SIZE. Such offsets are handled
+                        * even on other than the first sgl entry, provided
+                        * they are a multiple of PAGE_SIZE.
                         */
-                       unsigned int hvpgoff_in_page =
-                               (i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
+                       hvpgoff = HVPFN_DOWN(sgl->offset);
+                       hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff;
+                       hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) -
+                                               hvpgoff;
 
                        /*
-                        * Two cases that we need to fetch a page:
-                        * 1) i == 0, the first step or
-                        * 2) hvpgoff_in_page == 0, when we reach the boundary
-                        *    of a page.
+                        * Fill the next portion of the PFN array with
+                        * sequential Hyper-V PFNs for the contiguous physical
+                        * memory described by the sgl entry. The end of the
+                        * last sgl should be reached at the same time that
+                        * the PFN array is filled.
                         */
-                       if (hvpgoff_in_page == 0 || i == 0) {
-                               hvpfn = page_to_hvpfn(sg_page(cur_sgl));
-                               cur_sgl = sg_next(cur_sgl);
-                       }
-
-                       payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
+                       while (hvpfns_to_add--)
+                               payload->range.pfn_array[i++] = hvpfn++;
                }
        }
 
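/*
 * A minimal standalone sketch of the loop above, not taken from this
 * patch: each scatterlist entry describes physically contiguous memory,
 * so its Hyper-V-size page frame numbers are sequential and can be
 * emitted with a simple counted loop instead of stepping one PFN per
 * page boundary. fill_hvpfns() is a hypothetical helper; 4 KiB Hyper-V
 * pages are assumed.
 */
#include <stddef.h>
#include <stdint.h>

#define HV_HYP_PAGE_SHIFT 12u
#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_UP(x) (((x) + (1u << HV_HYP_PAGE_SHIFT) - 1) >> HV_HYP_PAGE_SHIFT)

/* Emit PFNs covering [offset, offset + length) within one sgl entry. */
static size_t fill_hvpfns(uint64_t page_hvpfn, uint32_t offset,
			  uint32_t length, uint64_t *pfn_array)
{
	uint32_t hvpgoff = HVPFN_DOWN(offset);
	uint32_t hvpfns_to_add = HVPFN_UP(offset + length) - hvpgoff;
	uint64_t hvpfn = page_hvpfn + hvpgoff;
	size_t i = 0;

	while (hvpfns_to_add--)
		pfn_array[i++] = hvpfn++;
	return i;
}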
@@ -1861,8 +1847,6 @@ static struct scsi_host_template scsi_driver = {
        .slave_configure =      storvsc_device_configure,
        .cmd_per_lun =          2048,
        .this_id =              -1,
-       /* Make sure we dont get a sg segment crosses a page boundary */
-       .dma_boundary =         PAGE_SIZE-1,
        /* Ensure there are no gaps in presented sgls */
        .virt_boundary_mask =   PAGE_SIZE-1,
        .no_write_same =        1,
@@ -1907,6 +1891,7 @@ static int storvsc_probe(struct hv_device *device,
 {
        int ret;
        int num_cpus = num_online_cpus();
+       int num_present_cpus = num_present_cpus();
        struct Scsi_Host *host;
        struct hv_host_device *host_dev;
        bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
@@ -2015,8 +2000,17 @@ static int storvsc_probe(struct hv_device *device,
         * For non-IDE disks, the host supports multiple channels.
         * Set the number of HW queues we are supporting.
         */
-       if (!dev_is_ide)
-               host->nr_hw_queues = num_present_cpus();
+       if (!dev_is_ide) {
+               if (storvsc_max_hw_queues > num_present_cpus) {
+                       storvsc_max_hw_queues = 0;
+                       storvsc_log(device, STORVSC_LOGGING_WARN,
+                               "Resetting invalid storvsc_max_hw_queues value to default.\n");
+               }
+               if (storvsc_max_hw_queues)
+                       host->nr_hw_queues = storvsc_max_hw_queues;
+               else
+                       host->nr_hw_queues = num_present_cpus;
+       }
 
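/*
 * Illustrative usage, not taken from this patch: hv_storvsc is the
 * assumed module name. The parameter caps nr_hw_queues and, being mode
 * 0644, can also be changed via sysfs, though it is consulted only at
 * probe time:
 *
 *	modprobe hv_storvsc storvsc_max_hw_queues=8
 *	echo 8 > /sys/module/hv_storvsc/parameters/storvsc_max_hw_queues
 *
 * Values above num_present_cpus() are rejected by the check above and
 * the default of one hardware queue per present CPU is used instead.
 */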
        /*
         * Set the error handler work queue.
index 149391f..13d9204 100644
@@ -100,6 +100,7 @@ static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
 }
 
 /**
+ * cdns_ufs_set_hclkdiv()
  * Sets HCLKDIV register value based on the core_clk
  * @hba: host controller instance
  *
@@ -141,6 +142,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
 }
 
 /**
+ * cdns_ufs_hce_enable_notify()
  * Called before and after HCE enable bit is set.
  * @hba: host controller instance
  * @status: notify stage (pre, post change)
@@ -157,6 +159,7 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
 }
 
 /**
+ * cdns_ufs_hibern8_notify()
  * Called around hibern8 enter/exit.
  * @hba: host controller instance
  * @cmd: UIC Command
@@ -173,6 +176,7 @@ static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
 }
 
 /**
+ * cdns_ufs_link_startup_notify()
  * Called before and after Link startup is carried out.
  * @hba: host controller instance
  * @status: notify stage (pre, post change)
index dee98dc..ced9ef4 100644
@@ -44,13 +44,103 @@ static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
 
+static int ee_usr_mask_get(void *data, u64 *val)
+{
+       struct ufs_hba *hba = data;
+
+       *val = hba->ee_usr_mask;
+       return 0;
+}
+
+static int ufs_debugfs_get_user_access(struct ufs_hba *hba)
+__acquires(&hba->host_sem)
+{
+       down(&hba->host_sem);
+       if (!ufshcd_is_user_access_allowed(hba)) {
+               up(&hba->host_sem);
+               return -EBUSY;
+       }
+       pm_runtime_get_sync(hba->dev);
+       return 0;
+}
+
+static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
+__releases(&hba->host_sem)
+{
+       pm_runtime_put_sync(hba->dev);
+       up(&hba->host_sem);
+}
+
+static int ee_usr_mask_set(void *data, u64 val)
+{
+       struct ufs_hba *hba = data;
+       int err;
+
+       if (val & ~(u64)MASK_EE_STATUS)
+               return -EINVAL;
+       err = ufs_debugfs_get_user_access(hba);
+       if (err)
+               return err;
+       err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS);
+       ufs_debugfs_put_user_access(hba);
+       return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n");
+
+void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status)
+{
+       bool chgd = false;
+       u16 ee_ctrl_mask;
+       int err = 0;
+
+       if (!hba->debugfs_ee_rate_limit_ms || !status)
+               return;
+
+       mutex_lock(&hba->ee_ctrl_mutex);
+       ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);
+       chgd = ee_ctrl_mask != hba->ee_ctrl_mask;
+       if (chgd) {
+               err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
+               if (err)
+                       dev_err(hba->dev, "%s: failed to write ee control %d\n",
+                               __func__, err);
+       }
+       mutex_unlock(&hba->ee_ctrl_mutex);
+
+       if (chgd && !err) {
+               unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms);
+
+               queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay);
+       }
+}
+
+static void ufs_debugfs_restart_ee(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);
+
+       if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||
+           ufs_debugfs_get_user_access(hba))
+               return;
+       ufshcd_write_ee_control(hba);
+       ufs_debugfs_put_user_access(hba);
+}
+
 void ufs_debugfs_hba_init(struct ufs_hba *hba)
 {
+       /* Set default exception event rate limit period to 20ms */
+       hba->debugfs_ee_rate_limit_ms = 20;
+       INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
        hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
        debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+       debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
+                           hba, &ee_usr_mask_fops);
+       debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
+                          &hba->debugfs_ee_rate_limit_ms);
 }
 
 void ufs_debugfs_hba_exit(struct ufs_hba *hba)
 {
        debugfs_remove_recursive(hba->debugfs_root);
+       cancel_delayed_work_sync(&hba->debugfs_ee_work);
 }
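/*
 * Illustrative usage, not taken from this patch (the "ufshcd" debugfs
 * directory name is an assumption):
 *
 *	# let the WriteBooster exception event (BIT(5)) reach userspace
 *	echo 0x20 > /sys/kernel/debug/ufshcd/<hba>/exception_event_mask
 *	cat /sys/kernel/debug/ufshcd/<hba>/exception_event_mask
 *
 * ee_usr_mask_set() rejects values outside MASK_EE_STATUS (0xFFFF) with
 * -EINVAL and returns -EBUSY while user access is not allowed.
 */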
index f35b39c..3ca29d3 100644
@@ -12,11 +12,13 @@ void __init ufs_debugfs_init(void);
 void __exit ufs_debugfs_exit(void);
 void ufs_debugfs_hba_init(struct ufs_hba *hba);
 void ufs_debugfs_hba_exit(struct ufs_hba *hba);
+void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
 #else
 static inline void ufs_debugfs_init(void) {}
 static inline void ufs_debugfs_exit(void) {}
 static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {}
 static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {}
+static inline void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) {}
 #endif
 
 #endif
index 267943a..70647ea 100644
@@ -652,7 +652,6 @@ out:
 
 #define PWR_MODE_STR_LEN       64
 static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
-                               struct ufs_pa_layer_attr *pwr_max,
                                struct ufs_pa_layer_attr *pwr_req)
 {
        struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1155,7 +1154,7 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
                                              dev_req_params);
                break;
        case POST_CHANGE:
-               ret = exynos_ufs_post_pwr_mode(hba, NULL, dev_req_params);
+               ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
                break;
        }
 
index a9dc8d7..9b711d6 100644
@@ -819,9 +819,9 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
        if (host->hw_ver.major == 0x1)
-               return UFSHCI_VERSION_11;
+               return ufshci_version(1, 1);
        else
-               return UFSHCI_VERSION_20;
+               return ufshci_version(2, 0);
 }
 
 /**
index acc54f5..d7c3cff 100644
@@ -246,7 +246,7 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
        }
 
        pm_runtime_get_sync(hba->dev);
-       res = ufshcd_wb_ctrl(hba, wb_enable);
+       res = ufshcd_wb_toggle(hba, wb_enable);
        pm_runtime_put_sync(hba->dev);
 out:
        up(&hba->host_sem);
index bf1897a..cb80b96 100644
@@ -348,8 +348,14 @@ enum power_desc_param_offset {
 
 /* Exception event mask values */
 enum {
-       MASK_EE_STATUS          = 0xFFFF,
-       MASK_EE_URGENT_BKOPS    = (1 << 2),
+       MASK_EE_STATUS                  = 0xFFFF,
+       MASK_EE_DYNCAP_EVENT            = BIT(0),
+       MASK_EE_SYSPOOL_EVENT           = BIT(1),
+       MASK_EE_URGENT_BKOPS            = BIT(2),
+       MASK_EE_TOO_HIGH_TEMP           = BIT(3),
+       MASK_EE_TOO_LOW_TEMP            = BIT(4),
+       MASK_EE_WRITEBOOSTER_EVENT      = BIT(5),
+       MASK_EE_PERFORMANCE_THROTTLING  = BIT(6),
 };
 
 /* Background operation status */
index fadd566..23ee828 100644
 #include <linux/pm_runtime.h>
 #include <linux/pm_qos.h>
 #include <linux/debugfs.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+
+struct ufs_host {
+       void (*late_init)(struct ufs_hba *hba);
+};
+
+enum {
+       INTEL_DSM_FNS           =  0,
+       INTEL_DSM_RESET         =  1,
+};
 
 struct intel_host {
+       struct ufs_host ufs_host;
+       u32             dsm_fns;
        u32             active_ltr;
        u32             idle_ltr;
        struct dentry   *debugfs_root;
+       struct gpio_desc *reset_gpio;
 };
 
+static const guid_t intel_dsm_guid =
+       GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
+                 0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+                      unsigned int fn, u32 *result)
+{
+       union acpi_object *obj;
+       int err = 0;
+       size_t len;
+
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+       if (!obj)
+               return -EOPNOTSUPP;
+
+       if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       len = min_t(size_t, obj->buffer.length, 4);
+
+       *result = 0;
+       memcpy(result, obj->buffer.pointer, len);
+out:
+       ACPI_FREE(obj);
+
+       return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+                    unsigned int fn, u32 *result)
+{
+       if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+               return -EOPNOTSUPP;
+
+       return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
+{
+       int err;
+
+       err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+       dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
+}
+
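/*
 * A minimal sketch of the call pattern, not taken from this patch:
 * function 0 (INTEL_DSM_FNS) returns a bitmap of supported _DSM
 * functions, cached in dsm_fns at init; intel_dsm() then refuses any
 * function whose bit is clear, so callers can simply try:
 *
 *	u32 result;
 *
 *	if (!intel_dsm(host, dev, INTEL_DSM_RESET, &result) && result)
 *		...firmware performed the reset...
 */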
+static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
+                                      enum ufs_notify_change_status status)
+{
+       /* Cannot enable ICE until after HC enable */
+       if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
+               u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
+
+               hce |= CRYPTO_GENERAL_ENABLE;
+               ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
+       }
+
+       return 0;
+}
+
 static int ufs_intel_disable_lcc(struct ufs_hba *hba)
 {
        u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
@@ -144,6 +220,41 @@ static void intel_remove_debugfs(struct ufs_hba *hba)
        debugfs_remove_recursive(host->debugfs_root);
 }
 
+static int ufs_intel_device_reset(struct ufs_hba *hba)
+{
+       struct intel_host *host = ufshcd_get_variant(hba);
+
+       if (host->dsm_fns & INTEL_DSM_RESET) {
+               u32 result = 0;
+               int err;
+
+               err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
+               if (!err && !result)
+                       err = -EIO;
+               if (err)
+                       dev_err(hba->dev, "%s: DSM error %d result %u\n",
+                               __func__, err, result);
+               return err;
+       }
+
+       if (!host->reset_gpio)
+               return -EOPNOTSUPP;
+
+       gpiod_set_value_cansleep(host->reset_gpio, 1);
+       usleep_range(10, 15);
+
+       gpiod_set_value_cansleep(host->reset_gpio, 0);
+       usleep_range(10, 15);
+
+       return 0;
+}
+
+static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
+{
+       /* GPIO in _DSD has active low setting */
+       return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+}
+
 static int ufs_intel_common_init(struct ufs_hba *hba)
 {
        struct intel_host *host;
@@ -154,6 +265,23 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
        if (!host)
                return -ENOMEM;
        ufshcd_set_variant(hba, host);
+       intel_dsm_init(host, hba->dev);
+       if (host->dsm_fns & INTEL_DSM_RESET) {
+               if (hba->vops->device_reset)
+                       hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+       } else {
+               if (hba->vops->device_reset)
+                       host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
+               if (IS_ERR(host->reset_gpio)) {
+                       dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
+                               __func__, PTR_ERR(host->reset_gpio));
+                       host->reset_gpio = NULL;
+               }
+               if (host->reset_gpio) {
+                       gpiod_set_value_cansleep(host->reset_gpio, 0);
+                       hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+               }
+       }
        intel_ltr_expose(hba->dev);
        intel_add_debugfs(hba);
        return 0;
@@ -206,6 +334,31 @@ static int ufs_intel_ehl_init(struct ufs_hba *hba)
        return ufs_intel_common_init(hba);
 }
 
+static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
+{
+       /* LKF always needs a full reset, so set PM accordingly */
+       if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
+               hba->spm_lvl = UFS_PM_LVL_6;
+               hba->rpm_lvl = UFS_PM_LVL_6;
+       } else {
+               hba->spm_lvl = UFS_PM_LVL_5;
+               hba->rpm_lvl = UFS_PM_LVL_5;
+       }
+}
+
+static int ufs_intel_lkf_init(struct ufs_hba *hba)
+{
+       struct ufs_host *ufs_host;
+       int err;
+
+       hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+       hba->caps |= UFSHCD_CAP_CRYPTO;
+       err = ufs_intel_common_init(hba);
+       ufs_host = ufshcd_get_variant(hba);
+       ufs_host->late_init = ufs_intel_lkf_late_init;
+       return err;
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
@@ -222,6 +375,16 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
        .resume                 = ufs_intel_resume,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_lkf_init,
+       .exit                   = ufs_intel_common_exit,
+       .hce_enable_notify      = ufs_intel_hce_enable_notify,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
+       .device_reset           = ufs_intel_device_reset,
+};
+
 #ifdef CONFIG_PM_SLEEP
 /**
  * ufshcd_pci_suspend - suspend power management function
@@ -321,6 +484,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
 static int
 ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       struct ufs_host *ufs_host;
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        int err;
@@ -358,6 +522,10 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return err;
        }
 
+       ufs_host = ufshcd_get_variant(hba);
+       if (ufs_host && ufs_host->late_init)
+               ufs_host->late_init(hba);
+
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);
 
@@ -383,6 +551,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
        { }     /* terminate list */
 };
 
index 1a69949..fedb7de 100644
@@ -106,7 +106,6 @@ out:
 static int ufshcd_populate_vreg(struct device *dev, const char *name,
                struct ufs_vreg **out_vreg)
 {
-       int ret = 0;
        char prop_name[MAX_PROP_SIZE];
        struct ufs_vreg *vreg = NULL;
        struct device_node *np = dev->of_node;
@@ -135,9 +134,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
                vreg->max_uA = 0;
        }
 out:
-       if (!ret)
-               *out_vreg = vreg;
-       return ret;
+       *out_vreg = vreg;
+       return 0;
 }
 
 /**
index d3d05e9..0625da7 100644
@@ -24,7 +24,6 @@
 #include "ufs_bsg.h"
 #include "ufshcd-crypto.h"
 #include <asm/unaligned.h>
-#include <linux/blkdev.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
@@ -245,8 +244,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                         struct ufs_vreg *vreg);
 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 
@@ -273,20 +272,12 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 
 static inline void ufshcd_wb_config(struct ufs_hba *hba)
 {
-       int ret;
-
        if (!ufshcd_is_wb_allowed(hba))
                return;
 
-       ret = ufshcd_wb_ctrl(hba, true);
-       if (ret)
-               dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
-       else
-               dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
-       ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
-       if (ret)
-               dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
-                       __func__, ret);
+       ufshcd_wb_toggle(hba, true);
+
+       ufshcd_wb_toggle_flush_during_h8(hba, true);
        if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
                ufshcd_wb_toggle_flush(hba, true);
 }
@@ -336,11 +327,15 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                return;
 
        if (str_t == UFS_TM_SEND)
-               trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
-                                 &descp->input_param1, UFS_TSF_TM_INPUT);
+               trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+                                 &descp->upiu_req.req_header,
+                                 &descp->upiu_req.input_param1,
+                                 UFS_TSF_TM_INPUT);
        else
-               trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
-                                 &descp->output_param1, UFS_TSF_TM_OUTPUT);
+               trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+                                 &descp->upiu_rsp.rsp_header,
+                                 &descp->upiu_rsp.output_param1,
+                                 UFS_TSF_TM_OUTPUT);
 }
 
 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
@@ -667,23 +662,12 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-       u32 intr_mask = 0;
+       if (hba->ufs_version == ufshci_version(1, 0))
+               return INTERRUPT_MASK_ALL_VER_10;
+       if (hba->ufs_version <= ufshci_version(2, 0))
+               return INTERRUPT_MASK_ALL_VER_11;
 
-       switch (hba->ufs_version) {
-       case UFSHCI_VERSION_10:
-               intr_mask = INTERRUPT_MASK_ALL_VER_10;
-               break;
-       case UFSHCI_VERSION_11:
-       case UFSHCI_VERSION_20:
-               intr_mask = INTERRUPT_MASK_ALL_VER_11;
-               break;
-       case UFSHCI_VERSION_21:
-       default:
-               intr_mask = INTERRUPT_MASK_ALL_VER_21;
-               break;
-       }
-
-       return intr_mask;
+       return INTERRUPT_MASK_ALL_VER_21;
 }
 
 /**
@@ -694,10 +678,22 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
  */
 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 {
+       u32 ufshci_ver;
+
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
-               return ufshcd_vops_get_ufs_hci_version(hba);
+               ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
+       else
+               ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
 
-       return ufshcd_readl(hba, REG_UFS_VERSION);
+       /*
+        * UFSHCI v1.x uses a different version scheme. To allow
+        * comparisons via the ufshci_version() helper, convert
+        * it to the same scheme as UFS 2.0+.
+        */
+       if (ufshci_ver & 0x00010000)
+               return ufshci_version(1, ufshci_ver & 0x00000100 ? 1 : 0);
+
+       return ufshci_ver;
 }
 
 /**
@@ -929,8 +925,7 @@ static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 {
        /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
-       if ((hba->ufs_version == UFSHCI_VERSION_10) ||
-           (hba->ufs_version == UFSHCI_VERSION_11))
+       if (hba->ufs_version <= ufshci_version(1, 1))
                return UFS_UNIPRO_VER_1_41;
        else
                return UFS_UNIPRO_VER_1_6;
@@ -1266,7 +1261,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
        /* Enable Write Booster if we have scaled up else disable it */
        downgrade_write(&hba->clk_scaling_lock);
        is_writelock = false;
-       ufshcd_wb_ctrl(hba, scale_up);
+       ufshcd_wb_toggle(hba, scale_up);
 
 out_unprepare:
        ufshcd_clock_scaling_unprepare(hba, is_writelock);
@@ -2333,7 +2328,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
 {
        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-       if (hba->ufs_version == UFSHCI_VERSION_10) {
+       if (hba->ufs_version == ufshci_version(1, 0)) {
                u32 rw;
                rw = set & INTERRUPT_MASK_RW_VER_10;
                set = rw | ((set ^ intrs) & intrs);
@@ -2353,7 +2348,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
 {
        u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-       if (hba->ufs_version == UFSHCI_VERSION_10) {
+       if (hba->ufs_version == ufshci_version(1, 0)) {
                u32 rw;
                rw = (set & INTERRUPT_MASK_RW_VER_10) &
                        ~(intrs & INTERRUPT_MASK_RW_VER_10);
@@ -2516,8 +2511,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
        u8 upiu_flags;
        int ret = 0;
 
-       if ((hba->ufs_version == UFSHCI_VERSION_10) ||
-           (hba->ufs_version == UFSHCI_VERSION_11))
+       if (hba->ufs_version <= ufshci_version(1, 1))
                lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
        else
                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
@@ -2544,8 +2538,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        u8 upiu_flags;
        int ret = 0;
 
-       if ((hba->ufs_version == UFSHCI_VERSION_10) ||
-           (hba->ufs_version == UFSHCI_VERSION_11))
+       if (hba->ufs_version <= ufshci_version(1, 1))
                lrbp->command_type = UTP_CMD_TYPE_SCSI;
        else
                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
@@ -5161,6 +5154,46 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
        }
 }
 
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
+{
+       return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+                                      QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
+                                      &ee_ctrl_mask);
+}
+
+int ufshcd_write_ee_control(struct ufs_hba *hba)
+{
+       int err;
+
+       mutex_lock(&hba->ee_ctrl_mutex);
+       err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
+       mutex_unlock(&hba->ee_ctrl_mutex);
+       if (err)
+               dev_err(hba->dev, "%s: failed to write ee control %d\n",
+                       __func__, err);
+       return err;
+}
+
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
+                            u16 set, u16 clr)
+{
+       u16 new_mask, ee_ctrl_mask;
+       int err = 0;
+
+       mutex_lock(&hba->ee_ctrl_mutex);
+       new_mask = (*mask & ~clr) | set;
+       ee_ctrl_mask = new_mask | *other_mask;
+       if (ee_ctrl_mask != hba->ee_ctrl_mask)
+               err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
+       /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
+       if (!err) {
+               hba->ee_ctrl_mask = ee_ctrl_mask;
+               *mask = new_mask;
+       }
+       mutex_unlock(&hba->ee_ctrl_mutex);
+       return err;
+}
+
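/*
 * Illustrative usage, not taken from this patch: driver and user
 * interests live in separate masks that are OR'd into the single
 * wExceptionEventControl attribute under ee_ctrl_mutex. Enabling urgent
 * BKOPS notifications for the driver, for example:
 *
 *	err = ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
 *				       &hba->ee_usr_mask,
 *				       MASK_EE_URGENT_BKOPS, 0);
 *
 * which is exactly what the ufshcd_update_ee_drv_mask() wrapper in
 * ufshcd.h expands to.
 */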
 /**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
@@ -5171,22 +5204,9 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
  *
  * Returns zero on success, non-zero error value on failure.
  */
-static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
 {
-       int err = 0;
-       u32 val;
-
-       if (!(hba->ee_ctrl_mask & mask))
-               goto out;
-
-       val = hba->ee_ctrl_mask & ~mask;
-       val &= MASK_EE_STATUS;
-       err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-                       QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
-       if (!err)
-               hba->ee_ctrl_mask &= ~mask;
-out:
-       return err;
+       return ufshcd_update_ee_drv_mask(hba, 0, mask);
 }
 
 /**
@@ -5199,22 +5219,9 @@ out:
  *
  * Returns zero on success, non-zero error value on failure.
  */
-static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
 {
-       int err = 0;
-       u32 val;
-
-       if (hba->ee_ctrl_mask & mask)
-               goto out;
-
-       val = hba->ee_ctrl_mask | mask;
-       val &= MASK_EE_STATUS;
-       err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-                       QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
-       if (!err)
-               hba->ee_ctrl_mask |= mask;
-out:
-       return err;
+       return ufshcd_update_ee_drv_mask(hba, mask, 0);
 }
 
 /**
@@ -5431,85 +5438,74 @@ out:
                                __func__, err);
 }
 
-int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
 {
-       int ret;
        u8 index;
-       enum query_opcode opcode;
+       enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
+                                  UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+       index = ufshcd_wb_get_query_index(hba);
+       return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
+}
+
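/*
 * Illustrative note, not taken from this patch: the three WriteBooster
 * flags now share this helper and the callers below differ only in the
 * flag IDN passed in:
 *
 *	__ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
 *	__ufshcd_wb_toggle(hba, set, QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
 *	__ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
 */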
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
+{
+       int ret;
 
        if (!ufshcd_is_wb_allowed(hba))
                return 0;
 
        if (!(enable ^ hba->dev_info.wb_enabled))
                return 0;
-       if (enable)
-               opcode = UPIU_QUERY_OPCODE_SET_FLAG;
-       else
-               opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
 
-       index = ufshcd_wb_get_query_index(hba);
-       ret = ufshcd_query_flag_retry(hba, opcode,
-                                     QUERY_FLAG_IDN_WB_EN, index, NULL);
+       ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
        if (ret) {
-               dev_err(hba->dev, "%s write booster %s failed %d\n",
+               dev_err(hba->dev, "%s Write Booster %s failed %d\n",
                        __func__, enable ? "enable" : "disable", ret);
                return ret;
        }
 
        hba->dev_info.wb_enabled = enable;
-       dev_dbg(hba->dev, "%s write booster %s %d\n",
-                       __func__, enable ? "enable" : "disable", ret);
+       dev_info(hba->dev, "%s Write Booster %s\n",
+                       __func__, enable ? "enabled" : "disabled");
 
        return ret;
 }
 
-static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
 {
-       int val;
-       u8 index;
-
-       if (set)
-               val =  UPIU_QUERY_OPCODE_SET_FLAG;
-       else
-               val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+       int ret;
 
-       index = ufshcd_wb_get_query_index(hba);
-       return ufshcd_query_flag_retry(hba, val,
-                               QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
-                               index, NULL);
+       ret = __ufshcd_wb_toggle(hba, set,
+                       QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
+       if (ret) {
+               dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
+                       __func__, set ? "enable" : "disable", ret);
+               return;
+       }
+       dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
+                       __func__, set ? "enabled" : "disabled");
 }
 
-static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
 {
        int ret;
-       u8 index;
-       enum query_opcode opcode;
 
        if (!ufshcd_is_wb_allowed(hba) ||
            hba->dev_info.wb_buf_flush_enabled == enable)
-               return 0;
-
-       if (enable)
-               opcode = UPIU_QUERY_OPCODE_SET_FLAG;
-       else
-               opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+               return;
 
-       index = ufshcd_wb_get_query_index(hba);
-       ret = ufshcd_query_flag_retry(hba, opcode,
-                                     QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
-                                     NULL);
+       ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
        if (ret) {
                dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
                        enable ? "enable" : "disable", ret);
-               goto out;
+               return;
        }
 
        hba->dev_info.wb_buf_flush_enabled = enable;
 
-       dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
-out:
-       return ret;
-
+       dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
+                       __func__, enable ? "enabled" : "disabled");
 }
 
 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5617,11 +5613,12 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
                goto out;
        }
 
-       status &= hba->ee_ctrl_mask;
+       trace_ufshcd_exception_event(dev_name(hba->dev), status);
 
-       if (status & MASK_EE_URGENT_BKOPS)
+       if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
                ufshcd_bkops_exception_event_handler(hba);
 
+       ufs_debugfs_exception_event(hba, status);
 out:
        ufshcd_scsi_unblock_requests(hba);
        /*
@@ -6402,7 +6399,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        blk_mq_start_request(req);
 
        task_tag = req->tag;
-       treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+       treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
 
        memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
        ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
@@ -6475,16 +6472,16 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
 
        /* Configure task request UPIU */
-       treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+       treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
                                  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
-       treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+       treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
 
        /*
         * The host shall provide the same value for LUN field in the basic
         * header and for Input Parameter.
         */
-       treq.input_param1 = cpu_to_be32(lun_id);
-       treq.input_param2 = cpu_to_be32(task_id);
+       treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
+       treq.upiu_req.input_param2 = cpu_to_be32(task_id);
 
        err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
        if (err == -ETIMEDOUT)
@@ -6495,7 +6492,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
                dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
                                __func__, ocs_value);
        else if (tm_response)
-               *tm_response = be32_to_cpu(treq.output_param1) &
+               *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
                                MASK_TM_SERVICE_RESP;
        return err;
 }
@@ -6560,15 +6557,10 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
        ufshcd_prepare_lrbp_crypto(NULL, lrbp);
        hba->dev_cmd.type = cmd_type;
 
-       switch (hba->ufs_version) {
-       case UFSHCI_VERSION_10:
-       case UFSHCI_VERSION_11:
+       if (hba->ufs_version <= ufshci_version(1, 1))
                lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
-               break;
-       default:
+       else
                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-               break;
-       }
 
        /* update the task tag in the request upiu */
        req_upiu->header.dword_0 |= cpu_to_be32(tag);
@@ -6675,7 +6667,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
                treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
                treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
 
-               memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+               memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
 
                err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
                if (err == -ETIMEDOUT)
@@ -6688,7 +6680,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
                        break;
                }
 
-               memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+               memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
 
                break;
        default:
@@ -7128,7 +7120,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
 }
 
 /**
- * ufshcd_calc_icc_level - calculate the max ICC level
+ * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
  * In case regulators are not initialized we'll return 0
  * @hba: per-adapter instance
  * @desc_buf: power descriptor buffer to extract ICC levels from.
@@ -7149,19 +7141,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
                goto out;
        }
 
-       if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
+       if (hba->vreg_info.vcc->max_uA)
                icc_level = ufshcd_get_max_icc_level(
                                hba->vreg_info.vcc->max_uA,
                                POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
 
-       if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
+       if (hba->vreg_info.vccq->max_uA)
                icc_level = ufshcd_get_max_icc_level(
                                hba->vreg_info.vccq->max_uA,
                                icc_level,
                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
 
-       if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
+       if (hba->vreg_info.vccq2->max_uA)
                icc_level = ufshcd_get_max_icc_level(
                                hba->vreg_info.vccq2->max_uA,
                                icc_level,
@@ -7922,6 +7914,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
        ufshcd_set_active_icc_lvl(hba);
 
        ufshcd_wb_config(hba);
+       if (hba->ee_usr_mask)
+               ufshcd_write_ee_control(hba);
        /* Enable Auto-Hibernate if configured */
        ufshcd_auto_hibern8_enable(hba);
 
@@ -8919,6 +8913,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                 */
                ufshcd_urgent_bkops(hba);
 
+       if (hba->ee_usr_mask)
+               ufshcd_write_ee_control(hba);
+
        hba->clk_gating.is_suspended = false;
 
        if (ufshcd_is_clkscaling_supported(hba))
@@ -9280,13 +9277,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
 
-       if ((hba->ufs_version != UFSHCI_VERSION_10) &&
-           (hba->ufs_version != UFSHCI_VERSION_11) &&
-           (hba->ufs_version != UFSHCI_VERSION_20) &&
-           (hba->ufs_version != UFSHCI_VERSION_21))
-               dev_err(hba->dev, "invalid UFS version 0x%x\n",
-                       hba->ufs_version);
-
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
 
@@ -9337,6 +9327,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);
 
+       /* Initialize mutex for exception event control */
+       mutex_init(&hba->ee_ctrl_mutex);
+
        init_rwsem(&hba->clk_scaling_lock);
 
        ufshcd_init_clk_gating(hba);
index 18e56c1..5eb66a8 100644
@@ -773,7 +773,10 @@ struct ufs_hba {
        u32 ufshcd_state;
        u32 eh_flags;
        u32 intr_mask;
-       u16 ee_ctrl_mask;
+       u16 ee_ctrl_mask; /* Exception event mask */
+       u16 ee_drv_mask;  /* Exception event mask for driver */
+       u16 ee_usr_mask;  /* Exception event mask for user (via debugfs) */
+       struct mutex ee_ctrl_mutex;
        bool is_powered;
        bool shutting_down;
        struct semaphore host_sem;
@@ -840,6 +843,8 @@ struct ufs_hba {
 #endif
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_root;
+       struct delayed_work debugfs_ee_work;
+       u32 debugfs_ee_rate_limit_ms;
 #endif
 };
 
@@ -1099,7 +1104,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
                             u8 *desc_buff, int *buff_len,
                             enum query_opcode desc_op);
 
-int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
 
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -1181,7 +1186,7 @@ static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
 }
 
 static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
-                                 bool status,
+                                 enum ufs_notify_change_status status,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
 {
@@ -1285,4 +1290,23 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
                     const char *prefix);
 
+int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
+int ufshcd_write_ee_control(struct ufs_hba *hba);
+int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
+                            u16 set, u16 clr);
+
+static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
+                                           u16 set, u16 clr)
+{
+       return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
+                                       &hba->ee_usr_mask, set, clr);
+}
+
+static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
+                                           u16 set, u16 clr)
+{
+       return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
+                                       &hba->ee_drv_mask, set, clr);
+}
+
 #endif /* End of Header */
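
The inline wrappers above let the driver and the debugfs user each own half
of the exception event mask without clobbering the other; ee_ctrl_mutex
serializes updates and ufshcd_write_ee_control() pushes the combined mask to
the device. A minimal sketch of a driver-side caller (illustrative only;
MASK_EE_URGENT_BKOPS is assumed from ufs.h and is not part of this patch):

	/* Request the urgent-BKOPS exception event from the driver side
	 * without disturbing bits the user set via debugfs.
	 */
	static int example_enable_urgent_bkops(struct ufs_hba *hba)
	{
		return ufshcd_update_ee_drv_mask(hba, MASK_EE_URGENT_BKOPS, 0);
	}
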
index 6795e1f..de95be5 100644 (file)
@@ -74,13 +74,17 @@ enum {
 #define MINOR_VERSION_NUM_MASK         UFS_MASK(0xFFFF, 0)
 #define MAJOR_VERSION_NUM_MASK         UFS_MASK(0xFFFF, 16)
 
-/* Controller UFSHCI version */
-enum {
-       UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
-       UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
-       UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
-       UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
-};
+/*
+ * Controller UFSHCI version
+ * - 2.x and newer use the following scheme:
+ *   (major << 8) + (minor << 4)
+ * - 1.x has been converted to match this in
+ *   ufshcd_get_ufs_version()
+ */
+static inline u32 ufshci_version(u32 major, u32 minor)
+{
+       return (major << 8) + (minor << 4);
+}
 
 /*
  * HCDDID - Host Controller Identification Descriptor
@@ -482,17 +486,21 @@ struct utp_task_req_desc {
        struct request_desc_header header;
 
        /* DW 4-11 - Task request UPIU structure */
-       struct utp_upiu_header  req_header;
-       __be32                  input_param1;
-       __be32                  input_param2;
-       __be32                  input_param3;
-       __be32                  __reserved1[2];
+       struct {
+               struct utp_upiu_header  req_header;
+               __be32                  input_param1;
+               __be32                  input_param2;
+               __be32                  input_param3;
+               __be32                  __reserved1[2];
+       } upiu_req;
 
        /* DW 12-19 - Task Management Response UPIU structure */
-       struct utp_upiu_header  rsp_header;
-       __be32                  output_param1;
-       __be32                  output_param2;
-       __be32                  __reserved2[3];
+       struct {
+               struct utp_upiu_header  rsp_header;
+               __be32                  output_param1;
+               __be32                  output_param2;
+               __be32                  __reserved2[3];
+       } upiu_rsp;
 };
 
 #endif /* End of Header */
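
With the version enum gone, checks against specific UFSHCI releases become
plain integer comparisons on values built by ufshci_version(). A minimal
sketch (illustrative only):

	/* e.g. gate a workaround on host controllers older than UFSHCI 3.0 */
	if (hba->ufs_version < ufshci_version(3, 0))
		dev_info(hba->dev, "pre-3.0 UFSHCI host\n");
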
index e5c443b..2c54c5d 100644 (file)
@@ -1154,10 +1154,10 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        /*
         * Initialize struct se_cmd descriptor from target_core_mod infrastructure
         */
-       transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
-                       conn->sess->se_sess, be32_to_cpu(hdr->data_length),
-                       cmd->data_direction, sam_task_attr,
-                       cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
+       __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+                        conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+                        cmd->data_direction, sam_task_attr,
+                        cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
 
        pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
                " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -1167,7 +1167,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        target_get_sess_cmd(&cmd->se_cmd, true);
 
        cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
-       cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
+       cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
+                                               GFP_KERNEL);
+
        if (cmd->sense_reason) {
                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
                        return iscsit_add_reject_cmd(cmd,
@@ -2012,10 +2014,10 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                                             buf);
        }
 
-       transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
-                             conn->sess->se_sess, 0, DMA_NONE,
-                             TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
-                             scsilun_to_int(&hdr->lun));
+       __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+                         conn->sess->se_sess, 0, DMA_NONE,
+                         TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+                         scsilun_to_int(&hdr->lun));
 
        target_get_sess_cmd(&cmd->se_cmd, true);
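
Note that target_cmd_init_cdb() now takes an explicit gfp_t: process-context
callers such as iSCSI pass GFP_KERNEL, while atomic submission paths (e.g.
the tcm_loop queuecommand conversion below, via target_submit_prep()) pass
GFP_ATOMIC so an oversized CDB can still be allocated safely. Illustrative
call shape for an atomic-context caller:

	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb,
						GFP_ATOMIC);
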
 
index 0fa1d57..f4a24fa 100644 (file)
@@ -161,14 +161,13 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
        char *str, *str2, *ip_str, *port_str;
        struct sockaddr_storage sockaddr = { };
        int ret;
-       char buf[MAX_PORTAL_LEN + 1];
+       char buf[MAX_PORTAL_LEN + 1] = { };
 
        if (strlen(name) > MAX_PORTAL_LEN) {
                pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
                        (int)strlen(name), MAX_PORTAL_LEN);
                return ERR_PTR(-EOVERFLOW);
        }
-       memset(buf, 0, MAX_PORTAL_LEN + 1);
        snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
 
        str = strstr(buf, "[");
index 8b40f10..151e294 100644 (file)
@@ -28,7 +28,6 @@
 #include "iscsi_target_auth.h"
 
 #define MAX_LOGIN_PDUS  7
-#define TEXT_LEN       4096
 
 void convert_null_to_semi(char *buf, int len)
 {
index 35e75a3..cce3a82 100644 (file)
@@ -28,7 +28,6 @@
 /* Instance Attributes Table */
 #define ISCSI_INST_NUM_NODES           1
 #define ISCSI_INST_DESCR               "Storage Engine Target"
-#define ISCSI_INST_LAST_FAILURE_TYPE   0
 #define ISCSI_DISCONTINUITY_TIME       0
 
 #define ISCSI_NODE_INDEX               1
index 9468b01..6dd5810 100644 (file)
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 
-#define PRINT_BUFF(buff, len)                                  \
-{                                                              \
-       int zzz;                                                \
-                                                               \
-       pr_debug("%d:\n", __LINE__);                            \
-       for (zzz = 0; zzz < len; zzz++) {                       \
-               if (zzz % 16 == 0) {                            \
-                       if (zzz)                                \
-                               pr_debug("\n");                 \
-                       pr_debug("%4i: ", zzz);                 \
-               }                                               \
-               pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
-       }                                                       \
-       if ((len + 1) % 16)                                     \
-               pr_debug("\n");                                 \
-}
-
 extern struct list_head g_tiqn_list;
 extern spinlock_t tiqn_lock;
 
index badba43..2687fd7 100644 (file)
@@ -39,7 +39,6 @@
 
 #define to_tcm_loop_hba(hba)   container_of(hba, struct tcm_loop_hba, dev)
 
-static struct workqueue_struct *tcm_loop_workqueue;
 static struct kmem_cache *tcm_loop_cmd_cache;
 
 static int tcm_loop_hba_no_cnt;
@@ -67,8 +66,12 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
 {
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                                struct tcm_loop_cmd, tl_se_cmd);
+       struct scsi_cmnd *sc = tl_cmd->sc;
 
-       kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+       if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+               kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+       else
+               sc->scsi_done(sc);
 }
 
 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
@@ -102,10 +105,8 @@ static struct device_driver tcm_loop_driverfs = {
  */
 static struct device *tcm_loop_primary;
 
-static void tcm_loop_submission_work(struct work_struct *work)
+static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
 {
-       struct tcm_loop_cmd *tl_cmd =
-               container_of(work, struct tcm_loop_cmd, work);
        struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
        struct scsi_cmnd *sc = tl_cmd->sc;
        struct tcm_loop_nexus *tl_nexus;
@@ -113,7 +114,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
        struct tcm_loop_tpg *tl_tpg;
        struct scatterlist *sgl_bidi = NULL;
        u32 sgl_bidi_count = 0, transfer_length;
-       int rc;
 
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
@@ -151,21 +151,20 @@ static void tcm_loop_submission_work(struct work_struct *work)
        }
 
        se_cmd->tag = tl_cmd->sc_cmd_tag;
-       rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
-                       &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-                       transfer_length, TCM_SIMPLE_TAG,
-                       sc->sc_data_direction, 0,
-                       scsi_sglist(sc), scsi_sg_count(sc),
-                       sgl_bidi, sgl_bidi_count,
-                       scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
-       if (rc < 0) {
-               set_host_byte(sc, DID_NO_CONNECT);
-               goto out_done;
-       }
+       target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
+                       tl_cmd->sc->device->lun, transfer_length,
+                       TCM_SIMPLE_TAG, sc->sc_data_direction, 0);
+
+       if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
+                              scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
+                              scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
+                              GFP_ATOMIC))
+               return;
+
+       target_queue_submission(se_cmd);
        return;
 
 out_done:
-       kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
        sc->scsi_done(sc);
 }
 
@@ -175,24 +174,18 @@ out_done:
  */
 static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 {
-       struct tcm_loop_cmd *tl_cmd;
+       struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
 
        pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
                 __func__, sc->device->host->host_no, sc->device->id,
                 sc->device->channel, sc->device->lun, sc->cmnd[0],
                 scsi_bufflen(sc));
 
-       tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
-       if (!tl_cmd) {
-               set_host_byte(sc, DID_ERROR);
-               sc->scsi_done(sc);
-               return 0;
-       }
-
+       memset(tl_cmd, 0, sizeof(*tl_cmd));
        tl_cmd->sc = sc;
        tl_cmd->sc_cmd_tag = sc->request->tag;
-       INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
-       queue_work(tcm_loop_workqueue, &tl_cmd->work);
+
+       tcm_loop_target_queue_cmd(tl_cmd);
        return 0;
 }
 
@@ -320,6 +313,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
        .dma_boundary           = PAGE_SIZE - 1,
        .module                 = THIS_MODULE,
        .track_queue_depth      = 1,
+       .cmd_size               = sizeof(struct tcm_loop_cmd),
 };
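
Setting .cmd_size makes the SCSI midlayer reserve a struct tcm_loop_cmd
alongside every preallocated command, which is what allows queuecommand
above to drop the GFP_ATOMIC kmem_cache allocation and fetch the descriptor
with scsi_cmd_priv(); the explicit memset() is kept to guard against stale
private data when a request is reused.
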
 
 static int tcm_loop_driver_probe(struct device *dev)
@@ -580,7 +574,6 @@ static int tcm_loop_queue_data_or_status(const char *func,
        if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
            (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
                scsi_set_resid(sc, se_cmd->residual_count);
-       sc->scsi_done(sc);
        return 0;
 }
 
@@ -1164,17 +1157,13 @@ static int __init tcm_loop_fabric_init(void)
 {
        int ret = -ENOMEM;
 
-       tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
-       if (!tcm_loop_workqueue)
-               goto out;
-
        tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
                                sizeof(struct tcm_loop_cmd),
                                __alignof__(struct tcm_loop_cmd),
                                0, NULL);
        if (!tcm_loop_cmd_cache) {
                pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
-               goto out_destroy_workqueue;
+               goto out;
        }
 
        ret = tcm_loop_alloc_core_bus();
@@ -1191,8 +1180,6 @@ out_release_core_bus:
        tcm_loop_release_core_bus();
 out_destroy_cache:
        kmem_cache_destroy(tcm_loop_cmd_cache);
-out_destroy_workqueue:
-       destroy_workqueue(tcm_loop_workqueue);
 out:
        return ret;
 }
@@ -1202,7 +1189,6 @@ static void __exit tcm_loop_fabric_exit(void)
        target_unregister_template(&loop_ops);
        tcm_loop_release_core_bus();
        kmem_cache_destroy(tcm_loop_cmd_cache);
-       destroy_workqueue(tcm_loop_workqueue);
 }
 
 MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
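
Taken together, the tcm_loop conversion above shows the new three-stage
submission API: target_init_cmd() to take the session reference,
target_submit_prep() to attach CDB/LUN/SGLs, and target_queue_submission()
to hand the command to the per-CPU submission queue. A condensed sketch for
a generic fabric driver (hedged; the "my_fabric_" name is a placeholder, and
a process-context caller could call target_submit() directly instead of
queueing):

	static void my_fabric_queue_cmd(struct se_cmd *se_cmd,
					struct se_session *sess,
					unsigned char *cdb, unsigned char *sense,
					u64 lun, u32 len, int dir)
	{
		if (target_init_cmd(se_cmd, sess, sense, lun, len,
				    TCM_SIMPLE_TAG, dir, 0))
			return;	/* session is being stopped */

		if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
				       NULL, 0, GFP_ATOMIC))
			return;	/* LIO already queued a check condition */

		target_queue_submission(se_cmd);
	}
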
index d311090..437663b 100644 (file)
@@ -16,7 +16,6 @@ struct tcm_loop_cmd {
        struct scsi_cmnd *sc;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tl_se_cmd;
-       struct work_struct work;
        struct completion tmr_done;
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
index 2a6165f..ce84f93 100644 (file)
@@ -1218,11 +1218,9 @@ static void sbp_handle_command(struct sbp_target_request *req)
 
        /* only used for printk until we do TMRs */
        req->se_cmd.tag = req->orb_pointer;
-       if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
-                             req->sense_buf, unpacked_lun, data_length,
-                             TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
-               goto err;
-
+       target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+                         req->sense_buf, unpacked_lun, data_length,
+                         TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
        return;
 
 err:
index f043522..9cb1ca8 100644 (file)
@@ -1494,7 +1494,7 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
 {
        struct t10_wwn *t10_wwn = to_t10_wwn(item);
        struct se_device *dev = t10_wwn->t10_dev;
-       unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+       unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
 
        /*
         * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
@@ -1536,7 +1536,6 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
         * Also, strip any newline added from the userspace
         * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
         */
-       memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
        snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
        snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
                        "%s", strstrip(buf));
@@ -1556,11 +1555,9 @@ static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
 {
        struct t10_wwn *t10_wwn = to_t10_wwn(item);
        struct t10_vpd *vpd;
-       unsigned char buf[VPD_TMP_BUF_SIZE];
+       unsigned char buf[VPD_TMP_BUF_SIZE] = { };
        ssize_t len = 0;
 
-       memset(buf, 0, VPD_TMP_BUF_SIZE);
-
        spin_lock(&t10_wwn->t10_vpd_lock);
        list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
                if (!vpd->protocol_identifier_set)
@@ -1663,9 +1660,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
 {
        struct se_node_acl *se_nacl;
        struct t10_pr_registration *pr_reg;
-       char i_buf[PR_REG_ISID_ID_LEN];
-
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
 
        pr_reg = dev->dev_pr_res_holder;
        if (!pr_reg)
@@ -2286,7 +2281,7 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
        struct se_hba *hba = dev->se_hba;
        struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
        struct t10_alua_lu_gp_member *lu_gp_mem;
-       unsigned char buf[LU_GROUP_NAME_BUF];
+       unsigned char buf[LU_GROUP_NAME_BUF] = { };
        int move = 0;
 
        lu_gp_mem = dev->dev_alua_lu_gp_mem;
@@ -2297,7 +2292,6 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
                pr_err("ALUA LU Group Alias too large!\n");
                return -EINVAL;
        }
-       memset(buf, 0, LU_GROUP_NAME_BUF);
        memcpy(buf, page, count);
        /*
         * Any ALUA logical unit alias besides "NULL" means we will be
@@ -2615,9 +2609,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
        struct se_hba *hba;
        struct t10_alua_lu_gp_member *lu_gp_mem;
        ssize_t len = 0, cur_len;
-       unsigned char buf[LU_GROUP_NAME_BUF];
-
-       memset(buf, 0, LU_GROUP_NAME_BUF);
+       unsigned char buf[LU_GROUP_NAME_BUF] = { };
 
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
@@ -3020,9 +3012,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
        struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
        struct se_lun *lun;
        ssize_t len = 0, cur_len;
-       unsigned char buf[TG_PT_GROUP_NAME_BUF];
-
-       memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+       unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
 
        spin_lock(&tg_pt_gp->tg_pt_gp_lock);
        list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
@@ -3409,11 +3399,10 @@ static struct config_group *target_core_call_addhbatotarget(
 {
        char *se_plugin_str, *str, *str2;
        struct se_hba *hba;
-       char buf[TARGET_CORE_NAME_MAX_LEN];
+       char buf[TARGET_CORE_NAME_MAX_LEN] = { };
        unsigned long plugin_dep_id = 0;
        int ret;
 
-       memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
        if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
                pr_err("Passed *name strlen(): %d exceeds"
                        " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
index 7787c52..a8df9f0 100644 (file)
@@ -735,8 +735,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 
        dev->queue_cnt = nr_cpu_ids;
        for (i = 0; i < dev->queue_cnt; i++) {
-               INIT_LIST_HEAD(&dev->queues[i].state_list);
-               spin_lock_init(&dev->queues[i].lock);
+               struct se_device_queue *q;
+
+               q = &dev->queues[i];
+               INIT_LIST_HEAD(&q->state_list);
+               spin_lock_init(&q->lock);
+
+               init_llist_head(&q->sq.cmd_list);
+               INIT_WORK(&q->sq.work, target_queued_submit_work);
        }
 
        dev->se_hba = hba;
@@ -1029,7 +1035,7 @@ int core_dev_setup_virtual_lun0(void)
 {
        struct se_hba *hba;
        struct se_device *dev;
-       char buf[] = "rd_pages=8,rd_nullio=1";
+       char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
        int ret;
 
        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
index ee85602..fc7edc0 100644 (file)
@@ -892,6 +892,7 @@ static void target_fabric_release_wwn(struct config_item *item)
        struct target_fabric_configfs *tf = wwn->wwn_tf;
 
        configfs_remove_default_groups(&wwn->fabric_stat_group);
+       configfs_remove_default_groups(&wwn->param_group);
        tf->tf_ops->fabric_drop_wwn(wwn);
 }
 
@@ -918,6 +919,57 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
 
 /* End of tfc_wwn_fabric_stats_cit */
 
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
+                                              char *page)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+                                         param_group);
+       return sprintf(page, "%d\n",
+                      wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
+                      SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
+}
+
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
+                                               const char *page, size_t count)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+                                         param_group);
+       int compl_val;
+
+       if (kstrtoint(page, 0, &compl_val))
+               return -EINVAL;
+
+       switch (compl_val) {
+       case SE_COMPL_AFFINITY_CPUID:
+               wwn->cmd_compl_affinity = compl_val;
+               break;
+       case SE_COMPL_AFFINITY_CURR_CPU:
+               wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
+               break;
+       default:
+               if (compl_val < 0 || compl_val >= nr_cpu_ids ||
+                   !cpu_online(compl_val)) {
+                       pr_err("Command completion value must be between %d and %d or an online CPU.\n",
+                              SE_COMPL_AFFINITY_CPUID,
+                              SE_COMPL_AFFINITY_CURR_CPU);
+                       return -EINVAL;
+               }
+               wwn->cmd_compl_affinity = compl_val;
+       }
+
+       return count;
+}
+CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
+
+static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
+       &target_fabric_wwn_attr_cmd_completion_affinity,
+       NULL,
+};
+
+TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
+
 /* Start of tfc_wwn_cit */
 
 static struct config_group *target_fabric_make_wwn(
@@ -937,6 +989,7 @@ static struct config_group *target_fabric_make_wwn(
        if (!wwn || IS_ERR(wwn))
                return ERR_PTR(-EINVAL);
 
+       wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
        wwn->wwn_tf = tf;
 
        config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
@@ -945,6 +998,10 @@ static struct config_group *target_fabric_make_wwn(
                        &tf->tf_wwn_fabric_stats_cit);
        configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
 
+       config_group_init_type_name(&wwn->param_group, "param",
+                       &tf->tf_wwn_param_cit);
+       configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
+
        if (tf->tf_ops->add_wwn_groups)
                tf->tf_ops->add_wwn_groups(wwn);
        return &wwn->wwn_group;
@@ -974,6 +1031,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf)
        target_fabric_setup_discovery_cit(tf);
        target_fabric_setup_wwn_cit(tf);
        target_fabric_setup_wwn_fabric_stats_cit(tf);
+       target_fabric_setup_wwn_param_cit(tf);
        target_fabric_setup_tpg_cit(tf);
        target_fabric_setup_tpg_base_cit(tf);
        target_fabric_setup_tpg_port_cit(tf);
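
The new per-WWN "param" group exposes cmd_completion_affinity: writing an
online CPU number pins all completion work for that WWN's commands to that
CPU, SE_COMPL_AFFINITY_CURR_CPU maps to WORK_CPU_UNBOUND (complete on
whatever CPU runs the work), and SE_COMPL_AFFINITY_CPUID restores the
default of completing on the submitting command's cpuid. The dispatch side
(see the target_complete_cmd() hunk in target_core_transport.c below)
reduces to:

	cpu = (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) ?
		cmd->cpuid : wwn->cmd_compl_affinity;
	queue_work_on(cpu, target_completion_wq, &cmd->work);
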
index 5a66854..ef4a8e1 100644 (file)
@@ -498,6 +498,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
 
        prot_length = nolb * se_dev->prot_length;
 
+       memset(buf, 0xff, bufsize);
        for (prot = 0; prot < prot_length;) {
                sector_t len = min_t(sector_t, bufsize, prot_length - prot);
                ssize_t ret = kernel_write(prot_fd, buf, len, &pos);
@@ -523,7 +524,6 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
                pr_err("Unable to allocate FILEIO prot buf\n");
                return -ENOMEM;
        }
-       memset(buf, 0xff, PAGE_SIZE);
 
        rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
 
@@ -882,7 +882,6 @@ static int fd_format_prot(struct se_device *dev)
                 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
                                        dev->prot_length);
 
-       memset(buf, 0xff, unit_size);
        ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
                              buf, unit_size);
        vfree(buf);
index ee3d520..d6fdd1c 100644 (file)
@@ -61,9 +61,18 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam
                return NULL;
        }
 
+       ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
+                                  GFP_KERNEL);
+       if (!ib_dev->ibd_plug)
+               goto free_dev;
+
        pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
 
        return &ib_dev->dev;
+
+free_dev:
+       kfree(ib_dev);
+       return NULL;
 }
 
 static int iblock_configure_device(struct se_device *dev)
@@ -171,6 +180,7 @@ static void iblock_dev_call_rcu(struct rcu_head *p)
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 
+       kfree(ib_dev->ibd_plug);
        kfree(ib_dev);
 }
 
@@ -188,6 +198,33 @@ static void iblock_destroy_device(struct se_device *dev)
        bioset_exit(&ib_dev->ibd_bio_set);
 }
 
+static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
+{
+       struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
+       struct iblock_dev_plug *ib_dev_plug;
+
+       /*
+        * Each se_device has a per-CPU work this can be run from, so we
+        * shouldn't have multiple threads on the same CPU calling this
+        * at the same time.
+        */
+       ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()];
+       if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
+               return NULL;
+
+       blk_start_plug(&ib_dev_plug->blk_plug);
+       return &ib_dev_plug->se_plug;
+}
+
+static void iblock_unplug_device(struct se_dev_plug *se_plug)
+{
+       struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
+                                       struct iblock_dev_plug, se_plug);
+
+       blk_finish_plug(&ib_dev_plug->blk_plug);
+       clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
+}
+
 static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
@@ -304,9 +341,8 @@ static void iblock_bio_done(struct bio *bio)
        iblock_complete_cmd(cmd);
 }
 
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
-              int op_flags)
+static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
+                                 unsigned int opf)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;
@@ -326,7 +362,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;
-       bio_set_op_attrs(bio, op, op_flags);
+       bio->bi_opf = opf;
 
        return bio;
 }
@@ -335,7 +371,10 @@ static void iblock_submit_bios(struct bio_list *list)
 {
        struct blk_plug plug;
        struct bio *bio;
-
+       /*
+        * The block layer handles nested plugs, so just plug/unplug here to
+        * cover fabric drivers that don't support batching of multi-bio cmds.
+        */
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
@@ -477,7 +516,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
                goto fail;
        cmd->priv = ibr;
 
-       bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
+       bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
        if (!bio)
                goto fail_free_ibr;
 
@@ -490,8 +529,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
 
-                       bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
-                                            0);
+                       bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
                        if (!bio)
                                goto fail_put_bios;
 
@@ -685,9 +723,11 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
+       unsigned int opf;
        unsigned bio_cnt;
-       int i, rc, op, op_flags = 0;
+       int i, rc;
        struct sg_mapping_iter prot_miter;
+       unsigned int miter_dir;
 
        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -696,15 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                 * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
-               op = REQ_OP_WRITE;
+               opf = REQ_OP_WRITE;
+               miter_dir = SG_MITER_TO_SG;
                if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
-                               op_flags = REQ_FUA;
+                               opf |= REQ_FUA;
                        else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-                               op_flags = REQ_FUA;
+                               opf |= REQ_FUA;
                }
        } else {
-               op = REQ_OP_READ;
+               opf = REQ_OP_READ;
+               miter_dir = SG_MITER_FROM_SG;
        }
 
        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -718,7 +760,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                return 0;
        }
 
-       bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
+       bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
        if (!bio)
                goto fail_free_ibr;
 
@@ -730,8 +772,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
-                              op == REQ_OP_READ ? SG_MITER_FROM_SG :
-                                                  SG_MITER_TO_SG);
+                              miter_dir);
 
        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
@@ -752,8 +793,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                bio_cnt = 0;
                        }
 
-                       bio = iblock_get_bio(cmd, block_lba, sg_num, op,
-                                            op_flags);
+                       bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
                        if (!bio)
                                goto fail_put_bios;
 
@@ -813,7 +853,8 @@ static unsigned int iblock_get_lbppbe(struct se_device *dev)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
-       int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+       unsigned int logs_per_phys =
+               bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
 
        return ilog2(logs_per_phys);
 }
@@ -867,6 +908,8 @@ static const struct target_backend_ops iblock_ops = {
        .configure_device       = iblock_configure_device,
        .destroy_device         = iblock_destroy_device,
        .free_device            = iblock_free_device,
+       .plug_device            = iblock_plug_device,
+       .unplug_device          = iblock_unplug_device,
        .parse_cdb              = iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
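
The plug_device/unplug_device pair gives the submission path a per-CPU
blk_plug that batches bios across commands, with IBD_PLUGF_PLUGGED guarding
against re-plugging on the same CPU. A hedged sketch of how a submission
loop can use the hooks (illustrative; the real caller is the queued
submission work in target_core_transport.c):

	static void example_submit_batch(struct se_device *dev,
					 struct se_cmd **cmds, int nr_cmds)
	{
		struct se_dev_plug *plug = NULL;
		int i;

		if (dev->transport->plug_device)
			plug = dev->transport->plug_device(dev);

		for (i = 0; i < nr_cmds; i++)
			target_submit(cmds[i]);	/* bios join the shared plug */

		if (plug)
			dev->transport->unplug_device(plug);
	}
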
index cefc641..8c55375 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/atomic.h>
 #include <linux/refcount.h>
+#include <linux/blkdev.h>
 #include <target/target_core_base.h>
 
 #define IBLOCK_VERSION         "4.0"
@@ -17,6 +18,14 @@ struct iblock_req {
 
 #define IBDF_HAS_UDEV_PATH             0x01
 
+#define IBD_PLUGF_PLUGGED              0x01
+
+struct iblock_dev_plug {
+       struct se_dev_plug se_plug;
+       struct blk_plug blk_plug;
+       unsigned long flags;
+};
+
 struct iblock_dev {
        struct se_device dev;
        unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
@@ -24,6 +33,7 @@ struct iblock_dev {
        struct bio_set  ibd_bio_set;
        struct block_device *ibd_bd;
        bool ibd_readonly;
+       struct iblock_dev_plug *ibd_plug;
 } ____cacheline_aligned;
 
 #endif /* TARGET_CORE_IBLOCK_H */
index e7b3c6e..a343bcf 100644 (file)
@@ -34,6 +34,7 @@ struct target_fabric_configfs {
        struct config_item_type tf_discovery_cit;
        struct config_item_type tf_wwn_cit;
        struct config_item_type tf_wwn_fabric_stats_cit;
+       struct config_item_type tf_wwn_param_cit;
        struct config_item_type tf_tpg_cit;
        struct config_item_type tf_tpg_base_cit;
        struct config_item_type tf_tpg_lun_cit;
@@ -153,6 +154,7 @@ void        target_qf_do_work(struct work_struct *work);
 bool   target_check_wce(struct se_device *dev);
 bool   target_check_fua(struct se_device *dev);
 void   __target_execute_cmd(struct se_cmd *, bool);
+void   target_queued_submit_work(struct work_struct *work);
 
 /* target_core_stat.c */
 void   target_stat_setup_dev_default_groups(struct se_device *);
index d4cc43a..d61dc16 100644 (file)
@@ -896,9 +896,8 @@ static void core_scsi3_aptpl_reserve(
        struct se_node_acl *node_acl,
        struct t10_pr_registration *pr_reg)
 {
-       char i_buf[PR_REG_ISID_ID_LEN];
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
 
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
        spin_lock(&dev->dev_reservation_lock);
@@ -928,12 +927,10 @@ static int __core_scsi3_check_aptpl_registration(
 {
        struct t10_pr_registration *pr_reg, *pr_reg_tmp;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
-       unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
-       unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+       unsigned char i_port[PR_APTPL_MAX_IPORT_LEN] = { };
+       unsigned char t_port[PR_APTPL_MAX_TPORT_LEN] = { };
        u16 tpgt;
 
-       memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
-       memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
        /*
         * Copy Initiator Port information from struct se_node_acl
         */
@@ -1023,9 +1020,8 @@ static void __core_scsi3_dump_registration(
        enum register_type register_type)
 {
        struct se_portal_group *se_tpg = nacl->se_tpg;
-       char i_buf[PR_REG_ISID_ID_LEN];
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
 
-       memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
        pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
@@ -1204,10 +1200,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
        struct se_session *sess)
 {
        struct se_portal_group *tpg = nacl->se_tpg;
-       unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+       unsigned char buf[PR_REG_ISID_LEN] = { };
+       unsigned char *isid_ptr = NULL;
 
        if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
-               memset(&buf[0], 0, PR_REG_ISID_LEN);
                tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
                                        PR_REG_ISID_LEN);
                isid_ptr = &buf[0];
@@ -1285,11 +1281,10 @@ static void __core_scsi3_free_registration(
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
        struct se_dev_entry *deve;
-       char i_buf[PR_REG_ISID_ID_LEN];
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
 
        lockdep_assert_held(&pr_tmpl->registration_lock);
 
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
        if (!list_empty(&pr_reg->pr_reg_list))
@@ -2059,7 +2054,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
        struct se_portal_group *se_tpg;
        struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
-       unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+       unsigned char isid_buf[PR_REG_ISID_LEN] = { };
+       unsigned char *isid_ptr = NULL;
        sense_reason_t ret = TCM_NO_SENSE;
        int pr_holder = 0, type;
 
@@ -2070,7 +2066,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
        se_tpg = se_sess->se_tpg;
 
        if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
-               memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
                se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
                                PR_REG_ISID_LEN);
                isid_ptr = &isid_buf[0];
@@ -2282,11 +2277,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
        struct se_lun *se_lun = cmd->se_lun;
        struct t10_pr_registration *pr_reg, *pr_res_holder;
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
-       char i_buf[PR_REG_ISID_ID_LEN];
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
        sense_reason_t ret;
 
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
-
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2457,12 +2450,11 @@ static void __core_scsi3_complete_pro_release(
        int unreg)
 {
        const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
-       char i_buf[PR_REG_ISID_ID_LEN];
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
        int pr_res_type = 0, pr_res_scope = 0;
 
        lockdep_assert_held(&dev->dev_reservation_lock);
 
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
        /*
         * Go ahead and release the current PR reservation holder.
@@ -2768,11 +2760,10 @@ static void __core_scsi3_complete_pro_preempt(
 {
        struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
        const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
-       char i_buf[PR_REG_ISID_ID_LEN];
+       char i_buf[PR_REG_ISID_ID_LEN] = { };
 
        lockdep_assert_held(&dev->dev_reservation_lock);
 
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
        /*
         * Do an implicit RELEASE of the existing reservation.
@@ -3158,7 +3149,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
        const unsigned char *initiator_str;
-       char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
+       char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { };
        u32 tid_len, tmp_tid_len;
        int new_reg = 0, type, scope, matching_iname;
        sense_reason_t ret;
@@ -3170,7 +3161,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
-       memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        se_tpg = se_sess->se_tpg;
        tf_ops = se_tpg->se_tpg_tfo;
        /*
index 9ee797b..1c9aeab 100644 (file)
@@ -34,8 +34,6 @@
 #include "target_core_internal.h"
 #include "target_core_pscsi.h"
 
-#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
-
 static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 {
        return container_of(dev, struct pscsi_dev_virt, dev);
@@ -620,8 +618,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
                        unsigned char *buf;
 
                        buf = transport_kmap_data_sg(cmd);
-                       if (!buf)
+                       if (!buf) {
                                ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
+                       }
 
                        if (cdb[0] == MODE_SENSE_10) {
                                if (!(buf[3] & 0x80))
index bf936bb..6648c1c 100644 (file)
@@ -530,12 +530,13 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 }
 
 enum {
-       Opt_rd_pages, Opt_rd_nullio, Opt_err
+       Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err
 };
 
 static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
+       {Opt_rd_dummy, "rd_dummy=%d"},
        {Opt_err, NULL}
 };
 
@@ -574,6 +575,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
+               case Opt_rd_dummy:
+                       match_int(args, &arg);
+                       if (arg != 1)
+                               break;
+
+                       pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg);
+                       rd_dev->rd_flags |= RDF_DUMMY;
+                       break;
                default:
                        break;
                }
@@ -590,12 +599,22 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
-                       "  SG_table_count: %u  nullio: %d\n", rd_dev->rd_page_count,
+                       "  SG_table_count: %u  nullio: %d dummy: %d\n",
+                       rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
-                       !!(rd_dev->rd_flags & RDF_NULLIO));
+                       !!(rd_dev->rd_flags & RDF_NULLIO),
+                       !!(rd_dev->rd_flags & RDF_DUMMY));
        return bl;
 }
 
+static u32 rd_get_device_type(struct se_device *dev)
+{
+       if (RD_DEV(dev)->rd_flags & RDF_DUMMY)
+               return 0x3f; /* Unknown device type, not connected */
+       else
+               return sbc_get_device_type(dev);
+}
+
 static sector_t rd_get_blocks(struct se_device *dev)
 {
        struct rd_dev *rd_dev = RD_DEV(dev);
@@ -647,7 +666,7 @@ static const struct target_backend_ops rd_mcp_ops = {
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
-       .get_device_type        = sbc_get_device_type,
+       .get_device_type        = rd_get_device_type,
        .get_blocks             = rd_get_blocks,
        .init_prot              = rd_init_prot,
        .free_prot              = rd_free_prot,
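
rd_dummy ties this file's hunks to the INQUIRY change below: the virtual
LUN0 device is now created with the flag set (see the
core_dev_setup_virtual_lun0() hunk above), rd_get_device_type() then reports
peripheral type 0x3f ("not connected") on its own, and spc_emulate_inquiry()
can drop its tpg_virt_lun0 special case. The control string used for LUN0:

	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
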
index 8b88f9b..9ffda5c 100644 (file)
@@ -28,6 +28,7 @@ struct rd_dev_sg_table {
 
 #define RDF_HAS_PAGE_COUNT     0x01
 #define RDF_NULLIO             0x02
+#define RDF_DUMMY              0x04
 
 struct rd_dev {
        struct se_device dev;
index f7c527a..7b07e55 100644 (file)
@@ -448,7 +448,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
        sense_reason_t ret;
        unsigned int offset;
        size_t rc;
-       int i;
+       int sg_cnt;
 
        buf = kzalloc(cmp_len, GFP_KERNEL);
        if (!buf) {
@@ -467,7 +467,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
         */
        offset = 0;
        ret = TCM_NO_SENSE;
-       for_each_sg(read_sgl, sg, read_nents, i) {
+       for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
                unsigned int len = min(sg->length, cmp_len);
                unsigned char *addr = kmap_atomic(sg_page(sg));
 
index ca5579e..70a6618 100644 (file)
@@ -701,7 +701,6 @@ static sense_reason_t
 spc_emulate_inquiry(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
        unsigned char *rbuf;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned char *buf;
@@ -715,10 +714,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
-       if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
-               buf[0] = 0x3f; /* Not connected */
-       else
-               buf[0] = dev->transport->get_device_type(dev);
+       buf[0] = dev->transport->get_device_type(dev);
 
        if (!(cdb[1] & 0x1)) {
                if (cdb[2]) {
index 237309d..62d15bc 100644 (file)
@@ -31,9 +31,6 @@
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
 #endif
 
-#define NONE           "None"
-#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
-
 #define SCSI_LU_INDEX                  1
 #define LU_COUNT                       1
 
index 7347285..e7fcbc0 100644 (file)
@@ -124,6 +124,8 @@ void core_tmr_abort_task(
        int i;
 
        for (i = 0; i < dev->queue_cnt; i++) {
+               flush_work(&dev->queues[i].sq.work);
+
                spin_lock_irqsave(&dev->queues[i].lock, flags);
                list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
                                         state_list) {
@@ -302,6 +304,8 @@ static void core_tmr_drain_state_list(
         * in the Control Mode Page.
         */
        for (i = 0; i < dev->queue_cnt; i++) {
+               flush_work(&dev->queues[i].sq.work);
+
                spin_lock_irqsave(&dev->queues[i].lock, flags);
                list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
                                         state_list) {
index 5ecb9f1..8fbfe75 100644 (file)
@@ -41,6 +41,7 @@
 #include <trace/events/target.h>
 
 static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
        if (!target_completion_wq)
                goto out_free_lba_map_mem_cache;
 
+       target_submission_wq = alloc_workqueue("target_submission",
+                                              WQ_MEM_RECLAIM, 0);
+       if (!target_submission_wq)
+               goto out_free_completion_wq;
+
        return 0;
 
+out_free_completion_wq:
+       destroy_workqueue(target_completion_wq);
 out_free_lba_map_mem_cache:
        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 out_free_lba_map_cache:
@@ -153,6 +161,7 @@ out:
 
 void release_se_kmem_caches(void)
 {
+       destroy_workqueue(target_submission_wq);
        destroy_workqueue(target_completion_wq);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -848,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 /* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-       int success;
+       struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+       int success, cpu;
        unsigned long flags;
 
        if (target_cmd_interrupted(cmd))
@@ -875,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
                  target_complete_failure_work);
-       queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+
+       if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+               cpu = cmd->cpuid;
+       else
+               cpu = wwn->cmd_compl_affinity;
+
+       queue_work_on(cpu, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
@@ -1304,7 +1320,7 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
  * Compare the data buffer size from the CDB with the data buffer limit from the transport
  * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
  *
- * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
  *
  * Return: TCM_NO_SENSE
  */
@@ -1371,7 +1387,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
  *
  * Preserves the value of @cmd->tag.
  */
-void transport_init_se_cmd(
+void __target_init_cmd(
        struct se_cmd *cmd,
        const struct target_core_fabric_ops *tfo,
        struct se_session *se_sess,
@@ -1382,7 +1398,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
-       INIT_LIST_HEAD(&cmd->se_cmd_list);
        INIT_LIST_HEAD(&cmd->state_list);
        init_completion(&cmd->t_transport_stop_comp);
        cmd->free_compl = NULL;
@@ -1391,6 +1406,7 @@ void transport_init_se_cmd(
        INIT_WORK(&cmd->work, NULL);
        kref_init(&cmd->cmd_kref);
 
+       cmd->t_task_cdb = &cmd->__t_task_cdb[0];
        cmd->se_tfo = tfo;
        cmd->se_sess = se_sess;
        cmd->data_length = data_length;
@@ -1404,7 +1420,7 @@ void transport_init_se_cmd(
 
        cmd->state_active = false;
 }
-EXPORT_SYMBOL(transport_init_se_cmd);
+EXPORT_SYMBOL(__target_init_cmd);
 
 static sense_reason_t
 transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -1428,11 +1444,10 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 }
 
 sense_reason_t
-target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
 {
        sense_reason_t ret;
 
-       cmd->t_task_cdb = &cmd->__t_task_cdb[0];
        /*
         * Ensure that the received CDB is less than the max (252 + 8) bytes
         * for VARIABLE_LENGTH_CMD
@@ -1450,8 +1465,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
         * setup the pointer from __t_task_cdb to t_task_cdb.
         */
        if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
-               cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
-                                               GFP_KERNEL);
+               cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
                if (!cmd->t_task_cdb) {
                        pr_err("Unable to allocate cmd->t_task_cdb"
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
@@ -1573,46 +1587,31 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 }
 
 /**
- * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
- *                      se_cmd + use pre-allocated SGL memory.
- *
- * @se_cmd: command descriptor to submit
+ * target_init_cmd - initialize se_cmd
+ * @se_cmd: command descriptor to init
  * @se_sess: associated se_sess for endpoint
- * @cdb: pointer to SCSI CDB
  * @sense: pointer to SCSI sense buffer
  * @unpacked_lun: unpacked LUN to reference for struct se_lun
  * @data_length: fabric expected data transfer length
  * @task_attr: SAM task attribute
  * @data_dir: DMA data direction
  * @flags: flags for command submission from target_sc_flags_tables
- * @sgl: struct scatterlist memory for unidirectional mapping
- * @sgl_count: scatterlist count for unidirectional mapping
- * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
- * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
- * @sgl_prot: struct scatterlist memory protection information
- * @sgl_prot_count: scatterlist count for protection information
  *
  * Task tags are supported if the caller has set @se_cmd->tag.
  *
- * Returns non zero to signal active I/O shutdown failure.  All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
+ * Returns:
+ *     - less than zero to signal active I/O shutdown failure.
+ *     - zero on success.
  *
- * This may only be called from process context, and also currently
- * assumes internal allocation of fabric payload buffer by target-core.
+ * Fabric drivers that call target_stop_session() must check the return
+ * code and handle failures; for all other drivers this never fails and
+ * the return code can be ignored.
  */
-int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
-               unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
-               u32 data_length, int task_attr, int data_dir, int flags,
-               struct scatterlist *sgl, u32 sgl_count,
-               struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
-               struct scatterlist *sgl_prot, u32 sgl_prot_count)
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+                   unsigned char *sense, u64 unpacked_lun,
+                   u32 data_length, int task_attr, int data_dir, int flags)
 {
        struct se_portal_group *se_tpg;
-       sense_reason_t rc;
-       int ret;
-
-       might_sleep();
 
        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
@@ -1620,53 +1619,72 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 
        if (flags & TARGET_SCF_USE_CPUID)
                se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+       /*
+        * Signal bidirectional data payloads to target-core
+        */
+       if (flags & TARGET_SCF_BIDI_OP)
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
+       if (flags & TARGET_SCF_UNKNOWN_SIZE)
+               se_cmd->unknown_data_length = 1;
        /*
         * Initialize se_cmd for target operation.  From this point
         * exceptions are handled by sending exception status via
         * target_core_fabric_ops->queue_status() callback
         */
-       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                               data_length, data_dir, task_attr, sense,
-                               unpacked_lun);
+       __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+                         data_dir, task_attr, sense, unpacked_lun);
 
-       if (flags & TARGET_SCF_UNKNOWN_SIZE)
-               se_cmd->unknown_data_length = 1;
        /*
         * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
         * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
-       ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
-       if (ret)
-               return ret;
-       /*
-        * Signal bidirectional data payloads to target-core
-        */
-       if (flags & TARGET_SCF_BIDI_OP)
-               se_cmd->se_cmd_flags |= SCF_BIDI;
+       return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+}
+EXPORT_SYMBOL_GPL(target_init_cmd);
 
-       rc = target_cmd_init_cdb(se_cmd, cdb);
-       if (rc) {
-               transport_send_check_condition_and_sense(se_cmd, rc, 0);
-               target_put_sess_cmd(se_cmd);
-               return 0;
-       }
+/**
+ * target_submit_prep - prepare cmd for submission
+ * @se_cmd: command descriptor to prep
+ * @cdb: pointer to SCSI CDB
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: gfp allocation type
+ *
+ * Returns:
+ *     - less than zero to signal failure.
+ *     - zero on success.
+ *
+ * If failure is returned, LIO will use the caller's queue_status callback
+ * to complete the cmd.
+ */
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+                      struct scatterlist *sgl, u32 sgl_count,
+                      struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+                      struct scatterlist *sgl_prot, u32 sgl_prot_count,
+                      gfp_t gfp)
+{
+       sense_reason_t rc;
+
+       rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
+       if (rc)
+               goto send_cc_direct;
 
        /*
         * Locate se_lun pointer and attach it to struct se_cmd
         */
        rc = transport_lookup_cmd_lun(se_cmd);
-       if (rc) {
-               transport_send_check_condition_and_sense(se_cmd, rc, 0);
-               target_put_sess_cmd(se_cmd);
-               return 0;
-       }
+       if (rc)
+               goto send_cc_direct;
 
        rc = target_cmd_parse_cdb(se_cmd);
-       if (rc != 0) {
-               transport_generic_request_failure(se_cmd, rc);
-               return 0;
-       }
+       if (rc != 0)
+               goto generic_fail;
 
        /*
         * Save pointers for SGLs containing protection information,
@@ -1686,6 +1704,41 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        if (sgl_count != 0) {
                BUG_ON(!sgl);
 
+               rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+                               sgl_bidi, sgl_bidi_count);
+               if (rc != 0)
+                       goto generic_fail;
+       }
+
+       return 0;
+
+send_cc_direct:
+       transport_send_check_condition_and_sense(se_cmd, rc, 0);
+       target_put_sess_cmd(se_cmd);
+       return -EIO;
+
+generic_fail:
+       transport_generic_request_failure(se_cmd, rc);
+       return -EIO;
+}
+EXPORT_SYMBOL_GPL(target_submit_prep);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep must have been called on the cmd, and this must be
+ * called from process context.
+ */
+void target_submit(struct se_cmd *se_cmd)
+{
+       struct scatterlist *sgl = se_cmd->t_data_sg;
+       unsigned char *buf = NULL;
+
+       might_sleep();
+
+       if (se_cmd->t_data_nents != 0) {
+               BUG_ON(!sgl);
                /*
                 * A work-around for tcm_loop as some userspace code via
                * scsi-generic does not memset its associated read buffers,
@@ -1696,8 +1749,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                 */
                if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
                     se_cmd->data_direction == DMA_FROM_DEVICE) {
-                       unsigned char *buf = NULL;
-
                        if (sgl)
                                buf = kmap(sg_page(sgl)) + sgl->offset;
 
@@ -1707,12 +1758,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                        }
                }
 
-               rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
-                               sgl_bidi, sgl_bidi_count);
-               if (rc != 0) {
-                       transport_generic_request_failure(se_cmd, rc);
-                       return 0;
-               }
        }
 
        /*
@@ -1722,9 +1767,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        core_alua_check_nonop_delay(se_cmd);
 
        transport_handle_cdb_direct(se_cmd);
-       return 0;
 }
-EXPORT_SYMBOL(target_submit_cmd_map_sgls);
+EXPORT_SYMBOL_GPL(target_submit);
 
 /**
  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
@@ -1741,25 +1785,109 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
  *
  * Task tags are supported if the caller has set @se_cmd->tag.
  *
- * Returns non zero to signal active I/O shutdown failure.  All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
- *
  * This may only be called from process context, and also currently
  * assumes internal allocation of fabric payload buffer by target-core.
  *
 * It also assumes internal target core SGL memory allocation.
+ *
+ * This function must only be used by drivers that do their own
+ * sync during shutdown and do not use target_stop_session. If there
+ * is a failure, this function will call into the fabric driver's
+ * queue_status with a CHECK_CONDITION.
  */
-int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags)
 {
-       return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
-                       unpacked_lun, data_length, task_attr, data_dir,
-                       flags, NULL, 0, NULL, 0, NULL, 0);
+       int rc;
+
+       rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
+                            task_attr, data_dir, flags);
+       WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
+       if (rc)
+               return;
+
+       if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+                              GFP_KERNEL))
+               return;
+
+       target_submit(se_cmd);
 }
 EXPORT_SYMBOL(target_submit_cmd);
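For drivers converting away from target_submit_cmd_map_sgls(), the split interface is used roughly as follows. This is a minimal sketch, not taken from an in-tree fabric driver: the my_fabric_submit() name and its parameters are hypothetical, and TCM_SIMPLE_TAG/TARGET_SCF_ACK_KREF are just example choices (assumes target_core_fabric.h):

static void my_fabric_submit(struct se_cmd *cmd, struct se_session *sess,
			     unsigned char *cdb, unsigned char *sense,
			     u64 lun, u32 len, int dir)
{
	/*
	 * Stage 1: basic init and cmd_kref get. A non-zero return means
	 * the session is being torn down; the cmd must not be used.
	 */
	if (target_init_cmd(cmd, sess, sense, lun, len, TCM_SIMPLE_TAG,
			    dir, TARGET_SCF_ACK_KREF))
		return;

	/*
	 * Stage 2: CDB setup, LUN lookup and (optional) SGL mapping. On
	 * failure LIO has already completed the cmd via queue_status.
	 */
	if (target_submit_prep(cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;

	/* Stage 3: hand the cmd to the backend (process context only). */
	target_submit(cmd);
}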
 
+
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+       struct se_dev_plug *se_plug;
+
+       if (!se_dev->transport->plug_device)
+               return NULL;
+
+       se_plug = se_dev->transport->plug_device(se_dev);
+       if (!se_plug)
+               return NULL;
+
+       se_plug->se_dev = se_dev;
+       /*
+        * We have a ref to the lun at this point, but the cmds could
+        * complete before we unplug, so grab a ref to the se_device so we
+        * can call back into the backend.
+        */
+       config_group_get(&se_dev->dev_group);
+       return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+       struct se_device *se_dev = se_plug->se_dev;
+
+       se_dev->transport->unplug_device(se_plug);
+       config_group_put(&se_dev->dev_group);
+}
+
+void target_queued_submit_work(struct work_struct *work)
+{
+       struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+       struct se_cmd *se_cmd, *next_cmd;
+       struct se_dev_plug *se_plug = NULL;
+       struct se_device *se_dev = NULL;
+       struct llist_node *cmd_list;
+
+       cmd_list = llist_del_all(&sq->cmd_list);
+       if (!cmd_list)
+               /* Previous call took what we were queued to submit */
+               return;
+
+       cmd_list = llist_reverse_order(cmd_list);
+       llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+               if (!se_dev) {
+                       se_dev = se_cmd->se_dev;
+                       se_plug = target_plug_device(se_dev);
+               }
+
+               target_submit(se_cmd);
+       }
+
+       if (se_plug)
+               target_unplug_device(se_plug);
+}
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+void target_queue_submission(struct se_cmd *se_cmd)
+{
+       struct se_device *se_dev = se_cmd->se_dev;
+       int cpu = se_cmd->cpuid;
+       struct se_cmd_queue *sq;
+
+       sq = &se_dev->queues[cpu].sq;
+       llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+       queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+EXPORT_SYMBOL_GPL(target_queue_submission);
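The queueing path exists so fabric drivers that receive commands in contexts where target_submit() is not usable, or that previously bounced every command to a private workqueue, can defer only the final stage: target_queue_submission() adds the cmd to a per-CPU llist and schedules target_queued_submit_work() on the same CPU via queue_work_on(), preserving cache locality while letting the worker plug the backend device once per batch. The vhost-scsi conversion further down removes its dedicated vhost_scsi_workqueue on the strength of exactly this mechanism.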
+
 static void target_complete_tmr_failure(struct work_struct *work)
 {
        struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
@@ -1799,8 +1927,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
 
-       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                             0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+       __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+                         0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
        /*
         * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
         * allocation failure.
@@ -2778,9 +2906,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
         * invocations before se_cmd descriptor release.
         */
        if (ack_kref) {
-               if (!kref_get_unless_zero(&se_cmd->cmd_kref))
-                       return -EINVAL;
-
+               kref_get(&se_cmd->cmd_kref);
                se_cmd->se_cmd_flags |= SCF_ACK_KREF;
        }
 
index bf73cd5..bdfc057 100644 (file)
@@ -8,13 +8,12 @@
 
 #include <linux/spinlock.h>
 #include <linux/module.h>
-#include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/parser.h>
 #include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
@@ -111,6 +110,7 @@ struct tcmu_dev {
        struct kref kref;
 
        struct se_device se_dev;
+       struct se_dev_plug se_plug;
 
        char *name;
        struct se_hba *hba;
@@ -119,6 +119,7 @@ struct tcmu_dev {
 #define TCMU_DEV_BIT_BROKEN 1
 #define TCMU_DEV_BIT_BLOCKED 2
 #define TCMU_DEV_BIT_TMR_NOTIFY 3
+#define TCM_DEV_BIT_PLUGGED 4
        unsigned long flags;
 
        struct uio_info uio_info;
@@ -143,9 +144,9 @@ struct tcmu_dev {
        uint32_t dbi_max;
        uint32_t dbi_thresh;
        unsigned long *data_bitmap;
-       struct radix_tree_root data_blocks;
+       struct xarray data_blocks;
 
-       struct idr commands;
+       struct xarray commands;
 
        struct timer_list cmd_timer;
        unsigned int cmd_time_out;
@@ -500,13 +501,13 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
                                       int prev_dbi, int *iov_cnt)
 {
        struct page *page;
-       int ret, dbi;
+       int dbi;
 
        dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
        if (dbi == udev->dbi_thresh)
                return -1;
 
-       page = radix_tree_lookup(&udev->data_blocks, dbi);
+       page = xa_load(&udev->data_blocks, dbi);
        if (!page) {
                if (atomic_add_return(1, &global_db_count) >
                                      tcmu_global_max_blocks)
@@ -517,8 +518,7 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
                if (!page)
                        goto err_alloc;
 
-               ret = radix_tree_insert(&udev->data_blocks, dbi, page);
-               if (ret)
+               if (xa_store(&udev->data_blocks, dbi, page, GFP_NOIO))
                        goto err_insert;
        }
 
@@ -557,7 +557,7 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
 static inline struct page *
 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 {
-       return radix_tree_lookup(&udev->data_blocks, dbi);
+       return xa_load(&udev->data_blocks, dbi);
 }
 
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
@@ -959,6 +959,25 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
        return cmd_head;
 }
 
+static void tcmu_unplug_device(struct se_dev_plug *se_plug)
+{
+       struct se_device *se_dev = se_plug->se_dev;
+       struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+       clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
+       uio_event_notify(&udev->uio_info);
+}
+
+static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
+{
+       struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+       if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+               return &udev->se_plug;
+
+       return NULL;
+}
+
 /**
  * queue_cmd_ring - queue cmd to ring or internally
  * @tcmu_cmd: cmd to queue
@@ -977,8 +996,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
        struct tcmu_mailbox *mb = udev->mb_addr;
        struct tcmu_cmd_entry *entry;
        struct iovec *iov;
-       int iov_cnt, iov_bidi_cnt, cmd_id;
-       uint32_t cmd_head;
+       int iov_cnt, iov_bidi_cnt;
+       uint32_t cmd_id, cmd_head;
        uint64_t cdb_off;
        /* size of data buffer needed */
        size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
@@ -1031,8 +1050,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
                 */
                goto free_and_queue;
 
-       cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
-       if (cmd_id < 0) {
+       if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
+                    GFP_NOWAIT) < 0) {
                pr_err("tcmu: Could not allocate cmd id.\n");
 
                tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
@@ -1086,8 +1105,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 
        list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
 
-       /* TODO: only if FLUSH and FUA? */
-       uio_event_notify(&udev->uio_info);
+       if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+               uio_event_notify(&udev->uio_info);
 
        return 0;
 
@@ -1253,7 +1272,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
        pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
                 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
 
-       tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
+       tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
        if (!tmr)
                goto unlock;
 
@@ -1415,7 +1434,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
                }
                WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
 
-               cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
+               cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
                if (!cmd) {
                        pr_err("cmd_id %u not found, ring is broken\n",
                               entry->hdr.cmd_id);
@@ -1433,7 +1452,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
                free_space = tcmu_run_tmr_queue(udev);
 
        if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
-           idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+           xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
                /*
                 * Allocated blocks exceeded global block limit, currently no
                 * more pending or waiting commands so try to reclaim blocks.
@@ -1556,12 +1575,12 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        INIT_LIST_HEAD(&udev->qfull_queue);
        INIT_LIST_HEAD(&udev->tmr_queue);
        INIT_LIST_HEAD(&udev->inflight_queue);
-       idr_init(&udev->commands);
+       xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
 
        timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
        timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
 
-       INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+       xa_init(&udev->data_blocks);
 
        return &udev->se_dev;
 }
@@ -1585,19 +1604,19 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
        return -EINVAL;
 }
 
-static void tcmu_blocks_release(struct radix_tree_root *blocks,
-                               int start, int end)
+static void tcmu_blocks_release(struct xarray *blocks, unsigned long first,
+                               unsigned long last)
 {
-       int i;
+       XA_STATE(xas, blocks, first);
        struct page *page;
 
-       for (i = start; i < end; i++) {
-               page = radix_tree_delete(blocks, i);
-               if (page) {
-                       __free_page(page);
-                       atomic_dec(&global_db_count);
-               }
+       xas_lock(&xas);
+       xas_for_each(&xas, page, last) {
+               xas_store(&xas, NULL);
+               __free_page(page);
+               atomic_dec(&global_db_count);
        }
+       xas_unlock(&xas);
 }
 
 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
@@ -1616,7 +1635,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
        struct se_device *dev = &udev->se_dev;
        struct tcmu_cmd *cmd;
        bool all_expired = true;
-       int i;
+       unsigned long i;
 
        vfree(udev->mb_addr);
        udev->mb_addr = NULL;
@@ -1628,7 +1647,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
 
        /* Upper layer should drain all requests before calling this */
        mutex_lock(&udev->cmdr_lock);
-       idr_for_each_entry(&udev->commands, cmd, i) {
+       xa_for_each(&udev->commands, i, cmd) {
                if (tcmu_check_and_free_pending_cmd(cmd) != 0)
                        all_expired = false;
        }
@@ -1636,10 +1655,10 @@ static void tcmu_dev_kref_release(struct kref *kref)
        tcmu_remove_all_queued_tmr(udev);
        if (!list_empty(&udev->qfull_queue))
                all_expired = false;
-       idr_destroy(&udev->commands);
+       xa_destroy(&udev->commands);
        WARN_ON(!all_expired);
 
-       tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
+       tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max);
        bitmap_free(udev->data_bitmap);
        mutex_unlock(&udev->cmdr_lock);
 
@@ -2226,16 +2245,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 {
        struct tcmu_mailbox *mb;
        struct tcmu_cmd *cmd;
-       int i;
+       unsigned long i;
 
        mutex_lock(&udev->cmdr_lock);
 
-       idr_for_each_entry(&udev->commands, cmd, i) {
+       xa_for_each(&udev->commands, i, cmd) {
                pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
                          cmd->cmd_id, udev->name,
                          test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
 
-               idr_remove(&udev->commands, i);
+               xa_erase(&udev->commands, i);
                if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                        WARN_ON(!cmd->se_cmd);
                        list_del_init(&cmd->queue_entry);
@@ -2863,6 +2882,8 @@ static struct target_backend_ops tcmu_ops = {
        .configure_device       = tcmu_configure_device,
        .destroy_device         = tcmu_destroy_device,
        .free_device            = tcmu_free_device,
+       .unplug_device          = tcmu_unplug_device,
+       .plug_device            = tcmu_plug_device,
        .parse_cdb              = tcmu_parse_cdb,
        .tmr_notify             = tcmu_tmr_notify,
        .set_configfs_dev_params = tcmu_set_configfs_dev_params,
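For tcmu the plug is purely a wakeup-batching optimization: while TCM_DEV_BIT_PLUGGED is set, queue_cmd_ring() skips the per-command uio_event_notify(), and tcmu_unplug_device() clears the bit and issues a single notify for the whole batch once target_queued_submit_work() finishes.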
@@ -2923,7 +2944,7 @@ static void find_free_blocks(void)
                unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
 
                /* Release the block pages */
-               tcmu_blocks_release(&udev->data_blocks, start, end);
+               tcmu_blocks_release(&udev->data_blocks, start, end - 1);
                mutex_unlock(&udev->cmdr_lock);
 
                total_freed += end - start;
index 66d6f1d..d31ed07 100644 (file)
@@ -554,7 +554,7 @@ static int target_xcopy_setup_pt_cmd(
        }
        cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 
-       if (target_cmd_init_cdb(cmd, cdb))
+       if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL))
                return -EINVAL;
 
        cmd->tag = 0;
@@ -615,8 +615,8 @@ static int target_xcopy_read_source(
        pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
                (unsigned long long)src_lba, src_sectors, length);
 
-       transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
-                             DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+       __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+                         DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
 
        rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
                                remote_port);
@@ -660,8 +660,8 @@ static int target_xcopy_write_destination(
        pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
                (unsigned long long)dst_lba, dst_sectors, length);
 
-       transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
-                             DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+       __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+                         DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
 
        rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
                                remote_port);
index 768f250..410b723 100644 (file)
@@ -543,16 +543,22 @@ static void ft_send_work(struct work_struct *work)
 
        fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
        cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
+
        /*
         * Use a single se_cmd->cmd_kref as we expect to release se_cmd
         * directly from ft_check_stop_free callback in response path.
         */
-       if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
-                             &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-                             ntohl(fcp->fc_dl), task_attr, data_dir,
-                             TARGET_SCF_ACK_KREF))
+       if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess,
+                           &cmd->ft_sense_buffer[0],
+                           scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
+                           task_attr, data_dir, TARGET_SCF_ACK_KREF))
                goto err;
 
+       if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0,
+                              NULL, 0, GFP_KERNEL))
+               return;
+
+       target_submit(&cmd->se_cmd);
        pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
        return;
 
index 410fa89..7acb507 100644 (file)
@@ -1050,19 +1050,17 @@ static void usbg_cmd_work(struct work_struct *work)
        tv_nexus = tpg->tpg_nexus;
        dir = get_cmd_dir(cmd->cmd_buf);
        if (dir < 0) {
-               transport_init_se_cmd(se_cmd,
-                               tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
-                               tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
-                               cmd->prio_attr, cmd->sense_iu.sense,
-                               cmd->unpacked_lun);
+               __target_init_cmd(se_cmd,
+                                 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+                                 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+                                 cmd->prio_attr, cmd->sense_iu.sense,
+                                 cmd->unpacked_lun);
                goto out;
        }
 
-       if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
-                             cmd->sense_iu.sense, cmd->unpacked_lun, 0,
-                             cmd->prio_attr, dir, flags) < 0)
-               goto out;
-
+       target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
+                         cmd->sense_iu.sense, cmd->unpacked_lun, 0,
+                         cmd->prio_attr, dir, flags);
        return;
 
 out:
@@ -1181,19 +1179,17 @@ static void bot_cmd_work(struct work_struct *work)
        tv_nexus = tpg->tpg_nexus;
        dir = get_cmd_dir(cmd->cmd_buf);
        if (dir < 0) {
-               transport_init_se_cmd(se_cmd,
-                               tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
-                               tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
-                               cmd->prio_attr, cmd->sense_iu.sense,
-                               cmd->unpacked_lun);
+               __target_init_cmd(se_cmd,
+                                 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+                                 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+                                 cmd->prio_attr, cmd->sense_iu.sense,
+                                 cmd->unpacked_lun);
                goto out;
        }
 
-       if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
-                       cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
-                       cmd->data_len, cmd->prio_attr, dir, 0) < 0)
-               goto out;
-
+       target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+                         cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+                         cmd->data_len, cmd->prio_attr, dir, 0);
        return;
 
 out:
index 5de21ad..d16c04d 100644 (file)
@@ -85,7 +85,7 @@ struct vhost_scsi_cmd {
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
-       /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
+       /* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
@@ -101,8 +101,6 @@ struct vhost_scsi_cmd {
        struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
-       /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
-       struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
@@ -240,8 +238,6 @@ struct vhost_scsi_ctx {
        struct iov_iter out_iter;
 };
 
-static struct workqueue_struct *vhost_scsi_workqueue;
-
 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
 static DEFINE_MUTEX(vhost_scsi_mutex);
 static LIST_HEAD(vhost_scsi_list);
@@ -614,7 +610,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                return ERR_PTR(-EIO);
        }
 
-       tag = sbitmap_get(&svq->scsi_tags, 0, false);
+       tag = sbitmap_get(&svq->scsi_tags);
        if (tag < 0) {
                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
@@ -782,14 +778,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
        return TCM_SIMPLE_TAG;
 }
 
-static void vhost_scsi_submission_work(struct work_struct *work)
+static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
 {
-       struct vhost_scsi_cmd *cmd =
-               container_of(work, struct vhost_scsi_cmd, work);
-       struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
+       struct vhost_scsi_nexus *tv_nexus;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
-       int rc;
 
        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
@@ -805,18 +798,17 @@ static void vhost_scsi_submission_work(struct work_struct *work)
        tv_nexus = cmd->tvc_nexus;
 
        se_cmd->tag = 0;
-       rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
-                       cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
+       target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
-                       cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
-                       sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
-                       cmd->tvc_prot_sgl_count);
-       if (rc < 0) {
-               transport_send_check_condition_and_sense(se_cmd,
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-               transport_generic_free_cmd(se_cmd, 0);
-       }
+                       cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);
+
+       if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
+                              cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+                              cmd->tvc_prot_sgl_count, GFP_KERNEL))
+               return;
+
+       target_queue_submission(se_cmd);
 }
 
 static void
@@ -1132,14 +1124,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = vc.head;
-               /*
-                * Dispatch cmd descriptor for cmwq execution in process
-                * context provided by vhost_scsi_workqueue.  This also ensures
-                * cmd is executed on the same kworker CPU as this vhost
-                * thread to gain positive L2 cache locality effects.
-                */
-               INIT_WORK(&cmd->work, vhost_scsi_submission_work);
-               queue_work(vhost_scsi_workqueue, &cmd->work);
+               vhost_scsi_target_queue_cmd(cmd);
                ret = 0;
 err:
                /*
@@ -1512,7 +1497,7 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
                return 0;
 
        if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
-                             NUMA_NO_NODE))
+                             NUMA_NO_NODE, false, true))
                return -ENOMEM;
        svq->max_cmds = max_cmds;
 
@@ -2486,17 +2471,9 @@ static int __init vhost_scsi_init(void)
                " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
 
-       /*
-        * Use our own dedicated workqueue for submitting I/O into
-        * target core to avoid contention within system_wq.
-        */
-       vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
-       if (!vhost_scsi_workqueue)
-               goto out;
-
        ret = vhost_scsi_register();
        if (ret < 0)
-               goto out_destroy_workqueue;
+               goto out;
 
        ret = target_register_template(&vhost_scsi_ops);
        if (ret < 0)
@@ -2506,8 +2483,6 @@ static int __init vhost_scsi_init(void)
 
 out_vhost_scsi_deregister:
        vhost_scsi_deregister();
-out_destroy_workqueue:
-       destroy_workqueue(vhost_scsi_workqueue);
 out:
        return ret;
 };
@@ -2516,7 +2491,6 @@ static void vhost_scsi_exit(void)
 {
        target_unregister_template(&vhost_scsi_ops);
        vhost_scsi_deregister();
-       destroy_workqueue(vhost_scsi_workqueue);
 };
 
 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
index f302494..55a4763 100644 (file)
@@ -360,21 +360,18 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
 {
        struct se_cmd *se_cmd = &pending_req->se_cmd;
        struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
-       int rc;
 
        scsiback_get(pending_req->info);
        se_cmd->tag = pending_req->rqid;
-       rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
-                       pending_req->sense_buffer, pending_req->v2p->lun,
-                       pending_req->data_len, 0,
-                       pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
-                       pending_req->sgl, pending_req->n_sg,
-                       NULL, 0, NULL, 0);
-       if (rc < 0) {
-               transport_send_check_condition_and_sense(se_cmd,
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-               transport_generic_free_cmd(se_cmd, 0);
-       }
+       target_init_cmd(se_cmd, sess, pending_req->sense_buffer,
+                       pending_req->v2p->lun, pending_req->data_len, 0,
+                       pending_req->sc_data_direction, TARGET_SCF_ACK_KREF);
+
+       if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl,
+                              pending_req->n_sg, NULL, 0, NULL, 0, GFP_KERNEL))
+               return;
+
+       target_submit(se_cmd);
 }
 
 static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
index 2c473c9..3bd3ee6 100644 (file)
@@ -306,12 +306,21 @@ struct blk_mq_ops {
         * reserved budget. Also we have to handle the failure case
         * of .get_budget to avoid I/O deadlock.
         */
-       bool (*get_budget)(struct request_queue *);
+       int (*get_budget)(struct request_queue *);
 
        /**
         * @put_budget: Release the reserved budget.
         */
-       void (*put_budget)(struct request_queue *);
+       void (*put_budget)(struct request_queue *, int);
+
+       /*
+        * @set_rq_budget_token: store rq's budget token
+        */
+       void (*set_rq_budget_token)(struct request *, int);
+       /*
+        * @get_rq_budget_token: retrieve rq's budget token
+        */
+       int (*get_rq_budget_token)(struct request *);
 
        /**
         * @timeout: Called on request timeout.
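A sketch of how the token plumbing is meant to be wired up by a driver: the budget_token field matches the scsi_cmnd addition later in this series, but the callback names here are illustrative, not the in-tree implementations.

/* Illustrative callbacks: stash and recover the budget token per request. */
static void my_mq_set_rq_budget_token(struct request *rq, int token)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->budget_token = token;	/* value returned by .get_budget */
}

static int my_mq_get_rq_budget_token(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	return cmd->budget_token;	/* handed back to .put_budget */
}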
index f1d74dc..7be8c5f 100644 (file)
@@ -1726,6 +1726,7 @@ static inline unsigned long virt_to_hvpfn(void *addr)
 #define NR_HV_HYP_PAGES_IN_PAGE        (PAGE_SIZE / HV_HYP_PAGE_SIZE)
 #define offset_in_hvpage(ptr)  ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
 #define HVPFN_UP(x)    (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define HVPFN_DOWN(x)  ((x) >> HV_HYP_PAGE_SHIFT)
 #define page_to_hvpfn(page)    (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
 
 #endif /* _HYPERV_H */
index 74cc638..2713e68 100644 (file)
@@ -56,10 +56,23 @@ struct sbitmap {
         */
        unsigned int map_nr;
 
+       /**
+        * @round_robin: Allocate bits in strict round-robin order.
+        */
+       bool round_robin;
+
        /**
         * @map: Allocated bitmap.
         */
        struct sbitmap_word *map;
+
+       /*
+        * @alloc_hint: Cache of last successfully allocated or freed bit.
+        *
+        * This is per-cpu, which allows multiple users to stick to different
+        * cachelines until the map is exhausted.
+        */
+       unsigned int __percpu *alloc_hint;
 };
 
 #define SBQ_WAIT_QUEUES 8
@@ -95,14 +108,6 @@ struct sbitmap_queue {
         */
        struct sbitmap sb;
 
-       /*
-        * @alloc_hint: Cache of last successfully allocated or freed bit.
-        *
-        * This is per-cpu, which allows multiple users to stick to different
-        * cachelines until the map is exhausted.
-        */
-       unsigned int __percpu *alloc_hint;
-
        /**
         * @wake_batch: Number of bits which must be freed before we wake up any
         * waiters.
@@ -124,11 +129,6 @@ struct sbitmap_queue {
         */
        atomic_t ws_active;
 
-       /**
-        * @round_robin: Allocate bits in strict round-robin order.
-        */
-       bool round_robin;
-
        /**
         * @min_shallow_depth: The minimum shallow depth which may be passed to
         * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
@@ -144,11 +144,16 @@ struct sbitmap_queue {
  *         given, a good default is chosen.
  * @flags: Allocation flags.
  * @node: Memory node to allocate on.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ *               starting from the last allocated bit. This is less efficient
+ *               than the default behavior (false).
+ * @alloc_hint: If true, apply percpu hint for where to start searching for
+ *              a free bit.
  *
  * Return: Zero on success or negative errno on failure.
  */
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
-                     gfp_t flags, int node);
+                     gfp_t flags, int node, bool round_robin, bool alloc_hint);
 
 /**
  * sbitmap_free() - Free memory used by a &struct sbitmap.
@@ -156,6 +161,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
  */
 static inline void sbitmap_free(struct sbitmap *sb)
 {
+       free_percpu(sb->alloc_hint);
        kfree(sb->map);
        sb->map = NULL;
 }
@@ -173,22 +179,17 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
 /**
  * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
  * @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
- * @round_robin: If true, be stricter about allocation order; always allocate
- *               starting from the last allocated bit. This is less efficient
- *               than the default behavior (false).
  *
  * This operation provides acquire barrier semantics if it succeeds.
  *
  * Return: Non-negative allocated bit number if successful, -1 otherwise.
  */
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+int sbitmap_get(struct sbitmap *sb);
 
 /**
  * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
  * limiting the depth used from each word.
  * @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
  * @shallow_depth: The maximum number of bits to allocate from a single word.
  *
  * This rather specific operation allows for having multiple users with
@@ -200,8 +201,7 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
  *
  * Return: Non-negative allocated bit number if successful, -1 otherwise.
  */
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
-                       unsigned long shallow_depth);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
 
 /**
  * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
@@ -315,10 +315,16 @@ static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int b
        set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
 }
 
-static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
-                                           unsigned int bitnr)
+/*
+ * Pairs with sbitmap_get(); this one both clears the bit (via the
+ * deferred-clear path) and updates the per-cpu allocation hint.
+ */
+static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
 {
-       clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+       sbitmap_deferred_clear_bit(sb, bitnr);
+
+       if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
+               *raw_cpu_ptr(sb->alloc_hint) = bitnr;
 }
 
 static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
@@ -326,6 +332,24 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
        return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
 }
 
+static inline int sbitmap_calculate_shift(unsigned int depth)
+{
+       int     shift = ilog2(BITS_PER_LONG);
+
+       /*
+        * If the bitmap is small, shrink the number of bits per word so
+        * we spread over a few cachelines, at least. If less than 4
+        * bits, just forget about it, it's not going to work optimally
+        * anyway.
+        */
+       if (depth >= 4) {
+               while ((4U << shift) > depth)
+                       shift--;
+       }
+
+       return shift;
+}
+
 /**
  * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
  * @sb: Bitmap to show.
@@ -335,6 +359,16 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
  */
 void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
 
+
+/**
+ * sbitmap_weight() - Return the number of bits that are set and not cleared
+ * in a &struct sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: Number of bits that are set and not cleared.
+ */
+unsigned int sbitmap_weight(const struct sbitmap *sb);
+
 /**
  * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
  * seq_file.
@@ -369,7 +403,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
 {
        kfree(sbq->ws);
-       free_percpu(sbq->alloc_hint);
        sbitmap_free(&sbq->sb);
 }
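With the hint and round-robin state folded into struct sbitmap, a bare sbitmap can now act as a standalone tag allocator, which is how vhost-scsi uses it above. A minimal sketch under those assumptions; the depth of 128 and the names are arbitrary (assumes linux/sbitmap.h):

static int my_tags_demo(void)
{
	struct sbitmap tags;
	int tag;

	/* shift < 0 picks a default; no round-robin, with percpu alloc hint */
	if (sbitmap_init_node(&tags, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
			      false, true))
		return -ENOMEM;

	tag = sbitmap_get(&tags);	/* -1 if the map is exhausted */
	if (tag >= 0)
		sbitmap_put(&tags, tag); /* deferred clear + hint update */

	sbitmap_free(&tags);		/* now also frees the percpu hint */
	return 0;
}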
 
index 2568cb0..fac8e89 100644 (file)
@@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
                         struct fc_frame *);
 
 /* libfcoe funcs */
-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
 int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
                      const struct libfc_function_template *, int init_fcp);
 u32 fcoe_fc_crc(struct fc_frame *fp);
index ace15b5..83f7e52 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
 #include <scsi/scsi_request.h>
 
 struct Scsi_Host;
@@ -75,6 +76,8 @@ struct scsi_cmnd {
 
        int eh_eflags;          /* Used by error handlr */
 
+       int budget_token;
+
        /*
         * This is set to jiffies as it was when the command was first
         * allocated.  It is used to time how long the command has
index 1a5c9a3..05c7c32 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <scsi/scsi.h>
 #include <linux/atomic.h>
+#include <linux/sbitmap.h>
 
 struct device;
 struct request_queue;
@@ -106,7 +107,7 @@ struct scsi_device {
        struct list_head    siblings;   /* list of all devices on this host */
        struct list_head    same_target_siblings; /* just the devices sharing same target id */
 
-       atomic_t device_busy;           /* commands actually active on LLDD */
+       struct sbitmap budget_map;
        atomic_t device_blocked;        /* Device returned QUEUE_FULL. */
 
        atomic_t restarts;
@@ -590,6 +591,11 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
        return 0;
 }
 
+static inline int scsi_device_busy(struct scsi_device *sdev)
+{
+       return sbitmap_weight(&sdev->budget_map);
+}
+
 #define MODULE_ALIAS_SCSI_DEVICE(type) \
        MODULE_ALIAS("scsi:t-" __stringify(type) "*")
 #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
index 2852e47..a9f782f 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
- * Header file for SCSI device handler infrastruture.
+ * Header file for SCSI device handler infrastructure.
  *
  * Modified version of patches posted by Mike Christie <michaelc@cs.wisc.edu>
  *
index e30fd96..3f3ebfd 100644 (file)
@@ -19,7 +19,6 @@ struct scsi_device;
 struct scsi_host_cmd_pool;
 struct scsi_target;
 struct Scsi_Host;
-struct scsi_host_cmd_pool;
 struct scsi_transport_template;
 
 
@@ -30,40 +29,15 @@ struct scsi_transport_template;
 #define MODE_TARGET 0x02
 
 struct scsi_host_template {
-       struct module *module;
-       const char *name;
-
        /*
-        * The info function will return whatever useful information the
-        * developer sees fit.  If not provided, then the name field will
-        * be used instead.
-        *
-        * Status: OPTIONAL
+        * Put fields referenced in the I/O submission path together in
+        * the same cacheline
         */
-       const char *(* info)(struct Scsi_Host *);
 
        /*
-        * Ioctl interface
-        *
-        * Status: OPTIONAL
-        */
-       int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
-                    void __user *arg);
-
-
-#ifdef CONFIG_COMPAT
-       /* 
-        * Compat handler. Handle 32bit ABI.
-        * When unknown ioctl is passed return -ENOIOCTLCMD.
-        *
-        * Status: OPTIONAL
+        * Additional per-command data allocated for the driver.
         */
-       int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
-                           void __user *arg);
-#endif
-
-       int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
-       int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+       unsigned int cmd_size;
 
        /*
         * The queuecommand function is used to queue up a scsi
@@ -111,6 +85,41 @@ struct scsi_host_template {
         */
        void (*commit_rqs)(struct Scsi_Host *, u16);
 
+       struct module *module;
+       const char *name;
+
+       /*
+        * The info function will return whatever useful information the
+        * developer sees fit.  If not provided, then the name field will
+        * be used instead.
+        *
+        * Status: OPTIONAL
+        */
+       const char *(*info)(struct Scsi_Host *);
+
+       /*
+        * Ioctl interface
+        *
+        * Status: OPTIONAL
+        */
+       int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
+                    void __user *arg);
+
+
+#ifdef CONFIG_COMPAT
+       /*
+        * Compat handler. Handle 32bit ABI.
+        * When unknown ioctl is passed return -ENOIOCTLCMD.
+        *
+        * Status: OPTIONAL
+        */
+       int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
+                           void __user *arg);
+#endif
+
+       int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+       int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
+
        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
@@ -270,6 +279,16 @@ struct scsi_host_template {
         */
        int (* map_queues)(struct Scsi_Host *shost);
 
+       /*
+        * SCSI interface of blk_poll - poll for IO completions.
+        * Only applicable if SCSI LLD exposes multiple h/w queues.
+        *
+        * Return value: Number of completed entries found.
+        *
+        * Status: OPTIONAL
+        */
+       int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);
+
        /*
         * Check if scatterlists need to be padded for DMA draining.
         *
@@ -484,10 +503,6 @@ struct scsi_host_template {
         */
        u64 vendor_id;
 
-       /*
-        * Additional per-command data allocated for the driver.
-        */
-       unsigned int cmd_size;
        struct scsi_host_cmd_pool *cmd_pool;
 
        /* Delay for runtime autosuspend */
@@ -616,6 +631,7 @@ struct Scsi_Host {
         * the total queue depth is can_queue.
         */
        unsigned nr_hw_queues;
+       unsigned nr_maps;
        unsigned active_mode:2;
        unsigned unchecked_isa_dma:1;
 
index ce2fba4..1f78b09 100644 (file)
@@ -34,6 +34,8 @@ struct target_backend_ops {
        int (*configure_device)(struct se_device *);
        void (*destroy_device)(struct se_device *);
        void (*free_device)(struct se_device *device);
+       struct se_dev_plug *(*plug_device)(struct se_device *se_dev);
+       void (*unplug_device)(struct se_dev_plug *se_plug);
 
        ssize_t (*set_configfs_dev_params)(struct se_device *,
                                           const char *, ssize_t);
index 54dcc0e..d1f7d2a 100644 (file)
@@ -127,25 +127,25 @@ enum transport_state_table {
 
 /* Used for struct se_cmd->se_cmd_flags */
 enum se_cmd_flags_table {
-       SCF_SUPPORTED_SAM_OPCODE        = 0x00000001,
-       SCF_TRANSPORT_TASK_SENSE        = 0x00000002,
-       SCF_EMULATED_TASK_SENSE         = 0x00000004,
-       SCF_SCSI_DATA_CDB               = 0x00000008,
-       SCF_SCSI_TMR_CDB                = 0x00000010,
-       SCF_FUA                         = 0x00000080,
-       SCF_SE_LUN_CMD                  = 0x00000100,
-       SCF_BIDI                        = 0x00000400,
-       SCF_SENT_CHECK_CONDITION        = 0x00000800,
-       SCF_OVERFLOW_BIT                = 0x00001000,
-       SCF_UNDERFLOW_BIT               = 0x00002000,
-       SCF_ALUA_NON_OPTIMIZED          = 0x00008000,
-       SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
-       SCF_COMPARE_AND_WRITE           = 0x00080000,
-       SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
-       SCF_ACK_KREF                    = 0x00400000,
-       SCF_USE_CPUID                   = 0x00800000,
-       SCF_TASK_ATTR_SET               = 0x01000000,
-       SCF_TREAT_READ_AS_NORMAL        = 0x02000000,
+       SCF_SUPPORTED_SAM_OPCODE                = (1 << 0),
+       SCF_TRANSPORT_TASK_SENSE                = (1 << 1),
+       SCF_EMULATED_TASK_SENSE                 = (1 << 2),
+       SCF_SCSI_DATA_CDB                       = (1 << 3),
+       SCF_SCSI_TMR_CDB                        = (1 << 4),
+       SCF_FUA                                 = (1 << 5),
+       SCF_SE_LUN_CMD                          = (1 << 6),
+       SCF_BIDI                                = (1 << 7),
+       SCF_SENT_CHECK_CONDITION                = (1 << 8),
+       SCF_OVERFLOW_BIT                        = (1 << 9),
+       SCF_UNDERFLOW_BIT                       = (1 << 10),
+       SCF_ALUA_NON_OPTIMIZED                  = (1 << 11),
+       SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC       = (1 << 12),
+       SCF_COMPARE_AND_WRITE                   = (1 << 13),
+       SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC  = (1 << 14),
+       SCF_ACK_KREF                            = (1 << 15),
+       SCF_USE_CPUID                           = (1 << 16),
+       SCF_TASK_ATTR_SET                       = (1 << 17),
+       SCF_TREAT_READ_AS_NORMAL                = (1 << 18),
 };
 
 /*
@@ -488,7 +488,7 @@ struct se_cmd {
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
        struct se_tmr_req       *se_tmr_req;
-       struct list_head        se_cmd_list;
+       struct llist_node       se_cmd_list;
        struct completion       *free_compl;
        struct completion       *abrt_compl;
        const struct target_core_fabric_ops *se_tfo;
@@ -765,9 +765,19 @@ struct se_dev_stat_grps {
        struct config_group scsi_lu_group;
 };
 
+struct se_cmd_queue {
+       struct llist_head       cmd_list;
+       struct work_struct      work;
+};
+
+struct se_dev_plug {
+       struct se_device        *se_dev;
+};
+
 struct se_device_queue {
        struct list_head        state_list;
        spinlock_t              lock;
+       struct se_cmd_queue     sq;
 };
 
 struct se_device {
@@ -934,11 +944,20 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item)
                        tpg_param_group);
 }
 
+enum {
+       /* Use se_cmd's cpuid for completion */
+       SE_COMPL_AFFINITY_CPUID         = -1,
+       /* Complete on current CPU */
+       SE_COMPL_AFFINITY_CURR_CPU      = -2,
+};
+
 struct se_wwn {
        struct target_fabric_configfs *wwn_tf;
        void                    *priv;
        struct config_group     wwn_group;
        struct config_group     fabric_stat_group;
+       struct config_group     param_group;
+       int                     cmd_compl_affinity;
 };
 
 static inline void atomic_inc_mb(atomic_t *v)
index d60a3eb..3c5ade7 100644 (file)
@@ -148,18 +148,25 @@ void      transport_deregister_session_configfs(struct se_session *);
 void   transport_deregister_session(struct se_session *);
 
 
-void   transport_init_se_cmd(struct se_cmd *,
+void   __target_init_cmd(struct se_cmd *,
                const struct target_core_fabric_ops *,
                struct se_session *, u32, int, int, unsigned char *, u64);
+int    target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+               unsigned char *sense, u64 unpacked_lun, u32 data_length,
+               int task_attr, int data_dir, int flags);
+int    target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+               struct scatterlist *sgl, u32 sgl_count,
+               struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+               struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
+void   target_submit(struct se_cmd *se_cmd);
 sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
-sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
+                                  gfp_t gfp);
 sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
-int    target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
-               unsigned char *, unsigned char *, u64, u32, int, int, int,
-               struct scatterlist *, u32, struct scatterlist *, u32,
-               struct scatterlist *, u32);
-int    target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
+void   target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
                unsigned char *, u64, u32, int, int, int);
+void   target_queue_submission(struct se_cmd *se_cmd);
+
 int    target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *sense, u64 unpacked_lun,
                void *fabric_tmr_ptr, unsigned char tm_type,
index e151477..1cb6f1a 100644 (file)
@@ -349,6 +349,27 @@ TRACE_EVENT(ufshcd_upiu,
        )
 );
 
+TRACE_EVENT(ufshcd_exception_event,
+
+       TP_PROTO(const char *dev_name, u16 status),
+
+       TP_ARGS(dev_name, status),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __field(u16, status)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __entry->status = status;
+       ),
+
+       TP_printk("%s: status 0x%x",
+               __get_str(dev_name), __entry->status
+       )
+);
+
 #endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
 
 /* This part must be outside protection */
index d693d92..47b3691 100644 (file)
@@ -9,6 +9,54 @@
 #include <linux/sbitmap.h>
 #include <linux/seq_file.h>
 
+static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
+{
+       unsigned depth = sb->depth;
+
+       sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+       if (!sb->alloc_hint)
+               return -ENOMEM;
+
+       if (depth && !sb->round_robin) {
+               int i;
+
+               for_each_possible_cpu(i)
+                       *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
+       }
+       return 0;
+}
+
+static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
+                                                   unsigned int depth)
+{
+       unsigned hint;
+
+       hint = this_cpu_read(*sb->alloc_hint);
+       if (unlikely(hint >= depth)) {
+               hint = depth ? prandom_u32() % depth : 0;
+               this_cpu_write(*sb->alloc_hint, hint);
+       }
+
+       return hint;
+}
+
+static inline void update_alloc_hint_after_get(struct sbitmap *sb,
+                                              unsigned int depth,
+                                              unsigned int hint,
+                                              unsigned int nr)
+{
+       if (nr == -1) {
+               /* If the map is full, a hint won't do us much good. */
+               this_cpu_write(*sb->alloc_hint, 0);
+       } else if (nr == hint || unlikely(sb->round_robin)) {
+               /* Only update the hint if we used it. */
+               hint = nr + 1;
+               if (hint >= depth - 1)
+                       hint = 0;
+               this_cpu_write(*sb->alloc_hint, hint);
+       }
+}
+
 /*
  * See if we have deferred clears that we can batch move
  */
@@ -33,24 +81,15 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 }
 
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
-                     gfp_t flags, int node)
+                     gfp_t flags, int node, bool round_robin,
+                     bool alloc_hint)
 {
        unsigned int bits_per_word;
        unsigned int i;
 
-       if (shift < 0) {
-               shift = ilog2(BITS_PER_LONG);
-               /*
-                * If the bitmap is small, shrink the number of bits per word so
-                * we spread over a few cachelines, at least. If less than 4
-                * bits, just forget about it, it's not going to work optimally
-                * anyway.
-                */
-               if (depth >= 4) {
-                       while ((4U << shift) > depth)
-                               shift--;
-               }
-       }
+       if (shift < 0)
+               shift = sbitmap_calculate_shift(depth);
+
        bits_per_word = 1U << shift;
        if (bits_per_word > BITS_PER_LONG)
                return -EINVAL;
@@ -58,15 +97,25 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
        sb->shift = shift;
        sb->depth = depth;
        sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+       sb->round_robin = round_robin;
 
        if (depth == 0) {
                sb->map = NULL;
                return 0;
        }
 
+       if (alloc_hint) {
+               if (init_alloc_hint(sb, flags))
+                       return -ENOMEM;
+       } else {
+               sb->alloc_hint = NULL;
+       }
+
        sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
-       if (!sb->map)
+       if (!sb->map) {
+               free_percpu(sb->alloc_hint);
                return -ENOMEM;
+       }
 
        for (i = 0; i < sb->map_nr; i++) {
                sb->map[i].depth = min(depth, bits_per_word);
@@ -129,14 +178,14 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
 }
 
 static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
-                                    unsigned int alloc_hint, bool round_robin)
+                                    unsigned int alloc_hint)
 {
        struct sbitmap_word *map = &sb->map[index];
        int nr;
 
        do {
                nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
-                                       !round_robin);
+                                       !sb->round_robin);
                if (nr != -1)
                        break;
                if (!sbitmap_deferred_clear(map))
@@ -146,7 +195,7 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
        return nr;
 }
 
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
+static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
 {
        unsigned int i, index;
        int nr = -1;
@@ -158,14 +207,13 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
         * alloc_hint to find the right word index. No point in looping
         * twice in find_next_zero_bit() for that case.
         */
-       if (round_robin)
+       if (sb->round_robin)
                alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
        else
                alloc_hint = 0;
 
        for (i = 0; i < sb->map_nr; i++) {
-               nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
-                                               round_robin);
+               nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
                if (nr != -1) {
                        nr += index << sb->shift;
                        break;
@@ -179,10 +227,27 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 
        return nr;
 }
+
+int sbitmap_get(struct sbitmap *sb)
+{
+       int nr;
+       unsigned int hint, depth;
+
+       if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+               return -1;
+
+       depth = READ_ONCE(sb->depth);
+       hint = update_alloc_hint_before_get(sb, depth);
+       nr = __sbitmap_get(sb, hint);
+       update_alloc_hint_after_get(sb, depth, hint, nr);
+
+       return nr;
+}
 EXPORT_SYMBOL_GPL(sbitmap_get);
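
Since the hint now lives in struct sbitmap, the getter takes no alloc_hint or round_robin arguments, and it refuses (with a one-time warning) to run on a map initialized without hints. A hedged caller sketch, reusing the hypothetical example_sb from above:

        int bit = sbitmap_get(&example_sb);

        if (bit < 0)
                return -EBUSY;          /* map exhausted */
        /* ... bit is owned by this caller until released ... */
        sbitmap_clear_bit(&example_sb, bit);

sbitmap_clear_bit() is the existing release helper from <linux/sbitmap.h>; only the allocation path changes in this hunk.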
 
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
-                       unsigned long shallow_depth)
+static int __sbitmap_get_shallow(struct sbitmap *sb,
+                                unsigned int alloc_hint,
+                                unsigned long shallow_depth)
 {
        unsigned int i, index;
        int nr = -1;
@@ -214,6 +279,22 @@ again:
 
        return nr;
 }
+
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
+{
+       int nr;
+       unsigned int hint, depth;
+
+       if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+               return -1;
+
+       depth = READ_ONCE(sb->depth);
+       hint = update_alloc_hint_before_get(sb, depth);
+       nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
+       update_alloc_hint_after_get(sb, depth, hint, nr);
+
+       return nr;
+}
 EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
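
The shallow variant follows the same pattern: the explicit hint parameter is gone and only shallow_depth, the per-word allocation ceiling, remains. A sketch (the value 4 is illustrative):

        /* consider at most 4 bits of each word for this allocation */
        int bit = sbitmap_get_shallow(&example_sb, 4);

Bounding the per-word depth keeps bits in reserve, which sbitmap_queue users rely on to stop one class of requests from starving another.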
 
 bool sbitmap_any_bit_set(const struct sbitmap *sb)
@@ -243,20 +324,21 @@ static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
        return weight;
 }
 
-static unsigned int sbitmap_weight(const struct sbitmap *sb)
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
 {
-       return __sbitmap_weight(sb, true);
+       return __sbitmap_weight(sb, false);
 }
 
-static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+unsigned int sbitmap_weight(const struct sbitmap *sb)
 {
-       return __sbitmap_weight(sb, false);
+       return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
 }
+EXPORT_SYMBOL_GPL(sbitmap_weight);
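
This swap also changes what sbitmap_weight() means: it is now exported and returns the set bits minus the deferred-cleared bits, i.e. the busy count, rather than the raw popcount. A worked sketch: if 10 bits are set across sb->map but 3 of them sit in word->cleared awaiting a batched clear, the new function returns 7, which sbitmap_show() below prints directly:

        unsigned int busy = sbitmap_weight(&example_sb);        /* set - cleared */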
 
 void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
 {
        seq_printf(m, "depth=%u\n", sb->depth);
-       seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
+       seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
        seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
        seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
        seq_printf(m, "map_nr=%u\n", sb->map_nr);
@@ -350,21 +432,11 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
        int ret;
        int i;
 
-       ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
+       ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
+                               round_robin, true);
        if (ret)
                return ret;
 
-       sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
-       if (!sbq->alloc_hint) {
-               sbitmap_free(&sbq->sb);
-               return -ENOMEM;
-       }
-
-       if (depth && !round_robin) {
-               for_each_possible_cpu(i)
-                       *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
-       }
-
        sbq->min_shallow_depth = UINT_MAX;
        sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
        atomic_set(&sbq->wake_index, 0);
@@ -372,7 +444,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 
        sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
        if (!sbq->ws) {
-               free_percpu(sbq->alloc_hint);
                sbitmap_free(&sbq->sb);
                return -ENOMEM;
        }
@@ -382,7 +453,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
                atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
        }
 
-       sbq->round_robin = round_robin;
        return 0;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
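
The queue initializer's external signature is unchanged; it now forwards round_robin and asks sbitmap_init_node() for the hint, which is why the error paths above no longer free the hint themselves (sbitmap_free() is expected to release it together with the map). A usage sketch with illustrative values:

        static struct sbitmap_queue example_sbq;        /* hypothetical */

        int ret = sbitmap_queue_init_node(&example_sbq, 256, -1,
                                          false /* round_robin */,
                                          GFP_KERNEL, NUMA_NO_NODE);
        if (ret)
                return ret;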
@@ -415,60 +485,16 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
 
 int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 {
-       unsigned int hint, depth;
-       int nr;
-
-       hint = this_cpu_read(*sbq->alloc_hint);
-       depth = READ_ONCE(sbq->sb.depth);
-       if (unlikely(hint >= depth)) {
-               hint = depth ? prandom_u32() % depth : 0;
-               this_cpu_write(*sbq->alloc_hint, hint);
-       }
-       nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
-
-       if (nr == -1) {
-               /* If the map is full, a hint won't do us much good. */
-               this_cpu_write(*sbq->alloc_hint, 0);
-       } else if (nr == hint || unlikely(sbq->round_robin)) {
-               /* Only update the hint if we used it. */
-               hint = nr + 1;
-               if (hint >= depth - 1)
-                       hint = 0;
-               this_cpu_write(*sbq->alloc_hint, hint);
-       }
-
-       return nr;
+       return sbitmap_get(&sbq->sb);
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
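
With every line of hint bookkeeping gone, the queue getter is a pure alias for sbitmap_get(), so callers see identical behavior. A sketch:

        int tag = __sbitmap_queue_get(&example_sbq);

        if (tag < 0)
                return tag;     /* no free tag; callers typically wait on sbq->ws */

Collapsing the two open-coded copies of the hint logic (here and in the shallow getter below) into the helpers at the top of the file is the point of the patch: one implementation instead of two drifting duplicates.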
 
 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                                unsigned int shallow_depth)
 {
-       unsigned int hint, depth;
-       int nr;
-
        WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
 
-       hint = this_cpu_read(*sbq->alloc_hint);
-       depth = READ_ONCE(sbq->sb.depth);
-       if (unlikely(hint >= depth)) {
-               hint = depth ? prandom_u32() % depth : 0;
-               this_cpu_write(*sbq->alloc_hint, hint);
-       }
-       nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
-
-       if (nr == -1) {
-               /* If the map is full, a hint won't do us much good. */
-               this_cpu_write(*sbq->alloc_hint, 0);
-       } else if (nr == hint || unlikely(sbq->round_robin)) {
-               /* Only update the hint if we used it. */
-               hint = nr + 1;
-               if (hint >= depth - 1)
-                       hint = 0;
-               this_cpu_write(*sbq->alloc_hint, hint);
-       }
-
-       return nr;
+       return sbitmap_get_shallow(&sbq->sb, shallow_depth);
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
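
The shallow queue getter keeps only its contract check: shallow_depth must not drop below min_shallow_depth, which feeds the wake-batch calculation. A sketch of the expected call order (assumption: sbitmap_queue_min_shallow_depth() is the pre-existing helper for lowering the bound; the value 4 is illustrative):

        /* lower the bound first, then allocate at or above it */
        sbitmap_queue_min_shallow_depth(&example_sbq, 4);
        int tag = __sbitmap_queue_get_shallow(&example_sbq, 4);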
 
@@ -576,8 +602,8 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
        smp_mb__after_atomic();
        sbitmap_queue_wake_up(sbq);
 
-       if (likely(!sbq->round_robin && nr < sbq->sb.depth))
-               *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
+       if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
+               *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
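
Freeing a tag now writes it back into the sbitmap-embedded hint (non-round-robin only, and only if it still fits under the current depth), so the next allocation on that CPU tends to reuse a cache-warm bit. A completion-side sketch (assumption: cpu is the CPU recorded at allocation time, as queue users such as blk-mq pass in):

        sbitmap_queue_clear(&example_sbq, tag, cpu);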
 
@@ -615,7 +641,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
                if (!first)
                        seq_puts(m, ", ");
                first = false;
-               seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
+               seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
        }
        seq_puts(m, "}\n");
 
@@ -633,7 +659,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
        }
        seq_puts(m, "}\n");
 
-       seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+       seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
        seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_show);