Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 8 May 2019 17:12:46 +0000 (10:12 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 8 May 2019 17:12:46 +0000 (10:12 -0700)
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: qla2xxx, qedf, smartpqi,
  hpsa, lpfc, ufs, mpt3sas, ibmvfc and hisi_sas. Plus number of minor
  changes, spelling fixes and other trivia"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (298 commits)
  scsi: qla2xxx: Avoid that lockdep complains about unsafe locking in tcm_qla2xxx_close_session()
  scsi: qla2xxx: Avoid that qlt_send_resp_ctio() corrupts memory
  scsi: qla2xxx: Fix hardirq-unsafe locking
  scsi: qla2xxx: Complain loudly about reference count underflow
  scsi: qla2xxx: Use __le64 instead of uint32_t[2] for sending DMA addresses to firmware
  scsi: qla2xxx: Introduce the dsd32 and dsd64 data structures
  scsi: qla2xxx: Check the size of firmware data structures at compile time
  scsi: qla2xxx: Pass little-endian values to the firmware
  scsi: qla2xxx: Fix race conditions in the code for aborting SCSI commands
  scsi: qla2xxx: Use an on-stack completion in qla24xx_control_vp()
  scsi: qla2xxx: Make qla24xx_async_abort_cmd() static
  scsi: qla2xxx: Remove unnecessary locking from the target code
  scsi: qla2xxx: Remove qla_tgt_cmd.released
  scsi: qla2xxx: Complain if a command is released that is owned by the firmware
  scsi: qla2xxx: target: Fix offline port handling and host reset handling
  scsi: qla2xxx: Fix abort handling in tcm_qla2xxx_write_pending()
  scsi: qla2xxx: Fix error handling in qlt_alloc_qfull_cmd()
  scsi: qla2xxx: Simplify qlt_send_term_imm_notif()
  scsi: qla2xxx: Fix use-after-free issues in qla2xxx_qpair_sp_free_dma()
  scsi: qla2xxx: Fix a qla24xx_enable_msix() error path
  ...

158 files changed:
Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
Documentation/devicetree/bindings/ufs/ufs-mediatek.txt [new file with mode: 0644]
Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
MAINTAINERS
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptspi.c
drivers/nvme/host/fc.c
drivers/scsi/NCR5380.c
drivers/scsi/aic7xxx/Kconfig.aic7xxx
drivers/scsi/aic7xxx/aic7xxx_core.c
drivers/scsi/atp870u.c
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/csiostor/csio_isr.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/dpt_i2o.c
drivers/scsi/gdth.c
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hpsa.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_init.c
drivers/scsi/libsas/sas_phy.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_bsg.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_debugfs.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt3sas/Kconfig
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mvsas/mv_64xx.c
drivers/scsi/mvsas/mv_94xx.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvumi.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_dbg.c
drivers/scsi/qedf/qedf_debugfs.c
drivers/scsi/qedf/qedf_els.c
drivers/scsi/qedf/qedf_fip.c
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedf/qedf_version.h
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_dsd.h [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_mr.h
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_nvme.h
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/qla2xxx/qla_tmpl.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qlogicfas408.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/smartpqi/Makefile
drivers/scsi/smartpqi/smartpqi.h
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/smartpqi/smartpqi_sas_transport.c
drivers/scsi/smartpqi/smartpqi_sis.c
drivers/scsi/smartpqi/smartpqi_sis.h
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/cdns-pltfrm.c
drivers/scsi/ufs/ufs-hisi.c
drivers/scsi/ufs/ufs-mediatek.c [new file with mode: 0644]
drivers/scsi/ufs/ufs-mediatek.h [new file with mode: 0644]
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd-pltfrm.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/unipro.h
drivers/scsi/virtio_scsi.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_tmr.c
drivers/target/target_core_xcopy.c
include/linux/nvme-fc-driver.h
include/scsi/libsas.h
include/scsi/osd_attributes.h [deleted file]
include/scsi/osd_protocol.h [deleted file]
include/scsi/osd_sec.h [deleted file]
include/scsi/osd_sense.h [deleted file]
include/scsi/osd_types.h [deleted file]
include/scsi/scsi_host.h
include/scsi/scsi_transport_fc.h
include/target/iscsi/iscsi_target_core.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/uapi/scsi/fc/fc_els.h

index a04a498..02347b0 100644 (file)
@@ -5,8 +5,9 @@ Each UFS controller instance should have its own node.
 Please see the ufshcd-pltfrm.txt for a list of all available properties.
 
 Required properties:
-- compatible   : Compatible list, contains the following controller:
-                       "cdns,ufshc"
+- compatible   : Compatible list, contains one of the following controllers:
+                       "cdns,ufshc" - Generic CDNS HCI,
+                       "cdns,ufshc-m31-16nm" - CDNS UFS HC + M31 16nm PHY
                  complemented with the JEDEC version:
                        "jedec,ufs-2.0"
 
diff --git a/Documentation/devicetree/bindings/ufs/ufs-mediatek.txt b/Documentation/devicetree/bindings/ufs/ufs-mediatek.txt
new file mode 100644 (file)
index 0000000..72aab85
--- /dev/null
@@ -0,0 +1,43 @@
+* Mediatek Universal Flash Storage (UFS) Host Controller
+
+UFS nodes are defined to describe on-chip UFS hardware macro.
+Each UFS Host Controller should have its own node.
+
+To bind UFS PHY with UFS host controller, the controller node should
+contain a phandle reference to UFS M-PHY node.
+
+Required properties for UFS nodes:
+- compatible         : Compatible list, contains the following controller:
+                       "mediatek,mt8183-ufshci" for MediaTek UFS host controller
+                       present on MT81xx chipsets.
+- reg                : Address and length of the UFS register set.
+- phys               : phandle to m-phy.
+- clocks             : List of phandle and clock specifier pairs.
+- clock-names        : List of clock input name strings sorted in the same
+                       order as the clocks property. "ufs" is mandatory.
+                       "ufs": ufshci core control clock.
+- freq-table-hz      : Array of <min max> operating frequencies stored in the same
+                       order as the clocks property. If this property is not
+                       defined or a value in the array is "0" then it is assumed
+                       that the frequency is set by the parent clock or a
+                       fixed rate clock source.
+- vcc-supply         : phandle to VCC supply regulator node.
+
+Example:
+
+       ufsphy: phy@11fa0000 {
+               ...
+       };
+
+       ufshci@11270000 {
+               compatible = "mediatek,mt8183-ufshci";
+               reg = <0 0x11270000 0 0x2300>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>;
+               phys = <&ufsphy>;
+
+               clocks = <&infracfg_ao INFRACFG_AO_UFS_CG>;
+               clock-names = "ufs";
+               freq-table-hz = <0 0>;
+
+               vcc-supply = <&mt_pmic_vemc_ldo_reg>;
+       };
index f647c09..56bccde 100644 (file)
@@ -31,7 +31,6 @@ Optional properties:
 - vcc-max-microamp      : specifies max. load that can be drawn from vcc supply
 - vccq-max-microamp     : specifies max. load that can be drawn from vccq supply
 - vccq2-max-microamp    : specifies max. load that can be drawn from vccq2 supply
-- <name>-fixed-regulator : boolean property specifying that <name>-supply is a fixed regulator
 
 - clocks                : List of phandle and clock specifier pairs
 - clock-names           : List of clock input name strings sorted in the same
@@ -65,7 +64,6 @@ Example:
                interrupts = <0 28 0>;
 
                vdd-hba-supply = <&xxx_reg0>;
-               vdd-hba-fixed-regulator;
                vcc-supply = <&xxx_reg1>;
                vcc-supply-1p8;
                vccq-supply = <&xxx_reg2>;
index 5a2b78e..ee13411 100644 (file)
@@ -16057,6 +16057,13 @@ L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/ufs/*dwc*
 
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
+M:     Stanley Chu <stanley.chu@mediatek.com>
+L:     linux-scsi@vger.kernel.org
+L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     drivers/scsi/ufs/ufs-mediatek*
+
 UNSORTED BLOCK IMAGES (UBI)
 M:     Artem Bityutskiy <dedekind1@gmail.com>
 M:     Richard Weinberger <richard@nod.at>
index ba551d8..d8882b0 100644 (file)
@@ -642,7 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
                        freereq = 0;
                if (event != MPI_EVENT_EVENT_CHANGE)
                        break;
-               /* else: fall through */
+               /* fall through */
        case MPI_FUNCTION_CONFIG:
        case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
                ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
index 8d22d61..f9ac224 100644 (file)
@@ -565,7 +565,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
         * TODO - this define is not in MPI spec yet,
         * but they plan to set it to 0x21
         */
-        if (event == 0x21 ) {
+       if (event == 0x21) {
                ioc->aen_event_read_flag=1;
                dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n",
                    ioc->name));
index 612cb5b..6a79cd0 100644 (file)
@@ -2928,27 +2928,27 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
        if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
                u8 *tmp;
 
-       smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
-       if (le16_to_cpu(smprep->ResponseDataLength) !=
-               sizeof(struct rep_manu_reply))
+               smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+               if (le16_to_cpu(smprep->ResponseDataLength) !=
+                   sizeof(struct rep_manu_reply))
                        goto out_free;
 
-       manufacture_reply = data_out + sizeof(struct rep_manu_request);
-       strncpy(edev->vendor_id, manufacture_reply->vendor_id,
-               SAS_EXPANDER_VENDOR_ID_LEN);
-       strncpy(edev->product_id, manufacture_reply->product_id,
-               SAS_EXPANDER_PRODUCT_ID_LEN);
-       strncpy(edev->product_rev, manufacture_reply->product_rev,
-               SAS_EXPANDER_PRODUCT_REV_LEN);
-       edev->level = manufacture_reply->sas_format;
-       if (manufacture_reply->sas_format) {
-               strncpy(edev->component_vendor_id,
-                       manufacture_reply->component_vendor_id,
+               manufacture_reply = data_out + sizeof(struct rep_manu_request);
+               strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+                       SAS_EXPANDER_VENDOR_ID_LEN);
+               strncpy(edev->product_id, manufacture_reply->product_id,
+                       SAS_EXPANDER_PRODUCT_ID_LEN);
+               strncpy(edev->product_rev, manufacture_reply->product_rev,
+                       SAS_EXPANDER_PRODUCT_REV_LEN);
+               edev->level = manufacture_reply->sas_format;
+               if (manufacture_reply->sas_format) {
+                       strncpy(edev->component_vendor_id,
+                               manufacture_reply->component_vendor_id,
                                SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
-               tmp = (u8 *)&manufacture_reply->component_id;
-               edev->component_id = tmp[0] << 8 | tmp[1];
-               edev->component_revision_id =
-                       manufacture_reply->component_revision_id;
+                       tmp = (u8 *)&manufacture_reply->component_id;
+                       edev->component_id = tmp[0] << 8 | tmp[1];
+                       edev->component_revision_id =
+                               manufacture_reply->component_revision_id;
                }
        } else {
                printk(MYIOC_s_ERR_FMT
index 6ba07c7..f0737c5 100644 (file)
@@ -786,6 +786,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
                        /*
                         * Allow non-SAS & non-NEXUS_LOSS to drop into below code
                         */
+                       /* Fall through */
 
                case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:        /* 0x0048 */
                        /* Linux handles an unsolicited DID_RESET better
@@ -882,6 +883,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 
                case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:           /* 0x0044 */
                        scsi_set_resid(sc, 0);
+                       /* Fall through */
                case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:        /* 0x0040 */
                case MPI_IOCSTATUS_SUCCESS:                     /* 0x0000 */
                        sc->result = (DID_OK << 16) | scsi_status;
@@ -1934,7 +1936,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
        /*  If our attempts to reset the host failed, then return a failed
         *  status.  The host will be taken off line by the SCSI mid-layer.
         */
-    retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+       retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
        if (retval < 0)
                status = FAILED;
        else
index 7172b0b..eabc4de 100644 (file)
@@ -258,8 +258,6 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
        IOCPage4_t              *IOCPage4Ptr;
        MPT_FRAME_HDR           *mf;
        dma_addr_t               dataDma;
-       u16                      req_idx;
-       u32                      frameOffset;
        u32                      flagsLength;
        int                      ii;
 
@@ -276,9 +274,6 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
         */
        pReq = (Config_t *)mf;
 
-       req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
-       frameOffset = ioc->req_sz - sizeof(IOCPage4_t);
-
        /* Complete the request frame (same for all requests).
         */
        pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
index 6d84513..9544eb6 100644 (file)
@@ -14,7 +14,7 @@
 #include "fabrics.h"
 #include <linux/nvme-fc-driver.h>
 #include <linux/nvme-fc.h>
-
+#include <scsi/scsi_transport_fc.h>
 
 /* *************************** Data Structures/Defines ****************** */
 
index 01c23d2..fe0535a 100644 (file)
@@ -272,9 +272,8 @@ mrs[] = {
 static void NCR5380_print(struct Scsi_Host *instance)
 {
        struct NCR5380_hostdata *hostdata = shost_priv(instance);
-       unsigned char status, data, basr, mr, icr, i;
+       unsigned char status, basr, mr, icr, i;
 
-       data = NCR5380_read(CURRENT_SCSI_DATA_REG);
        status = NCR5380_read(STATUS_REG);
        mr = NCR5380_read(MODE_REG);
        icr = NCR5380_read(INITIATOR_COMMAND_REG);
@@ -1933,13 +1932,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                        if (!hostdata->connected)
                                                return;
 
-                                       /* Fall through to reject message */
-
+                                       /* Reject message */
+                                       /* Fall through */
+                               default:
                                        /*
                                         * If we get something weird that we aren't expecting,
-                                        * reject it.
+                                        * log it.
                                         */
-                               default:
                                        if (tmp == EXTENDED_MESSAGE)
                                                scmd_printk(KERN_INFO, cmd,
                                                            "rejecting unknown extended message code %02x, length %d\n",
index 55ac55e..40fe08a 100644 (file)
@@ -3,7 +3,7 @@
 # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
 #
 config SCSI_AIC7XXX
-       tristate "Adaptec AIC7xxx Fast -> U160 support (New Driver)"
+       tristate "Adaptec AIC7xxx Fast -> U160 support"
        depends on (PCI || EISA) && SCSI
        select SCSI_SPI_ATTRS
        ---help---
index d4a7263..a9d40d3 100644 (file)
@@ -1666,7 +1666,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
                                        printk("\tCRC Value Mismatch\n");
                                if ((sstat2 & CRCENDERR) != 0)
                                        printk("\tNo terminal CRC packet "
-                                              "recevied\n");
+                                              "received\n");
                                if ((sstat2 & CRCREQERR) != 0)
                                        printk("\tIllegal CRC packet "
                                               "request\n");
index 1267200..446a789 100644 (file)
@@ -194,12 +194,11 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
                                ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
                                ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
                                ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
-                               if (dev->id[c][target_id].last_len != adrcnt)
-                               {
-                                       k = dev->id[c][target_id].last_len;
+                               if (dev->id[c][target_id].last_len != adrcnt) {
+                                       k = dev->id[c][target_id].last_len;
                                        k -= adrcnt;
                                        dev->id[c][target_id].tran_len = k;                        
-                               dev->id[c][target_id].last_len = adrcnt;                           
+                                       dev->id[c][target_id].last_len = adrcnt;
                                }
 #ifdef ED_DBGP
                                printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len);
index 0a6972e..ea042c1 100644 (file)
@@ -963,7 +963,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
  * @ctrl: ptr to ctrl_info
  * @cq: Completion Queue
  * @dq: Default Queue
- * @lenght: ring size
+ * @length: ring size
  * @entry_size: size of each entry in DEFQ
  * @is_header: Header or Data DEFQ
  * @ulp_num: Bind to which ULP
index bc9f2a2..8def63c 100644 (file)
@@ -1083,7 +1083,6 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
 {
        struct bnx2fc_rport *tgt = io_req->tgt;
-       int rc = SUCCESS;
        unsigned int time_left;
 
        io_req->wait_for_comp = 1;
@@ -1110,7 +1109,7 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
 
        spin_lock_bh(&tgt->tgt_lock);
-       return rc;
+       return SUCCESS;
 }
 
 /**
index 7c88147..b254040 100644 (file)
@@ -474,13 +474,39 @@ csio_reduce_sqsets(struct csio_hw *hw, int cnt)
        csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
 }
 
+static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
+{
+       struct csio_hw *hw = affd->priv;
+       u8 i;
+
+       if (!nvecs)
+               return;
+
+       if (nvecs < hw->num_pports) {
+               affd->nr_sets = 1;
+               affd->set_size[0] = nvecs;
+               return;
+       }
+
+       affd->nr_sets = hw->num_pports;
+       for (i = 0; i < hw->num_pports; i++)
+               affd->set_size[i] = nvecs / hw->num_pports;
+}
+
 static int
 csio_enable_msix(struct csio_hw *hw)
 {
        int i, j, k, n, min, cnt;
        int extra = CSIO_EXTRA_VECS;
        struct csio_scsi_cpu_info *info;
-       struct irq_affinity desc = { .pre_vectors = 2 };
+       struct irq_affinity desc = {
+               .pre_vectors = CSIO_EXTRA_VECS,
+               .calc_sets = csio_calc_sets,
+               .priv = hw,
+       };
+
+       if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
+               return -ENOSPC;
 
        min = hw->num_pports + extra;
        cnt = hw->num_sqsets + extra;
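
The csio_isr.c hunk above moves csio_enable_msix() to the irq_affinity "sets" interface: rather than a fixed vector layout, the driver registers a ->calc_sets() callback that the IRQ core invokes once it knows how many MSI-X vectors it can actually grant, and the callback splits those vectors into one affinity set per port. Below is a minimal, hedged sketch of that pattern for reference; it is not part of this series, and the example_* names and vector counts are hypothetical.

/*
 * Sketch of the irq_affinity "sets" usage shown in the csio hunk.
 * All example_* identifiers are hypothetical.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

#define EXAMPLE_EXTRA_VECS 2	/* vectors reserved for non-queue interrupts */

struct example_hw {
	struct pci_dev *pdev;
	unsigned int num_ports;
};

/* Called by the IRQ core once the granted vector count (nvecs) is known. */
static void example_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct example_hw *hw = affd->priv;
	unsigned int i;

	if (!nvecs)
		return;

	/* Too few vectors: fall back to a single set covering all of them. */
	if (nvecs < hw->num_ports) {
		affd->nr_sets = 1;
		affd->set_size[0] = nvecs;
		return;
	}

	/* Otherwise split the granted vectors evenly across the ports. */
	affd->nr_sets = hw->num_ports;
	for (i = 0; i < hw->num_ports; i++)
		affd->set_size[i] = nvecs / hw->num_ports;
}

static int example_enable_msix(struct example_hw *hw, int min, int max)
{
	struct irq_affinity desc = {
		.pre_vectors = EXAMPLE_EXTRA_VECS,
		.calc_sets   = example_calc_sets,
		.priv        = hw,
	};

	/* The core supports at most IRQ_AFFINITY_MAX_SETS affinity sets. */
	if (hw->num_ports > IRQ_AFFINITY_MAX_SETS)
		return -ENOSPC;

	/* ->calc_sets() runs before the vectors are finally allocated. */
	return pci_alloc_irq_vectors_affinity(hw->pdev, min, max,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &desc);
}

The bounds check against IRQ_AFFINITY_MAX_SETS mirrors the one added to csio_enable_msix(); without it, a port count larger than the core's set limit would overflow set_size[].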
index 75e1273..b8dd9e6 100644 (file)
@@ -979,14 +979,17 @@ static int init_act_open(struct cxgbi_sock *csk)
        csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
        if (csk->atid < 0) {
                pr_err("NO atid available.\n");
-               goto rel_resource;
+               return -EINVAL;
        }
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
 
        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
-       if (!skb)
-               goto rel_resource;
+       if (!skb) {
+               cxgb3_free_atid(t3dev, csk->atid);
+               cxgbi_sock_put(csk);
+               return -ENOMEM;
+       }
        skb->sk = (struct sock *)csk;
        set_arp_failure_handler(skb, act_open_arp_failure);
        csk->snd_win = cxgb3i_snd_win;
@@ -1007,11 +1010,6 @@ static int init_act_open(struct cxgbi_sock *csk)
        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        return 0;
-
-rel_resource:
-       if (skb)
-               __kfree_skb(skb);
-       return -EINVAL;
 }
 
 cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
index d44914e..124f334 100644 (file)
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
 #define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
 static int cxgb4i_rcv_win = -1;
 module_param(cxgb4i_rcv_win, int, 0644);
-MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes");
+MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");
 
 #define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
 static int cxgb4i_snd_win = -1;
index 006372b..8b915d4 100644 (file)
@@ -282,7 +282,6 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
 
-#if IS_ENABLED(CONFIG_IPV6)
 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
                                                     int *port)
 {
@@ -315,7 +314,6 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
                  ndev, ndev->name);
        return NULL;
 }
-#endif
 
 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
 {
@@ -653,6 +651,8 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
        }
 
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
+       if (!cdev)
+               cdev = cxgbi_device_find_by_mac(ndev, &port);
        if (!cdev) {
                pr_info("dst %pI4, %s, NOT cxgbi device.\n",
                        &daddr->sin_addr.s_addr, ndev->name);
@@ -2310,7 +2310,6 @@ int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
 {
        struct cxgbi_endpoint *cep = ep->dd_data;
        struct cxgbi_sock *csk;
-       int len;
 
        log_debug(1 << CXGBI_DBG_ISCSI,
                "cls_conn 0x%p, param %d.\n", ep, param);
@@ -2328,9 +2327,9 @@ int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
                return iscsi_conn_get_addr_param((struct sockaddr_storage *)
                                                 &csk->daddr, param, buf);
        default:
-               return -ENOSYS;
+               break;
        }
-       return len;
+       return -ENOSYS;
 }
 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
 
@@ -2563,13 +2562,9 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
                        pr_info("shost 0x%p, priv NULL.\n", shost);
                        goto err_out;
                }
-
-               rtnl_lock();
-               if (!vlan_uses_dev(hba->ndev))
-                       ifindex = hba->ndev->ifindex;
-               rtnl_unlock();
        }
 
+check_route:
        if (dst_addr->sa_family == AF_INET) {
                csk = cxgbi_check_route(dst_addr, ifindex);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2590,6 +2585,13 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
        if (!hba)
                hba = csk->cdev->hbas[csk->port_id];
        else if (hba != csk->cdev->hbas[csk->port_id]) {
+               if (ifindex != hba->ndev->ifindex) {
+                       cxgbi_sock_put(csk);
+                       cxgbi_sock_closed(csk);
+                       ifindex = hba->ndev->ifindex;
+                       goto check_route;
+               }
+
                pr_info("Could not connect through requested host %u"
                        "hba 0x%p != 0x%p (%u).\n",
                        shost->host_no, hba,
index abdc34a..a3afd14 100644 (file)
@@ -835,8 +835,8 @@ static void adpt_i2o_sys_shutdown(void)
        adpt_hba *pHba, *pNext;
        struct adpt_i2o_post_wait_data *p1, *old;
 
-        printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
-        printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
+       printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
+       printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
        /* Delete all IOPs from the controller chain */
        /* They should have already been released by the
         * scsi-core
@@ -859,7 +859,7 @@ static void adpt_i2o_sys_shutdown(void)
 //     spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
        adpt_post_wait_queue = NULL;
 
-        printk(KERN_INFO "Adaptec I2O controllers down.\n");
+       printk(KERN_INFO "Adaptec I2O controllers down.\n");
 }
 
 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
@@ -3390,7 +3390,7 @@ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
                return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
        }
 
-        return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ 
+       return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
 }
 
 
@@ -3463,8 +3463,8 @@ static int adpt_i2o_enable_hba(adpt_hba* pHba)
 
 static int adpt_i2o_systab_send(adpt_hba* pHba)
 {
-        u32 msg[12];
-        int ret;
+       u32 msg[12];
+       int ret;
 
        msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
        msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
index e7f1dd4..0ca9b43 100644 (file)
@@ -3697,8 +3697,9 @@ static int ioc_general(void __user *arg, char *cmnd)
 
        rval = 0;
 out_free_buf:
-       dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len, buf,
-                       paddr);
+       if (buf)
+               dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
+                                 buf, paddr);
        return rval;
 }
  
index 9bfa9f1..fc87994 100644 (file)
@@ -170,6 +170,7 @@ struct hisi_sas_phy {
        u32             code_violation_err_count;
        enum sas_linkrate       minimum_linkrate;
        enum sas_linkrate       maximum_linkrate;
+       int enable;
 };
 
 struct hisi_sas_port {
@@ -551,6 +552,8 @@ extern int hisi_sas_slave_configure(struct scsi_device *sdev);
 extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
 extern void hisi_sas_scan_start(struct Scsi_Host *shost);
 extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
+extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
+                               int enable);
 extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
 extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
                                    struct sas_task *task,
index 14bac49..8a7feb8 100644 (file)
@@ -10,7 +10,6 @@
  */
 
 #include "hisi_sas.h"
-#include "../libsas/sas_internal.h"
 #define DRV_NAME "hisi_sas"
 
 #define DEV_IS_GONE(dev) \
@@ -171,7 +170,7 @@ void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
        int phy_no;
 
        for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
-               hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+               hisi_sas_phy_enable(hisi_hba, phy_no, 0);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
 
@@ -684,7 +683,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
                id->initiator_bits = SAS_PROTOCOL_ALL;
                id->target_bits = phy->identify.target_port_protocols;
        } else if (phy->phy_type & PORT_TYPE_SATA) {
-               /*Nothing*/
+               /* Nothing */
        }
 
        sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
@@ -755,7 +754,8 @@ static int hisi_sas_init_device(struct domain_device *device)
                 * STP target port
                 */
                local_phy = sas_get_local_phy(device);
-               if (!scsi_is_sas_phy_local(local_phy)) {
+               if (!scsi_is_sas_phy_local(local_phy) &&
+                   !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
                        unsigned long deadline = ata_deadline(jiffies, 20000);
                        struct sata_device *sata_dev = &device->sata_dev;
                        struct ata_host *ata_host = sata_dev->ata_host;
@@ -770,8 +770,7 @@ static int hisi_sas_init_device(struct domain_device *device)
                }
                sas_put_local_phy(local_phy);
                if (rc) {
-                       dev_warn(dev, "SATA disk hardreset fail: 0x%x\n",
-                                rc);
+                       dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
                        return rc;
                }
 
@@ -976,6 +975,30 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
        timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
 }
 
+/* Wrapper to ensure we track hisi_sas_phy.enable properly */
+void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
+{
+       struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+       struct asd_sas_phy *aphy = &phy->sas_phy;
+       struct sas_phy *sphy = aphy->phy;
+       unsigned long flags;
+
+       spin_lock_irqsave(&phy->lock, flags);
+
+       if (enable) {
+               /* We may have been enabled already; if so, don't touch */
+               if (!phy->enable)
+                       sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+               hisi_hba->hw->phy_start(hisi_hba, phy_no);
+       } else {
+               sphy->negotiated_linkrate = SAS_PHY_DISABLED;
+               hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+       }
+       phy->enable = enable;
+       spin_unlock_irqrestore(&phy->lock, flags);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
+
 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
 {
        struct sas_ha_struct *sas_ha = sas_phy->ha;
@@ -1112,10 +1135,10 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
        sas_phy->phy->maximum_linkrate = max;
        sas_phy->phy->minimum_linkrate = min;
 
-       hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 0);
        msleep(100);
        hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
-       hisi_hba->hw->phy_start(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 
        return 0;
 }
@@ -1133,13 +1156,13 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                break;
 
        case PHY_FUNC_LINK_RESET:
-               hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+               hisi_sas_phy_enable(hisi_hba, phy_no, 0);
                msleep(100);
-               hisi_hba->hw->phy_start(hisi_hba, phy_no);
+               hisi_sas_phy_enable(hisi_hba, phy_no, 1);
                break;
 
        case PHY_FUNC_DISABLE:
-               hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+               hisi_sas_phy_enable(hisi_hba, phy_no, 0);
                break;
 
        case PHY_FUNC_SET_LINK_RATE:
@@ -1264,8 +1287,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                        /* no error, but return the number of bytes of
                         * underrun
                         */
-                       dev_warn(dev, "abort tmf: task to dev %016llx "
-                                "resp: 0x%x sts 0x%x underrun\n",
+                       dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
                                 SAS_ADDR(device->sas_addr),
                                 task->task_status.resp,
                                 task->task_status.stat);
@@ -1280,10 +1302,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                        break;
                }
 
-               dev_warn(dev, "abort tmf: task to dev "
-                        "%016llx resp: 0x%x status 0x%x\n",
-                        SAS_ADDR(device->sas_addr), task->task_status.resp,
-                        task->task_status.stat);
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                   task->task_status.stat == SAS_OPEN_REJECT) {
+                       dev_warn(dev, "abort tmf: open reject failed\n");
+                       res = -EIO;
+               } else {
+                       dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
+                                SAS_ADDR(device->sas_addr),
+                                task->task_status.resp,
+                                task->task_status.stat);
+               }
                sas_free_task(task);
                task = NULL;
        }
@@ -1427,9 +1455,9 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
                                        sas_ha->notify_port_event(sas_phy,
                                                        PORTE_BROADCAST_RCVD);
                        }
-               } else if (old_state & (1 << phy_no))
-                       /* PHY down but was up before */
+               } else {
                        hisi_sas_phy_down(hisi_hba, phy_no, 0);
+               }
 
        }
 }
@@ -1711,7 +1739,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct device *dev = hisi_hba->dev;
        struct hisi_sas_tmf_task tmf_task;
-       int rc = TMF_RESP_FUNC_FAILED;
+       int rc;
 
        rc = hisi_sas_internal_task_abort(hisi_hba, device,
                                          HISI_SAS_INT_ABT_DEV, 0);
@@ -1803,7 +1831,7 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
 
        if (dev_is_sata(device)) {
                rc = hisi_sas_softreset_ata_disk(device);
-               if (rc)
+               if (rc == TMF_RESP_FUNC_FAILED)
                        return TMF_RESP_FUNC_FAILED;
        }
 
@@ -2100,10 +2128,8 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
        }
 
 exit:
-       dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
-               "resp: 0x%x sts 0x%x\n",
-               SAS_ADDR(device->sas_addr),
-               task,
+       dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
+               SAS_ADDR(device->sas_addr), task,
                task->task_status.resp, /* 0 is complete, -1 is undelivered */
                task->task_status.stat);
        sas_free_task(task);
@@ -2172,16 +2198,18 @@ static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
 {
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_phy *sphy = sas_phy->phy;
-       struct sas_phy_data *d = sphy->hostdata;
+       unsigned long flags;
 
        phy->phy_attached = 0;
        phy->phy_type = 0;
        phy->port = NULL;
 
-       if (d->enable)
+       spin_lock_irqsave(&phy->lock, flags);
+       if (phy->enable)
                sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
        else
                sphy->negotiated_linkrate = SAS_PHY_DISABLED;
+       spin_unlock_irqrestore(&phy->lock, flags);
 }
 
 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
@@ -2234,6 +2262,19 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
 
+int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+       struct hisi_hba *hisi_hba = shost_priv(shost);
+
+       if (reset_type != SCSI_ADAPTER_RESET)
+               return -EOPNOTSUPP;
+
+       queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
+
 struct scsi_transport_template *hisi_sas_stt;
 EXPORT_SYMBOL_GPL(hisi_sas_stt);
 
@@ -2491,22 +2532,19 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
 
                if (device_property_read_u32(dev, "ctrl-reset-reg",
                                             &hisi_hba->ctrl_reset_reg)) {
-                       dev_err(dev,
-                               "could not get property ctrl-reset-reg\n");
+                       dev_err(dev, "could not get property ctrl-reset-reg\n");
                        return -ENOENT;
                }
 
                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                             &hisi_hba->ctrl_reset_sts_reg)) {
-                       dev_err(dev,
-                               "could not get property ctrl-reset-sts-reg\n");
+                       dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
                        return -ENOENT;
                }
 
                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                             &hisi_hba->ctrl_clock_ena_reg)) {
-                       dev_err(dev,
-                               "could not get property ctrl-clock-ena-reg\n");
+                       dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
                        return -ENOENT;
                }
        }
index 2938074..78fe7d3 100644 (file)
@@ -798,16 +798,11 @@ static void start_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
        enable_phy_v1_hw(hisi_hba, phy_no);
 }
 
-static void stop_phy_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
-{
-       disable_phy_v1_hw(hisi_hba, phy_no);
-}
-
 static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
-       stop_phy_v1_hw(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 0);
        msleep(100);
-       start_phy_v1_hw(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 }
 
 static void start_phys_v1_hw(struct timer_list *t)
@@ -817,7 +812,7 @@ static void start_phys_v1_hw(struct timer_list *t)
 
        for (i = 0; i < hisi_hba->n_phy; i++) {
                hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x12a);
-               start_phy_v1_hw(hisi_hba, i);
+               hisi_sas_phy_enable(hisi_hba, i, 1);
        }
 }
 
@@ -1695,8 +1690,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
                for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
                        irq = platform_get_irq(pdev, idx);
                        if (!irq) {
-                               dev_err(dev,
-                                       "irq init: fail map phy interrupt %d\n",
+                               dev_err(dev, "irq init: fail map phy interrupt %d\n",
                                        idx);
                                return -ENOENT;
                        }
@@ -1704,8 +1698,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
                        rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
                                              DRV_NAME " phy", phy);
                        if (rc) {
-                               dev_err(dev, "irq init: could not request "
-                                       "phy interrupt %d, rc=%d\n",
+                               dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
                                        irq, rc);
                                return -ENOENT;
                        }
@@ -1742,8 +1735,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
                rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
                                      DRV_NAME " fatal", hisi_hba);
                if (rc) {
-                       dev_err(dev,
-                               "irq init: could not request fatal interrupt %d, rc=%d\n",
+                       dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
                                irq, rc);
                        return -ENOENT;
                }
@@ -1823,6 +1815,7 @@ static struct scsi_host_template sht_v1_hw = {
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = host_attrs_v1_hw,
+       .host_reset             = hisi_sas_host_reset,
 };
 
 static const struct hisi_sas_hw hisi_sas_v1_hw = {
index 89160ab..d4650be 100644 (file)
@@ -1546,14 +1546,14 @@ static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        u32 txid_auto;
 
-       disable_phy_v2_hw(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 0);
        if (phy->identify.device_type == SAS_END_DEVICE) {
                txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
                hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
                                        txid_auto | TX_HARDRST_MSK);
        }
        msleep(100);
-       start_phy_v2_hw(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 }
 
 static void phy_get_events_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1586,7 +1586,7 @@ static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
                if (!sas_phy->phy->enabled)
                        continue;
 
-               start_phy_v2_hw(hisi_hba, i);
+               hisi_sas_phy_enable(hisi_hba, i, 1);
        }
 }
 
@@ -2423,14 +2423,12 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
                        slot_err_v2_hw(hisi_hba, task, slot, 2);
 
                if (ts->stat != SAS_DATA_UNDERRUN)
-                       dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
-                               "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
-                               "Error info: 0x%x 0x%x 0x%x 0x%x\n",
-                               slot->idx, task, sas_dev->device_id,
-                               complete_hdr->dw0, complete_hdr->dw1,
-                               complete_hdr->act, complete_hdr->dw3,
-                               error_info[0], error_info[1],
-                               error_info[2], error_info[3]);
+                       dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+                                slot->idx, task, sas_dev->device_id,
+                                complete_hdr->dw0, complete_hdr->dw1,
+                                complete_hdr->act, complete_hdr->dw3,
+                                error_info[0], error_info[1],
+                                error_info[2], error_info[3]);
 
                if (unlikely(slot->abort))
                        return ts->stat;
@@ -2502,7 +2500,7 @@ out:
                spin_lock_irqsave(&device->done_lock, flags);
                if (test_bit(SAS_HA_FROZEN, &ha->state)) {
                        spin_unlock_irqrestore(&device->done_lock, flags);
-                       dev_info(dev, "slot complete: task(%p) ignored\n ",
+                       dev_info(dev, "slot complete: task(%p) ignored\n",
                                 task);
                        return sts;
                }
@@ -2935,7 +2933,7 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
 
                        if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
                                dev_warn(dev, "phy%d identify timeout\n",
-                                               phy_no);
+                                        phy_no);
                                hisi_sas_notify_phy_event(phy,
                                                HISI_PHYE_LINK_RESET);
                        }
@@ -3036,7 +3034,7 @@ static const struct hisi_sas_hw_error axi_error[] = {
        { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
        { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
        { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
-       {},
+       {}
 };
 
 static const struct hisi_sas_hw_error fifo_error[] = {
@@ -3045,7 +3043,7 @@ static const struct hisi_sas_hw_error fifo_error[] = {
        { .msk = BIT(10), .msg = "GETDQE_FIFO" },
        { .msk = BIT(11), .msg = "CMDP_FIFO" },
        { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
-       {},
+       {}
 };
 
 static const struct hisi_sas_hw_error fatal_axi_errors[] = {
@@ -3109,12 +3107,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
                                if (!(err_value & sub->msk))
                                        continue;
                                dev_err(dev, "%s (0x%x) found!\n",
-                                        sub->msg, irq_value);
+                                       sub->msg, irq_value);
                                queue_work(hisi_hba->wq, &hisi_hba->rst_work);
                        }
                } else {
                        dev_err(dev, "%s (0x%x) found!\n",
-                                axi_error->msg, irq_value);
+                               axi_error->msg, irq_value);
                        queue_work(hisi_hba->wq, &hisi_hba->rst_work);
                }
        }
@@ -3258,7 +3256,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
        /* check ERR bit of Status Register */
        if (fis->status & ATA_ERR) {
                dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
-                               fis->status);
+                        fis->status);
                hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
                res = IRQ_NONE;
                goto end;
@@ -3349,8 +3347,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
                                      DRV_NAME " phy", hisi_hba);
                if (rc) {
-                       dev_err(dev, "irq init: could not request "
-                               "phy interrupt %d, rc=%d\n",
+                       dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
                                irq, rc);
                        rc = -ENOENT;
                        goto free_phy_int_irqs;
@@ -3364,8 +3361,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
                                      DRV_NAME " sata", phy);
                if (rc) {
-                       dev_err(dev, "irq init: could not request "
-                               "sata interrupt %d, rc=%d\n",
+                       dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n",
                                irq, rc);
                        rc = -ENOENT;
                        goto free_sata_int_irqs;
@@ -3377,8 +3373,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
                                      DRV_NAME " fatal", hisi_hba);
                if (rc) {
-                       dev_err(dev,
-                               "irq init: could not request fatal interrupt %d, rc=%d\n",
+                       dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
                                irq, rc);
                        rc = -ENOENT;
                        goto free_fatal_int_irqs;
@@ -3393,8 +3388,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
                                      DRV_NAME " cq", cq);
                if (rc) {
-                       dev_err(dev,
-                               "irq init: could not request cq interrupt %d, rc=%d\n",
+                       dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
                                irq, rc);
                        rc = -ENOENT;
                        goto free_cq_int_irqs;
@@ -3546,7 +3540,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
                break;
        default:
                dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
-                               reg_type);
+                       reg_type);
                return -EINVAL;
        }
 
@@ -3599,6 +3593,7 @@ static struct scsi_host_template sht_v2_hw = {
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = host_attrs_v2_hw,
+       .host_reset             = hisi_sas_host_reset,
 };
 
 static const struct hisi_sas_hw hisi_sas_v2_hw = {
index 086695a..49620c2 100644 (file)
 #define CFG_ABT_SET_IPTT_DONE  0xd8
 #define CFG_ABT_SET_IPTT_DONE_OFF      0
 #define HGC_IOMB_PROC1_STATUS  0x104
+#define HGC_LM_DFX_STATUS2             0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF                0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK        (0xfff << \
+                                        HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF                12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK        (0x7ff << \
+                                        HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR               0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF        0
+#define HGC_CQE_ECC_1B_ADDR_MSK        (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF        8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR              0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF       0
+#define HGC_IOST_ECC_1B_ADDR_MSK       (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF       16
+#define HGC_IOST_ECC_MB_ADDR_MSK       (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR               0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF        0
+#define HGC_DQE_ECC_1B_ADDR_MSK        (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF        16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
 #define CHNL_INT_STATUS                        0x148
+#define HGC_ITCT_ECC_ADDR              0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF               0
+#define HGC_ITCT_ECC_1B_ADDR_MSK               (0x3ff << \
+                                                HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF               16
+#define HGC_ITCT_ECC_MB_ADDR_MSK               (0x3ff << \
+                                                HGC_ITCT_ECC_MB_ADDR_OFF)
 #define HGC_AXI_FIFO_ERR_INFO  0x154
 #define AXI_ERR_INFO_OFF               0
 #define AXI_ERR_INFO_MSK               (0xff << AXI_ERR_INFO_OFF)
 #define ENT_INT_SRC3_ITC_INT_OFF       15
 #define ENT_INT_SRC3_ITC_INT_MSK       (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
 #define ENT_INT_SRC3_ABT_OFF           16
+#define ENT_INT_SRC3_DQE_POISON_OFF    18
+#define ENT_INT_SRC3_IOST_POISON_OFF   19
+#define ENT_INT_SRC3_ITCT_POISON_OFF   20
+#define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF       21
 #define ENT_INT_SRC_MSK1               0x1c4
 #define ENT_INT_SRC_MSK2               0x1c8
 #define ENT_INT_SRC_MSK3               0x1cc
 #define HGC_COM_INT_MSK                                0x1d8
 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
 #define SAS_ECC_INTR                   0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF            0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF            1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF   2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF   3
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF   4
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF   5
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF       6
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF       7
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF       8
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF       9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF            10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF            11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF       12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF       13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF       14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF       15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF       16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF       17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF       18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF       19
+#define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF                20
+#define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF                21
 #define SAS_ECC_INTR_MSK               0x1ec
 #define HGC_ERR_STAT_EN                        0x238
 #define CQE_SEND_CNT                   0x248
 #define COMPL_Q_0_DEPTH                        0x4e8
 #define COMPL_Q_0_WR_PTR               0x4ec
 #define COMPL_Q_0_RD_PTR               0x4f0
+#define HGC_RXM_DFX_STATUS14           0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF  0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK  (0x1ff << \
+                                        HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF  9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK  (0x1ff << \
+                                        HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF  18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK  (0x1ff << \
+                                        HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15           0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF  0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK  (0x1ff << \
+                                        HGC_RXM_DFX_STATUS15_MEM3_OFF)
 #define AWQOS_AWCACHE_CFG      0xc84
 #define ARQOS_ARCACHE_CFG      0xc88
 #define HILINK_ERR_DFX         0xe04
 #define CHL_INT0_PHY_RDY_OFF           5
 #define CHL_INT0_PHY_RDY_MSK           (0x1 << CHL_INT0_PHY_RDY_OFF)
 #define CHL_INT1                       (PORT_BASE + 0x1b8)
-#define CHL_INT1_DMAC_TX_ECC_ERR_OFF   15
-#define CHL_INT1_DMAC_TX_ECC_ERR_MSK   (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
-#define CHL_INT1_DMAC_RX_ECC_ERR_OFF   17
-#define CHL_INT1_DMAC_RX_ECC_ERR_MSK   (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF        15
+#define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF        16
+#define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF        17
+#define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF        18
 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF        19
 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF        20
 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF        21
 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF        22
+#define CHL_INT1_DMAC_TX_FIFO_ERR_OFF  23
+#define CHL_INT1_DMAC_RX_FIFO_ERR_OFF  24
+#define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF     26
+#define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF     27
 #define CHL_INT2                       (PORT_BASE + 0x1bc)
 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
 #define CHL_INT2_RX_DISP_ERR_OFF       28
 #define AM_CFG_SINGLE_PORT_MAX_TRANS   (0x5014)
 #define AXI_CFG                                        (0x5100)
 #define AM_ROB_ECC_ERR_ADDR            (0x510c)
-#define AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF 0
-#define AM_ROB_ECC_ONEBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_ONEBIT_ERR_ADDR_OFF)
-#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
-#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+#define AM_ROB_ECC_ERR_ADDR_OFF        0
+#define AM_ROB_ECC_ERR_ADDR_MSK        0xffffffff
 
 /* RAS registers need init */
 #define RAS_BASE               (0x6000)
@@ -408,6 +479,10 @@ struct hisi_sas_err_record_v3 {
 #define BASE_VECTORS_V3_HW  16
 #define MIN_AFFINE_VECTORS_V3_HW  (BASE_VECTORS_V3_HW + 1)
 
+enum {
+       DSM_FUNC_ERR_HANDLE_MSI = 0,
+};
+
 static bool hisi_sas_intr_conv;
 MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
 
@@ -474,7 +549,6 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 
 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 {
-       struct pci_dev *pdev = hisi_hba->pci_dev;
        int i;
 
        /* Global registers init */
@@ -494,14 +568,11 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
        hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
-       if (pdev->revision >= 0x21)
-               hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
-       else
-               hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
+       hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
        hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
        hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
        hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
-       hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x0);
+       hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
        hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
        hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
        for (i = 0; i < hisi_hba->queue_count; i++)
@@ -532,12 +603,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
                hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
                hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
                hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
-               if (pdev->revision >= 0x21)
-                       hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
-                                       0xffffffff);
-               else
-                       hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
-                                       0xff87ffff);
+               hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
                hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
                hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
                hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -804,6 +870,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
 static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
 {
        struct device *dev = hisi_hba->dev;
+       union acpi_object *obj;
+       guid_t guid;
        int rc;
 
        rc = reset_hw_v3_hw(hisi_hba);
@@ -815,6 +883,19 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
        msleep(100);
        init_reg_v3_hw(hisi_hba);
 
+       if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
+               dev_err(dev, "Parse GUID failed\n");
+               return -EINVAL;
+       }
+
+       /* Switch over to MSI handling, from PCI AER default */
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
+                               DSM_FUNC_ERR_HANDLE_MSI, NULL);
+       if (!obj)
+               dev_warn(dev, "Switch over to MSI handling failed\n");
+       else
+               ACPI_FREE(obj);
+
        return 0;
 }
 
@@ -856,14 +937,14 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        u32 txid_auto;
 
-       disable_phy_v3_hw(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 0);
        if (phy->identify.device_type == SAS_END_DEVICE) {
                txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
                hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
                                        txid_auto | TX_HARDRST_MSK);
        }
        msleep(100);
-       start_phy_v3_hw(hisi_hba, phy_no);
+       hisi_sas_phy_enable(hisi_hba, phy_no, 1);
 }
 
 static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
@@ -882,7 +963,7 @@ static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
                if (!sas_phy->phy->enabled)
                        continue;
 
-               start_phy_v3_hw(hisi_hba, i);
+               hisi_sas_phy_enable(hisi_hba, i, 1);
        }
 }
 
@@ -929,7 +1010,7 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
                                DLVRY_Q_0_RD_PTR + (queue * 0x14));
        if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
                dev_warn(dev, "full queue=%d r=%d w=%d\n",
-                               queue, r, w);
+                        queue, r, w);
                return -EAGAIN;
        }
 
@@ -1380,6 +1461,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
                struct hisi_sas_initial_fis *initial_fis;
                struct dev_to_host_fis *fis;
                u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+               struct Scsi_Host *shost = hisi_hba->shost;
 
                dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
                initial_fis = &hisi_hba->initial_fis[phy_no];
@@ -1396,6 +1478,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
 
                sas_phy->oob_mode = SATA_OOB_MODE;
                attached_sas_addr[0] = 0x50;
+               attached_sas_addr[6] = shost->host_no;
                attached_sas_addr[7] = phy_no;
                memcpy(sas_phy->attached_sas_addr,
                       attached_sas_addr,
@@ -1539,6 +1622,14 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
 }
 
 static const struct hisi_sas_hw_error port_axi_error[] = {
+       {
+               .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
+               .msg = "dmac_tx_ecc_bad_err",
+       },
+       {
+               .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
+               .msg = "dmac_rx_ecc_bad_err",
+       },
        {
                .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
                .msg = "dma_tx_axi_wr_err",
@@ -1555,6 +1646,22 @@ static const struct hisi_sas_hw_error port_axi_error[] = {
                .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
                .msg = "dma_rx_axi_rd_err",
        },
+       {
+               .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
+               .msg = "dma_tx_fifo_err",
+       },
+       {
+               .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
+               .msg = "dma_rx_fifo_err",
+       },
+       {
+               .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
+               .msg = "dma_tx_axi_ruser_err",
+       },
+       {
+               .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
+               .msg = "dma_rx_axi_ruser_err",
+       },
 };
 
 static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1719,6 +1826,121 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
        return IRQ_HANDLED;
 }
 
+static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
+               .msk = HGC_DQE_ECC_MB_ADDR_MSK,
+               .shift = HGC_DQE_ECC_MB_ADDR_OFF,
+               .msg = "hgc_dqe_eccbad_intr found: ram addr is 0x%08X\n",
+               .reg = HGC_DQE_ECC_ADDR,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
+               .msk = HGC_IOST_ECC_MB_ADDR_MSK,
+               .shift = HGC_IOST_ECC_MB_ADDR_OFF,
+               .msg = "hgc_iost_eccbad_intr found: ram addr is 0x%08X\n",
+               .reg = HGC_IOST_ECC_ADDR,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
+               .msk = HGC_ITCT_ECC_MB_ADDR_MSK,
+               .shift = HGC_ITCT_ECC_MB_ADDR_OFF,
+               .msg = "hgc_itct_eccbad_intr found: ram addr is 0x%08X\n",
+               .reg = HGC_ITCT_ECC_ADDR,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
+               .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
+               .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
+               .msg = "hgc_iostl_eccbad_intr found: mem addr is 0x%08X\n",
+               .reg = HGC_LM_DFX_STATUS2,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
+               .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
+               .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
+               .msg = "hgc_itctl_eccbad_intr found: mem addr is 0x%08X\n",
+               .reg = HGC_LM_DFX_STATUS2,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
+               .msk = HGC_CQE_ECC_MB_ADDR_MSK,
+               .shift = HGC_CQE_ECC_MB_ADDR_OFF,
+               .msg = "hgc_cqe_eccbad_intr found: ram address is 0x%08X\n",
+               .reg = HGC_CQE_ECC_ADDR,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
+               .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
+               .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
+               .msg = "rxm_mem0_eccbad_intr found: mem addr is 0x%08X\n",
+               .reg = HGC_RXM_DFX_STATUS14,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
+               .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
+               .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
+               .msg = "rxm_mem1_eccbad_intr found: mem addr is 0x%08X\n",
+               .reg = HGC_RXM_DFX_STATUS14,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
+               .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
+               .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
+               .msg = "rxm_mem2_eccbad_intr found: mem addr is 0x%08X\n",
+               .reg = HGC_RXM_DFX_STATUS14,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
+               .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
+               .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
+               .msg = "rxm_mem3_eccbad_intr found: mem addr is 0x%08X\n",
+               .reg = HGC_RXM_DFX_STATUS15,
+       },
+       {
+               .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
+               .msk = AM_ROB_ECC_ERR_ADDR_MSK,
+               .shift = AM_ROB_ECC_ERR_ADDR_OFF,
+               .msg = "ooo_ram_eccbad_intr found: ROB_ECC_ERR_ADDR=0x%08X\n",
+               .reg = AM_ROB_ECC_ERR_ADDR,
+       },
+};
+
+static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
+                                             u32 irq_value)
+{
+       struct device *dev = hisi_hba->dev;
+       const struct hisi_sas_hw_error *ecc_error;
+       u32 val;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
+               ecc_error = &multi_bit_ecc_errors[i];
+               if (irq_value & ecc_error->irq_msk) {
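+                       /* Read back which RAM location took the multi-bit ECC hit */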
+                       val = hisi_sas_read32(hisi_hba, ecc_error->reg);
+                       val &= ecc_error->msk;
+                       val >>= ecc_error->shift;
+                       dev_err(dev, ecc_error->msg, val);
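+                       /* A multi-bit ECC error is not correctable, so schedule a controller reset */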
+                       queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+               }
+       }
+}
+
+static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
+{
+       u32 irq_value, irq_msk;
+
+       irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
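+       /* Mask all ECC interrupt sources while the status is read and cleared */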
+       hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+       irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+       if (irq_value)
+               multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);
+
+       hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+       hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+}
+
 static const struct hisi_sas_hw_error axi_error[] = {
        { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
        { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
@@ -1728,7 +1950,7 @@ static const struct hisi_sas_hw_error axi_error[] = {
        { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
        { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
        { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
-       {},
+       {}
 };
 
 static const struct hisi_sas_hw_error fifo_error[] = {
@@ -1737,7 +1959,7 @@ static const struct hisi_sas_hw_error fifo_error[] = {
        { .msk = BIT(10), .msg = "GETDQE_FIFO" },
        { .msk = BIT(11), .msg = "CMDP_FIFO" },
        { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
-       {},
+       {}
 };
 
 static const struct hisi_sas_hw_error fatal_axi_error[] = {
@@ -1771,6 +1993,23 @@ static const struct hisi_sas_hw_error fatal_axi_error[] = {
                .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
                .msg = "SAS_HGC_ABT fetch LM list",
        },
+       {
+               .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
+               .msg = "read dqe poison",
+       },
+       {
+               .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
+               .msg = "read iost poison",
+       },
+       {
+               .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
+               .msg = "read itct poison",
+       },
+       {
+               .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
+               .msg = "read itct ncq poison",
+       },
 };
 
 static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
@@ -1823,6 +2062,8 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
                }
        }
 
+       fatal_ecc_int_v3_hw(hisi_hba);
+
        if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
                u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
                u32 dev_id = reg_val & ITCT_DEV_MSK;
@@ -1966,13 +2207,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 
                slot_err_v3_hw(hisi_hba, task, slot);
                if (ts->stat != SAS_DATA_UNDERRUN)
-                       dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
-                               "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
-                               "Error info: 0x%x 0x%x 0x%x 0x%x\n",
-                               slot->idx, task, sas_dev->device_id,
-                               dw0, dw1, complete_hdr->act, dw3,
-                               error_info[0], error_info[1],
-                               error_info[2], error_info[3]);
+                       dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+                                slot->idx, task, sas_dev->device_id,
+                                dw0, dw1, complete_hdr->act, dw3,
+                                error_info[0], error_info[1],
+                                error_info[2], error_info[3]);
                if (unlikely(slot->abort))
                        return ts->stat;
                goto out;
@@ -2205,8 +2444,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
                                      cq_interrupt_v3_hw, irqflags,
                                      DRV_NAME " cq", cq);
                if (rc) {
-                       dev_err(dev,
-                               "could not request cq%d interrupt, rc=%d\n",
+                       dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
                                i, rc);
                        rc = -ENOENT;
                        goto free_cq_irqs;
@@ -2362,7 +2600,7 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
                break;
        default:
                dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
-                               reg_type);
+                       reg_type);
                return -EINVAL;
        }
 
@@ -2678,6 +2916,7 @@ static struct scsi_host_template sht_v3_hw = {
        .ioctl                  = sas_ioctl,
        .shost_attrs            = host_attrs_v3_hw,
        .tag_alloc_policy       = BLK_TAG_ALLOC_RR,
+       .host_reset             = hisi_sas_host_reset,
 };
 
 static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2800,7 +3039,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        hisi_hba->regs = pcim_iomap(pdev, 5, 0);
        if (!hisi_hba->regs) {
-               dev_err(dev, "cannot map register.\n");
+               dev_err(dev, "cannot map register\n");
                rc = -ENOMEM;
                goto err_out_ha;
        }
@@ -2921,161 +3160,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
        scsi_host_put(shost);
 }
 
-static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
-       { .irq_msk = BIT(19), .msg = "HILINK_INT" },
-       { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
-       { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
-       { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
-       { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
-       { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
-       { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
-       { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
-       { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
-       { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
-       { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
-       { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
-       { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
-};
-
-static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
-       { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
-       { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
-       { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
-       { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
-       { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
-       { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
-       { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
-       { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
-       { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
-       { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
-       { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
-       { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
-       { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
-       { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
-       { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
-       { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
-       { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
-       { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
-       { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
-       { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
-       { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
-       { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
-       { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
-};
-
-static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
-       { .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
-       { .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
-       { .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
-       { .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
-       { .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
-       { .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
-       { .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
-       { .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
-       { .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
-       { .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
-       { .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
-       { .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
-       { .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
-};
-
-static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
-{
-       struct device *dev = hisi_hba->dev;
-       const struct hisi_sas_hw_error *ras_error;
-       bool need_reset = false;
-       u32 irq_value;
-       int i;
-
-       irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
-       for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
-               ras_error = &sas_ras_intr0_nfe[i];
-               if (ras_error->irq_msk & irq_value) {
-                       dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
-                                       ras_error->msg, irq_value);
-                       need_reset = true;
-               }
-       }
-       hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
-
-       irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
-       for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
-               ras_error = &sas_ras_intr1_nfe[i];
-               if (ras_error->irq_msk & irq_value) {
-                       dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
-                                       ras_error->msg, irq_value);
-                       need_reset = true;
-               }
-       }
-       hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
-
-       irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
-       for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
-               ras_error = &sas_ras_intr2_nfe[i];
-               if (ras_error->irq_msk & irq_value) {
-                       dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
-                                       ras_error->msg, irq_value);
-                       need_reset = true;
-               }
-       }
-       hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
-
-       return need_reset;
-}
-
-static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
-               pci_channel_state_t state)
-{
-       struct sas_ha_struct *sha = pci_get_drvdata(pdev);
-       struct hisi_hba *hisi_hba = sha->lldd_ha;
-       struct device *dev = hisi_hba->dev;
-
-       dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
-       if (state == pci_channel_io_perm_failure)
-               return PCI_ERS_RESULT_DISCONNECT;
-
-       if (process_non_fatal_error_v3_hw(hisi_hba))
-               return PCI_ERS_RESULT_NEED_RESET;
-
-       return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
-{
-       return PCI_ERS_RESULT_RECOVERED;
-}
-
-static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
-{
-       struct sas_ha_struct *sha = pci_get_drvdata(pdev);
-       struct hisi_hba *hisi_hba = sha->lldd_ha;
-       struct device *dev = hisi_hba->dev;
-       HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
-
-       dev_info(dev, "PCI error: slot reset callback!!\n");
-       queue_work(hisi_hba->wq, &r.work);
-       wait_for_completion(r.completion);
-       if (r.done)
-               return PCI_ERS_RESULT_RECOVERED;
-
-       return PCI_ERS_RESULT_DISCONNECT;
-}
-
 static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
 {
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -3171,7 +3255,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
        pci_power_t device_state = pdev->current_state;
 
        dev_warn(dev, "resuming from operating state [D%d]\n",
-                       device_state);
+                device_state);
        pci_set_power_state(pdev, PCI_D0);
        pci_enable_wake(pdev, PCI_D0, 0);
        pci_restore_state(pdev);
@@ -3199,9 +3283,6 @@ static const struct pci_device_id sas_v3_pci_table[] = {
 MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);
 
 static const struct pci_error_handlers hisi_sas_err_handler = {
-       .error_detected = hisi_sas_error_detected_v3_hw,
-       .mmio_enabled   = hisi_sas_mmio_enabled_v3_hw,
-       .slot_reset     = hisi_sas_slot_reset_v3_hw,
        .reset_prepare  = hisi_sas_reset_prepare_v3_hw,
        .reset_done     = hisi_sas_reset_done_v3_hw,
 };
index f044e7d..1bef1da 100644 (file)
@@ -60,7 +60,7 @@
  * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
  * with an optional trailing '-' followed by a byte value (0-255).
  */
-#define HPSA_DRIVER_VERSION "3.4.20-125"
+#define HPSA_DRIVER_VERSION "3.4.20-160"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -2647,9 +2647,20 @@ static void complete_scsi_command(struct CommandList *cp)
                        decode_sense_data(ei->SenseInfo, sense_data_size,
                                &sense_key, &asc, &ascq);
                if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
-                       if (sense_key == ABORTED_COMMAND) {
+                       switch (sense_key) {
+                       case ABORTED_COMMAND:
                                cmd->result |= DID_SOFT_ERROR << 16;
                                break;
+                       case UNIT_ATTENTION:
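+                               /* 3F/0E: REPORTED LUNS DATA HAS CHANGED - rescan to pick up the new LUN list */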
+                               if (asc == 0x3F && ascq == 0x0E)
+                                       h->drv_req_rescan = 1;
+                               break;
+                       case ILLEGAL_REQUEST:
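+                               /* 25/00: LOGICAL UNIT NOT SUPPORTED - the device is gone, fail the command */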
+                               if (asc == 0x25 && ascq == 0x00) {
+                                       dev->removed = 1;
+                                       cmd->result = DID_NO_CONNECT << 16;
+                               }
+                               break;
                        }
                        break;
                }
@@ -3956,14 +3967,18 @@ static int hpsa_update_device_info(struct ctlr_info *h,
        memset(this_device->device_id, 0,
                sizeof(this_device->device_id));
        if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
-               sizeof(this_device->device_id)) < 0)
+               sizeof(this_device->device_id)) < 0) {
                dev_err(&h->pdev->dev,
-                       "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
+                       "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
                        h->ctlr, __func__,
                        h->scsi_host->host_no,
-                       this_device->target, this_device->lun,
+                       this_device->bus, this_device->target,
+                       this_device->lun,
                        scsi_device_type(this_device->devtype),
                        this_device->model);
+               rc = HPSA_LV_FAILED;
+               goto bail_out;
+       }
 
        if ((this_device->devtype == TYPE_DISK ||
                this_device->devtype == TYPE_ZBC) &&
@@ -5809,7 +5824,7 @@ static int hpsa_send_test_unit_ready(struct ctlr_info *h,
        /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
        (void) fill_cmd(c, TEST_UNIT_READY, h,
                        NULL, 0, 0, lunaddr, TYPE_CMD);
-       rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
        if (rc)
                return rc;
        /* no unmap needed here because no data xfer. */
index 6f93fee..1ecca71 100644 (file)
@@ -281,7 +281,7 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
                res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
                                              &dev->sata_dev.rps_resp);
                if (res) {
-                       pr_debug("report phy sata to %016llx:0x%x returned 0x%x\n",
+                       pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
                                 SAS_ADDR(dev->parent->sas_addr),
                                 phy->phy_id, res);
                        return res;
index 17b45a0..83f2fd7 100644 (file)
@@ -826,9 +826,14 @@ static struct domain_device *sas_ex_discover_end_dev(
 #ifdef CONFIG_SCSI_SAS_ATA
        if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
                if (child->linkrate > parent->min_linkrate) {
+                       struct sas_phy *cphy = child->phy;
+                       enum sas_linkrate min_prate = cphy->minimum_linkrate,
+                               parent_min_lrate = parent->min_linkrate,
+                               min_linkrate = (min_prate > parent_min_lrate) ?
+                                              parent_min_lrate : 0;
                        struct sas_phy_linkrates rates = {
                                .maximum_linkrate = parent->min_linkrate,
-                               .minimum_linkrate = parent->min_linkrate,
+                               .minimum_linkrate = min_linkrate,
                        };
                        int ret;
 
@@ -865,7 +870,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 
                res = sas_discover_sata(child);
                if (res) {
-                       pr_notice("sas_discover_sata() for device %16llx at %016llx:0x%x returned 0x%x\n",
+                       pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
                                  SAS_ADDR(child->sas_addr),
                                  SAS_ADDR(parent->sas_addr), phy_id, res);
                        goto out_list_del;
@@ -890,7 +895,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 
                res = sas_discover_end_dev(child);
                if (res) {
-                       pr_notice("sas_discover_end_dev() for device %16llx at %016llx:0x%x returned 0x%x\n",
+                       pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
                                  SAS_ADDR(child->sas_addr),
                                  SAS_ADDR(parent->sas_addr), phy_id, res);
                        goto out_list_del;
@@ -955,7 +960,7 @@ static struct domain_device *sas_ex_discover_expander(
        int res;
 
        if (phy->routing_attr == DIRECT_ROUTING) {
-               pr_warn("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not allowed\n",
+               pr_warn("ex %016llx:%02d:D <--> ex %016llx:0x%x is not allowed\n",
                        SAS_ADDR(parent->sas_addr), phy_id,
                        SAS_ADDR(phy->attached_sas_addr),
                        phy->attached_phy_id);
@@ -1065,7 +1070,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
            ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
            ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
            ex_phy->attached_dev_type != SAS_SATA_PENDING) {
-               pr_warn("unknown device type(0x%x) attached to ex %016llx phy 0x%x\n",
+               pr_warn("unknown device type(0x%x) attached to ex %016llx phy%02d\n",
                        ex_phy->attached_dev_type,
                        SAS_ADDR(dev->sas_addr),
                        phy_id);
@@ -1081,7 +1086,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        }
 
        if (sas_ex_join_wide_port(dev, phy_id)) {
-               pr_debug("Attaching ex phy%d to wide port %016llx\n",
+               pr_debug("Attaching ex phy%02d to wide port %016llx\n",
                         phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
                return res;
        }
@@ -1093,7 +1098,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                break;
        case SAS_FANOUT_EXPANDER_DEVICE:
                if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
-                       pr_debug("second fanout expander %016llx phy 0x%x attached to ex %016llx phy 0x%x\n",
+                       pr_debug("second fanout expander %016llx phy%02d attached to ex %016llx phy%02d\n",
                                 SAS_ADDR(ex_phy->attached_sas_addr),
                                 ex_phy->attached_phy_id,
                                 SAS_ADDR(dev->sas_addr),
@@ -1126,7 +1131,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                            SAS_ADDR(child->sas_addr)) {
                                ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
                                if (sas_ex_join_wide_port(dev, i))
-                                       pr_debug("Attaching ex phy%d to wide port %016llx\n",
+                                       pr_debug("Attaching ex phy%02d to wide port %016llx\n",
                                                 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
                        }
                }
@@ -1151,7 +1156,7 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
                     phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
-                       memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
+                       memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
 
                        return 1;
                }
@@ -1163,7 +1168,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
 {
        struct expander_device *ex = &dev->ex_dev;
        struct domain_device *child;
-       u8 sub_addr[8] = {0, };
+       u8 sub_addr[SAS_ADDR_SIZE] = {0, };
 
        list_for_each_entry(child, &ex->children, siblings) {
                if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
@@ -1173,7 +1178,7 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
                        sas_find_sub_addr(child, sub_addr);
                        continue;
                } else {
-                       u8 s2[8];
+                       u8 s2[SAS_ADDR_SIZE];
 
                        if (sas_find_sub_addr(child, s2) &&
                            (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
@@ -1261,7 +1266,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
                        else if (SAS_ADDR(sub_sas_addr) !=
                                 SAS_ADDR(phy->attached_sas_addr)) {
 
-                               pr_notice("ex %016llx phy 0x%x diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
+                               pr_notice("ex %016llx phy%02d diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
                                          SAS_ADDR(dev->sas_addr), i,
                                          SAS_ADDR(phy->attached_sas_addr),
                                          SAS_ADDR(sub_sas_addr));
@@ -1282,7 +1287,7 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
        };
        struct domain_device *parent = child->parent;
 
-       pr_notice("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x has %c:%c routing link!\n",
+       pr_notice("%s ex %016llx phy%02d <--> %s ex %016llx phy%02d has %c:%c routing link!\n",
                  ex_type[parent->dev_type],
                  SAS_ADDR(parent->sas_addr),
                  parent_phy->phy_id,
@@ -1304,7 +1309,7 @@ static int sas_check_eeds(struct domain_device *child,
 
        if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
                res = -ENODEV;
-               pr_warn("edge ex %016llx phy S:0x%x <--> edge ex %016llx phy S:0x%x, while there is a fanout ex %016llx\n",
+               pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
                        SAS_ADDR(parent->sas_addr),
                        parent_phy->phy_id,
                        SAS_ADDR(child->sas_addr),
@@ -1327,7 +1332,7 @@ static int sas_check_eeds(struct domain_device *child,
                ;
        else {
                res = -ENODEV;
-               pr_warn("edge ex %016llx phy 0x%x <--> edge ex %016llx phy 0x%x link forms a third EEDS!\n",
+               pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
                        SAS_ADDR(parent->sas_addr),
                        parent_phy->phy_id,
                        SAS_ADDR(child->sas_addr),
@@ -1445,11 +1450,11 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
                        goto out;
                res = rri_resp[2];
                if (res == SMP_RESP_NO_INDEX) {
-                       pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
+                       pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
                                SAS_ADDR(dev->sas_addr), phy_id, i);
                        goto out;
                } else if (res != SMP_RESP_FUNC_ACC) {
-                       pr_notice("%s: dev %016llx phy 0x%x index 0x%x result 0x%x\n",
+                       pr_notice("%s: dev %016llx phy%02d index 0x%x result 0x%x\n",
                                  __func__, SAS_ADDR(dev->sas_addr), phy_id,
                                  i, res);
                        goto out;
@@ -1515,7 +1520,7 @@ static int sas_configure_set(struct domain_device *dev, int phy_id,
                goto out;
        res = cri_resp[2];
        if (res == SMP_RESP_NO_INDEX) {
-               pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
+               pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
                        SAS_ADDR(dev->sas_addr), phy_id, index);
        }
 out:
@@ -1760,10 +1765,11 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
 
        res = sas_get_phy_discover(dev, phy_id, disc_resp);
        if (res == 0) {
-               memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
+               memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
+                      SAS_ADDR_SIZE);
                *type = to_dev_type(dr);
                if (*type == 0)
-                       memset(sas_addr, 0, 8);
+                       memset(sas_addr, 0, SAS_ADDR_SIZE);
        }
        kfree(disc_resp);
        return res;
@@ -1870,10 +1876,12 @@ static int sas_find_bcast_dev(struct domain_device *dev,
                if (phy_id != -1) {
                        *src_dev = dev;
                        ex->ex_change_count = ex_change_count;
-                       pr_info("Expander phy change count has changed\n");
+                       pr_info("ex %016llx phy%02d change count has changed\n",
+                               SAS_ADDR(dev->sas_addr), phy_id);
                        return res;
                } else
-                       pr_info("Expander phys DID NOT change\n");
+                       pr_info("ex %016llx phys DID NOT change\n",
+                               SAS_ADDR(dev->sas_addr));
        }
        list_for_each_entry(ch, &ex->children, siblings) {
                if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
@@ -1983,7 +1991,7 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        struct domain_device *child;
        int res;
 
-       pr_debug("ex %016llx phy%d new device attached\n",
+       pr_debug("ex %016llx phy%02d new device attached\n",
                 SAS_ADDR(dev->sas_addr), phy_id);
        res = sas_ex_phy_discover(dev, phy_id);
        if (res)
@@ -2022,15 +2030,23 @@ static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
        return false;
 }
 
-static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
+static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
+                             bool last, int sibling)
 {
        struct expander_device *ex = &dev->ex_dev;
        struct ex_phy *phy = &ex->ex_phy[phy_id];
        enum sas_device_type type = SAS_PHY_UNUSED;
-       u8 sas_addr[8];
+       u8 sas_addr[SAS_ADDR_SIZE];
+       char msg[80] = "";
        int res;
 
-       memset(sas_addr, 0, 8);
+       if (!last)
+               sprintf(msg, ", part of a wide port with phy%02d", sibling);
+
+       pr_debug("ex %016llx rediscovering phy%02d%s\n",
+                SAS_ADDR(dev->sas_addr), phy_id, msg);
+
+       memset(sas_addr, 0, SAS_ADDR_SIZE);
        res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
        switch (res) {
        case SMP_RESP_NO_PHY:
@@ -2052,6 +2068,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
        if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
                phy->phy_state = PHY_EMPTY;
                sas_unregister_devs_sas_addr(dev, phy_id, last);
+               /*
+                * Even though the PHY is empty, for convenience we discover
+                * the PHY to update the PHY info, like negotiated linkrate.
+                */
+               sas_ex_phy_discover(dev, phy_id);
                return res;
        } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
                   dev_type_flutter(type, phy->attached_dev_type)) {
@@ -2062,13 +2083,13 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 
                if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
                        action = ", needs recovery";
-               pr_debug("ex %016llx phy 0x%x broadcast flutter%s\n",
+               pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
                         SAS_ADDR(dev->sas_addr), phy_id, action);
                return res;
        }
 
        /* we always have to delete the old device when we went here */
-       pr_info("ex %016llx phy 0x%x replace %016llx\n",
+       pr_info("ex %016llx phy%02d replace %016llx\n",
                SAS_ADDR(dev->sas_addr), phy_id,
                SAS_ADDR(phy->attached_sas_addr));
        sas_unregister_devs_sas_addr(dev, phy_id, last);
@@ -2098,7 +2119,7 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
        int i;
        bool last = true;       /* is this the last phy of the port */
 
-       pr_debug("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
+       pr_debug("ex %016llx phy%02d originated BROADCAST(CHANGE)\n",
                 SAS_ADDR(dev->sas_addr), phy_id);
 
        if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
@@ -2109,13 +2130,11 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
                                continue;
                        if (SAS_ADDR(phy->attached_sas_addr) ==
                            SAS_ADDR(changed_phy->attached_sas_addr)) {
-                               pr_debug("phy%d part of wide port with phy%d\n",
-                                        phy_id, i);
                                last = false;
                                break;
                        }
                }
-               res = sas_rediscover_dev(dev, phy_id, last);
+               res = sas_rediscover_dev(dev, phy_id, last, i);
        } else
                res = sas_discover_new(dev, phy_id);
        return res;
index 221340e..28a460c 100644 (file)
@@ -87,25 +87,27 @@ EXPORT_SYMBOL_GPL(sas_free_task);
 /*------------ SAS addr hash -----------*/
 void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
 {
-        const u32 poly = 0x00DB2777;
-        u32     r = 0;
-        int     i;
-
-        for (i = 0; i < 8; i++) {
-                int b;
-                for (b = 7; b >= 0; b--) {
-                        r <<= 1;
-                        if ((1 << b) & sas_addr[i]) {
-                                if (!(r & 0x01000000))
-                                        r ^= poly;
-                        } else if (r & 0x01000000)
-                                r ^= poly;
-                }
-        }
-
-        hashed[0] = (r >> 16) & 0xFF;
-        hashed[1] = (r >> 8) & 0xFF ;
-        hashed[2] = r & 0xFF;
+       const u32 poly = 0x00DB2777;
+       u32 r = 0;
+       int i;
+
+       for (i = 0; i < SAS_ADDR_SIZE; i++) {
+               int b;
+
+               for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) {
+                       r <<= 1;
+                       if ((1 << b) & sas_addr[i]) {
+                               if (!(r & 0x01000000))
+                                       r ^= poly;
+                       } else if (r & 0x01000000) {
+                               r ^= poly;
+                       }
+               }
+       }
+
+       hashed[0] = (r >> 16) & 0xFF;
+       hashed[1] = (r >> 8) & 0xFF;
+       hashed[2] = r & 0xFF;
 }
 
 int sas_register_ha(struct sas_ha_struct *sas_ha)
@@ -623,7 +625,7 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
        if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
                if (i->dft->lldd_control_phy) {
                        if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
-                               pr_notice("The phy%02d bursting events, shut it down.\n",
+                               pr_notice("The phy%d bursting events, shut it down.\n",
                                          phy->id);
                                sas_notify_phy_event(phy, PHYE_SHUTDOWN);
                        }
index 0374243..e030e14 100644 (file)
@@ -122,11 +122,10 @@ static void sas_phye_shutdown(struct work_struct *work)
                phy->enabled = 0;
                ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
                if (ret)
-                       pr_notice("lldd disable phy%02d returned %d\n",
-                                 phy->id, ret);
+                       pr_notice("lldd disable phy%d returned %d\n", phy->id,
+                                 ret);
        } else
-               pr_notice("phy%02d is not enabled, cannot shutdown\n",
-                         phy->id);
+               pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id);
 }
 
 /* ---------- Phy class registration ---------- */
index 03fe479..38a1047 100644 (file)
@@ -95,6 +95,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
        int i;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct asd_sas_port *port = phy->port;
+       struct domain_device *port_dev;
        struct sas_internal *si =
                to_sas_internal(sas_ha->core.shost->transportt);
        unsigned long flags;
@@ -153,8 +154,9 @@ static void sas_form_port(struct asd_sas_phy *phy)
        }
 
        /* add the phy to the port */
+       port_dev = port->port_dev;
        list_add_tail(&phy->port_phy_el, &port->phy_list);
-       sas_phy_set_target(phy, port->port_dev);
+       sas_phy_set_target(phy, port_dev);
        phy->port = port;
        port->num_phys++;
        port->phy_mask |= (1U << phy->id);
@@ -184,14 +186,21 @@ static void sas_form_port(struct asd_sas_phy *phy)
                 port->phy_mask,
                 SAS_ADDR(port->attached_sas_addr));
 
-       if (port->port_dev)
-               port->port_dev->pathways = port->num_phys;
+       if (port_dev)
+               port_dev->pathways = port->num_phys;
 
        /* Tell the LLDD about this port formation. */
        if (si->dft->lldd_port_formed)
                si->dft->lldd_port_formed(phy);
 
        sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+       /* Only insert a revalidate event after initial discovery */
+       if (port_dev && sas_dev_type_is_expander(port_dev->dev_type)) {
+               struct expander_device *ex_dev = &port_dev->ex_dev;
+
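+               /* Set an impossible change count so the next revalidation rescans every expander PHY */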
+               ex_dev->ex_change_count = -1;
+               sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
+       }
        flush_workqueue(sas_ha->disco_q);
 }
 
@@ -254,6 +263,15 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
        spin_unlock(&port->phy_list_lock);
        spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
 
+       /* Only insert revalidate event if the port still has members */
+       if (port->port && dev && sas_dev_type_is_expander(dev->dev_type)) {
+               struct expander_device *ex_dev = &dev->ex_dev;
+
+               ex_dev->ex_change_count = -1;
+               sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
+       }
+       flush_workqueue(sas_ha->disco_q);
+
        return;
 }
 
index 41d849f..aafcffa 100644 (file)
@@ -942,6 +942,7 @@ struct lpfc_hba {
        int brd_no;                     /* FC board number */
        char SerialNumber[32];          /* adapter Serial Number */
        char OptionROMVersion[32];      /* adapter BIOS / Fcode version */
+       char BIOSVersion[16];           /* Boot BIOS version */
        char ModelDesc[256];            /* Model Description */
        char ModelName[80];             /* Model Name */
        char ProgramType[256];          /* Program Type */
index ce3e541..e9adb3f 100644 (file)
 #define LPFC_REG_WRITE_KEY_SIZE        4
 #define LPFC_REG_WRITE_KEY     "EMLX"
 
+const char *const trunk_errmsg[] = {   /* map errcode */
+       "",     /* There is no such error code at index 0*/
+       "link negotiated speed does not match existing"
+               " trunk - link was \"low\" speed",
+       "link negotiated speed does not match"
+               " existing trunk - link was \"middle\" speed",
+       "link negotiated speed does not match existing"
+               " trunk - link was \"high\" speed",
+       "Attached to non-trunking port - F_Port",
+       "Attached to non-trunking port - N_Port",
+       "FLOGI response timeout",
+       "non-FLOGI frame received",
+       "Invalid FLOGI response",
+       "Trunking initialization protocol",
+       "Trunk peer device mismatch",
+};
+
 /**
  * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
  * @incr: integer to convert.
@@ -114,7 +131,7 @@ static ssize_t
 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+       return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
 }
 
 /**
@@ -134,9 +151,9 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_hba   *phba = vport->phba;
 
        if (phba->hba_flag & HBA_FIP_SUPPORT)
-               return snprintf(buf, PAGE_SIZE, "1\n");
+               return scnprintf(buf, PAGE_SIZE, "1\n");
        else
-               return snprintf(buf, PAGE_SIZE, "0\n");
+               return scnprintf(buf, PAGE_SIZE, "0\n");
 }
 
 static ssize_t
@@ -564,14 +581,15 @@ lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       if (phba->cfg_enable_bg)
+       if (phba->cfg_enable_bg) {
                if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
-                       return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+                       return scnprintf(buf, PAGE_SIZE,
+                                       "BlockGuard Enabled\n");
                else
-                       return snprintf(buf, PAGE_SIZE,
+                       return scnprintf(buf, PAGE_SIZE,
                                        "BlockGuard Not Supported\n");
-       else
-                       return snprintf(buf, PAGE_SIZE,
+       } else
+               return scnprintf(buf, PAGE_SIZE,
                                        "BlockGuard Disabled\n");
 }
 
@@ -583,7 +601,7 @@ lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
+       return scnprintf(buf, PAGE_SIZE, "%llu\n",
                        (unsigned long long)phba->bg_guard_err_cnt);
 }
 
@@ -595,7 +613,7 @@ lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
+       return scnprintf(buf, PAGE_SIZE, "%llu\n",
                        (unsigned long long)phba->bg_apptag_err_cnt);
 }
 
@@ -607,7 +625,7 @@ lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
+       return scnprintf(buf, PAGE_SIZE, "%llu\n",
                        (unsigned long long)phba->bg_reftag_err_cnt);
 }
 
@@ -625,7 +643,7 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
 {
        struct Scsi_Host *host = class_to_shost(dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+       return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
 }
 
 /**
@@ -644,7 +662,7 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
 }
 
 /**
@@ -666,7 +684,7 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
-       return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
 }
 
 /**
@@ -685,7 +703,7 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
 }
 
 /**
@@ -704,7 +722,7 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
 }
 
 /**
@@ -723,7 +741,7 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
 }
 
 /**
@@ -741,7 +759,7 @@ lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
+       return scnprintf(buf, PAGE_SIZE, "%d\n",
                (phba->sli.sli_flag & LPFC_MENLO_MAINT));
 }
 
@@ -761,7 +779,7 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
 }
 
 /**
@@ -789,10 +807,10 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
        sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
 
        if (phba->sli_rev < LPFC_SLI_REV4)
-               len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+               len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
                               fwrev, phba->sli_rev);
        else
-               len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+               len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
                               fwrev, phba->sli_rev, if_type, sli_family);
 
        return len;
@@ -816,7 +834,7 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
        lpfc_vpd_t *vp = &phba->vpd;
 
        lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-       return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
 }
 
 /**
@@ -837,10 +855,11 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
        char fwrev[FW_REV_STR_SIZE];
 
        if (phba->sli_rev < LPFC_SLI_REV4)
-               return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+               return scnprintf(buf, PAGE_SIZE, "%s\n",
+                               phba->OptionROMVersion);
 
        lpfc_decode_firmware_rev(phba, fwrev, 1);
-       return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
 }
 
 /**
@@ -871,20 +890,20 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
        case LPFC_LINK_DOWN:
        case LPFC_HBA_ERROR:
                if (phba->hba_flag & LINK_DISABLED)
-                       len += snprintf(buf + len, PAGE_SIZE-len,
+                       len += scnprintf(buf + len, PAGE_SIZE-len,
                                "Link Down - User disabled\n");
                else
-                       len += snprintf(buf + len, PAGE_SIZE-len,
+                       len += scnprintf(buf + len, PAGE_SIZE-len,
                                "Link Down\n");
                break;
        case LPFC_LINK_UP:
        case LPFC_CLEAR_LA:
        case LPFC_HBA_READY:
-               len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+               len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
 
                switch (vport->port_state) {
                case LPFC_LOCAL_CFG_LINK:
-                       len += snprintf(buf + len, PAGE_SIZE-len,
+                       len += scnprintf(buf + len, PAGE_SIZE-len,
                                        "Configuring Link\n");
                        break;
                case LPFC_FDISC:
@@ -894,38 +913,40 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
                case LPFC_NS_QRY:
                case LPFC_BUILD_DISC_LIST:
                case LPFC_DISC_AUTH:
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "Discovery\n");
                        break;
                case LPFC_VPORT_READY:
-                       len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
+                                       "Ready\n");
                        break;
 
                case LPFC_VPORT_FAILED:
-                       len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
+                                       "Failed\n");
                        break;
 
                case LPFC_VPORT_UNKNOWN:
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "Unknown\n");
                        break;
                }
                if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
-                       len += snprintf(buf + len, PAGE_SIZE-len,
+                       len += scnprintf(buf + len, PAGE_SIZE-len,
                                        "   Menlo Maint Mode\n");
                else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        if (vport->fc_flag & FC_PUBLIC_LOOP)
-                               len += snprintf(buf + len, PAGE_SIZE-len,
+                               len += scnprintf(buf + len, PAGE_SIZE-len,
                                                "   Public Loop\n");
                        else
-                               len += snprintf(buf + len, PAGE_SIZE-len,
+                               len += scnprintf(buf + len, PAGE_SIZE-len,
                                                "   Private Loop\n");
                } else {
                        if (vport->fc_flag & FC_FABRIC)
-                               len += snprintf(buf + len, PAGE_SIZE-len,
+                               len += scnprintf(buf + len, PAGE_SIZE-len,
                                                "   Fabric\n");
                        else
-                               len += snprintf(buf + len, PAGE_SIZE-len,
+                               len += scnprintf(buf + len, PAGE_SIZE-len,
                                                "   Point-2-Point\n");
                }
        }
@@ -937,28 +958,28 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
                struct lpfc_trunk_link link = phba->trunk_link;
 
                if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                "Trunk port 0: Link %s %s\n",
                                (link.link0.state == LPFC_LINK_UP) ?
                                 "Up" : "Down. ",
                                trunk_errmsg[link.link0.fault]);
 
                if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                "Trunk port 1: Link %s %s\n",
                                (link.link1.state == LPFC_LINK_UP) ?
                                 "Up" : "Down. ",
                                trunk_errmsg[link.link1.fault]);
 
                if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                "Trunk port 2: Link %s %s\n",
                                (link.link2.state == LPFC_LINK_UP) ?
                                 "Up" : "Down. ",
                                trunk_errmsg[link.link2.fault]);
 
                if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                "Trunk port 3: Link %s %s\n",
                                (link.link3.state == LPFC_LINK_UP) ?
                                 "Up" : "Down. ",
@@ -986,15 +1007,15 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_hba *phba = vport->phba;
 
        if (phba->sli_rev < LPFC_SLI_REV4)
-               return snprintf(buf, PAGE_SIZE, "fc\n");
+               return scnprintf(buf, PAGE_SIZE, "fc\n");
 
        if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
                if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
-                       return snprintf(buf, PAGE_SIZE, "fcoe\n");
+                       return scnprintf(buf, PAGE_SIZE, "fcoe\n");
                if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
-                       return snprintf(buf, PAGE_SIZE, "fc\n");
+                       return scnprintf(buf, PAGE_SIZE, "fc\n");
        }
-       return snprintf(buf, PAGE_SIZE, "unknown\n");
+       return scnprintf(buf, PAGE_SIZE, "unknown\n");
 }
 
 /**
@@ -1014,7 +1035,7 @@ lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
+       return scnprintf(buf, PAGE_SIZE, "%d\n",
                        phba->sli4_hba.pc_sli4_params.oas_supported);
 }
 
@@ -1072,7 +1093,7 @@ lpfc_num_discovered_ports_show(struct device *dev,
        struct Scsi_Host  *shost = class_to_shost(dev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
+       return scnprintf(buf, PAGE_SIZE, "%d\n",
                        vport->fc_map_cnt + vport->fc_unmap_cnt);
 }
 
@@ -1204,6 +1225,20 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
 
        psli = &phba->sli;
 
+       /*
+        * If freeing of the queues has already started, don't access them.
+        * Otherwise set FREE_WAIT to indicate that the queues are in use,
+        * holding off the freeing process until we are finished.
+        */
+       spin_lock_irq(&phba->hbalock);
+       if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
+               psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
+       } else {
+               spin_unlock_irq(&phba->hbalock);
+               goto skip_wait;
+       }
+       spin_unlock_irq(&phba->hbalock);
+
        /* Wait a little for things to settle down, but not
         * long enough for dev loss timeout to expire.
         */
@@ -1225,6 +1260,11 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
                }
        }
 out:
+       spin_lock_irq(&phba->hbalock);
+       psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
+       spin_unlock_irq(&phba->hbalock);
+
+skip_wait:
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
        if (rc == 0)
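
The new LPFC_QUEUE_FREE_INIT and LPFC_QUEUE_FREE_WAIT flags form a two-sided handshake with the queue-freeing path, which is not part of this hunk. A plausible sketch of that counterpart, assuming it polls under hbalock until the offline path drops FREE_WAIT (the 20 ms interval is illustrative):

        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;     /* freeing has started */
        while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
                spin_unlock_irq(&phba->hbalock);
                msleep(20);                             /* assumed poll interval */
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);
        /* safe to tear down the hardware queues from here */
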
@@ -1258,7 +1298,7 @@ out:
  * -EBUSY,    port is not in offline state
  *      0,    successful
  */
-int
+static int
 lpfc_reset_pci_bus(struct lpfc_hba *phba)
 {
        struct pci_dev *pdev = phba->pcidev;
@@ -1586,10 +1626,10 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
 }
 
-int
+static int
 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
 {
        LPFC_MBOXQ_t *mbox = NULL;
@@ -1675,7 +1715,7 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
        else
                state = "online";
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", state);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", state);
 }
 
 /**
@@ -1901,8 +1941,8 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
        uint32_t cnt;
 
        if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
-               return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-       return snprintf(buf, PAGE_SIZE, "Unknown\n");
+               return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+       return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1929,8 +1969,8 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
        uint32_t cnt, acnt;
 
        if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
-               return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-       return snprintf(buf, PAGE_SIZE, "Unknown\n");
+               return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+       return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1957,8 +1997,8 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
        uint32_t cnt;
 
        if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
-               return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-       return snprintf(buf, PAGE_SIZE, "Unknown\n");
+               return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+       return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -1985,8 +2025,8 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
        uint32_t cnt, acnt;
 
        if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
-               return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-       return snprintf(buf, PAGE_SIZE, "Unknown\n");
+               return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+       return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -2013,8 +2053,8 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
        uint32_t cnt;
 
        if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
-               return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
-       return snprintf(buf, PAGE_SIZE, "Unknown\n");
+               return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
+       return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -2041,8 +2081,8 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
        uint32_t cnt, acnt;
 
        if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
-               return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
-       return snprintf(buf, PAGE_SIZE, "Unknown\n");
+               return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+       return scnprintf(buf, PAGE_SIZE, "Unknown\n");
 }
 
 /**
@@ -2067,10 +2107,10 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_hba   *phba = vport->phba;
 
        if (!(phba->max_vpi))
-               return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+               return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
        if (vport->port_type == LPFC_PHYSICAL_PORT)
-               return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
-       return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+               return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+       return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
 }
 
 /**
@@ -2092,7 +2132,7 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+       return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
 }
 
 /**
@@ -2196,7 +2236,7 @@ lpfc_fips_level_show(struct device *dev,  struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
 }
 
 /**
@@ -2215,7 +2255,7 @@ lpfc_fips_rev_show(struct device *dev,  struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
 }
 
 /**
@@ -2234,7 +2274,7 @@ lpfc_dss_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+       return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
                        (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
                        (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
                                "" : "Not ");
@@ -2263,7 +2303,7 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
        uint16_t max_nr_virtfn;
 
        max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
-       return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
 }
 
 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
@@ -2323,7 +2363,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
        struct Scsi_Host  *shost = class_to_shost(dev);\
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
        struct lpfc_hba   *phba = vport->phba;\
-       return snprintf(buf, PAGE_SIZE, "%d\n",\
+       return scnprintf(buf, PAGE_SIZE, "%d\n",\
                        phba->cfg_##attr);\
 }
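
Each of these hunks sits inside a macro that stamps out one sysfs show routine per configuration parameter through token pasting. For a hypothetical attribute named foo, the macro above would expand to roughly the following (the storage class and return type sit outside the hunk and are assumed):

        static ssize_t
        lpfc_foo_show(struct device *dev, struct device_attribute *attr, char *buf)
        {
                struct Scsi_Host  *shost = class_to_shost(dev);
                struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
                struct lpfc_hba   *phba = vport->phba;

                return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_foo);
        }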
 
@@ -2351,7 +2391,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
        struct lpfc_hba   *phba = vport->phba;\
        uint val = 0;\
        val = phba->cfg_##attr;\
-       return snprintf(buf, PAGE_SIZE, "%#x\n",\
+       return scnprintf(buf, PAGE_SIZE, "%#x\n",\
                        phba->cfg_##attr);\
 }
 
@@ -2487,7 +2527,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
        struct Scsi_Host  *shost = class_to_shost(dev);\
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-       return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+       return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 }
 
 /**
@@ -2512,7 +2552,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
        struct Scsi_Host  *shost = class_to_shost(dev);\
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-       return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+       return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 }
 
 /**
@@ -2784,7 +2824,7 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 
-       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+       return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
                        (unsigned long long)phba->cfg_soft_wwpn);
 }
 
@@ -2881,7 +2921,7 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
 {
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+       return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
                        (unsigned long long)phba->cfg_soft_wwnn);
 }
 
@@ -2947,7 +2987,7 @@ lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+       return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
                        wwn_to_u64(phba->cfg_oas_tgt_wwpn));
 }
 
@@ -3015,7 +3055,7 @@ lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
 }
 
 /**
@@ -3078,7 +3118,7 @@ lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+       return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
                        wwn_to_u64(phba->cfg_oas_vpt_wwpn));
 }
 
@@ -3149,7 +3189,7 @@ lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
 }
 
 /**
@@ -3213,7 +3253,7 @@ lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
        if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
                return -EFAULT;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
 }
 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
                   lpfc_oas_lun_status_show, NULL);
@@ -3365,7 +3405,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
        if (oas_lun != NOT_OAS_ENABLED_LUN)
                phba->cfg_oas_flags |= OAS_LUN_VALID;
 
-       len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+       len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
 
        return len;
 }
@@ -3499,7 +3539,7 @@ lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct Scsi_Host  *shost = class_to_shost(dev);
        struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
 }
 
 static DEVICE_ATTR(iocb_hw, S_IRUGO,
@@ -3511,7 +3551,7 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
        struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
+       return scnprintf(buf, PAGE_SIZE, "%d\n",
                        pring ? pring->txq_max : 0);
 }
 
@@ -3525,7 +3565,7 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
        struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
+       return scnprintf(buf, PAGE_SIZE, "%d\n",
                        pring ? pring->txcmplq_max : 0);
 }
 
@@ -3561,7 +3601,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host  *shost = class_to_shost(dev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
 }
 
 /**
@@ -4050,9 +4090,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                }
                if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
                     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
-                   val == 4) {
+                   val != FLAGS_TOPOLOGY_MODE_PT_PT) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                               "3114 Loop mode not supported\n");
+                               "3114 Only non-FC-AL mode is supported\n");
                        return -EINVAL;
                }
                phba->cfg_topology = val;
@@ -5169,12 +5209,12 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
 
        switch (phba->cfg_fcp_cpu_map) {
        case 0:
-               len += snprintf(buf + len, PAGE_SIZE-len,
+               len += scnprintf(buf + len, PAGE_SIZE-len,
                                "fcp_cpu_map: No mapping (%d)\n",
                                phba->cfg_fcp_cpu_map);
                return len;
        case 1:
-               len += snprintf(buf + len, PAGE_SIZE-len,
+               len += scnprintf(buf + len, PAGE_SIZE-len,
                                "fcp_cpu_map: HBA centric mapping (%d): "
                                "%d of %d CPUs online from %d possible CPUs\n",
                                phba->cfg_fcp_cpu_map, num_online_cpus(),
@@ -5188,12 +5228,12 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
                cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
 
                if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "CPU %02d not present\n",
                                        phba->sli4_hba.curr_disp_cpu);
                else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
                        if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
-                               len += snprintf(
+                               len += scnprintf(
                                        buf + len, PAGE_SIZE - len,
                                        "CPU %02d hdwq None "
                                        "physid %d coreid %d ht %d\n",
@@ -5201,7 +5241,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
                                        cpup->phys_id,
                                        cpup->core_id, cpup->hyper);
                        else
-                               len += snprintf(
+                               len += scnprintf(
                                        buf + len, PAGE_SIZE - len,
                                        "CPU %02d EQ %04d hdwq %04d "
                                        "physid %d coreid %d ht %d\n",
@@ -5210,7 +5250,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
                                        cpup->core_id, cpup->hyper);
                } else {
                        if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
-                               len += snprintf(
+                               len += scnprintf(
                                        buf + len, PAGE_SIZE - len,
                                        "CPU %02d hdwq None "
                                        "physid %d coreid %d ht %d IRQ %d\n",
@@ -5218,7 +5258,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
                                        cpup->phys_id,
                                        cpup->core_id, cpup->hyper, cpup->irq);
                        else
-                               len += snprintf(
+                               len += scnprintf(
                                        buf + len, PAGE_SIZE - len,
                                        "CPU %02d EQ %04d hdwq %04d "
                                        "physid %d coreid %d ht %d IRQ %d\n",
@@ -5233,7 +5273,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
                if (phba->sli4_hba.curr_disp_cpu <
                                phba->sli4_hba.num_possible_cpu &&
                                (len >= (PAGE_SIZE - 64))) {
-                       len += snprintf(buf + len,
+                       len += scnprintf(buf + len,
                                        PAGE_SIZE - len, "more...\n");
                        break;
                }
@@ -5753,10 +5793,10 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
        struct lpfc_hba   *phba = vport->phba;
        int len;
 
-       len = snprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
+       len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
                       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
 
-       len += snprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+       len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
                        phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
                        phba->cfg_nvme_seg_cnt);
        return len;
@@ -6755,7 +6795,7 @@ lpfc_show_rport_##field (struct device *dev,                              \
 {                                                                      \
        struct fc_rport *rport = transport_class_to_rport(dev);         \
        struct lpfc_rport_data *rdata = rport->hostdata;                \
-       return snprintf(buf, sz, format_string,                         \
+       return scnprintf(buf, sz, format_string,                        \
                (rdata->target) ? cast rdata->target->field : 0);       \
 }
 
@@ -7003,6 +7043,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        if (phba->sli_rev != LPFC_SLI_REV4) {
                /* NVME only supported on SLI4 */
                phba->nvmet_support = 0;
+               phba->cfg_nvmet_mrq = 0;
                phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
                phba->cfg_enable_bbcr = 0;
                phba->cfg_xri_rebalancing = 0;
@@ -7104,7 +7145,7 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
        } else {
                /* Not NVME Target mode.  Turn off Target parameters. */
                phba->nvmet_support = 0;
-               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
+               phba->cfg_nvmet_mrq = 0;
                phba->cfg_nvmet_fb_size = 0;
        }
 }
index f2494d3..b0202bc 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1968,14 +1968,17 @@ link_diag_state_set_out:
 }
 
 /**
- * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
  * @phba: Pointer to HBA context object.
+ * @mode: loopback mode to set
+ * @link_no: link number for loopback mode to set
  *
  * This function is responsible for issuing a sli4 mailbox command for setting
- * up internal loopback diagnostic.
+ * up loopback diagnostic for a link.
  */
 static int
-lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
+                               uint32_t link_no)
 {
        LPFC_MBOXQ_t *pmboxq;
        uint32_t req_len, alloc_len;
@@ -1996,11 +1999,19 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
        }
        link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
        bf_set(lpfc_mbx_set_diag_state_link_num,
-              &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
-       bf_set(lpfc_mbx_set_diag_state_link_type,
-              &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+              &link_diag_loopback->u.req, link_no);
+
+       if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+               bf_set(lpfc_mbx_set_diag_state_link_type,
+                      &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
+       } else {
+               bf_set(lpfc_mbx_set_diag_state_link_type,
+                      &link_diag_loopback->u.req,
+                      phba->sli4_hba.lnk_info.lnk_tp);
+       }
+
        bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
-              LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+              mode);
 
        mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
        if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -2054,7 +2065,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        struct diag_mode_set *loopback_mode;
-       uint32_t link_flags, timeout;
+       uint32_t link_flags, timeout, link_no;
        int i, rc = 0;
 
        /* no data to return just the return code */
@@ -2069,12 +2080,39 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
                                (int)(sizeof(struct fc_bsg_request) +
                                sizeof(struct diag_mode_set)));
                rc = -EINVAL;
-               goto job_error;
+               goto job_done;
+       }
+
+       loopback_mode = (struct diag_mode_set *)
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
+       link_flags = loopback_mode->type;
+       timeout = loopback_mode->timeout * 100;
+
+       if (loopback_mode->physical_link == -1)
+               link_no = phba->sli4_hba.lnk_info.lnk_no;
+       else
+               link_no = loopback_mode->physical_link;
+
+       if (link_flags == DISABLE_LOOP_BACK) {
+               rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+                                       LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
+                                       link_no);
+               if (!rc) {
+                       /* Unset the need disable bit */
+                       phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
+               }
+               goto job_done;
+       } else {
+               /* Check if we need to disable the loopback state */
+               if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
+                       rc = -EPERM;
+                       goto job_done;
+               }
        }
 
        rc = lpfc_bsg_diag_mode_enter(phba);
        if (rc)
-               goto job_error;
+               goto job_done;
 
        /* indicate we are in loopback diagnostic mode */
        spin_lock_irq(&phba->hbalock);
@@ -2084,15 +2122,11 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
        /* reset port to start from scratch */
        rc = lpfc_selective_reset(phba);
        if (rc)
-               goto job_error;
+               goto job_done;
 
        /* bring the link to diagnostic mode */
        lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
                        "3129 Bring link to diagnostic state.\n");
-       loopback_mode = (struct diag_mode_set *)
-               bsg_request->rqst_data.h_vendor.vendor_cmd;
-       link_flags = loopback_mode->type;
-       timeout = loopback_mode->timeout * 100;
 
        rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
        if (rc) {
@@ -2120,13 +2154,54 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
        lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
                        "3132 Set up loopback mode:x%x\n", link_flags);
 
-       if (link_flags == INTERNAL_LOOP_BACK)
-               rc = lpfc_sli4_bsg_set_internal_loopback(phba);
-       else if (link_flags == EXTERNAL_LOOP_BACK)
-               rc = lpfc_hba_init_link_fc_topology(phba,
-                                                   FLAGS_TOPOLOGY_MODE_PT_PT,
-                                                   MBX_NOWAIT);
-       else {
+       switch (link_flags) {
+       case INTERNAL_LOOP_BACK:
+               if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+                       rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+                                       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+                                       link_no);
+               } else {
+                       /* Trunk is configured, but link is not in this trunk */
+                       if (phba->sli4_hba.conf_trunk) {
+                               rc = -ELNRNG;
+                               goto loopback_mode_exit;
+                       }
+
+                       rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+                                       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+                                       link_no);
+               }
+
+               if (!rc) {
+                       /* Set the need disable bit */
+                       phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+               }
+
+               break;
+       case EXTERNAL_LOOP_BACK:
+               if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+                       rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+                               LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
+                               link_no);
+               } else {
+                       /* Trunk is configured, but link is not in this trunk */
+                       if (phba->sli4_hba.conf_trunk) {
+                               rc = -ELNRNG;
+                               goto loopback_mode_exit;
+                       }
+
+                       rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+                                               LPFC_DIAG_LOOPBACK_TYPE_SERDES,
+                                               link_no);
+               }
+
+               if (!rc) {
+                       /* Set the need disable bit */
+                       phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+               }
+
+               break;
+       default:
                rc = -EINVAL;
                lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
                                "3141 Loopback mode:x%x not supported\n",
@@ -2185,7 +2260,7 @@ loopback_mode_exit:
        }
        lpfc_bsg_diag_mode_exit(phba);
 
-job_error:
+job_done:
        /* make error code available to userspace */
        bsg_reply->result = rc;
        /* complete the job back to userspace if no error */
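
Both loopback cases above consult phba->sli4_hba.conf_trunk twice: the low bits say which links belong to the configured trunk, and the same bit shifted left by four records that loopback was engaged on a link and still needs an explicit DISABLE_LOOP_BACK. Two hypothetical helpers that only restate that assumed convention:

        /* Assumed meaning of the conf_trunk bits as used in this file:
         * bits 0-3: link is a member of the configured trunk
         * bits 4-7: loopback engaged on that link, disable still pending
         */
        static inline bool lpfc_link_in_trunk(struct lpfc_hba *phba, uint32_t link_no)
        {
                return phba->sli4_hba.conf_trunk & (1 << link_no);
        }

        static inline bool lpfc_loopback_pending_disable(struct lpfc_hba *phba, uint32_t link_no)
        {
                return phba->sli4_hba.conf_trunk & ((1 << link_no) << 4);
        }
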
index 9151824..d170813 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2010-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -68,6 +68,7 @@ struct send_mgmt_resp {
 };
 
 
+#define DISABLE_LOOP_BACK  0x0 /* disables loop back */
 #define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
 #define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
 
@@ -75,6 +76,7 @@ struct diag_mode_set {
        uint32_t command;
        uint32_t type;
        uint32_t timeout;
+       uint32_t physical_link;
 };
 
 struct sli4_link_diag {
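
The new physical_link member lets a diagnostic request target one specific link; a value of -1 keeps the previous behaviour of using the adapter's default link (sli4_hba.lnk_info.lnk_no). A hypothetical fill of the structure from the requesting side, with the vendor command code left unset because its value is not shown here:

        struct diag_mode_set dms = {
                .type          = INTERNAL_LOOP_BACK,
                .timeout       = 60,    /* the driver multiplies this by 100 */
                .physical_link = -1,    /* -1: use the default link */
        };
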
index 44f4263..4812bbb 100644
@@ -886,7 +886,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        }
        if (lpfc_error_lost_link(irsp)) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-                                "4101 NS query failed due to link event\n");
+                                "4166 NS query failed due to link event\n");
                if (vport->fc_flag & FC_RSCN_MODE)
                        lpfc_els_flush_rscn(vport);
                goto out;
@@ -907,7 +907,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * Re-issue the NS cmd
                 */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                                "4102 Process Deferred RSCN Data: x%x x%x\n",
+                                "4167 Process Deferred RSCN Data: x%x x%x\n",
                                 vport->fc_flag, vport->fc_rscn_id_cnt);
                lpfc_els_handle_rscn(vport);
 
@@ -1430,7 +1430,7 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
         * Name object.  NPIV is not in play so this integer
         * value is sufficient and unique per FC-ID.
         */
-       n = snprintf(symbol, size, "%d", vport->phba->brd_no);
+       n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
        return n;
 }
 
@@ -1444,26 +1444,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
 
        lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
 
-       n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+       n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
        if (size < n)
                return n;
 
-       n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+       n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
        if (size < n)
                return n;
 
-       n += snprintf(symbol + n, size - n, " DV%s.",
+       n += scnprintf(symbol + n, size - n, " DV%s.",
                      lpfc_release_version);
        if (size < n)
                return n;
 
-       n += snprintf(symbol + n, size - n, " HN:%s.",
+       n += scnprintf(symbol + n, size - n, " HN:%s.",
                      init_utsname()->nodename);
        if (size < n)
                return n;
 
        /* Note :- OS name is "Linux" */
-       n += snprintf(symbol + n, size - n, " OS:%s\n",
+       n += scnprintf(symbol + n, size - n, " OS:%s",
                      init_utsname()->sysname);
        return n;
 }
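
These chained appends are where the scnprintf() return value matters most: each call advances n only by the bytes actually stored, so symbol + n and size - n stay valid even when the output truncates, whereas snprintf() could push n past size and turn size - n into a huge unsigned length. A minimal sketch of the same pattern with illustrative strings:

        char symbol[64];
        size_t size = sizeof(symbol);
        int n = 0;

        n += scnprintf(symbol + n, size - n, "Emulex %s", "LPe32000");   /* model name is illustrative */
        n += scnprintf(symbol + n, size - n, " FV%s", "12.2.0.0");       /* firmware rev is illustrative */
        n += scnprintf(symbol + n, size - n, " DV%s.", "12.2.0.0");
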
@@ -2005,8 +2005,11 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
        ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
        memset(ae, 0, 256);
 
+       /* This string MUST be consistent with other FC platforms
+        * supported by Broadcom.
+        */
        strncpy(ae->un.AttrString,
-               "Broadcom Inc.",
+               "Emulex Corporation",
                       sizeof(ae->un.AttrString));
        len = strnlen(ae->un.AttrString,
                          sizeof(ae->un.AttrString));
@@ -2301,7 +2304,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
        ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
        memset(ae, 0, 256);
 
-       lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+       strlcat(ae->un.AttrString, phba->BIOSVersion,
+               sizeof(ae->un.AttrString));
        len = strnlen(ae->un.AttrString,
                          sizeof(ae->un.AttrString));
        len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -2360,10 +2364,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
        ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
        memset(ae, 0, 32);
 
-       ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
-       ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
-       ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
-       ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+       ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+       ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+       if (vport->nvmei_support || vport->phba->nvmet_support)
+               ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+       ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
        size = FOURBYTES + 32;
        ad->AttrLen = cpu_to_be16(size);
        ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
@@ -2673,9 +2678,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
        ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
        memset(ae, 0, 32);
 
-       ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
-       ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
-       ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
+       ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+       ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+       if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+               ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+       ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
        size = FOURBYTES + 32;
        ad->AttrLen = cpu_to_be16(size);
        ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
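
The AttrTypes[] indices and bit values in the two hunks above encode each FC-4 TYPE code into a 256-bit map laid out as big-endian 32-bit words: ELS (0x1) lands in AttrTypes[3] bit 1, FCP (0x8) in AttrTypes[2] bit 0, CT (0x20) in AttrTypes[7] bit 0, and NVME (0x28) in AttrTypes[6] bit 0. A hypothetical helper that reproduces exactly those placements; the word-swapped byte order is inferred from the values above, not quoted from the FDMI specification:

        static void fdmi_set_fc4_type(uint8_t *attr_types, unsigned int type)
        {
                unsigned int word = type / 32;                          /* 32 TYPE codes per word */
                unsigned int byte = 4 * word + 3 - ((type % 32) / 8);   /* big-endian within each word */

                attr_types[byte] |= 1 << (type % 8);
        }
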
index 1215eaa..1ee857d 100644
@@ -170,7 +170,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
                snprintf(buffer,
                        LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
                        dtp->seq_cnt, ms, dtp->fmt);
-               len +=  snprintf(buf+len, size-len, buffer,
+               len +=  scnprintf(buf+len, size-len, buffer,
                        dtp->data1, dtp->data2, dtp->data3);
        }
        for (i = 0; i < index; i++) {
@@ -181,7 +181,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
                snprintf(buffer,
                        LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
                        dtp->seq_cnt, ms, dtp->fmt);
-               len +=  snprintf(buf+len, size-len, buffer,
+               len +=  scnprintf(buf+len, size-len, buffer,
                        dtp->data1, dtp->data2, dtp->data3);
        }
 
@@ -236,7 +236,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
                snprintf(buffer,
                        LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
                        dtp->seq_cnt, ms, dtp->fmt);
-               len +=  snprintf(buf+len, size-len, buffer,
+               len +=  scnprintf(buf+len, size-len, buffer,
                        dtp->data1, dtp->data2, dtp->data3);
        }
        for (i = 0; i < index; i++) {
@@ -247,7 +247,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
                snprintf(buffer,
                        LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
                        dtp->seq_cnt, ms, dtp->fmt);
-               len +=  snprintf(buf+len, size-len, buffer,
+               len +=  scnprintf(buf+len, size-len, buffer,
                        dtp->data1, dtp->data2, dtp->data3);
        }
 
@@ -307,7 +307,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 
        i = lpfc_debugfs_last_hbq;
 
-       len +=  snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+       len +=  scnprintf(buf+len, size-len, "HBQ %d Info\n", i);
 
        hbqs =  &phba->hbqs[i];
        posted = 0;
@@ -315,21 +315,21 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
                posted++;
 
        hip =  lpfc_hbq_defs[i];
-       len +=  snprintf(buf+len, size-len,
+       len +=  scnprintf(buf+len, size-len,
                "idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
                hip->hbq_index, hip->profile, hip->rn,
                hip->buffer_count, hip->init_count, hip->add_count, posted);
 
        raw_index = phba->hbq_get[i];
        getidx = le32_to_cpu(raw_index);
-       len +=  snprintf(buf+len, size-len,
+       len +=  scnprintf(buf+len, size-len,
                "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
                hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
                hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
 
        hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
        for (j=0; j<hbqs->entry_count; j++) {
-               len +=  snprintf(buf+len, size-len,
+               len +=  scnprintf(buf+len, size-len,
                        "%03d: %08x %04x %05x ", j,
                        le32_to_cpu(hbqe->bde.addrLow),
                        le32_to_cpu(hbqe->bde.tus.w),
@@ -341,14 +341,16 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
                low = hbqs->hbqPutIdx - posted;
                if (low >= 0) {
                        if ((j >= hbqs->hbqPutIdx) || (j < low)) {
-                               len +=  snprintf(buf+len, size-len, "Unused\n");
+                               len +=  scnprintf(buf + len, size - len,
+                                               "Unused\n");
                                goto skipit;
                        }
                }
                else {
                        if ((j >= hbqs->hbqPutIdx) &&
                                (j < (hbqs->entry_count+low))) {
-                               len +=  snprintf(buf+len, size-len, "Unused\n");
+                               len +=  scnprintf(buf + len, size - len,
+                                               "Unused\n");
                                goto skipit;
                        }
                }
@@ -358,7 +360,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
                        hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
                        phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
                        if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
-                               len +=  snprintf(buf+len, size-len,
+                               len +=  scnprintf(buf+len, size-len,
                                        "Buf%d: %p %06x\n", i,
                                        hbq_buf->dbuf.virt, hbq_buf->tag);
                                found = 1;
@@ -367,7 +369,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
                        i++;
                }
                if (!found) {
-                       len +=  snprintf(buf+len, size-len, "No DMAinfo?\n");
+                       len +=  scnprintf(buf+len, size-len, "No DMAinfo?\n");
                }
 skipit:
                hbqe++;
@@ -413,14 +415,14 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
                        break;
                qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
 
-               len +=  snprintf(buf + len, size - len, "HdwQ %d Info ", i);
+               len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
                spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
                spin_lock(&qp->abts_nvme_buf_list_lock);
                spin_lock(&qp->io_buf_list_get_lock);
                spin_lock(&qp->io_buf_list_put_lock);
                out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
                        qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
-               len +=  snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                 "tot:%d get:%d put:%d mt:%d "
                                 "ABTS scsi:%d nvme:%d Out:%d\n",
                        qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
@@ -612,9 +614,9 @@ lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
                        break;
                qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];
 
-               len +=  snprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
+               len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
                if (phba->cfg_xri_rebalancing) {
-                       len +=  snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                         "get_pvt:%d mv_pvt:%d "
                                         "mv2pub:%d mv2pvt:%d "
                                         "put_pvt:%d put_pub:%d wq:%d\n",
@@ -626,7 +628,7 @@ lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
                                         qp->lock_conflict.free_pub_pool,
                                         qp->lock_conflict.wq_access);
                } else {
-                       len +=  snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                         "get:%d put:%d free:%d wq:%d\n",
                                         qp->lock_conflict.alloc_xri_get,
                                         qp->lock_conflict.alloc_xri_put,
@@ -678,7 +680,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
        off = 0;
        spin_lock_irq(&phba->hbalock);
 
-       len +=  snprintf(buf+len, size-len, "HBA SLIM\n");
+       len +=  scnprintf(buf+len, size-len, "HBA SLIM\n");
        lpfc_memcpy_from_slim(buffer,
                phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
 
@@ -692,7 +694,7 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
 
        i = 1024;
        while (i > 0) {
-               len +=  snprintf(buf+len, size-len,
+               len +=  scnprintf(buf+len, size-len,
                "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
                *(ptr+5), *(ptr+6), *(ptr+7));
@@ -736,11 +738,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
        off = 0;
        spin_lock_irq(&phba->hbalock);
 
-       len +=  snprintf(buf+len, size-len, "SLIM Mailbox\n");
+       len +=  scnprintf(buf+len, size-len, "SLIM Mailbox\n");
        ptr = (uint32_t *)phba->slim2p.virt;
        i = sizeof(MAILBOX_t);
        while (i > 0) {
-               len +=  snprintf(buf+len, size-len,
+               len +=  scnprintf(buf+len, size-len,
                "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
                *(ptr+5), *(ptr+6), *(ptr+7));
@@ -749,11 +751,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
                off += (8 * sizeof(uint32_t));
        }
 
-       len +=  snprintf(buf+len, size-len, "SLIM PCB\n");
+       len +=  scnprintf(buf+len, size-len, "SLIM PCB\n");
        ptr = (uint32_t *)phba->pcb;
        i = sizeof(PCB_t);
        while (i > 0) {
-               len +=  snprintf(buf+len, size-len,
+               len +=  scnprintf(buf+len, size-len,
                "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
                *(ptr+5), *(ptr+6), *(ptr+7));
@@ -766,7 +768,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
                for (i = 0; i < 4; i++) {
                        pgpp = &phba->port_gp[i];
                        pring = &psli->sli3_ring[i];
-                       len +=  snprintf(buf+len, size-len,
+                       len +=  scnprintf(buf+len, size-len,
                                         "Ring %d: CMD GetInx:%d "
                                         "(Max:%d Next:%d "
                                         "Local:%d flg:x%x)  "
@@ -783,7 +785,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
                word1 = readl(phba->CAregaddr);
                word2 = readl(phba->HSregaddr);
                word3 = readl(phba->HCregaddr);
-               len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+               len +=  scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
                                 "HC:%08x\n", word0, word1, word2, word3);
        }
        spin_unlock_irq(&phba->hbalock);
@@ -821,12 +823,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
        outio = 0;
 
-       len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
+       len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
        spin_lock_irq(shost->host_lock);
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                iocnt = 0;
                if (!cnt) {
-                       len +=  snprintf(buf+len, size-len,
+                       len +=  scnprintf(buf+len, size-len,
                                "Missing Nodelist Entries\n");
                        break;
                }
@@ -864,63 +866,63 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                default:
                        statep = "UNKNOWN";
                }
-               len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+               len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
                                statep, ndlp->nlp_DID);
-               len += snprintf(buf+len, size-len,
+               len += scnprintf(buf+len, size-len,
                                "WWPN x%llx ",
                                wwn_to_u64(ndlp->nlp_portname.u.wwn));
-               len += snprintf(buf+len, size-len,
+               len += scnprintf(buf+len, size-len,
                                "WWNN x%llx ",
                                wwn_to_u64(ndlp->nlp_nodename.u.wwn));
                if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
-                       len += snprintf(buf+len, size-len, "RPI:%03d ",
+                       len += scnprintf(buf+len, size-len, "RPI:%03d ",
                                        ndlp->nlp_rpi);
                else
-                       len += snprintf(buf+len, size-len, "RPI:none ");
-               len +=  snprintf(buf+len, size-len, "flag:x%08x ",
+                       len += scnprintf(buf+len, size-len, "RPI:none ");
+               len +=  scnprintf(buf+len, size-len, "flag:x%08x ",
                        ndlp->nlp_flag);
                if (!ndlp->nlp_type)
-                       len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+                       len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
                if (ndlp->nlp_type & NLP_FC_NODE)
-                       len += snprintf(buf+len, size-len, "FC_NODE ");
+                       len += scnprintf(buf+len, size-len, "FC_NODE ");
                if (ndlp->nlp_type & NLP_FABRIC) {
-                       len += snprintf(buf+len, size-len, "FABRIC ");
+                       len += scnprintf(buf+len, size-len, "FABRIC ");
                        iocnt = 0;
                }
                if (ndlp->nlp_type & NLP_FCP_TARGET)
-                       len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+                       len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ",
                                ndlp->nlp_sid);
                if (ndlp->nlp_type & NLP_FCP_INITIATOR)
-                       len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+                       len += scnprintf(buf+len, size-len, "FCP_INITIATOR ");
                if (ndlp->nlp_type & NLP_NVME_TARGET)
-                       len += snprintf(buf + len,
+                       len += scnprintf(buf + len,
                                        size - len, "NVME_TGT sid:%d ",
                                        NLP_NO_SID);
                if (ndlp->nlp_type & NLP_NVME_INITIATOR)
-                       len += snprintf(buf + len,
+                       len += scnprintf(buf + len,
                                        size - len, "NVME_INITIATOR ");
-               len += snprintf(buf+len, size-len, "usgmap:%x ",
+               len += scnprintf(buf+len, size-len, "usgmap:%x ",
                        ndlp->nlp_usg_map);
-               len += snprintf(buf+len, size-len, "refcnt:%x",
+               len += scnprintf(buf+len, size-len, "refcnt:%x",
                        kref_read(&ndlp->kref));
                if (iocnt) {
                        i = atomic_read(&ndlp->cmd_pending);
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        " OutIO:x%x Qdepth x%x",
                                        i, ndlp->cmd_qdepth);
                        outio += i;
                }
-               len += snprintf(buf + len, size - len, "defer:%x ",
+               len += scnprintf(buf + len, size - len, "defer:%x ",
                        ndlp->nlp_defer_did);
-               len +=  snprintf(buf+len, size-len, "\n");
+               len +=  scnprintf(buf+len, size-len, "\n");
        }
        spin_unlock_irq(shost->host_lock);
 
-       len += snprintf(buf + len, size - len,
+       len += scnprintf(buf + len, size - len,
                        "\nOutstanding IO x%x\n",  outio);
 
        if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "\nNVME Targetport Entry ...\n");
 
                /* Port state is only one of two values for now. */
@@ -928,18 +930,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                        statep = "REGISTERED";
                else
                        statep = "INIT";
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "TGT WWNN x%llx WWPN x%llx State %s\n",
                                wwn_to_u64(vport->fc_nodename.u.wwn),
                                wwn_to_u64(vport->fc_portname.u.wwn),
                                statep);
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "    Targetport DID x%06x\n",
                                phba->targetport->port_id);
                goto out_exit;
        }
 
-       len += snprintf(buf + len, size - len,
+       len += scnprintf(buf + len, size - len,
                                "\nNVME Lport/Rport Entries ...\n");
 
        localport = vport->localport;
@@ -954,11 +956,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        else
                statep = "UNKNOWN ";
 
-       len += snprintf(buf + len, size - len,
+       len += scnprintf(buf + len, size - len,
                        "Lport DID x%06x PortState %s\n",
                        localport->port_id, statep);
 
-       len += snprintf(buf + len, size - len, "\tRport List:\n");
+       len += scnprintf(buf + len, size - len, "\tRport List:\n");
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                /* local short-hand pointer. */
                spin_lock(&phba->hbalock);
@@ -985,32 +987,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                }
 
                /* Tab in to show lport ownership. */
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "\t%s Port ID:x%06x ",
                                statep, nrport->port_id);
-               len += snprintf(buf + len, size - len, "WWPN x%llx ",
+               len += scnprintf(buf + len, size - len, "WWPN x%llx ",
                                nrport->port_name);
-               len += snprintf(buf + len, size - len, "WWNN x%llx ",
+               len += scnprintf(buf + len, size - len, "WWNN x%llx ",
                                nrport->node_name);
 
                /* An NVME rport can have multiple roles. */
                if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
-                       len +=  snprintf(buf + len, size - len,
+                       len +=  scnprintf(buf + len, size - len,
                                         "INITIATOR ");
                if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
-                       len +=  snprintf(buf + len, size - len,
+                       len +=  scnprintf(buf + len, size - len,
                                         "TARGET ");
                if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
-                       len +=  snprintf(buf + len, size - len,
+                       len +=  scnprintf(buf + len, size - len,
                                         "DISCSRVC ");
                if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
                                          FC_PORT_ROLE_NVME_TARGET |
                                          FC_PORT_ROLE_NVME_DISCOVERY))
-                       len +=  snprintf(buf + len, size - len,
+                       len +=  scnprintf(buf + len, size - len,
                                         "UNKNOWN ROLE x%x",
                                         nrport->port_role);
                /* Terminate the string. */
-               len +=  snprintf(buf + len, size - len, "\n");
+               len +=  scnprintf(buf + len, size - len, "\n");
        }
 
        spin_unlock_irq(shost->host_lock);
@@ -1049,35 +1051,35 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                if (!phba->targetport)
                        return len;
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "\nNVME Targetport Statistics\n");
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "LS: Rcv %08x Drop %08x Abort %08x\n",
                                atomic_read(&tgtp->rcv_ls_req_in),
                                atomic_read(&tgtp->rcv_ls_req_drop),
                                atomic_read(&tgtp->xmt_ls_abort));
                if (atomic_read(&tgtp->rcv_ls_req_in) !=
                    atomic_read(&tgtp->rcv_ls_req_out)) {
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "Rcv LS: in %08x != out %08x\n",
                                        atomic_read(&tgtp->rcv_ls_req_in),
                                        atomic_read(&tgtp->rcv_ls_req_out));
                }
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "LS: Xmt %08x Drop %08x Cmpl %08x\n",
                                atomic_read(&tgtp->xmt_ls_rsp),
                                atomic_read(&tgtp->xmt_ls_drop),
                                atomic_read(&tgtp->xmt_ls_rsp_cmpl));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "LS: RSP Abort %08x xb %08x Err %08x\n",
                                atomic_read(&tgtp->xmt_ls_rsp_aborted),
                                atomic_read(&tgtp->xmt_ls_rsp_xb_set),
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "FCP: Rcv %08x Defer %08x Release %08x "
                                "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
@@ -1087,13 +1089,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
                    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "Rcv FCP: in %08x != out %08x\n",
                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                                        atomic_read(&tgtp->rcv_fcp_cmd_out));
                }
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "FCP Rsp: read %08x readrsp %08x "
                                "write %08x rsp %08x\n",
                                atomic_read(&tgtp->xmt_fcp_read),
@@ -1101,31 +1103,31 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_fcp_write),
                                atomic_read(&tgtp->xmt_fcp_rsp));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
                                atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
                                atomic_read(&tgtp->xmt_fcp_rsp_error),
                                atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "FCP Rsp Abort: %08x xb %08x xricqe  %08x\n",
                                atomic_read(&tgtp->xmt_fcp_rsp_aborted),
                                atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
                                atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "ABORT: Xmt %08x Cmpl %08x\n",
                                atomic_read(&tgtp->xmt_fcp_abort),
                                atomic_read(&tgtp->xmt_fcp_abort_cmpl));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
                                atomic_read(&tgtp->xmt_abort_sol),
                                atomic_read(&tgtp->xmt_abort_unsol),
                                atomic_read(&tgtp->xmt_abort_rsp),
                                atomic_read(&tgtp->xmt_abort_rsp_error));
 
-               len +=  snprintf(buf + len, size - len, "\n");
+               len +=  scnprintf(buf + len, size - len, "\n");
 
                cnt = 0;
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
@@ -1136,7 +1138,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                }
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                if (cnt) {
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "ABORT: %d ctx entries\n", cnt);
                        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                        list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1144,7 +1146,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                    list) {
                                if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
                                        break;
-                               len += snprintf(buf + len, size - len,
+                               len += scnprintf(buf + len, size - len,
                                                "Entry: oxid %x state %x "
                                                "flag %x\n",
                                                ctxp->oxid, ctxp->state,
@@ -1158,7 +1160,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                tot += atomic_read(&tgtp->xmt_fcp_release);
                tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
                                "CTX Outstanding %08llx\n",
                                phba->sli4_hba.nvmet_xri_cnt,
@@ -1176,10 +1178,10 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                if (!lport)
                        return len;
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "\nNVME HDWQ Statistics\n");
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "LS: Xmt %016x Cmpl %016x\n",
                                atomic_read(&lport->fc4NvmeLsRequests),
                                atomic_read(&lport->fc4NvmeLsCmpls));
@@ -1199,20 +1201,20 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                        if (i >= 32)
                                continue;
 
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "HDWQ (%d): Rd %016llx Wr %016llx "
                                        "IO %016llx ",
                                        i, data1, data2, data3);
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "Cmpl %016llx OutIO %016llx\n",
                                        tot, ((data1 + data2 + data3) - tot));
                }
-               len += snprintf(buf + len, PAGE_SIZE - len,
+               len += scnprintf(buf + len, PAGE_SIZE - len,
                                "Total FCP Cmpl %016llx Issue %016llx "
                                "OutIO %016llx\n",
                                totin, totout, totout - totin);
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "LS Xmt Err: Abrt %08x Err %08x  "
                                "Cmpl Err: xb %08x Err %08x\n",
                                atomic_read(&lport->xmt_ls_abort),
@@ -1220,7 +1222,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&lport->cmpl_ls_xb),
                                atomic_read(&lport->cmpl_ls_err));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "FCP Xmt Err: noxri %06x nondlp %06x "
                                "qdepth %06x wqerr %06x err %06x Abrt %06x\n",
                                atomic_read(&lport->xmt_fcp_noxri),
@@ -1230,7 +1232,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&lport->xmt_fcp_err),
                                atomic_read(&lport->xmt_fcp_abort));
 
-               len += snprintf(buf + len, size - len,
+               len += scnprintf(buf + len, size - len,
                                "FCP Cmpl Err: xb %08x Err %08x\n",
                                atomic_read(&lport->cmpl_fcp_xb),
                                atomic_read(&lport->cmpl_fcp_err));
@@ -1322,58 +1324,58 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
 
        if (phba->nvmet_support == 0) {
                /* NVME Initiator */
-               len += snprintf(buf + len, PAGE_SIZE - len,
+               len += scnprintf(buf + len, PAGE_SIZE - len,
                                "ktime %s: Total Samples: %lld\n",
                                (phba->ktime_on ?  "Enabled" : "Disabled"),
                                phba->ktime_data_samples);
                if (phba->ktime_data_samples == 0)
                        return len;
 
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "Segment 1: Last NVME Cmd cmpl "
                        "done -to- Start of next NVME cnd (in driver)\n");
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg1_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg1_min,
                        phba->ktime_seg1_max);
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "Segment 2: Driver start of NVME cmd "
                        "-to- Firmware WQ doorbell\n");
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg2_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg2_min,
                        phba->ktime_seg2_max);
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "Segment 3: Firmware WQ doorbell -to- "
                        "MSI-X ISR cmpl\n");
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg3_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg3_min,
                        phba->ktime_seg3_max);
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "Segment 4: MSI-X ISR cmpl -to- "
                        "NVME cmpl done\n");
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg4_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg4_min,
                        phba->ktime_seg4_max);
-               len += snprintf(
+               len += scnprintf(
                        buf + len, PAGE_SIZE - len,
                        "Total IO avg time: %08lld\n",
                        div_u64(phba->ktime_seg1_total +
@@ -1385,7 +1387,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
        }
 
        /* NVME Target */
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "ktime %s: Total Samples: %lld %lld\n",
                        (phba->ktime_on ? "Enabled" : "Disabled"),
                        phba->ktime_data_samples,
@@ -1393,46 +1395,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
        if (phba->ktime_data_samples == 0)
                return len;
 
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 1: MSI-X ISR Rcv cmd -to- "
                        "cmd pass to NVME Layer\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg1_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg1_min,
                        phba->ktime_seg1_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 2: cmd pass to NVME Layer- "
                        "-to- Driver rcv cmd OP (action)\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg2_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg2_min,
                        phba->ktime_seg2_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 3: Driver rcv cmd OP -to- "
                        "Firmware WQ doorbell: cmd\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg3_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg3_min,
                        phba->ktime_seg3_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 4: Firmware WQ doorbell: cmd "
                        "-to- MSI-X ISR for cmd cmpl\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg4_total,
                                phba->ktime_data_samples),
                        phba->ktime_seg4_min,
                        phba->ktime_seg4_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 5: MSI-X ISR for cmd cmpl "
                        "-to- NVME layer passed cmd done\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg5_total,
                                phba->ktime_data_samples),
@@ -1440,10 +1442,10 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
                        phba->ktime_seg5_max);
 
        if (phba->ktime_status_samples == 0) {
-               len += snprintf(buf + len, PAGE_SIZE-len,
+               len += scnprintf(buf + len, PAGE_SIZE-len,
                                "Total: cmd received by MSI-X ISR "
                                "-to- cmd completed on wire\n");
-               len += snprintf(buf + len, PAGE_SIZE-len,
+               len += scnprintf(buf + len, PAGE_SIZE-len,
                                "avg:%08lld min:%08lld "
                                "max %08lld\n",
                                div_u64(phba->ktime_seg10_total,
@@ -1453,46 +1455,46 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
                return len;
        }
 
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 6: NVME layer passed cmd done "
                        "-to- Driver rcv rsp status OP\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg6_total,
                                phba->ktime_status_samples),
                        phba->ktime_seg6_min,
                        phba->ktime_seg6_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 7: Driver rcv rsp status OP "
                        "-to- Firmware WQ doorbell: status\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg7_total,
                                phba->ktime_status_samples),
                        phba->ktime_seg7_min,
                        phba->ktime_seg7_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 8: Firmware WQ doorbell: status"
                        " -to- MSI-X ISR for status cmpl\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg8_total,
                                phba->ktime_status_samples),
                        phba->ktime_seg8_min,
                        phba->ktime_seg8_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Segment 9: MSI-X ISR for status cmpl  "
                        "-to- NVME layer passed status done\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg9_total,
                                phba->ktime_status_samples),
                        phba->ktime_seg9_min,
                        phba->ktime_seg9_max);
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "Total: cmd received by MSI-X ISR -to- "
                        "cmd completed on wire\n");
-       len += snprintf(buf + len, PAGE_SIZE-len,
+       len += scnprintf(buf + len, PAGE_SIZE-len,
                        "avg:%08lld min:%08lld max %08lld\n",
                        div_u64(phba->ktime_seg10_total,
                                phba->ktime_status_samples),
@@ -1527,7 +1529,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
                (phba->nvmeio_trc_size - 1);
        skip = phba->nvmeio_trc_output_idx;
 
-       len += snprintf(buf + len, size - len,
+       len += scnprintf(buf + len, size - len,
                        "%s IO Trace %s: next_idx %d skip %d size %d\n",
                        (phba->nvmet_support ? "NVME" : "NVMET"),
                        (state ? "Enabled" : "Disabled"),
@@ -1549,18 +1551,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
                if (!dtp->fmt)
                        continue;
 
-               len +=  snprintf(buf + len, size - len, dtp->fmt,
+               len +=  scnprintf(buf + len, size - len, dtp->fmt,
                        dtp->data1, dtp->data2, dtp->data3);
 
                if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
                        phba->nvmeio_trc_output_idx = 0;
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "Trace Complete\n");
                        goto out;
                }
 
                if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "Trace Continue (%d of %d)\n",
                                        phba->nvmeio_trc_output_idx,
                                        phba->nvmeio_trc_size);
@@ -1578,18 +1580,18 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
                if (!dtp->fmt)
                        continue;
 
-               len +=  snprintf(buf + len, size - len, dtp->fmt,
+               len +=  scnprintf(buf + len, size - len, dtp->fmt,
                        dtp->data1, dtp->data2, dtp->data3);
 
                if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
                        phba->nvmeio_trc_output_idx = 0;
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "Trace Complete\n");
                        goto out;
                }
 
                if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
-                       len += snprintf(buf + len, size - len,
+                       len += scnprintf(buf + len, size - len,
                                        "Trace Continue (%d of %d)\n",
                                        phba->nvmeio_trc_output_idx,
                                        phba->nvmeio_trc_size);
@@ -1597,7 +1599,7 @@ lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
                }
        }
 
-       len += snprintf(buf + len, size - len,
+       len += scnprintf(buf + len, size - len,
                        "Trace Done\n");
 out:
        return len;
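
lpfc_debugfs_nvmeio_trc_data() above walks a circular trace array in two passes, first from the oldest slot (next_idx) to the end of the array and then from slot 0 back up to next_idx, resuming at nvmeio_trc_output_idx across reads and bailing out with "Trace Continue" when the output buffer runs low. A minimal sketch of that wrap-around walk, with hypothetical names and without the resume/continue handling, is below (illustrative only, not the lpfc trace structures):

/*
 * Illustrative two-pass-equivalent walk of a power-of-two ring buffer,
 * oldest entry first; hypothetical types, not the lpfc trace structures.
 */
struct demo_trc {
	const char *fmt;
	unsigned long data1, data2, data3;
};

static int demo_dump_ring(char *buf, int size, const struct demo_trc *ring,
			  unsigned int ring_size, unsigned int next_idx)
{
	unsigned int i, idx;
	int len = 0;

	for (i = 0; i < ring_size; i++) {
		idx = (next_idx + i) & (ring_size - 1);	/* wrap around */
		if (!ring[idx].fmt)
			continue;			/* never-written slot */
		len += scnprintf(buf + len, size - len, ring[idx].fmt,
				 ring[idx].data1, ring[idx].data2,
				 ring[idx].data3);
	}
	return len;
}
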
@@ -1627,17 +1629,17 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
        uint32_t tot_rcv;
        uint32_t tot_cmpl;
 
-       len += snprintf(buf + len, PAGE_SIZE - len,
+       len += scnprintf(buf + len, PAGE_SIZE - len,
                        "CPUcheck %s ",
                        (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
                                "Enabled" : "Disabled"));
        if (phba->nvmet_support) {
-               len += snprintf(buf + len, PAGE_SIZE - len,
+               len += scnprintf(buf + len, PAGE_SIZE - len,
                                "%s\n",
                                (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
                                        "Rcv Enabled\n" : "Rcv Disabled\n"));
        } else {
-               len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+               len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
        }
        max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
 
@@ -1658,7 +1660,7 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
                if (!tot_xmt && !tot_cmpl && !tot_rcv)
                        continue;
 
-               len += snprintf(buf + len, PAGE_SIZE - len,
+               len += scnprintf(buf + len, PAGE_SIZE - len,
                                "HDWQ %03d: ", i);
                for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
                        /* Only display non-zero counters */
@@ -1667,22 +1669,22 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
                            !qp->cpucheck_rcv_io[j])
                                continue;
                        if (phba->nvmet_support) {
-                               len += snprintf(buf + len, PAGE_SIZE - len,
+                               len += scnprintf(buf + len, PAGE_SIZE - len,
                                                "CPU %03d: %x/%x/%x ", j,
                                                qp->cpucheck_rcv_io[j],
                                                qp->cpucheck_xmt_io[j],
                                                qp->cpucheck_cmpl_io[j]);
                        } else {
-                               len += snprintf(buf + len, PAGE_SIZE - len,
+                               len += scnprintf(buf + len, PAGE_SIZE - len,
                                                "CPU %03d: %x/%x ", j,
                                                qp->cpucheck_xmt_io[j],
                                                qp->cpucheck_cmpl_io[j]);
                        }
                }
-               len += snprintf(buf + len, PAGE_SIZE - len,
+               len += scnprintf(buf + len, PAGE_SIZE - len,
                                "Total: %x\n", tot_xmt);
                if (len >= max_cnt) {
-                       len += snprintf(buf + len, PAGE_SIZE - len,
+                       len += scnprintf(buf + len, PAGE_SIZE - len,
                                        "Truncated ...\n");
                        return len;
                }
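
Even with the bounded scnprintf(), the per-entry loops above still stop early once less than one worst-case line of space remains (len >= max_cnt, where max_cnt is size - LPFC_DEBUG_OUT_LINE_SZ), emitting a "Truncated ..." marker rather than ending mid-entry. A generic kernel-style sketch of that pattern, with illustrative names rather than the lpfc structures:

/*
 * Illustrative only: keep one worst-case line of headroom so the dump ends
 * with a clear truncation marker instead of a partially formatted entry.
 */
#define DEMO_OUT_LINE_SZ 80

static int demo_dump(char *buf, int size, int nr, const unsigned int *vals)
{
	int len = 0, i;

	for (i = 0; i < nr; i++) {
		if (len >= size - DEMO_OUT_LINE_SZ) {
			len += scnprintf(buf + len, size - len,
					 "Truncated ...\n");
			break;
		}
		len += scnprintf(buf + len, size - len,
				 "Entry %03d: %08x\n", i, vals[i]);
	}
	return len;
}
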
@@ -2258,28 +2260,29 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
        int cnt = 0;
 
        if (dent == phba->debug_writeGuard)
-               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+               cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
        else if (dent == phba->debug_writeApp)
-               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+               cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
        else if (dent == phba->debug_writeRef)
-               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+               cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
        else if (dent == phba->debug_readGuard)
-               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+               cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
        else if (dent == phba->debug_readApp)
-               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+               cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
        else if (dent == phba->debug_readRef)
-               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+               cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
        else if (dent == phba->debug_InjErrNPortID)
-               cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+               cnt = scnprintf(cbuf, 32, "0x%06x\n",
+                               phba->lpfc_injerr_nportid);
        else if (dent == phba->debug_InjErrWWPN) {
                memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
                tmp = cpu_to_be64(tmp);
-               cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+               cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
        } else if (dent == phba->debug_InjErrLBA) {
                if (phba->lpfc_injerr_lba == (sector_t)(-1))
-                       cnt = snprintf(cbuf, 32, "off\n");
+                       cnt = scnprintf(cbuf, 32, "off\n");
                else
-                       cnt = snprintf(cbuf, 32, "0x%llx\n",
+                       cnt = scnprintf(cbuf, 32, "0x%llx\n",
                                 (uint64_t) phba->lpfc_injerr_lba);
        } else
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3224,17 +3227,17 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
        switch (count) {
        case SIZE_U8: /* byte (8 bits) */
                pci_read_config_byte(pdev, where, &u8val);
-               len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                                "%03x: %02x\n", where, u8val);
                break;
        case SIZE_U16: /* word (16 bits) */
                pci_read_config_word(pdev, where, &u16val);
-               len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                                "%03x: %04x\n", where, u16val);
                break;
        case SIZE_U32: /* double word (32 bits) */
                pci_read_config_dword(pdev, where, &u32val);
-               len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                                "%03x: %08x\n", where, u32val);
                break;
        case LPFC_PCI_CFG_BROWSE: /* browse all */
@@ -3254,25 +3257,25 @@ pcicfg_browse:
        offset = offset_label;
 
        /* Read PCI config space */
-       len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                        "%03x: ", offset_label);
        while (index > 0) {
                pci_read_config_dword(pdev, offset, &u32val);
-               len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                                "%08x ", u32val);
                offset += sizeof(uint32_t);
                if (offset >= LPFC_PCI_CFG_SIZE) {
-                       len += snprintf(pbuffer+len,
+                       len += scnprintf(pbuffer+len,
                                        LPFC_PCI_CFG_SIZE-len, "\n");
                        break;
                }
                index -= sizeof(uint32_t);
                if (!index)
-                       len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+                       len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                                        "\n");
                else if (!(index % (8 * sizeof(uint32_t)))) {
                        offset_label += (8 * sizeof(uint32_t));
-                       len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+                       len += scnprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
                                        "\n%03x: ", offset_label);
                }
        }
@@ -3543,7 +3546,7 @@ lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
        if (acc_range == SINGLE_WORD) {
                offset_run = offset;
                u32val = readl(mem_mapped_bar + offset_run);
-               len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
                                "%05x: %08x\n", offset_run, u32val);
        } else
                goto baracc_browse;
@@ -3557,35 +3560,35 @@ baracc_browse:
        offset_run = offset_label;
 
        /* Read PCI bar memory mapped space */
-       len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
                        "%05x: ", offset_label);
        index = LPFC_PCI_BAR_RD_SIZE;
        while (index > 0) {
                u32val = readl(mem_mapped_bar + offset_run);
-               len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
                                "%08x ", u32val);
                offset_run += sizeof(uint32_t);
                if (acc_range == LPFC_PCI_BAR_BROWSE) {
                        if (offset_run >= bar_size) {
-                               len += snprintf(pbuffer+len,
+                               len += scnprintf(pbuffer+len,
                                        LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
                                break;
                        }
                } else {
                        if (offset_run >= offset +
                            (acc_range * sizeof(uint32_t))) {
-                               len += snprintf(pbuffer+len,
+                               len += scnprintf(pbuffer+len,
                                        LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
                                break;
                        }
                }
                index -= sizeof(uint32_t);
                if (!index)
-                       len += snprintf(pbuffer+len,
+                       len += scnprintf(pbuffer+len,
                                        LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
                else if (!(index % (8 * sizeof(uint32_t)))) {
                        offset_label += (8 * sizeof(uint32_t));
-                       len += snprintf(pbuffer+len,
+                       len += scnprintf(pbuffer+len,
                                        LPFC_PCI_BAR_RD_BUF_SIZE-len,
                                        "\n%05x: ", offset_label);
                }
@@ -3758,19 +3761,19 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
        if (!qp)
                return len;
 
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\t\t%s WQ info: ", wqtype);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n",
                        qp->assoc_qid, qp->q_cnt_1,
                        (unsigned long long)qp->q_cnt_4);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
                        "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]",
                        qp->queue_id, qp->entry_count,
                        qp->entry_size, qp->host_index,
                        qp->hba_index, qp->notify_interval);
-       len +=  snprintf(pbuffer + len,
+       len +=  scnprintf(pbuffer + len,
                        LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
        return len;
 }
@@ -3810,21 +3813,22 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
        if (!qp)
                return len;
 
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\t%s CQ info: ", cqtype);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x "
                        "xabt:x%x wq:x%llx]\n",
                        qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
                        qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
                        "HST-IDX[%04d], NTFI[%03d], PLMT[%03d]",
                        qp->queue_id, qp->entry_count,
                        qp->entry_size, qp->host_index,
                        qp->notify_interval, qp->max_proc_limit);
 
-       len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+       len +=  scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+                       "\n");
 
        return len;
 }
@@ -3836,19 +3840,19 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
        if (!qp || !datqp)
                return len;
 
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\t\t%s RQ info: ", rqtype);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
                        "posted:x%x rcv:x%llx]\n",
                        qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
                        qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
                        "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
                        qp->queue_id, qp->entry_count, qp->entry_size,
                        qp->host_index, qp->hba_index, qp->notify_interval);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
                        "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n",
                        datqp->queue_id, datqp->entry_count,
@@ -3927,18 +3931,19 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
        if (!qp)
                return len;
 
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
                        "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
                        eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
                        (unsigned long long)qp->q_cnt_4, qp->q_mode);
-       len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+       len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
                        "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
                        "HST-IDX[%04d], NTFI[%03d], PLMT[%03d], AFFIN[%03d]",
                        qp->queue_id, qp->entry_count, qp->entry_size,
                        qp->host_index, qp->notify_interval,
                        qp->max_proc_limit, qp->chann);
-       len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+       len +=  scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+                       "\n");
 
        return len;
 }
@@ -3991,9 +3996,10 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
                if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
                        phba->lpfc_idiag_last_eq = 0;
 
-               len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
-                                       "HDWQ %d out of %d HBA HDWQs\n",
-                                       x, phba->cfg_hdw_queue);
+               len += scnprintf(pbuffer + len,
+                                LPFC_QUE_INFO_GET_BUF_SIZE - len,
+                                "HDWQ %d out of %d HBA HDWQs\n",
+                                x, phba->cfg_hdw_queue);
 
                /* Fast-path EQ */
                qp = phba->sli4_hba.hdwq[x].hba_eq;
@@ -4075,7 +4081,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
        return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 
 too_big:
-       len +=  snprintf(pbuffer + len,
+       len +=  scnprintf(pbuffer + len,
                LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n");
 out:
        spin_unlock_irq(&phba->hbalock);
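
The scnprintf()-accumulated len is what finally reaches simple_read_from_buffer(), which copies at most len bytes of the kernel buffer to user space and handles the read offset, so an over-reported length (as snprintf() could produce once the buffer filled) would let user space read past the formatted text. A minimal debugfs-style read handler showing the pairing; demo_read and its contents are hypothetical, not lpfc code:

/*
 * Hypothetical read handler: scnprintf() builds the text, and the resulting
 * len bounds what simple_read_from_buffer() exposes to user space.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

#define DEMO_BUF_SIZE 256

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t nbytes, loff_t *ppos)
{
	char pbuffer[DEMO_BUF_SIZE];
	int len = 0;

	len += scnprintf(pbuffer + len, DEMO_BUF_SIZE - len, "state: up\n");
	len += scnprintf(pbuffer + len, DEMO_BUF_SIZE - len, "errors: %d\n", 0);

	/* Copies at most len bytes, advancing *ppos for partial reads. */
	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
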
@@ -4131,22 +4137,22 @@ lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
                return 0;
 
        esize = pque->entry_size;
-       len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
                        "QE-INDEX[%04d]:\n", index);
 
        offset = 0;
-       pentry = pque->qe[index].address;
+       pentry = lpfc_sli4_qe(pque, index);
        while (esize > 0) {
-               len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
                                "%08x ", *pentry);
                pentry++;
                offset += sizeof(uint32_t);
                esize -= sizeof(uint32_t);
                if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
-                       len += snprintf(pbuffer+len,
+                       len += scnprintf(pbuffer+len,
                                        LPFC_QUE_ACC_BUF_SIZE-len, "\n");
        }
-       len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+       len += scnprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
 
        return len;
 }
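
Beyond the format-helper swap, this hunk (and the queue-access write path further down) replaces the old "pque->qe[index].address" lookup with lpfc_sli4_qe(). That accessor is defined elsewhere in the driver and is not shown in this diff; conceptually it turns an entry index into an address within whichever page holds that entry. A purely illustrative sketch of such a lookup follows; the function and field names are assumptions, not the real lpfc_queue layout:

/*
 * Illustrative only: the real lpfc_sli4_qe() lives in the lpfc SLI headers;
 * this just shows the index -> page + offset style of lookup.
 */
static inline void *demo_sli4_qe(void * const *page_addr,
				 unsigned int entries_per_page,
				 unsigned int entry_size, unsigned int idx)
{
	return (char *)page_addr[idx / entries_per_page] +
	       (size_t)entry_size * (idx % entries_per_page);
}
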
@@ -4485,7 +4491,7 @@ pass_check:
                pque = (struct lpfc_queue *)idiag.ptr_private;
                if (offset > pque->entry_size/sizeof(uint32_t) - 1)
                        goto error_out;
-               pentry = pque->qe[index].address;
+               pentry = lpfc_sli4_qe(pque, index);
                pentry += offset;
                if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
                        *pentry = value;
@@ -4506,7 +4512,7 @@ error_out:
  * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
  * @phba: The pointer to hba structure.
  * @pbuffer: The pointer to the buffer to copy the data to.
- * @len: The lenght of bytes to copied.
+ * @len: The length of bytes to copied.
  * @drbregid: The id to doorbell registers.
  *
  * Description:
@@ -4526,27 +4532,27 @@ lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
 
        switch (drbregid) {
        case LPFC_DRB_EQ:
-               len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE-len,
                                "EQ-DRB-REG: 0x%08x\n",
                                readl(phba->sli4_hba.EQDBregaddr));
                break;
        case LPFC_DRB_CQ:
-               len += snprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
+               len += scnprintf(pbuffer + len, LPFC_DRB_ACC_BUF_SIZE - len,
                                "CQ-DRB-REG: 0x%08x\n",
                                readl(phba->sli4_hba.CQDBregaddr));
                break;
        case LPFC_DRB_MQ:
-               len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
                                "MQ-DRB-REG:   0x%08x\n",
                                readl(phba->sli4_hba.MQDBregaddr));
                break;
        case LPFC_DRB_WQ:
-               len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
                                "WQ-DRB-REG:   0x%08x\n",
                                readl(phba->sli4_hba.WQDBregaddr));
                break;
        case LPFC_DRB_RQ:
-               len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
                                "RQ-DRB-REG:   0x%08x\n",
                                readl(phba->sli4_hba.RQDBregaddr));
                break;
@@ -4716,7 +4722,7 @@ error_out:
  * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control registers
  * @phba: The pointer to hba structure.
  * @pbuffer: The pointer to the buffer to copy the data to.
- * @len: The lenght of bytes to copied.
+ * @len: The length of bytes to copied.
  * @drbregid: The id to doorbell registers.
  *
  * Description:
@@ -4736,37 +4742,37 @@ lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
 
        switch (ctlregid) {
        case LPFC_CTL_PORT_SEM:
-               len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
                                "Port SemReg:   0x%08x\n",
                                readl(phba->sli4_hba.conf_regs_memmap_p +
                                      LPFC_CTL_PORT_SEM_OFFSET));
                break;
        case LPFC_CTL_PORT_STA:
-               len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
                                "Port StaReg:   0x%08x\n",
                                readl(phba->sli4_hba.conf_regs_memmap_p +
                                      LPFC_CTL_PORT_STA_OFFSET));
                break;
        case LPFC_CTL_PORT_CTL:
-               len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
                                "Port CtlReg:   0x%08x\n",
                                readl(phba->sli4_hba.conf_regs_memmap_p +
                                      LPFC_CTL_PORT_CTL_OFFSET));
                break;
        case LPFC_CTL_PORT_ER1:
-               len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
                                "Port Er1Reg:   0x%08x\n",
                                readl(phba->sli4_hba.conf_regs_memmap_p +
                                      LPFC_CTL_PORT_ER1_OFFSET));
                break;
        case LPFC_CTL_PORT_ER2:
-               len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
                                "Port Er2Reg:   0x%08x\n",
                                readl(phba->sli4_hba.conf_regs_memmap_p +
                                      LPFC_CTL_PORT_ER2_OFFSET));
                break;
        case LPFC_CTL_PDEV_CTL:
-               len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
                                "PDev CtlReg:   0x%08x\n",
                                readl(phba->sli4_hba.conf_regs_memmap_p +
                                      LPFC_CTL_PDEV_CTL_OFFSET));
@@ -4959,13 +4965,13 @@ lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
        mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
        mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
 
-       len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
                        "mbx_dump_map: 0x%08x\n", mbx_dump_map);
-       len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
                        "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
-       len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
                        "mbx_word_cnt: %04d\n", mbx_word_cnt);
-       len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
                        "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
 
        return len;
@@ -5114,35 +5120,35 @@ lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
 {
        uint16_t ext_cnt, ext_size;
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\nAvailable Extents Information:\n");
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tPort Available VPI extents: ");
        lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
                                       &ext_cnt, &ext_size);
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tPort Available VFI extents: ");
        lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
                                       &ext_cnt, &ext_size);
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tPort Available RPI extents: ");
        lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
                                       &ext_cnt, &ext_size);
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "Count %3d, Size %3d\n", ext_cnt, ext_size);
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tPort Available XRI extents: ");
        lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
                                       &ext_cnt, &ext_size);
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "Count %3d, Size %3d\n", ext_cnt, ext_size);
 
        return len;
@@ -5166,55 +5172,55 @@ lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
        uint16_t ext_cnt, ext_size;
        int rc;
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\nAllocated Extents Information:\n");
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tHost Allocated VPI extents: ");
        rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
                                            &ext_cnt, &ext_size);
        if (!rc)
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "Port %d Extent %3d, Size %3d\n",
                                phba->brd_no, ext_cnt, ext_size);
        else
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "N/A\n");
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tHost Allocated VFI extents: ");
        rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
                                            &ext_cnt, &ext_size);
        if (!rc)
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "Port %d Extent %3d, Size %3d\n",
                                phba->brd_no, ext_cnt, ext_size);
        else
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "N/A\n");
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tHost Allocated RPI extents: ");
        rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
                                            &ext_cnt, &ext_size);
        if (!rc)
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "Port %d Extent %3d, Size %3d\n",
                                phba->brd_no, ext_cnt, ext_size);
        else
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "N/A\n");
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tHost Allocated XRI extents: ");
        rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
                                            &ext_cnt, &ext_size);
        if (!rc)
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "Port %d Extent %3d, Size %3d\n",
                                phba->brd_no, ext_cnt, ext_size);
        else
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "N/A\n");
 
        return len;
@@ -5238,49 +5244,49 @@ lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
        struct lpfc_rsrc_blks *rsrc_blks;
        int index;
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\nDriver Extents Information:\n");
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tVPI extents:\n");
        index = 0;
        list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "\t\tBlock %3d: Start %4d, Count %4d\n",
                                index, rsrc_blks->rsrc_start,
                                rsrc_blks->rsrc_size);
                index++;
        }
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tVFI extents:\n");
        index = 0;
        list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
                            list) {
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "\t\tBlock %3d: Start %4d, Count %4d\n",
                                index, rsrc_blks->rsrc_start,
                                rsrc_blks->rsrc_size);
                index++;
        }
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tRPI extents:\n");
        index = 0;
        list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
                            list) {
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "\t\tBlock %3d: Start %4d, Count %4d\n",
                                index, rsrc_blks->rsrc_start,
                                rsrc_blks->rsrc_size);
                index++;
        }
 
-       len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+       len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                        "\tXRI extents:\n");
        index = 0;
        list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
                            list) {
-               len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+               len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
                                "\t\tBlock %3d: Start %4d, Count %4d\n",
                                index, rsrc_blks->rsrc_start,
                                rsrc_blks->rsrc_size);
@@ -5706,11 +5712,11 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
                                if (i != 0)
                                        pr_err("%s\n", line_buf);
                                len = 0;
-                               len += snprintf(line_buf+len,
+                               len += scnprintf(line_buf+len,
                                                LPFC_MBX_ACC_LBUF_SZ-len,
                                                "%03d: ", i);
                        }
-                       len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+                       len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
                                        "%08x ", (uint32_t)*pword);
                        pword++;
                }
@@ -5773,11 +5779,11 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
                                        pr_err("%s\n", line_buf);
                                len = 0;
                                memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
-                               len += snprintf(line_buf+len,
+                               len += scnprintf(line_buf+len,
                                                LPFC_MBX_ACC_LBUF_SZ-len,
                                                "%03d: ", i);
                        }
-                       len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+                       len += scnprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
                                        "%08x ",
                                        ((uint32_t)*pword) & 0xffffffff);
                        pword++;
@@ -5796,18 +5802,18 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
                                        pr_err("%s\n", line_buf);
                                len = 0;
                                memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
-                               len += snprintf(line_buf+len,
+                               len += scnprintf(line_buf+len,
                                                LPFC_MBX_ACC_LBUF_SZ-len,
                                                "%03d: ", i);
                        }
                        for (j = 0; j < 4; j++) {
-                               len += snprintf(line_buf+len,
+                               len += scnprintf(line_buf+len,
                                                LPFC_MBX_ACC_LBUF_SZ-len,
                                                "%02x",
                                                ((uint8_t)*pbyte) & 0xff);
                                pbyte++;
                        }
-                       len += snprintf(line_buf+len,
+                       len += scnprintf(line_buf+len,
                                        LPFC_MBX_ACC_LBUF_SZ-len, " ");
                }
                if ((i - 1) % 8)
@@ -5891,7 +5897,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                                            phba, &lpfc_debugfs_op_lockstat);
                if (!phba->debug_lockstat) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                                        "0913 Cant create debugfs lockstat\n");
+                                        "4610 Cant create debugfs lockstat\n");
                        goto debug_failed;
                }
 #endif
@@ -6134,7 +6140,7 @@ nvmeio_off:
                                    vport, &lpfc_debugfs_op_scsistat);
        if (!vport->debug_scsistat) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-                                "0914 Cannot create debugfs scsistat\n");
+                                "4611 Cannot create debugfs scsistat\n");
                goto debug_failed;
        }
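
The lpfc_debugfs.c conversions above all hinge on one difference: snprintf() returns the number of bytes the formatted output would have needed, so once a line is truncated, len += snprintf(...) pushes len past LPFC_*_BUF_SIZE and the following BUF_SIZE-len argument goes negative (wrapping to a huge size_t). scnprintf() returns only what was actually stored, so the running offset stays inside the buffer. Below is a minimal user-space sketch of the difference; my_scnprintf() is a stand-in written for this illustration, not the kernel helper (the real scnprintf() lives in lib/vsprintf.c).

#include <stdarg.h>
#include <stdio.h>

/*
 * User-space stand-in for the kernel's scnprintf(): return the number of
 * characters actually stored in buf (excluding the NUL), never more than
 * size - 1, instead of snprintf()'s "would have been written" count.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (i < 0)
		return 0;
	return (size_t)i < size ? i : (int)(size - 1);
}

int main(void)
{
	char buf[16];
	int len;

	/* snprintf() reports what it wanted to write, not what fit ... */
	len = snprintf(buf, sizeof(buf), "%s", "0123456789abcdef-overflow");
	printf("snprintf returned %d for a %zu-byte buffer\n", len, sizeof(buf));

	/*
	 * ... so accumulating "len += snprintf(); snprintf(buf+len, SIZE-len)"
	 * would pass a negative (huge, once converted to size_t) length the
	 * next time.  The scnprintf-style wrapper keeps len clamped.
	 */
	len = my_scnprintf(buf, sizeof(buf), "%s", "0123456789abcdef-overflow");
	len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", " more");
	printf("scnprintf-style total %d, buf=\"%s\"\n", len, buf);
	return 0;
}

With a 16-byte buffer the plain snprintf() call reports 25, while the scnprintf-style accumulation never lets len exceed 15.
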
 
index 93ab7df..2322ddb 100644
@@ -345,10 +345,10 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
 
        esize = q->entry_size;
        qe_word_cnt = esize / sizeof(uint32_t);
-       pword = q->qe[idx].address;
+       pword = lpfc_sli4_qe(q, idx);
 
        len = 0;
-       len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+       len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
        if (qe_word_cnt > 8)
                printk(KERN_ERR "%s\n", line_buf);
 
@@ -359,11 +359,11 @@ lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
                        if (qe_word_cnt > 8) {
                                len = 0;
                                memset(line_buf, 0, LPFC_LBUF_SZ);
-                               len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+                               len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len,
                                                "%03d: ", i);
                        }
                }
-               len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+               len += scnprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
                                ((uint32_t)*pword) & 0xffffffff);
                pword++;
        }
index 7b0755e..c8fb0b4 100644
@@ -1961,7 +1961,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        IOCB_t *irsp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *prsp;
-       int disc, rc;
+       int disc;
 
        /* we pass cmdiocb to state machine which needs rspiocb as well */
        cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1990,7 +1990,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
        disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
        spin_unlock_irq(shost->host_lock);
-       rc = 0;
 
        /* PLOGI completes to NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -2029,18 +2028,16 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 ndlp->nlp_DID, irsp->ulpStatus,
                                 irsp->un.ulpWord[4]);
                /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-               if (lpfc_error_lost_link(irsp))
-                       rc = NLP_STE_FREED_NODE;
-               else
-                       rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-                                                    NLP_EVT_CMPL_PLOGI);
+               if (!lpfc_error_lost_link(irsp))
+                       lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+                                               NLP_EVT_CMPL_PLOGI);
        } else {
                /* Good status, call state machine */
                prsp = list_entry(((struct lpfc_dmabuf *)
                                   cmdiocb->context2)->list.next,
                                  struct lpfc_dmabuf, list);
                ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
-               rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+               lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                             NLP_EVT_CMPL_PLOGI);
        }
 
@@ -6744,12 +6741,11 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        uint32_t *lp;
        RNID *rn;
        struct ls_rjt stat;
-       uint32_t cmd;
 
        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
        lp = (uint32_t *) pcmd->virt;
 
-       cmd = *lp++;
+       lp++;
        rn = (RNID *) lp;
 
        /* RNID received */
@@ -7508,14 +7504,14 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        uint32_t *lp;
        IOCB_t *icmd;
        FARP *fp;
-       uint32_t cmd, cnt, did;
+       uint32_t cnt, did;
 
        icmd = &cmdiocb->iocb;
        did = icmd->un.elsreq64.remoteID;
        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
        lp = (uint32_t *) pcmd->virt;
 
-       cmd = *lp++;
+       lp++;
        fp = (FARP *) lp;
        /* FARP-REQ received from DID <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -7580,14 +7576,14 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        struct lpfc_dmabuf *pcmd;
        uint32_t *lp;
        IOCB_t *icmd;
-       uint32_t cmd, did;
+       uint32_t did;
 
        icmd = &cmdiocb->iocb;
        did = icmd->un.elsreq64.remoteID;
        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
        lp = (uint32_t *) pcmd->virt;
 
-       cmd = *lp++;
+       lp++;
        /* FARP-RSP received from DID <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0600 FARP-RSP received from DID x%x\n", did);
@@ -8454,6 +8450,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                rjt_err = LSRJT_UNABLE_TPC;
                rjt_exp = LSEXP_INVALID_OX_RX;
                break;
+       case ELS_CMD_FPIN:
+               /*
+                * Received FPIN from fabric - pass it to the
+                * transport FPIN handler.
+                */
+               fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
+                               (char *)payload);
+               break;
        default:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
@@ -8776,7 +8780,6 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                                return;
                        }
                        /* fall through */
-
                default:
                        /* Try to recover from this error */
                        if (phba->sli_rev == LPFC_SLI_REV4)
index 14fffbe..c43852f 100644
@@ -885,15 +885,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
        LPFC_MBOXQ_t          *mb;
        int i;
 
-       if (phba->link_state == LPFC_LINK_DOWN) {
-               if (phba->sli4_hba.conf_trunk) {
-                       phba->trunk_link.link0.state = 0;
-                       phba->trunk_link.link1.state = 0;
-                       phba->trunk_link.link2.state = 0;
-                       phba->trunk_link.link3.state = 0;
-               }
+       if (phba->link_state == LPFC_LINK_DOWN)
                return 0;
-       }
+
        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);
 
@@ -932,7 +926,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
                }
        }
        lpfc_destroy_vport_work_array(phba, vports);
-       /* Clean up any firmware default rpi's */
+
+       /* Clean up any SLI3 firmware default rpi's */
+       if (phba->sli_rev > LPFC_SLI_REV3)
+               goto skip_unreg_did;
+
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
@@ -944,6 +942,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
                }
        }
 
+ skip_unreg_did:
        /* Setup myDID for link up if we are in pt2pt mode */
        if (phba->pport->fc_flag & FC_PT2PT) {
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4147,9 +4146,15 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        rdata->pnode = lpfc_nlp_get(ndlp);
 
        if (ndlp->nlp_type & NLP_FCP_TARGET)
-               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+               rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
-               rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+               rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
+       if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+               rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+       if (ndlp->nlp_type & NLP_NVME_TARGET)
+               rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+       if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+               rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
 
        if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);
@@ -4675,6 +4680,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                case CMD_XMIT_ELS_RSP64_CX:
                        if (iocb->context1 == (uint8_t *) ndlp)
                                return 1;
+                       /* fall through */
                }
        } else if (pring->ringno == LPFC_FCP_RING) {
                /* Skip match check if waiting to relogin to FCP target */
@@ -4870,6 +4876,10 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                         * accept PLOGIs after unreg_rpi_cmpl
                                         */
                                        acc_plogi = 0;
+                               } else if (vport->load_flag & FC_UNLOADING) {
+                                       mbox->ctx_ndlp = NULL;
+                                       mbox->mbox_cmpl =
+                                               lpfc_sli_def_mbox_cmpl;
                                } else {
                                        mbox->ctx_ndlp = ndlp;
                                        mbox->mbox_cmpl =
@@ -4981,6 +4991,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
        LPFC_MBOXQ_t     *mbox;
        int rc;
 
+       /* Unreg DID is an SLI3 operation. */
+       if (phba->sli_rev > LPFC_SLI_REV3)
+               return;
+
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
index ec12270..edd8f39 100644
@@ -560,6 +560,8 @@ struct fc_vft_header {
 #define fc_vft_hdr_hopct_WORD          word1
 };
 
+#include <uapi/scsi/fc/fc_els.h>
+
 /*
  *  Extended Link Service LS_COMMAND codes (Payload Word 0)
  */
@@ -603,6 +605,7 @@ struct fc_vft_header {
 #define ELS_CMD_RNID      0x78000000
 #define ELS_CMD_LIRR      0x7A000000
 #define ELS_CMD_LCB      0x81000000
+#define ELS_CMD_FPIN     0x16000000
 #else  /*  __LITTLE_ENDIAN_BITFIELD */
 #define ELS_CMD_MASK      0xffff
 #define ELS_RSP_MASK      0xff
@@ -643,6 +646,7 @@ struct fc_vft_header {
 #define ELS_CMD_RNID      0x78
 #define ELS_CMD_LIRR      0x7A
 #define ELS_CMD_LCB      0x81
+#define ELS_CMD_FPIN     ELS_FPIN
 #endif
 
 /*
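
ELS_CMD_FPIN needs the two encodings above because lpfc compares the raw first 32-bit word of a received ELS payload against a host-order constant: the FPIN command byte (0x16, ELS_FPIN in the uapi fc_els.h header that is now included) lands in the top byte of that word on a big-endian host and in the bottom byte on a little-endian one. A small stand-alone illustration of the same wire bytes producing both values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/*
	 * First four bytes of an FPIN ELS payload as they arrive on the
	 * wire: the command byte 0x16 (ELS_FPIN) followed by zero bytes.
	 */
	const uint8_t wire[4] = { 0x16, 0x00, 0x00, 0x00 };
	uint32_t word0;

	memcpy(&word0, wire, sizeof(word0));

	/*
	 * Little-endian host: 0x00000016, so the command matches ELS_FPIN.
	 * Big-endian host: 0x16000000, matching the other #define branch.
	 */
	printf("ELS payload word 0 in host order: 0x%08x\n", (unsigned)word0);
	return 0;
}
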
index ff875b8..77f9a55 100644
@@ -1894,18 +1894,19 @@ struct lpfc_mbx_set_link_diag_loopback {
        union {
                struct {
                        uint32_t word0;
-#define lpfc_mbx_set_diag_lpbk_type_SHIFT      0
-#define lpfc_mbx_set_diag_lpbk_type_MASK       0x00000003
-#define lpfc_mbx_set_diag_lpbk_type_WORD       word0
-#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE                0x0
-#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL       0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_SERDES         0x2
-#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT  16
-#define lpfc_mbx_set_diag_lpbk_link_num_MASK   0x0000003F
-#define lpfc_mbx_set_diag_lpbk_link_num_WORD   word0
-#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT 22
-#define lpfc_mbx_set_diag_lpbk_link_type_MASK  0x00000003
-#define lpfc_mbx_set_diag_lpbk_link_type_WORD  word0
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT              0
+#define lpfc_mbx_set_diag_lpbk_type_MASK               0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD               word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE                        0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL               0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES                 0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED       0x3
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT          16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK           0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD           word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT         22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK          0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD          word0
                } req;
                struct {
                        uint32_t word0;
@@ -4083,22 +4084,7 @@ struct lpfc_acqe_grp5 {
        uint32_t trailer;
 };
 
-static char *const trunk_errmsg[] = {  /* map errcode */
-       "",     /* There is no such error code at index 0*/
-       "link negotiated speed does not match existing"
-               " trunk - link was \"low\" speed",
-       "link negotiated speed does not match"
-               " existing trunk - link was \"middle\" speed",
-       "link negotiated speed does not match existing"
-               " trunk - link was \"high\" speed",
-       "Attached to non-trunking port - F_Port",
-       "Attached to non-trunking port - N_Port",
-       "FLOGI response timeout",
-       "non-FLOGI frame received",
-       "Invalid FLOGI response",
-       "Trunking initialization protocol",
-       "Trunk peer device mismatch",
-};
+extern const char *const trunk_errmsg[];
 
 struct lpfc_acqe_fc_la {
        uint32_t word0;
index 7fcdaed..eaaef68 100644
@@ -1117,19 +1117,19 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
                }
        }
+       spin_unlock_irq(&phba->hbalock);
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+               spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 &nvmet_aborts);
-               spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
+               spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }
 
-       spin_unlock_irq(&phba->hbalock);
        lpfc_sli4_free_sp_events(phba);
        return cnt;
 }
@@ -1844,8 +1844,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        /* If the pci channel is offline, ignore possible errors, since
         * we cannot communicate with the pci card anyway.
         */
-       if (pci_channel_offline(phba->pcidev))
+       if (pci_channel_offline(phba->pcidev)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3166 pci channel is offline\n");
+               lpfc_sli4_offline_eratt(phba);
                return;
+       }
 
        memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -1922,6 +1926,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3151 PCI bus read access failure: x%x\n",
                                readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+                       lpfc_sli4_offline_eratt(phba);
                        return;
                }
                reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
@@ -3075,7 +3080,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
  * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
  * to expedite pool. Mark them as expedite.
  **/
-void lpfc_create_expedite_pool(struct lpfc_hba *phba)
+static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
 {
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_ncmd;
@@ -3110,7 +3115,7 @@ void lpfc_create_expedite_pool(struct lpfc_hba *phba)
  * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
  * of HWQ 0. Clear the mark.
  **/
-void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
+static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
 {
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_ncmd;
@@ -3230,7 +3235,7 @@ void lpfc_create_multixri_pools(struct lpfc_hba *phba)
  *
  * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
  **/
-void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
+static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
 {
        u32 i;
        u32 hwq_count;
@@ -3245,6 +3250,13 @@ void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_destroy_expedite_pool(phba);
 
+       if (!(phba->pport->load_flag & FC_UNLOADING)) {
+               lpfc_sli_flush_fcp_rings(phba);
+
+               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+                       lpfc_sli_flush_nvme_rings(phba);
+       }
+
        hwq_count = phba->cfg_hdw_queue;
 
        for (i = 0; i < hwq_count; i++) {
@@ -3611,8 +3623,6 @@ lpfc_io_free(struct lpfc_hba *phba)
        struct lpfc_sli4_hdw_queue *qp;
        int idx;
 
-       spin_lock_irq(&phba->hbalock);
-
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];
                /* Release all the lpfc_nvme_bufs maintained by this host. */
@@ -3642,8 +3652,6 @@ lpfc_io_free(struct lpfc_hba *phba)
                }
                spin_unlock(&qp->io_buf_list_get_lock);
        }
-
-       spin_unlock_irq(&phba->hbalock);
 }
 
 /**
@@ -4457,7 +4465,7 @@ finished:
        return stat;
 }
 
-void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
+static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
@@ -8603,9 +8611,9 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        if (phba->nvmet_support) {
                if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
                        phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+               if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+                       phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
        }
-       if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
-               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
@@ -8626,10 +8634,12 @@ static int
 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 {
        struct lpfc_queue *qdesc;
+       int cpu;
 
+       cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                      phba->sli4_hba.cq_esize,
-                                     LPFC_CQE_EXP_COUNT);
+                                     LPFC_CQE_EXP_COUNT, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -8638,11 +8648,12 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
        }
        qdesc->qe_valid = 1;
        qdesc->hdwq = wqidx;
-       qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
+       qdesc->chann = cpu;
        phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
 
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
-                                     LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
+                                     LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
+                                     cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -8661,18 +8672,20 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
 {
        struct lpfc_queue *qdesc;
        uint32_t wqesize;
+       int cpu;
 
+       cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
        /* Create Fast Path FCP CQs */
        if (phba->enab_exp_wqcq_pages)
                /* Increase the CQ size when WQEs contain an embedded cdb */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                              phba->sli4_hba.cq_esize,
-                                             LPFC_CQE_EXP_COUNT);
+                                             LPFC_CQE_EXP_COUNT, cpu);
 
        else
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.cq_esize,
-                                             phba->sli4_hba.cq_ecount);
+                                             phba->sli4_hba.cq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8680,7 +8693,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
        }
        qdesc->qe_valid = 1;
        qdesc->hdwq = wqidx;
-       qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
+       qdesc->chann = cpu;
        phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
 
        /* Create Fast Path FCP WQs */
@@ -8690,11 +8703,11 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
                        LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
                                              wqesize,
-                                             LPFC_WQE_EXP_COUNT);
+                                             LPFC_WQE_EXP_COUNT, cpu);
        } else
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.wq_esize,
-                                             phba->sli4_hba.wq_ecount);
+                                             phba->sli4_hba.wq_ecount, cpu);
 
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8727,7 +8740,7 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
        struct lpfc_queue *qdesc;
-       int idx, eqidx;
+       int idx, eqidx, cpu;
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_eq_intr_info *eqi;
 
@@ -8814,13 +8827,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
        /* Create HBA Event Queues (EQs) */
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+               /* determine EQ affinity */
+               eqidx = lpfc_find_eq_handle(phba, idx);
+               cpu = lpfc_find_cpu_handle(phba, eqidx, LPFC_FIND_BY_EQ);
                /*
                 * If there are more Hardware Queues than available
-                * CQs, multiple Hardware Queues may share a common EQ.
+                * EQs, multiple Hardware Queues may share a common EQ.
                 */
                if (idx >= phba->cfg_irq_chann) {
                        /* Share an existing EQ */
-                       eqidx = lpfc_find_eq_handle(phba, idx);
                        phba->sli4_hba.hdwq[idx].hba_eq =
                                phba->sli4_hba.hdwq[eqidx].hba_eq;
                        continue;
@@ -8828,7 +8843,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                /* Create an EQ */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.eq_esize,
-                                             phba->sli4_hba.eq_ecount);
+                                             phba->sli4_hba.eq_ecount, cpu);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0497 Failed allocate EQ (%d)\n", idx);
@@ -8838,9 +8853,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                qdesc->hdwq = idx;
 
                /* Save the CPU this EQ is affinitised to */
-               eqidx = lpfc_find_eq_handle(phba, idx);
-               qdesc->chann = lpfc_find_cpu_handle(phba, eqidx,
-                                                   LPFC_FIND_BY_EQ);
+               qdesc->chann = cpu;
                phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
                qdesc->last_cpu = qdesc->chann;
                eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
@@ -8863,11 +8876,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
                if (phba->nvmet_support) {
                        for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+                               cpu = lpfc_find_cpu_handle(phba, idx,
+                                                          LPFC_FIND_BY_HDWQ);
                                qdesc = lpfc_sli4_queue_alloc(
                                                      phba,
                                                      LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.cq_esize,
-                                                     phba->sli4_hba.cq_ecount);
+                                                     phba->sli4_hba.cq_ecount,
+                                                     cpu);
                                if (!qdesc) {
                                        lpfc_printf_log(
                                                phba, KERN_ERR, LOG_INIT,
@@ -8877,7 +8893,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                }
                                qdesc->qe_valid = 1;
                                qdesc->hdwq = idx;
-                               qdesc->chann = idx;
+                               qdesc->chann = cpu;
                                phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                        }
                }
@@ -8887,10 +8903,11 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
         * Create Slow Path Completion Queues (CQs)
         */
 
+       cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
        /* Create slow-path Mailbox Command Complete Queue */
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                      phba->sli4_hba.cq_esize,
-                                     phba->sli4_hba.cq_ecount);
+                                     phba->sli4_hba.cq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0500 Failed allocate slow-path mailbox CQ\n");
@@ -8902,7 +8919,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        /* Create slow-path ELS Complete Queue */
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                      phba->sli4_hba.cq_esize,
-                                     phba->sli4_hba.cq_ecount);
+                                     phba->sli4_hba.cq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0501 Failed allocate slow-path ELS CQ\n");
@@ -8921,7 +8938,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                      phba->sli4_hba.mq_esize,
-                                     phba->sli4_hba.mq_ecount);
+                                     phba->sli4_hba.mq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0505 Failed allocate slow-path MQ\n");
@@ -8937,7 +8954,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        /* Create slow-path ELS Work Queue */
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                      phba->sli4_hba.wq_esize,
-                                     phba->sli4_hba.wq_ecount);
+                                     phba->sli4_hba.wq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0504 Failed allocate slow-path ELS WQ\n");
@@ -8951,7 +8968,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                /* Create NVME LS Complete Queue */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.cq_esize,
-                                             phba->sli4_hba.cq_ecount);
+                                             phba->sli4_hba.cq_ecount, cpu);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "6079 Failed allocate NVME LS CQ\n");
@@ -8964,7 +8981,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                /* Create NVME LS Work Queue */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.wq_esize,
-                                             phba->sli4_hba.wq_ecount);
+                                             phba->sli4_hba.wq_ecount, cpu);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "6080 Failed allocate NVME LS WQ\n");
@@ -8982,7 +8999,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        /* Create Receive Queue for header */
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                      phba->sli4_hba.rq_esize,
-                                     phba->sli4_hba.rq_ecount);
+                                     phba->sli4_hba.rq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0506 Failed allocate receive HRQ\n");
@@ -8993,7 +9010,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        /* Create Receive Queue for data */
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                      phba->sli4_hba.rq_esize,
-                                     phba->sli4_hba.rq_ecount);
+                                     phba->sli4_hba.rq_ecount, cpu);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0507 Failed allocate receive DRQ\n");
@@ -9004,11 +9021,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
            phba->nvmet_support) {
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+                       cpu = lpfc_find_cpu_handle(phba, idx,
+                                                  LPFC_FIND_BY_HDWQ);
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
                                                      LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.rq_esize,
-                                                     LPFC_NVMET_RQE_DEF_COUNT);
+                                                     LPFC_NVMET_RQE_DEF_COUNT,
+                                                     cpu);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "3146 Failed allocate "
@@ -9019,8 +9039,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
 
                        /* Only needed for header of RQ pair */
-                       qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
-                                             GFP_KERNEL);
+                       qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
+                                                  GFP_KERNEL,
+                                                  cpu_to_node(cpu));
                        if (qdesc->rqbp == NULL) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "6131 Failed allocate "
@@ -9035,7 +9056,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                        qdesc = lpfc_sli4_queue_alloc(phba,
                                                      LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.rq_esize,
-                                                     LPFC_NVMET_RQE_DEF_COUNT);
+                                                     LPFC_NVMET_RQE_DEF_COUNT,
+                                                     cpu);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                                "3156 Failed allocate "
@@ -9134,6 +9156,20 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
 void
 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
+       /*
+        * Set FREE_INIT before beginning to free the queues.
+        * Wait until the users of the queues acknowledge, by clearing
+        * FREE_WAIT, that the queues can be released.
+        */
+       spin_lock_irq(&phba->hbalock);
+       phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
+       while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
+               spin_unlock_irq(&phba->hbalock);
+               msleep(20);
+               spin_lock_irq(&phba->hbalock);
+       }
+       spin_unlock_irq(&phba->hbalock);
+
        /* Release HBA eqs */
        if (phba->sli4_hba.hdwq)
                lpfc_sli4_release_hdwq(phba);
@@ -9172,6 +9208,11 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 
        /* Everything on this list has been freed */
        INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+
+       /* Done with freeing the queues */
+       spin_lock_irq(&phba->hbalock);
+       phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
+       spin_unlock_irq(&phba->hbalock);
 }
 
 int
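
The LPFC_QUEUE_FREE_INIT/LPFC_QUEUE_FREE_WAIT pair added in the two hunks above forms a simple handshake: the destroy path sets FREE_INIT under hbalock, polls (msleep(20) between checks) until no queue user still holds FREE_WAIT, frees everything, then clears FREE_INIT. The user side is not part of this diff; the sketch below assumes consumers raise FREE_WAIT around their queue walk and back off if FREE_INIT is already set. It is a user-space pthread analogue for illustration only, not driver code (build with cc -pthread).

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define QUEUE_FREE_INIT  0x1	/* destroy path has started freeing   */
#define QUEUE_FREE_WAIT  0x2	/* a user is still walking the queues */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags;

/* Assumed user side: hold FREE_WAIT for the walk unless a free has begun. */
static void *queue_user(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (flags & QUEUE_FREE_INIT) {
		pthread_mutex_unlock(&lock);
		return NULL;		/* free in progress: back off */
	}
	flags |= QUEUE_FREE_WAIT;
	pthread_mutex_unlock(&lock);

	usleep(50 * 1000);		/* pretend to walk the queues */

	pthread_mutex_lock(&lock);
	flags &= ~QUEUE_FREE_WAIT;	/* acknowledge: walk finished */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Destroy path, mirroring the hunks above: announce, poll, free, clear. */
static void queue_destroy(void)
{
	pthread_mutex_lock(&lock);
	flags |= QUEUE_FREE_INIT;
	while (flags & QUEUE_FREE_WAIT) {
		pthread_mutex_unlock(&lock);
		usleep(20 * 1000);	/* msleep(20) analogue */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);

	printf("queues freed with no user in flight\n");

	pthread_mutex_lock(&lock);
	flags &= ~QUEUE_FREE_INIT;	/* done freeing */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, queue_user, NULL);
	usleep(10 * 1000);		/* let the user take FREE_WAIT first */
	queue_destroy();
	pthread_join(t, NULL);
	return 0;
}
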
@@ -9231,7 +9272,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                rc = lpfc_wq_create(phba, wq, cq, qtype);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
+                               "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
                                qidx, (uint32_t)rc);
                        /* no need to tear down cq - caller will do so */
                        return rc;
@@ -9271,7 +9312,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
  * This routine will populate the cq_lookup table by all
  * available CQ queue_id's.
  **/
-void
+static void
 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
 {
        struct lpfc_queue *eq, *childq;
@@ -10740,7 +10781,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                                phba->cfg_irq_chann, vectors);
                if (phba->cfg_irq_chann > vectors)
                        phba->cfg_irq_chann = vectors;
-               if (phba->cfg_nvmet_mrq > vectors)
+               if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
                        phba->cfg_nvmet_mrq = vectors;
        }
 
@@ -11297,7 +11338,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
            !phba->nvme_support) {
                phba->nvme_support = 0;
                phba->nvmet_support = 0;
-               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
+               phba->cfg_nvmet_mrq = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
                                "6101 Disabling NVME support: "
                                "Not supported by firmware: %d %d\n",
@@ -13046,7 +13087,7 @@ lpfc_io_resume(struct pci_dev *pdev)
  * is destroyed.
  *
  **/
-void
+static void
 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
 {
 
index 11d284c..59252bf 100644
@@ -871,7 +871,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  * This function will send a unreg_login mailbox command to the firmware
  * to release a rpi.
  **/
-void
+static void
 lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
                 struct lpfc_nodelist *ndlp, uint16_t rpi)
 {
@@ -1733,7 +1733,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
        MAILBOX_t *mb = &pmb->u.mb;
        uint32_t did  = mb->un.varWords[1];
-       int rc = 0;
 
        if (mb->mbxStatus) {
                /* RegLogin failed */
@@ -1806,8 +1805,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
                         * GFT_ID to determine if remote port supports NVME.
                         */
                        if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
-                               rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
-                                                0, ndlp->nlp_DID);
+                               lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0,
+                                           ndlp->nlp_DID);
                                return ndlp->nlp_state;
                        }
                        ndlp->nlp_fc4_type = NLP_FC4_FCP;
index d16ca41..9d99cb9 100644
@@ -229,7 +229,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
        if (qhandle == NULL)
                return -ENOMEM;
 
-       qhandle->cpu_id = smp_processor_id();
+       qhandle->cpu_id = raw_smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
@@ -312,7 +312,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
  * Return value :
  * None
  */
-void
+static void
 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 {
        struct lpfc_nvme_rport *rport = remoteport->private;
@@ -1111,9 +1111,11 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 out_err:
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                         "6072 NVME Completion Error: xri %x "
-                                        "status x%x result x%x placed x%x\n",
+                                        "status x%x result x%x [x%x] "
+                                        "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
+                                        wcqe->parameter,
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
@@ -1141,7 +1143,7 @@ out_err:
        if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
                uint32_t cpu;
                idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT) {
                        if (lpfc_ncmd->cpu != cpu)
                                lpfc_printf_vlog(vport,
@@ -1559,7 +1561,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
                idx = lpfc_queue_info->index;
        } else {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
        }
 
@@ -1639,7 +1641,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
 
        if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT) {
                        lpfc_ncmd->cpu = cpu;
                        if (idx != cpu)
@@ -2081,15 +2083,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
                lpfc_nvme_template.max_hw_queues =
                        phba->sli4_hba.num_present_cpu;
 
+       if (!IS_ENABLED(CONFIG_NVME_FC))
+               return ret;
+
        /* localport is allocated from the stack, but the registration
         * call allocates heap memory as well as the private area.
         */
-#if (IS_ENABLED(CONFIG_NVME_FC))
+
        ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
                                         &vport->phba->pcidev->dev, &localport);
-#else
-       ret = -ENOMEM;
-#endif
        if (!ret) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
                                 "6005 Successfully registered local "
@@ -2124,6 +2126,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
        return ret;
 }
 
+#if (IS_ENABLED(CONFIG_NVME_FC))
 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
  *
  * The driver has to wait for the host nvme transport to callback
@@ -2134,12 +2137,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
  * An uninterruptible wait is used because of the risk of transport-to-
  * driver state mismatch.
  */
-void
+static void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
                           struct lpfc_nvme_lport *lport,
                           struct completion *lport_unreg_cmp)
 {
-#if (IS_ENABLED(CONFIG_NVME_FC))
        u32 wait_tmo;
        int ret;
 
@@ -2162,8 +2164,8 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                         "6177 Lport %p Localport %p Complete Success\n",
                         lport, vport->localport);
-#endif
 }
+#endif
 
 /**
  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
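
The smp_processor_id() to raw_smp_processor_id() switches above are about context, not value: smp_processor_id() is only valid with preemption disabled (CONFIG_DEBUG_PREEMPT warns when it is called from preemptible code), while raw_smp_processor_id() skips that check and is appropriate when, as in these submit and statistics paths, the CPU number is used purely as a hint for picking a hardware queue, so a value that goes stale after a migration is harmless. A rough user-space analogue of "the CPU you just read is only a hint", using glibc's sched_getcpu():

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/*
	 * In user space the task is always "preemptible", so the CPU number
	 * is only advisory: the scheduler may move us between the two reads.
	 * That is the situation in which raw_smp_processor_id() is the right
	 * kernel call - the result picks a queue, and a stale value costs a
	 * little locality, never correctness.
	 */
	int before = sched_getcpu();

	sched_yield();
	printf("cpu before yield: %d, after: %d\n", before, sched_getcpu());
	return 0;
}
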
index 361e2b1..d74bfd2 100644
@@ -220,7 +220,7 @@ lpfc_nvmet_cmd_template(void)
        /* Word 12, 13, 14, 15 - is zero */
 }
 
-void
+static void
 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 {
        lockdep_assert_held(&ctxp->ctxlock);
@@ -325,7 +325,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        struct lpfc_nvmet_ctx_info *infop;
-       uint32_t *payload;
        uint32_t size, oxid, sid;
        int cpu;
        unsigned long iflag;
@@ -370,7 +369,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-               payload = (uint32_t *)(nvmebuf->dbuf.virt);
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);
 
@@ -435,7 +433,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
-       cpu = smp_processor_id();
+       cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
@@ -765,7 +763,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        }
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-               id = smp_processor_id();
+               id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -906,7 +904,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
 
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-               int id = smp_processor_id();
+               int id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (rsp->hwqid != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1120,7 +1118,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 
        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
-                        ctxp->oxid, ctxp->size, smp_processor_id());
+                        ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
@@ -1596,7 +1594,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 
                lpfc_nvmeio_data(phba,
                        "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-                       xri, smp_processor_id(), 0);
+                       xri, raw_smp_processor_id(), 0);
 
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
@@ -1612,7 +1610,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
        spin_unlock_irqrestore(&phba->hbalock, iflag);
 
        lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
-                        xri, smp_processor_id(), 1);
+                        xri, raw_smp_processor_id(), 1);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
@@ -1725,7 +1723,11 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
                }
                tgtp->tport_unreg_cmp = &tport_unreg_cmp;
                nvmet_fc_unregister_targetport(phba->targetport);
-               wait_for_completion_timeout(&tport_unreg_cmp, 5);
+               if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+                                       msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
+                       lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+                                       "6179 Unreg targetport %p timeout "
+                                       "reached.\n", phba->targetport);
                lpfc_nvmet_cleanup_io_context(phba);
        }
        phba->targetport = NULL;
@@ -1843,7 +1845,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
        struct lpfc_hba *phba = ctxp->phba;
        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
        struct lpfc_nvmet_tgtport *tgtp;
-       uint32_t *payload;
+       uint32_t *payload, qno;
        uint32_t rc;
        unsigned long iflags;
 
@@ -1876,6 +1878,15 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
        /* Process FCP command */
        if (rc == 0) {
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
+               spin_lock_irqsave(&ctxp->ctxlock, iflags);
+               if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
+                   (nvmebuf != ctxp->rqb_buffer)) {
+                       spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+                       return;
+               }
+               ctxp->rqb_buffer = NULL;
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+               lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                return;
        }
 
@@ -1886,6 +1897,20 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
                                 ctxp->oxid, ctxp->size, ctxp->sid);
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
                atomic_inc(&tgtp->defer_fod);
+               spin_lock_irqsave(&ctxp->ctxlock, iflags);
+               if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
+                       spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+                       return;
+               }
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
+               /*
+                * Post a replacement DMA buffer to RQ and defer
+                * freeing rcv buffer till .defer_rcv callback
+                */
+               qno = nvmebuf->idx;
+               lpfc_post_rq_buffer(
+                       phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+                       phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
                return;
        }
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
@@ -1996,7 +2021,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_nvmet_ctx_info *current_infop;
-       uint32_t *payload;
        uint32_t size, oxid, sid, qno;
        unsigned long iflag;
        int current_cpu;
@@ -2020,7 +2044,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
         * be empty, thus it would need to be replenished with the
         * context list from another CPU for this MRQ.
         */
-       current_cpu = smp_processor_id();
+       current_cpu = raw_smp_processor_id();
        current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
        spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
        if (current_infop->nvmet_ctx_list_cnt) {
@@ -2050,7 +2074,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 #endif
 
        lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
-                        oxid, size, smp_processor_id());
+                        oxid, size, raw_smp_processor_id());
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 
@@ -2074,7 +2098,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                return;
        }
 
-       payload = (uint32_t *)(nvmebuf->dbuf.virt);
        sid = sli4_sid_from_fc_hdr(fc_hdr);
 
        ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
@@ -2690,12 +2713,11 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 {
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
-       uint32_t status, result;
+       uint32_t result;
        unsigned long flags;
        bool released = false;
 
        ctxp = cmdwqe->context2;
-       status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -2761,11 +2783,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        unsigned long flags;
-       uint32_t status, result;
+       uint32_t result;
        bool released = false;
 
        ctxp = cmdwqe->context2;
-       status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
 
        if (!ctxp) {
@@ -2842,10 +2863,9 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 {
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
-       uint32_t status, result;
+       uint32_t result;
 
        ctxp = cmdwqe->context2;
-       status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3200,7 +3220,6 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
 {
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
-       union lpfc_wqe128 *wqe_abts;
        unsigned long flags;
        int rc;
 
@@ -3230,7 +3249,6 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
                }
        }
        abts_wqeq = ctxp->wqeq;
-       wqe_abts = &abts_wqeq->wqe;
 
        if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
                rc = WQE_BUSY;
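
The wholesale smp_processor_id() to raw_smp_processor_id() conversion in these hunks works because the CPU number is only used as a hint (picking a per-CPU context list, tagging trace and debug counters), so a value that goes stale after preemption is harmless. A rough user-space analogue of "CPU id as a bucket hint", using sched_getcpu() and a hypothetical bucket count:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    #define NBUCKETS 8      /* hypothetical number of per-CPU buckets */

    /* The thread may migrate right after sched_getcpu() returns; that is fine
     * here because the result only picks a bucket, it does not assert exclusivity. */
    static int pick_bucket(void)
    {
            int cpu = sched_getcpu();

            if (cpu < 0)
                    cpu = 0;                /* fall back to bucket 0 on error */
            return cpu % NBUCKETS;
    }

    int main(void)
    {
            printf("bucket = %d\n", pick_bucket());
            return 0;
    }
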
index 368deea..2f3f603 100644 (file)
 #define LPFC_NVMET_RQE_DEF_COUNT       2048
 #define LPFC_NVMET_SUCCESS_LEN         12
 
-#define LPFC_NVMET_MRQ_OFF             0xffff
 #define LPFC_NVMET_MRQ_AUTO            0
 #define LPFC_NVMET_MRQ_MAX             16
 
+#define LPFC_NVMET_WAIT_TMO            (5 * MSEC_PER_SEC)
+
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
        struct lpfc_hba *phba;
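
LPFC_NVMET_WAIT_TMO is defined in milliseconds and converted with msecs_to_jiffies() at the wait site, which is what the targetport-unregister hunk above fixes: the old call passed a bare 5, meaning five jiffies, not five seconds. A small stand-alone illustration of the unit mismatch, with the tick rate assumed for the example:

    #include <stdio.h>

    #define MSEC_PER_SEC    1000UL
    #define HZ              250UL                   /* assumed tick rate for the example */
    #define WAIT_TMO_MS     (5 * MSEC_PER_SEC)      /* mirrors LPFC_NVMET_WAIT_TMO */

    /* Round-up millisecond-to-tick conversion, the job msecs_to_jiffies() does. */
    static unsigned long msecs_to_ticks(unsigned long ms)
    {
            return (ms * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
    }

    int main(void)
    {
            printf("timeout of 5 ticks  = %lu ms\n", 5 * MSEC_PER_SEC / HZ);
            printf("timeout of %lu ms = %lu ticks\n", WAIT_TMO_MS, msecs_to_ticks(WAIT_TMO_MS));
            return 0;
    }
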
index ff3c5e0..ba996fb 100644 (file)
@@ -688,7 +688,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        uint32_t sgl_size, cpu, idx;
        int tag;
 
-       cpu = smp_processor_id();
+       cpu = raw_smp_processor_id();
        if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
                tag = blk_mq_unique_tag(cmnd->request);
                idx = blk_mq_unique_tag_to_hwq(tag);
@@ -3669,8 +3669,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-               cpu = smp_processor_id();
-               if (cpu < LPFC_CHECK_CPU_CNT)
+               cpu = raw_smp_processor_id();
+               if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
                        phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
        }
 #endif
@@ -4463,7 +4463,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
-               cpu = smp_processor_id();
+               cpu = raw_smp_processor_id();
                if (cpu < LPFC_CHECK_CPU_CNT) {
                        struct lpfc_sli4_hdw_queue *hdwq =
                                        &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
@@ -5048,7 +5048,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
        if (!rdata || !rdata->pnode) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-                                "0798 Device Reset rport failure: rdata x%p\n",
+                                "0798 Device Reset rdata failure: rdata x%p\n",
                                 rdata);
                return FAILED;
        }
@@ -5117,9 +5117,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
        int status;
 
        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
-       if (!rdata) {
+       if (!rdata || !rdata->pnode) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-                       "0799 Target Reset rport failure: rdata x%p\n", rdata);
+                                "0799 Target Reset rdata failure: rdata x%p\n",
+                                rdata);
                return FAILED;
        }
        pnode = rdata->pnode;
index 57b4a46..2acda18 100644 (file)
@@ -87,9 +87,6 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_eqe *eqe);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
-static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
-                                  struct lpfc_sli_ring *pring,
-                                  struct lpfc_iocbq *cmdiocb);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -151,7 +148,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
-       temp_wqe = q->qe[q->host_index].wqe;
+       temp_wqe = lpfc_sli4_qe(q, q->host_index);
 
        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
@@ -271,7 +268,7 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
-       temp_mqe = q->qe[q->host_index].mqe;
+       temp_mqe = lpfc_sli4_qe(q, q->host_index);
 
        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
@@ -331,7 +328,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
-       eqe = q->qe[q->host_index].eqe;
+       eqe = lpfc_sli4_qe(q, q->host_index);
 
        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
@@ -355,7 +352,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
  * @q: The Event Queue to disable interrupts
  *
  **/
-inline void
+void
 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
 {
        struct lpfc_register doorbell;
@@ -374,7 +371,7 @@ lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
  * @q: The Event Queue to disable interrupts
  *
  **/
-inline void
+void
 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
 {
        struct lpfc_register doorbell;
@@ -545,7 +542,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
-       cqe = q->qe[q->host_index].cqe;
+       cqe = lpfc_sli4_qe(q, q->host_index);
 
        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
@@ -667,8 +664,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
-       temp_hrqe = hq->qe[hq_put_index].rqe;
-       temp_drqe = dq->qe[dq_put_index].rqe;
+       temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
+       temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
 
        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
@@ -907,10 +904,10 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
-               if (!rrq->send_rrq)
+               if (!rrq->send_rrq) {
                        /* this call will free the rrq */
-               lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-               else if (lpfc_send_rrq(phba, rrq)) {
+                       lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+               } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                        *  will clear the bit in the xribitmap.
                        */
@@ -2502,8 +2499,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                        } else {
                                ndlp->nlp_flag &= ~NLP_UNREG_INP;
                        }
+                       pmb->ctx_ndlp = NULL;
                }
-               pmb->ctx_ndlp = NULL;
        }
 
        /* Check security permission status on INIT_LINK mailbox command */
@@ -3921,33 +3918,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
                              IOERR_SLI_ABORTED);
 }
 
-/**
- * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues an abort iocb for all the iocb commands
- * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
- * the return of this function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
-       LIST_HEAD(completions);
-       struct lpfc_iocbq *iocb, *next_iocb;
-
-       if (pring->ringno == LPFC_ELS_RING)
-               lpfc_fabric_abort_hba(phba);
-
-       spin_lock_irq(&phba->hbalock);
-       /* Next issue ABTS for everything on the txcmplq */
-       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-               lpfc_sli4_abort_nvme_io(phba, pring, iocb);
-       spin_unlock_irq(&phba->hbalock);
-}
-
-
 /**
  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
  * @phba: Pointer to HBA context object.
@@ -3977,33 +3947,6 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
        }
 }
 
-/**
- * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
- * @phba: Pointer to HBA context object.
- *
- * This function aborts all wqes in NVME rings. This function issues an
- * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
- * the txcmplq is not guaranteed to complete before the return of this
- * function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
-{
-       struct lpfc_sli_ring  *pring;
-       uint32_t i;
-
-       if ((phba->sli_rev < LPFC_SLI_REV4) ||
-           !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-               return;
-
-       /* Abort all IO on each NVME ring. */
-       for (i = 0; i < phba->cfg_hdw_queue; i++) {
-               pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
-               lpfc_sli_abort_wqe_ring(phba, pring);
-       }
-}
-
-
 /**
  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
  * @phba: Pointer to HBA context object.
@@ -4487,7 +4430,9 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
        }
 
        /* Turn off parity checking and serr during the physical reset */
-       pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+       if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
+               return -EIO;
+
        pci_write_config_word(phba->pcidev, PCI_COMMAND,
                              (cfg_value &
                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
@@ -4564,7 +4509,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
                        "0389 Performing PCI function reset!\n");
 
        /* Turn off parity checking and serr during the physical reset */
-       pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+       if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3205 PCI read Config failed\n");
+               return -EIO;
+       }
+
        pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
                              ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
@@ -5395,7 +5345,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 }
 
 /**
- * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine retrieves SLI4 device physical port name this PCI function
@@ -5403,40 +5353,30 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
  *
  * Return codes
  *      0 - successful
- *      otherwise - failed to retrieve physical port name
+ *      otherwise - failed to retrieve controller attributes
  **/
 static int
-lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
 {
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
        struct lpfc_controller_attribute *cntl_attr;
-       struct lpfc_mbx_get_port_name *get_port_name;
        void *virtaddr = NULL;
        uint32_t alloclen, reqlen;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
-       char cport_name = 0;
        int rc;
 
-       /* We assume nothing at this point */
-       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
-       phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
-
        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;
-       /* obtain link type and link number via READ_CONFIG */
-       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
-       lpfc_sli4_read_config(phba);
-       if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
-               goto retrieve_ppname;
 
-       /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+       /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
        reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
        alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
                        LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
                        LPFC_SLI4_MBX_NEMBED);
+
        if (alloclen < reqlen) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "3084 Allocated DMA memory size (%d) is "
@@ -5462,16 +5402,71 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
                rc = -ENXIO;
                goto out_free_mboxq;
        }
+
        cntl_attr = &mbx_cntl_attr->cntl_attr;
        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
        phba->sli4_hba.lnk_info.lnk_tp =
                bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
        phba->sli4_hba.lnk_info.lnk_no =
                bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+
+       memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
+       strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+               sizeof(phba->BIOSVersion));
+
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                       "3086 lnk_type:%d, lnk_numb:%d\n",
+                       "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
                        phba->sli4_hba.lnk_info.lnk_tp,
-                       phba->sli4_hba.lnk_info.lnk_no);
+                       phba->sli4_hba.lnk_info.lnk_no,
+                       phba->BIOSVersion);
+out_free_mboxq:
+       if (rc != MBX_TIMEOUT) {
+               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               else
+                       mempool_free(mboxq, phba->mbox_mem_pool);
+       }
+       return rc;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves SLI4 device physical port name this PCI function
+ * is attached to.
+ *
+ * Return codes
+ *      0 - successful
+ *      otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mbx_get_port_name *get_port_name;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+       char cport_name = 0;
+       int rc;
+
+       /* We assume nothing at this point */
+       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+       phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+       /* obtain link type and link number via READ_CONFIG */
+       phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+       lpfc_sli4_read_config(phba);
+       if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+               goto retrieve_ppname;
+
+       /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+       rc = lpfc_sli4_get_ctl_attr(phba);
+       if (rc)
+               goto out_free_mboxq;
 
 retrieve_ppname:
        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -7047,7 +7042,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
  *
  * Returns: 0 = success, non-zero failure.
  **/
-int
+static int
 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
 {
        LIST_HEAD(post_nblist);
@@ -7067,7 +7062,7 @@ lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
        return rc;
 }
 
-void
+static void
 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
        uint32_t len;
@@ -7250,6 +7245,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                                "3080 Successful retrieving SLI4 device "
                                "physical port name: %s.\n", phba->Port);
 
+       rc = lpfc_sli4_get_ctl_attr(phba);
+       if (!rc)
+               lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                               "8351 Successful retrieving SLI4 device "
+                               "CTL ATTR\n");
+
        /*
         * Evaluate the read rev and vpd data. Populate the driver
         * state with the results. If this routine fails, the failure
@@ -7652,12 +7653,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                phba->cfg_xri_rebalancing = 0;
        }
 
-       /* Arm the CQs and then EQs on device */
-       lpfc_sli4_arm_cqeq_intr(phba);
-
-       /* Indicate device interrupt mode */
-       phba->sli4_hba.intr_enable = 1;
-
        /* Allow asynchronous mailbox command to go through */
        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
@@ -7726,6 +7721,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                phba->trunk_link.link3.state = LPFC_LINK_DOWN;
        spin_unlock_irq(&phba->hbalock);
 
+       /* Arm the CQs and then EQs on device */
+       lpfc_sli4_arm_cqeq_intr(phba);
+
+       /* Indicate device interrupt mode */
+       phba->sli4_hba.intr_enable = 1;
+
        if (!(phba->hba_flag & HBA_FCOE_MODE) &&
            (phba->hba_flag & LINK_DISABLED)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
@@ -7820,8 +7821,9 @@ lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
        mcq = phba->sli4_hba.mbx_cq;
        idx = mcq->hba_index;
        qe_valid = mcq->qe_valid;
-       while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
-               mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+       while (bf_get_le32(lpfc_cqe_valid,
+              (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
+               mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
                if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
                    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
                        pending_completions = true;
@@ -8500,7 +8502,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
                db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
                if (!db_ready)
-                       msleep(2);
+                       mdelay(2);
 
                if (time_after(jiffies, timeout))
                        return MBXERR_ERROR;
@@ -11263,102 +11265,6 @@ abort_iotag_exit:
        return retval;
 }
 
-/**
- * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
- **/
-static int
-lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-                       struct lpfc_iocbq *cmdiocb)
-{
-       struct lpfc_vport *vport = cmdiocb->vport;
-       struct lpfc_iocbq *abtsiocbp;
-       union lpfc_wqe128 *abts_wqe;
-       int retval;
-       int idx = cmdiocb->hba_wqidx;
-
-       /*
-        * There are certain command types we don't want to abort.  And we
-        * don't want to abort commands that are already in the process of
-        * being aborted.
-        */
-       if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-           cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
-           (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
-               return 0;
-
-       /* issue ABTS for this io based on iotag */
-       abtsiocbp = __lpfc_sli_get_iocbq(phba);
-       if (abtsiocbp == NULL)
-               return 0;
-
-       /* This signals the response to set the correct status
-        * before calling the completion handler
-        */
-       cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
-       /* Complete prepping the abort wqe and issue to the FW. */
-       abts_wqe = &abtsiocbp->wqe;
-
-       /* Clear any stale WQE contents */
-       memset(abts_wqe, 0, sizeof(union lpfc_wqe));
-       bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-       /* word 7 */
-       bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-       bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
-              cmdiocb->iocb.ulpClass);
-
-       /* word 8 - tell the FW to abort the IO associated with this
-        * outstanding exchange ID.
-        */
-       abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
-
-       /* word 9 - this is the iotag for the abts_wqe completion. */
-       bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
-              abtsiocbp->iotag);
-
-       /* word 10 */
-       bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
-       bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
-       /* word 11 */
-       bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
-       bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
-       bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-
-       /* ABTS WQE must go to the same WQ as the WQE to be aborted */
-       abtsiocbp->iocb_flag |= LPFC_IO_NVME;
-       abtsiocbp->vport = vport;
-       abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
-       retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
-                                    abtsiocbp);
-       if (retval) {
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-                                "6147 Failed abts issue_wqe with status x%x "
-                                "for oxid x%x\n",
-                                retval, cmdiocb->sli4_xritag);
-               lpfc_sli_release_iocbq(phba, abtsiocbp);
-               return retval;
-       }
-
-       lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-                        "6148 Drv Abort NVME Request Issued for "
-                        "ox_id x%x on reqtag x%x\n",
-                        cmdiocb->sli4_xritag,
-                        abtsiocbp->iotag);
-
-       return retval;
-}
-
 /**
  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  * @phba: pointer to lpfc HBA data structure.
@@ -13636,7 +13542,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0390 Cannot schedule soft IRQ "
                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                               cqid, cq->queue_id, smp_processor_id());
+                               cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14019,7 +13925,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        return false;
                }
 drop:
-               lpfc_in_buf_free(phba, &dma_buf->dbuf);
+               lpfc_rq_buf_free(phba, &dma_buf->hbuf);
                break;
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
                if (phba->nvmet_support) {
@@ -14185,7 +14091,7 @@ work_cq:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0363 Cannot schedule soft IRQ "
                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                               cqid, cq->queue_id, smp_processor_id());
+                               cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14324,7 +14230,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 
        eqi = phba->sli4_hba.eq_info;
        icnt = this_cpu_inc_return(eqi->icnt);
-       fpeq->last_cpu = smp_processor_id();
+       fpeq->last_cpu = raw_smp_processor_id();
 
        if (icnt > LPFC_EQD_ISR_TRIGGER &&
            phba->cfg_irq_chann == 1 &&
@@ -14410,6 +14316,9 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
        if (!queue)
                return;
 
+       if (!list_empty(&queue->wq_list))
+               list_del(&queue->wq_list);
+
        while (!list_empty(&queue->page_list)) {
                list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
                                 list);
@@ -14425,9 +14334,6 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
        if (!list_empty(&queue->cpu_list))
                list_del(&queue->cpu_list);
 
-       if (!list_empty(&queue->wq_list))
-               list_del(&queue->wq_list);
-
        kfree(queue);
        return;
 }
@@ -14438,6 +14344,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
  * @page_size: The size of a queue page
  * @entry_size: The size of each queue entry for this queue.
  * @entry_count: The number of entries that this queue will handle.
+ * @cpu: The cpu that will primarily utilize this queue.
  *
  * This function allocates a queue structure and the DMAable memory used for
  * the host resident queue. This function must be called before creating the
@@ -14445,28 +14352,26 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
  **/
 struct lpfc_queue *
 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
-                     uint32_t entry_size, uint32_t entry_count)
+                     uint32_t entry_size, uint32_t entry_count, int cpu)
 {
        struct lpfc_queue *queue;
        struct lpfc_dmabuf *dmabuf;
-       int x, total_qe_count;
-       void *dma_pointer;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+       uint16_t x, pgcnt;
 
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = page_size;
 
-       queue = kzalloc(sizeof(struct lpfc_queue) +
-                       (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
-       if (!queue)
-               return NULL;
-       queue->page_count = (ALIGN(entry_size * entry_count,
-                       hw_page_size))/hw_page_size;
+       pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
 
        /* If needed, Adjust page count to match the max the adapter supports */
-       if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
-           (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
-               queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+       if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
+               pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
+
+       queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
+                            GFP_KERNEL, cpu_to_node(cpu));
+       if (!queue)
+               return NULL;
 
        INIT_LIST_HEAD(&queue->list);
        INIT_LIST_HEAD(&queue->wq_list);
@@ -14478,13 +14383,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
        /* Set queue parameters now.  If the system cannot provide memory
         * resources, the free routine needs to know what was allocated.
         */
+       queue->page_count = pgcnt;
+       queue->q_pgs = (void **)&queue[1];
+       queue->entry_cnt_per_pg = hw_page_size / entry_size;
        queue->entry_size = entry_size;
        queue->entry_count = entry_count;
        queue->page_size = hw_page_size;
        queue->phba = phba;
 
-       for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
-               dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       for (x = 0; x < queue->page_count; x++) {
+               dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
+                                     dev_to_node(&phba->pcidev->dev));
                if (!dmabuf)
                        goto out_fail;
                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
@@ -14496,13 +14405,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
                }
                dmabuf->buffer_tag = x;
                list_add_tail(&dmabuf->list, &queue->page_list);
-               /* initialize queue's entry array */
-               dma_pointer = dmabuf->virt;
-               for (; total_qe_count < entry_count &&
-                    dma_pointer < (hw_page_size + dmabuf->virt);
-                    total_qe_count++, dma_pointer += entry_size) {
-                       queue->qe[total_qe_count].address = dma_pointer;
-               }
+               /* use lpfc_sli4_qe to index a particular entry in this page */
+               queue->q_pgs[x] = dmabuf->virt;
        }
        INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
        INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
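
lpfc_sli4_queue_alloc() now sizes the queue object as sizeof(*queue) plus one void * per page and points q_pgs at the memory directly behind the struct, instead of embedding a fixed union sli4_qe array. A minimal user-space sketch of that "struct plus trailing pointer array in one allocation" layout, with hypothetical names and malloc standing in for the DMA allocator:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SZ 4096

    struct queue {
            unsigned int page_count;
            void **pages;           /* points just past the struct itself */
    };

    static struct queue *queue_alloc(unsigned int pgcnt)
    {
            /* One allocation: the struct followed by pgcnt page pointers. */
            struct queue *q = calloc(1, sizeof(*q) + sizeof(void *) * pgcnt);
            unsigned int i;

            if (!q)
                    return NULL;
            q->page_count = pgcnt;
            q->pages = (void **)&q[1];
            for (i = 0; i < pgcnt; i++) {
                    q->pages[i] = calloc(1, PAGE_SZ);       /* stands in for dma_alloc_coherent() */
                    if (!q->pages[i])
                            return NULL;                    /* cleanup omitted for brevity */
            }
            return q;
    }

    int main(void)
    {
            struct queue *q = queue_alloc(4);

            printf("allocated %u pages\n", q ? q->page_count : 0);
            return 0;
    }
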
index 7a1a761..467b827 100644 (file)
@@ -327,6 +327,10 @@ struct lpfc_sli {
 #define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
 #define LPFC_SLI_SUPPRESS_RSP     0x4000 /* Suppress RSP feature is supported */
 #define LPFC_SLI_USE_EQDR         0x8000 /* EQ Delay Register is supported */
+#define LPFC_QUEUE_FREE_INIT     0x10000 /* Queue freeing is in progress */
+#define LPFC_QUEUE_FREE_WAIT     0x20000 /* Hold Queue free as it is being
+                                          * used outside worker thread
+                                          */
 
        struct lpfc_sli_ring *sli3_ring;
 
@@ -427,14 +431,13 @@ struct lpfc_io_buf {
                struct {
                        struct nvmefc_fcp_req *nvmeCmd;
                        uint16_t qidx;
-
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-                       uint64_t ts_cmd_start;
-                       uint64_t ts_last_cmd;
-                       uint64_t ts_cmd_wqput;
-                       uint64_t ts_isr_cmpl;
-                       uint64_t ts_data_nvme;
-#endif
                };
        };
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       uint64_t ts_cmd_start;
+       uint64_t ts_last_cmd;
+       uint64_t ts_cmd_wqput;
+       uint64_t ts_isr_cmpl;
+       uint64_t ts_data_nvme;
+#endif
 };
index 40c8509..8e4fd1a 100644 (file)
@@ -117,21 +117,6 @@ enum lpfc_sli4_queue_subtype {
        LPFC_USOL
 };
 
-union sli4_qe {
-       void *address;
-       struct lpfc_eqe *eqe;
-       struct lpfc_cqe *cqe;
-       struct lpfc_mcqe *mcqe;
-       struct lpfc_wcqe_complete *wcqe_complete;
-       struct lpfc_wcqe_release *wcqe_release;
-       struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
-       struct lpfc_rcqe_complete *rcqe_complete;
-       struct lpfc_mqe *mqe;
-       union  lpfc_wqe *wqe;
-       union  lpfc_wqe128 *wqe128;
-       struct lpfc_rqe *rqe;
-};
-
 /* RQ buffer list */
 struct lpfc_rqb {
        uint16_t entry_count;     /* Current number of RQ slots */
@@ -157,6 +142,7 @@ struct lpfc_queue {
        struct list_head cpu_list;
        uint32_t entry_count;   /* Number of entries to support on the queue */
        uint32_t entry_size;    /* Size of each queue entry. */
+       uint32_t entry_cnt_per_pg;
        uint32_t notify_interval; /* Queue Notification Interval
                                   * For chip->host queues (EQ, CQ, RQ):
                                   *  specifies the interval (number of
@@ -254,17 +240,17 @@ struct lpfc_queue {
        uint16_t last_cpu;      /* most recent cpu */
        uint8_t qe_valid;
        struct lpfc_queue *assoc_qp;
-       union sli4_qe qe[1];    /* array to index entries (must be last) */
+       void **q_pgs;   /* array to index entries per page */
 };
 
 struct lpfc_sli4_link {
-       uint16_t speed;
+       uint32_t speed;
        uint8_t duplex;
        uint8_t status;
        uint8_t type;
        uint8_t number;
        uint8_t fault;
-       uint16_t logical_speed;
+       uint32_t logical_speed;
        uint16_t topology;
 };
 
@@ -543,8 +529,9 @@ struct lpfc_sli4_lnk_info {
 #define LPFC_LNK_DAT_INVAL     0
 #define LPFC_LNK_DAT_VAL       1
        uint8_t lnk_tp;
-#define LPFC_LNK_GE    0x0 /* FCoE */
-#define LPFC_LNK_FC    0x1 /* FC   */
+#define LPFC_LNK_GE            0x0 /* FCoE */
+#define LPFC_LNK_FC            0x1 /* FC */
+#define LPFC_LNK_FC_TRUNKED    0x2 /* FC_Trunked */
        uint8_t lnk_no;
        uint8_t optic_state;
 };
@@ -907,6 +894,18 @@ struct lpfc_sli4_hba {
 #define lpfc_conf_trunk_port3_WORD     conf_trunk
 #define lpfc_conf_trunk_port3_SHIFT    3
 #define lpfc_conf_trunk_port3_MASK     0x1
+#define lpfc_conf_trunk_port0_nd_WORD  conf_trunk
+#define lpfc_conf_trunk_port0_nd_SHIFT 4
+#define lpfc_conf_trunk_port0_nd_MASK  0x1
+#define lpfc_conf_trunk_port1_nd_WORD  conf_trunk
+#define lpfc_conf_trunk_port1_nd_SHIFT 5
+#define lpfc_conf_trunk_port1_nd_MASK  0x1
+#define lpfc_conf_trunk_port2_nd_WORD  conf_trunk
+#define lpfc_conf_trunk_port2_nd_SHIFT 6
+#define lpfc_conf_trunk_port2_nd_MASK  0x1
+#define lpfc_conf_trunk_port3_nd_WORD  conf_trunk
+#define lpfc_conf_trunk_port3_nd_SHIFT 7
+#define lpfc_conf_trunk_port3_nd_MASK  0x1
 };
 
 enum lpfc_sge_type {
@@ -990,8 +989,10 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
                               uint16_t);
 
 void lpfc_sli4_hba_reset(struct lpfc_hba *);
-struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
-                                        uint32_t, uint32_t);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *phba,
+                                        uint32_t page_size,
+                                        uint32_t entry_size,
+                                        uint32_t entry_count, int cpu);
 void lpfc_sli4_queue_free(struct lpfc_queue *);
 int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
 void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -1057,12 +1058,12 @@ void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
 int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
 int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
 int lpfc_sli4_init_vpi(struct lpfc_vport *);
-inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
+void lpfc_sli4_eq_clr_intr(struct lpfc_queue *);
 void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                           uint32_t count, bool arm);
 void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                           uint32_t count, bool arm);
-inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
+void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
 void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                               uint32_t count, bool arm);
 void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
@@ -1079,3 +1080,8 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
+{
+       return q->q_pgs[idx / q->entry_cnt_per_pg] +
+               (q->entry_size * (idx % q->entry_cnt_per_pg));
+}
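
The lpfc_sli4_qe() helper replaces the old flat qe[] array: an entry index is split into a page number (idx / entry_cnt_per_pg) and a byte offset within that page (entry_size * (idx % entry_cnt_per_pg)). The same arithmetic, checked in isolation with assumed page and entry sizes:

    #include <stdio.h>

    #define PAGE_SZ         4096u
    #define ENTRY_SZ        64u                     /* assumed queue entry size */
    #define ENTRIES_PER_PG  (PAGE_SZ / ENTRY_SZ)    /* 64 entries per page */

    /* Same math as lpfc_sli4_qe(): which page holds entry idx, and where in it. */
    static void locate(unsigned int idx, unsigned int *pg, unsigned int *off)
    {
            *pg  = idx / ENTRIES_PER_PG;
            *off = ENTRY_SZ * (idx % ENTRIES_PER_PG);
    }

    int main(void)
    {
            unsigned int pg, off;

            locate(70, &pg, &off);          /* entry 70 -> page 1, offset 6 * 64 = 384 */
            printf("entry 70: page %u, offset %u\n", pg, off);
            return 0;
    }
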
index 43fd693..f7d9ef4 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.2.0.0"
+#define LPFC_DRIVER_VERSION "12.2.0.1"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
                LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2019 Broadcom. All Rights " \
                "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
                "and/or its subsidiaries."
index 59a6546..473a120 100644 (file)
@@ -2724,7 +2724,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
        do {
                if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
                        dev_info(&instance->pdev->dev,
-                               "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+                               "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
                                __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
                        if (i == 3)
                                goto kill_hba_and_failed;
@@ -4647,7 +4647,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
  * Return:                     0 if DCMD succeeded
  *                              non-zero if failed
  */
-int
+static int
 megasas_host_device_list_query(struct megasas_instance *instance,
                               bool is_probe)
 {
index e35c2b6..6129399 100644 (file)
@@ -4418,7 +4418,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
        if (!smid) {
                ret = SUCCESS;
                scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
-                       " issued is not found in oustanding commands\n");
+                       " issued is not found in outstanding commands\n");
                mutex_unlock(&instance->reset_mutex);
                goto out;
        }
index b736dbc..a072187 100644 (file)
@@ -45,6 +45,7 @@ config SCSI_MPT3SAS
        depends on PCI && SCSI
        select SCSI_SAS_ATTRS
        select RAID_ATTRS
+       select IRQ_POLL
        ---help---
        This driver supports PCI-Express SAS 12Gb/s Host Adapters.
 
index f60b9e0..8aacbd1 100644 (file)
@@ -94,6 +94,11 @@ module_param(max_msix_vectors, int, 0);
 MODULE_PARM_DESC(max_msix_vectors,
        " max msix vectors");
 
+static int irqpoll_weight = -1;
+module_param(irqpoll_weight, int, 0);
+MODULE_PARM_DESC(irqpoll_weight,
+       "irq poll weight (default= one fourth of HBA queue depth)");
+
 static int mpt3sas_fwfault_debug;
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        " enable detection of firmware fault and halt firmware - (default=0)");
@@ -1382,20 +1387,30 @@ union reply_descriptor {
        } u;
 };
 
+static u32 base_mod64(u64 dividend, u32 divisor)
+{
+       u32 remainder;
+
+       if (!divisor)
+               pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
+       remainder = do_div(dividend, divisor);
+       return remainder;
+}
+
 /**
- * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
- * @irq: irq number (not used)
- * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ * _base_process_reply_queue - Process reply descriptors from reply
+ *             descriptor post queue.
+ * @reply_q: per IRQ's reply queue object.
  *
- * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ * Return: number of reply descriptors processed from reply
+ *             descriptor queue.
  */
-static irqreturn_t
-_base_interrupt(int irq, void *bus_id)
+static int
+_base_process_reply_queue(struct adapter_reply_queue *reply_q)
 {
-       struct adapter_reply_queue *reply_q = bus_id;
        union reply_descriptor rd;
-       u32 completed_cmds;
-       u8 request_desript_type;
+       u64 completed_cmds;
+       u8 request_descript_type;
        u16 smid;
        u8 cb_idx;
        u32 reply;
@@ -1404,21 +1419,18 @@ _base_interrupt(int irq, void *bus_id)
        Mpi2ReplyDescriptorsUnion_t *rpf;
        u8 rc;
 
-       if (ioc->mask_interrupts)
-               return IRQ_NONE;
-
+       completed_cmds = 0;
        if (!atomic_add_unless(&reply_q->busy, 1, 1))
-               return IRQ_NONE;
+               return completed_cmds;
 
        rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
-       request_desript_type = rpf->Default.ReplyFlags
+       request_descript_type = rpf->Default.ReplyFlags
             & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-       if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+       if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
                atomic_dec(&reply_q->busy);
-               return IRQ_NONE;
+               return completed_cmds;
        }
 
-       completed_cmds = 0;
        cb_idx = 0xFF;
        do {
                rd.word = le64_to_cpu(rpf->Words);
@@ -1426,11 +1438,11 @@ _base_interrupt(int irq, void *bus_id)
                        goto out;
                reply = 0;
                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
-               if (request_desript_type ==
+               if (request_descript_type ==
                    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
-                   request_desript_type ==
+                   request_descript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
-                   request_desript_type ==
+                   request_descript_type ==
                    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
                        cb_idx = _base_get_cb_idx(ioc, smid);
                        if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
@@ -1440,7 +1452,7 @@ _base_interrupt(int irq, void *bus_id)
                                if (rc)
                                        mpt3sas_base_free_smid(ioc, smid);
                        }
-               } else if (request_desript_type ==
+               } else if (request_descript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
                        reply = le32_to_cpu(
                            rpf->AddressReply.ReplyFrameAddress);
@@ -1486,7 +1498,7 @@ _base_interrupt(int irq, void *bus_id)
                    (reply_q->reply_post_host_index ==
                    (ioc->reply_post_queue_depth - 1)) ? 0 :
                    reply_q->reply_post_host_index + 1;
-               request_desript_type =
+               request_descript_type =
                    reply_q->reply_post_free[reply_q->reply_post_host_index].
                    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
                completed_cmds++;
@@ -1495,7 +1507,7 @@ _base_interrupt(int irq, void *bus_id)
                 * So that FW can find enough entries to post the Reply
                 * Descriptors in the reply descriptor post queue.
                 */
-               if (completed_cmds > ioc->hba_queue_depth/3) {
+               if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
                        if (ioc->combined_reply_queue) {
                                writel(reply_q->reply_post_host_index |
                                                ((msix_index  & 7) <<
@@ -1507,9 +1519,14 @@ _base_interrupt(int irq, void *bus_id)
                                                 MPI2_RPHI_MSIX_INDEX_SHIFT),
                                                &ioc->chip->ReplyPostHostIndex);
                        }
-                       completed_cmds = 1;
+                       if (!reply_q->irq_poll_scheduled) {
+                               reply_q->irq_poll_scheduled = true;
+                               irq_poll_sched(&reply_q->irqpoll);
+                       }
+                       atomic_dec(&reply_q->busy);
+                       return completed_cmds;
                }
-               if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+               if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                        goto out;
                if (!reply_q->reply_post_host_index)
                        rpf = reply_q->reply_post_free;
@@ -1521,14 +1538,14 @@ _base_interrupt(int irq, void *bus_id)
 
        if (!completed_cmds) {
                atomic_dec(&reply_q->busy);
-               return IRQ_NONE;
+               return completed_cmds;
        }
 
        if (ioc->is_warpdrive) {
                writel(reply_q->reply_post_host_index,
                ioc->reply_post_host_index[msix_index]);
                atomic_dec(&reply_q->busy);
-               return IRQ_HANDLED;
+               return completed_cmds;
        }
 
        /* Update Reply Post Host Index.
@@ -1555,7 +1572,82 @@ _base_interrupt(int irq, void *bus_id)
                        MPI2_RPHI_MSIX_INDEX_SHIFT),
                        &ioc->chip->ReplyPostHostIndex);
        atomic_dec(&reply_q->busy);
-       return IRQ_HANDLED;
+       return completed_cmds;
+}
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ *
+ * Return: IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+       struct adapter_reply_queue *reply_q = bus_id;
+       struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+
+       if (ioc->mask_interrupts)
+               return IRQ_NONE;
+       if (reply_q->irq_poll_scheduled)
+               return IRQ_HANDLED;
+       return ((_base_process_reply_queue(reply_q) > 0) ?
+                       IRQ_HANDLED : IRQ_NONE);
+}
+
+/**
+ * _base_irqpoll - IRQ poll callback handler
+ * @irqpoll: irq_poll object
+ * @budget: irq poll weight
+ *
+ * returns number of reply descriptors processed
+ */
+static int
+_base_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+       struct adapter_reply_queue *reply_q;
+       int num_entries = 0;
+
+       reply_q = container_of(irqpoll, struct adapter_reply_queue,
+                       irqpoll);
+       if (reply_q->irq_line_enable) {
+               disable_irq(reply_q->os_irq);
+               reply_q->irq_line_enable = false;
+       }
+       num_entries = _base_process_reply_queue(reply_q);
+       if (num_entries < budget) {
+               irq_poll_complete(irqpoll);
+               reply_q->irq_poll_scheduled = false;
+               reply_q->irq_line_enable = true;
+               enable_irq(reply_q->os_irq);
+       }
+
+       return num_entries;
+}
+
+/**
+ * _base_init_irqpolls - initialize IRQ polls
+ * @ioc: per adapter object
+ *
+ * returns nothing
+ */
+static void
+_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
+{
+       struct adapter_reply_queue *reply_q, *next;
+
+       if (list_empty(&ioc->reply_queue_list))
+               return;
+
+       list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+               irq_poll_init(&reply_q->irqpoll,
+                       ioc->hba_queue_depth/4, _base_irqpoll);
+               reply_q->irq_poll_scheduled = false;
+               reply_q->irq_line_enable = true;
+               reply_q->os_irq = pci_irq_vector(ioc->pdev,
+                   reply_q->msix_index);
+       }
 }
 
 /**
@@ -1596,6 +1688,17 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
                /* TMs are on msix_index == 0 */
                if (reply_q->msix_index == 0)
                        continue;
+               if (reply_q->irq_poll_scheduled) {
+                       /* Calling irq_poll_disable will wait for any pending
+                        * callbacks to have completed.
+                        */
+                       irq_poll_disable(&reply_q->irqpoll);
+                       irq_poll_enable(&reply_q->irqpoll);
+                       reply_q->irq_poll_scheduled = false;
+                       reply_q->irq_line_enable = true;
+                       enable_irq(reply_q->os_irq);
+                       continue;
+               }
                synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
        }
 }
@@ -2757,6 +2860,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 
        if (!_base_is_controller_msix_enabled(ioc))
                return;
+       ioc->msix_load_balance = false;
+       if (ioc->reply_queue_count < num_online_cpus()) {
+               ioc->msix_load_balance = true;
+               return;
+       }
 
        memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
 
@@ -3015,6 +3123,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        if (r)
                goto out_fail;
 
+       if (!ioc->is_driver_loading)
+               _base_init_irqpolls(ioc);
        /* Use the Combined reply queue feature only for SAS3 C0 & higher
         * revision HBAs and also only when reply queue count is greater than 8
         */
@@ -3158,6 +3268,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
 static inline u8
 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
 {
+       /* Enables reply_queue load balancing */
+       if (ioc->msix_load_balance)
+               return ioc->reply_queue_count ?
+                   base_mod64(atomic64_add_return(1,
+                   &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
+
        return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
@@ -6506,6 +6622,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        if (r)
                goto out_free_resources;
 
+       if (irqpoll_weight > 0)
+               ioc->thresh_hold = irqpoll_weight;
+       else
+               ioc->thresh_hold = ioc->hba_queue_depth/4;
+
+       _base_init_irqpolls(ioc);
        init_waitqueue_head(&ioc->reset_wq);
 
        /* allocate memory pd handle bitmask list */
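For readers less familiar with the irq_poll interface the mpt3sas hunks above adopt, the pattern is: the hard IRQ handler hands the queue off to a softirq poller when it is busy, and the poll callback masks the IRQ line, drains completions against a budget, and re-arms the line once the queue empties. A minimal sketch of that pattern follows; the my_* names are illustrative stand-ins for the driver's own structures and reply-processing helper, while the irq_poll_*() and enable_irq()/disable_irq() calls are the real kernel APIs used above.

    #include <linux/interrupt.h>
    #include <linux/irq_poll.h>

    /* Illustrative stand-in for the driver's per-queue structure. */
    struct my_reply_queue {
            struct irq_poll irqpoll;
            unsigned int    os_irq;         /* from pci_irq_vector() */
            bool            irq_poll_scheduled;
            bool            irq_line_enable;
            int             thresh_hold;
    };

    /* Assumed helper: drains up to 'budget' completions, returns the count. */
    static int my_process_replies(struct my_reply_queue *q, int budget);

    /* Softirq poll callback: runs until the queue drains or the budget is hit. */
    static int my_irqpoll(struct irq_poll *iop, int budget)
    {
            struct my_reply_queue *q =
                    container_of(iop, struct my_reply_queue, irqpoll);
            int done;

            if (q->irq_line_enable) {
                    disable_irq(q->os_irq);         /* keep the hard IRQ quiet */
                    q->irq_line_enable = false;
            }
            done = my_process_replies(q, budget);
            if (done < budget) {
                    irq_poll_complete(iop);         /* queue drained */
                    q->irq_poll_scheduled = false;
                    q->irq_line_enable = true;
                    enable_irq(q->os_irq);
            }
            return done;
    }

    /* Hard IRQ handler: hand off to the poller when the queue stays busy. */
    static irqreturn_t my_isr(int irq, void *data)
    {
            struct my_reply_queue *q = data;
            int done;

            if (q->irq_poll_scheduled)              /* poller already running */
                    return IRQ_HANDLED;

            done = my_process_replies(q, q->thresh_hold);
            if (done >= q->thresh_hold) {
                    q->irq_poll_scheduled = true;
                    irq_poll_sched(&q->irqpoll);    /* continue in softirq */
            }
            return done ? IRQ_HANDLED : IRQ_NONE;
    }

    /* One-time setup per reply queue, e.g. weight = hba_queue_depth / 4. */
    static void my_init_irqpoll(struct my_reply_queue *q, int weight)
    {
            irq_poll_init(&q->irqpoll, weight, my_irqpoll);
    }
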
index 19158cb..480219f 100644 (file)
@@ -67,6 +67,7 @@
 #include <scsi/scsi_eh.h>
 #include <linux/pci.h>
 #include <linux/poll.h>
+#include <linux/irq_poll.h>
 
 #include "mpt3sas_debug.h"
 #include "mpt3sas_trigger_diag.h"
@@ -75,9 +76,9 @@
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION         "27.102.00.00"
-#define MPT3SAS_MAJOR_VERSION          27
-#define MPT3SAS_MINOR_VERSION          102
+#define MPT3SAS_DRIVER_VERSION         "28.100.00.00"
+#define MPT3SAS_MAJOR_VERSION          28
+#define MPT3SAS_MINOR_VERSION          100
 #define MPT3SAS_BUILD_VERSION          0
 #define MPT3SAS_RELEASE_VERSION        00
 
@@ -882,6 +883,9 @@ struct _event_ack_list {
  * @reply_post_free: reply post base virt address
  * @name: the name registered to request_irq()
  * @busy: isr is actively processing replies on another cpu
+ * @os_irq: irq number
+ * @irqpoll: irq_poll object
+ * @irq_poll_scheduled: Tells whether irq poll is scheduled or not
  * @list: this list
 */
 struct adapter_reply_queue {
@@ -891,6 +895,10 @@ struct adapter_reply_queue {
        Mpi2ReplyDescriptorsUnion_t *reply_post_free;
        char                    name[MPT_NAME_LENGTH];
        atomic_t                busy;
+       u32                     os_irq;
+       struct irq_poll         irqpoll;
+       bool                    irq_poll_scheduled;
+       bool                    irq_line_enable;
        struct list_head        list;
 };
 
@@ -1016,7 +1024,12 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @msix_vector_count: number msix vectors
  * @cpu_msix_table: table for mapping cpus to msix index
  * @cpu_msix_table_sz: table size
+ * @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @msix_load_balance: Enables load balancing of interrupts across
+ * multiple MSI-X vectors
  * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @thresh_hold: Max number of reply descriptors processed
+ *                             before updating Host Index
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -1192,6 +1205,9 @@ struct MPT3SAS_ADAPTER {
        u32             ioc_reset_count;
        MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
        u32             non_operational_loop;
+       atomic64_t      total_io_cnt;
+       bool            msix_load_balance;
+       u16             thresh_hold;
 
        /* internal commands, callback index */
        u8              scsi_io_cb_idx;
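The msix_load_balance path above spreads reply processing across the MSI-X vectors by taking a running I/O count modulo the reply queue count (the driver's base_mod64() helper wraps the 64-bit modulo). A minimal standalone sketch of the same selection, with illustrative names:

    #include <linux/types.h>
    #include <linux/atomic.h>
    #include <asm/div64.h>

    /* Illustrative: round-robin a reply-queue/MSI-X index across I/Os. */
    static atomic64_t total_io_cnt = ATOMIC64_INIT(0);

    static u8 pick_reply_queue(u16 reply_queue_count)
    {
            u64 io_cnt;
            u32 rem;

            if (!reply_queue_count)
                    return 0;

            io_cnt = atomic64_add_return(1, &total_io_cnt);
            /* do_div() divides in place and returns the remainder, which
             * serves as the queue index even on 32-bit builds.
             */
            rem = do_div(io_cnt, reply_queue_count);
            return (u8)rem;
    }
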
index b757d38..2c699bc 100644 (file)
@@ -678,7 +678,8 @@ static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
 static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
 {
        void __iomem *regs = mvi->regs_ex;
-        iow32(SPI_DATA_REG_64XX, data);
+
+       iow32(SPI_DATA_REG_64XX, data);
 }
 
 
index eb5471b..68b5b5f 100644 (file)
@@ -946,7 +946,8 @@ static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
 static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
 {
        void __iomem *regs = mvi->regs_ex - 0x10200;
-        mw32(SPI_RD_DATA_REG_94XX, data);
+
+       mw32(SPI_RD_DATA_REG_94XX, data);
 }
 
 
index 311d23c..e933c65 100644 (file)
@@ -1422,7 +1422,7 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
 {
        unsigned long flags;
        int rc = TMF_RESP_FUNC_FAILED;
-    struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
        struct mvs_info *mvi = mvi_dev->mvi_info;
 
        if (mvi_dev->dev_status != MVS_DEV_EH)
index 3df0269..a541061 100644 (file)
@@ -752,7 +752,7 @@ static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
                spin_lock_irqsave(mhba->shost->host_lock, flags);
                atomic_dec(&cmd->sync_cmd);
                if (mhba->tag_cmd[cmd->frame->tag]) {
-                       mhba->tag_cmd[cmd->frame->tag] = 0;
+                       mhba->tag_cmd[cmd->frame->tag] = NULL;
                        dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
                                                        cmd->frame->tag);
                        tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
@@ -1794,7 +1794,7 @@ static void mvumi_handle_clob(struct mvumi_hba *mhba)
                cmd = mhba->tag_cmd[ob_frame->tag];
 
                atomic_dec(&mhba->fw_outstanding);
-               mhba->tag_cmd[ob_frame->tag] = 0;
+               mhba->tag_cmd[ob_frame->tag] = NULL;
                tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
                if (cmd->scmd)
                        mvumi_complete_cmd(mhba, cmd, ob_frame);
@@ -2139,7 +2139,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
        spin_lock_irqsave(mhba->shost->host_lock, flags);
 
        if (mhba->tag_cmd[cmd->frame->tag]) {
-               mhba->tag_cmd[cmd->frame->tag] = 0;
+               mhba->tag_cmd[cmd->frame->tag] = NULL;
                tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
        }
        if (!list_empty(&cmd->queue_pointer))
index d0bb357..109effd 100644 (file)
@@ -960,9 +960,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
                return -1;
        }
        regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
-               PM8001_INIT_DBG(pm8001_ha,
-                               pm8001_printk("GPIO Output Control Register:"
-                               " = 0x%x\n", regVal));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("GPIO Output Control Register:"
+                       " = 0x%x\n", regVal));
        /* set GPIO-0 output control to tri-state */
        regVal &= 0xFFFFFFFC;
        pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
@@ -1204,6 +1204,7 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
        }
 }
 
+#ifndef PM8001_USE_MSIX
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1225,6 +1226,8 @@ pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
        pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
 }
 
+#else
+
 /**
  * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1256,6 +1259,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
        msi_index += MSIX_TABLE_BASE;
        pm8001_cw32(pm8001_ha, 0,  msi_index, MSIX_INTERRUPT_DISABLE);
 }
+#endif
 
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
@@ -1266,10 +1270,9 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
-       return;
-#endif
+#else
        pm8001_chip_intx_interrupt_enable(pm8001_ha);
-
+#endif
 }
 
 /**
@@ -1281,10 +1284,9 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
-       return;
-#endif
+#else
        pm8001_chip_intx_interrupt_disable(pm8001_ha);
-
+#endif
 }
 
 /**
@@ -2898,7 +2900,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 static void
 mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
-       u32 param;
        struct sas_task *t;
        struct pm8001_ccb_info *ccb;
        unsigned long flags;
@@ -2913,7 +2914,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        tag = le32_to_cpu(psmpPayload->tag);
 
        ccb = &pm8001_ha->ccb_info[tag];
-       param = le32_to_cpu(psmpPayload->param);
        t = ccb->task;
        ts = &t->task_status;
        pm8001_dev = ccb->device;
@@ -2928,7 +2928,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAM_STAT_GOOD;
-       if (pm8001_dev)
+               if (pm8001_dev)
                        pm8001_dev->running_req--;
                break;
        case IO_ABORTED:
@@ -3244,11 +3244,9 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 {
        struct pm8001_phy *phy = &pm8001_ha->phy[i];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
-       struct sas_ha_struct *sas_ha;
        if (!phy->phy_attached)
                return;
 
-       sas_ha = pm8001_ha->sas;
        if (sas_phy->phy) {
                struct sas_phy *sphy = sas_phy->phy;
                sphy->negotiated_linkrate = sas_phy->linkrate;
@@ -4627,17 +4625,18 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
        return ret;
 }
 
-static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 {
-       u32 value;
 #ifdef PM8001_USE_MSIX
        return 1;
-#endif
+#else
+       u32 value;
+
        value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
        if (value)
                return 1;
        return 0;
-
+#endif
 }
 
 /**
@@ -5123,7 +5122,7 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
        .chip_rst               = pm8001_hw_chip_rst,
        .chip_iounmap           = pm8001_chip_iounmap,
        .isr                    = pm8001_chip_isr,
-       .is_our_interupt        = pm8001_chip_is_our_interupt,
+       .is_our_interrupt       = pm8001_chip_is_our_interrupt,
        .isr_process_oq         = process_oq,
        .interrupt_enable       = pm8001_chip_interrupt_enable,
        .interrupt_disable      = pm8001_chip_interrupt_disable,
index a36060c..3374f55 100644 (file)
@@ -201,7 +201,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
 
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
-       if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+       if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
                return IRQ_NONE;
 #ifdef PM8001_USE_TASKLET
        tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
@@ -224,7 +224,7 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
        pm8001_ha = sha->lldd_ha;
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
-       if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+       if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
                return IRQ_NONE;
 
 #ifdef PM8001_USE_TASKLET
index 084f2fc..88eef3b 100644 (file)
@@ -740,8 +740,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                wait_for_completion(&task->slow_task->completion);
                if (pm8001_ha->chip_id != chip_8001) {
                        pm8001_dev->setds_completion = &completion_setstate;
-                               PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-                                       pm8001_dev, 0x01);
+                       PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+                               pm8001_dev, 0x01);
                        wait_for_completion(&completion_setstate);
                }
                res = -TMF_RESP_FUNC_FAILED;
index f88b0d3..ac6d8e3 100644 (file)
@@ -197,7 +197,7 @@ struct pm8001_dispatch {
        int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
        irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
-       u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
+       u32 (*is_our_interrupt)(struct pm8001_hba_info *pm8001_ha);
        int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
index 63e4f7d..301de40 100644 (file)
@@ -1316,7 +1316,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 
 static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
 {
-        u32 i;
+       u32 i;
 
        PM8001_INIT_DBG(pm8001_ha,
                pm8001_printk("chip reset start\n"));
@@ -4381,27 +4381,27 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        sata_cmd.len = cpu_to_le32(task->total_xfer_len);
                        sata_cmd.esgl = 0;
                }
-                       /* scsi cdb */
-                       sata_cmd.atapi_scsi_cdb[0] =
-                               cpu_to_le32(((task->ata_task.atapi_packet[0]) |
-                               (task->ata_task.atapi_packet[1] << 8) |
-                               (task->ata_task.atapi_packet[2] << 16) |
-                               (task->ata_task.atapi_packet[3] << 24)));
-                       sata_cmd.atapi_scsi_cdb[1] =
-                               cpu_to_le32(((task->ata_task.atapi_packet[4]) |
-                               (task->ata_task.atapi_packet[5] << 8) |
-                               (task->ata_task.atapi_packet[6] << 16) |
-                               (task->ata_task.atapi_packet[7] << 24)));
-                       sata_cmd.atapi_scsi_cdb[2] =
-                               cpu_to_le32(((task->ata_task.atapi_packet[8]) |
-                               (task->ata_task.atapi_packet[9] << 8) |
-                               (task->ata_task.atapi_packet[10] << 16) |
-                               (task->ata_task.atapi_packet[11] << 24)));
-                       sata_cmd.atapi_scsi_cdb[3] =
-                               cpu_to_le32(((task->ata_task.atapi_packet[12]) |
-                               (task->ata_task.atapi_packet[13] << 8) |
-                               (task->ata_task.atapi_packet[14] << 16) |
-                               (task->ata_task.atapi_packet[15] << 24)));
+               /* scsi cdb */
+               sata_cmd.atapi_scsi_cdb[0] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+                       (task->ata_task.atapi_packet[1] << 8) |
+                       (task->ata_task.atapi_packet[2] << 16) |
+                       (task->ata_task.atapi_packet[3] << 24)));
+               sata_cmd.atapi_scsi_cdb[1] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+                       (task->ata_task.atapi_packet[5] << 8) |
+                       (task->ata_task.atapi_packet[6] << 16) |
+                       (task->ata_task.atapi_packet[7] << 24)));
+               sata_cmd.atapi_scsi_cdb[2] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+                       (task->ata_task.atapi_packet[9] << 8) |
+                       (task->ata_task.atapi_packet[10] << 16) |
+                       (task->ata_task.atapi_packet[11] << 24)));
+               sata_cmd.atapi_scsi_cdb[3] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+                       (task->ata_task.atapi_packet[13] << 8) |
+                       (task->ata_task.atapi_packet[14] << 16) |
+                       (task->ata_task.atapi_packet[15] << 24)));
        }
 
        /* Check for read log for failed drive and return */
@@ -4617,17 +4617,18 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
        return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
-static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 {
-       u32 value;
 #ifdef PM8001_USE_MSIX
        return 1;
-#endif
+#else
+       u32 value;
+
        value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
        if (value)
                return 1;
        return 0;
-
+#endif
 }
 
 /**
@@ -4724,7 +4725,7 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
        .chip_rst               = pm80xx_hw_chip_rst,
        .chip_iounmap           = pm8001_chip_iounmap,
        .isr                    = pm80xx_chip_isr,
-       .is_our_interupt        = pm80xx_chip_is_our_interupt,
+       .is_our_interrupt       = pm80xx_chip_is_our_interrupt,
        .isr_process_oq         = process_oq,
        .interrupt_enable       = pm80xx_chip_interrupt_enable,
        .interrupt_disable      = pm80xx_chip_interrupt_disable,
index 2c78d8f..2c08f6f 100644 (file)
@@ -35,9 +35,6 @@
 #define QEDF_DESCR "QLogic FCoE Offload Driver"
 #define QEDF_MODULE_NAME "qedf"
 
-#define QEDF_MIN_XID           0
-#define QEDF_MAX_SCSI_XID      (NUM_TASKS_PER_CONNECTION - 1)
-#define QEDF_MAX_ELS_XID       4095
 #define QEDF_FLOGI_RETRY_CNT   3
 #define QEDF_RPORT_RETRY_CNT   255
 #define QEDF_MAX_SESSIONS      1024
@@ -52,8 +49,8 @@
        sizeof(struct fc_frame_header))
 #define QEDF_MAX_NPIV          64
 #define QEDF_TM_TIMEOUT                10
-#define QEDF_ABORT_TIMEOUT     10
-#define QEDF_CLEANUP_TIMEOUT   10
+#define QEDF_ABORT_TIMEOUT     (10 * 1000)
+#define QEDF_CLEANUP_TIMEOUT   1
 #define QEDF_MAX_CDB_LEN       16
 
 #define UPSTREAM_REMOVE                1
@@ -85,6 +82,7 @@ struct qedf_els_cb_arg {
 };
 
 enum qedf_ioreq_event {
+       QEDF_IOREQ_EV_NONE,
        QEDF_IOREQ_EV_ABORT_SUCCESS,
        QEDF_IOREQ_EV_ABORT_FAILED,
        QEDF_IOREQ_EV_SEND_RRQ,
@@ -105,7 +103,6 @@ struct qedf_ioreq {
        struct list_head link;
        uint16_t xid;
        struct scsi_cmnd *sc_cmd;
-       bool use_slowpath; /* Use slow SGL for this I/O */
 #define QEDF_SCSI_CMD          1
 #define QEDF_TASK_MGMT_CMD     2
 #define QEDF_ABTS              3
@@ -117,22 +114,43 @@ struct qedf_ioreq {
 #define QEDF_CMD_IN_ABORT              0x1
 #define QEDF_CMD_IN_CLEANUP            0x2
 #define QEDF_CMD_SRR_SENT              0x3
+#define QEDF_CMD_DIRTY                 0x4
+#define QEDF_CMD_ERR_SCSI_DONE         0x5
        u8 io_req_flags;
        uint8_t tm_flags;
        struct qedf_rport *fcport;
+#define        QEDF_CMD_ST_INACTIVE            0
+#define        QEDFC_CMD_ST_IO_ACTIVE          1
+#define        QEDFC_CMD_ST_ABORT_ACTIVE       2
+#define        QEDFC_CMD_ST_ABORT_ACTIVE_EH    3
+#define        QEDFC_CMD_ST_CLEANUP_ACTIVE     4
+#define        QEDFC_CMD_ST_CLEANUP_ACTIVE_EH  5
+#define        QEDFC_CMD_ST_RRQ_ACTIVE         6
+#define        QEDFC_CMD_ST_RRQ_WAIT           7
+#define        QEDFC_CMD_ST_OXID_RETIRE_WAIT   8
+#define        QEDFC_CMD_ST_TMF_ACTIVE         9
+#define        QEDFC_CMD_ST_DRAIN_ACTIVE       10
+#define        QEDFC_CMD_ST_CLEANED            11
+#define        QEDFC_CMD_ST_ELS_ACTIVE         12
+       atomic_t state;
        unsigned long flags;
        enum qedf_ioreq_event event;
        size_t data_xfer_len;
+       /* ID: 001: Alloc cmd (qedf_alloc_cmd) */
+       /* ID: 002: Initiate ABTS (qedf_initiate_abts) */
+       /* ID: 003: For RRQ (qedf_process_abts_compl) */
        struct kref refcount;
        struct qedf_cmd_mgr *cmd_mgr;
        struct io_bdt *bd_tbl;
        struct delayed_work timeout_work;
        struct completion tm_done;
        struct completion abts_done;
+       struct completion cleanup_done;
        struct e4_fcoe_task_context *task;
        struct fcoe_task_params *task_params;
        struct scsi_sgl_task_params *sgl_task_params;
        int idx;
+       int lun;
 /*
  * Need to allocate enough room for both sense data and FCP response data
  * which has a max length of 8 bytes according to spec.
@@ -155,9 +173,9 @@ struct qedf_ioreq {
        int fp_idx;
        unsigned int cpu;
        unsigned int int_cpu;
-#define QEDF_IOREQ_SLOW_SGE            0
-#define QEDF_IOREQ_SINGLE_SGE          1
-#define QEDF_IOREQ_FAST_SGE            2
+#define QEDF_IOREQ_UNKNOWN_SGE         1
+#define QEDF_IOREQ_SLOW_SGE            2
+#define QEDF_IOREQ_FAST_SGE            3
        u8 sge_type;
        struct delayed_work rrq_work;
 
@@ -172,6 +190,8 @@ struct qedf_ioreq {
         * during some form of error processing.
         */
        bool return_scsi_cmd_on_abts;
+
+       unsigned int alloc;
 };
 
 extern struct workqueue_struct *qedf_io_wq;
@@ -181,7 +201,10 @@ struct qedf_rport {
 #define QEDF_RPORT_SESSION_READY 1
 #define QEDF_RPORT_UPLOADING_CONNECTION        2
 #define QEDF_RPORT_IN_RESET 3
+#define QEDF_RPORT_IN_LUN_RESET 4
+#define QEDF_RPORT_IN_TARGET_RESET 5
        unsigned long flags;
+       int lun_reset_lun;
        unsigned long retry_delay_timestamp;
        struct fc_rport *rport;
        struct fc_rport_priv *rdata;
@@ -191,6 +214,7 @@ struct qedf_rport {
        void __iomem *p_doorbell;
        /* Send queue management */
        atomic_t free_sqes;
+       atomic_t ios_to_queue;
        atomic_t num_active_ios;
        struct fcoe_wqe *sq;
        dma_addr_t sq_dma;
@@ -295,8 +319,6 @@ struct qedf_ctx {
 #define QEDF_DCBX_PENDING      0
 #define QEDF_DCBX_DONE         1
        atomic_t dcbx;
-       uint16_t max_scsi_xid;
-       uint16_t max_els_xid;
 #define QEDF_NULL_VLAN_ID      -1
 #define QEDF_FALLBACK_VLAN     1002
 #define QEDF_DEFAULT_PRIO      3
@@ -371,7 +393,6 @@ struct qedf_ctx {
 
        u32 slow_sge_ios;
        u32 fast_sge_ios;
-       u32 single_sge_ios;
 
        uint8_t *grcdump;
        uint32_t grcdump_size;
@@ -396,6 +417,8 @@ struct qedf_ctx {
        u8 target_resets;
        u8 task_set_fulls;
        u8 busy;
+       /* Used for flush routine */
+       struct mutex flush_mutex;
 };
 
 struct io_bdt {
@@ -435,6 +458,12 @@ static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
 /*
  * Externs
  */
+
+/*
+ * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
+ * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
+ * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
+ */
 #define QEDF_DEFAULT_LOG_MASK          0x3CFB6
 extern const struct qed_fcoe_ops *qed_ops;
 extern uint qedf_dump_frames;
@@ -494,7 +523,7 @@ extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
 extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
 extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
 extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
-extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
+bool qedf_wait_for_upload(struct qedf_ctx *qedf);
 extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
        struct fcoe_cqe *cqe);
 extern void qedf_restart_rport(struct qedf_rport *fcport);
@@ -508,6 +537,8 @@ extern void qedf_get_protocol_tlv_data(void *dev, void *data);
 extern void qedf_fp_io_handler(struct work_struct *work);
 extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
 extern void qedf_wq_grcdump(struct work_struct *work);
+void qedf_stag_change_work(struct work_struct *work);
+void qedf_ctx_soft_reset(struct fc_lport *lport);
 
 #define FCOE_WORD_TO_BYTE  4
 #define QEDF_MAX_TASK_NUM      0xFFFF
index f2397ee..f7d170b 100644 (file)
@@ -15,10 +15,6 @@ qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 {
        va_list va;
        struct va_format vaf;
-       char nfunc[32];
-
-       memset(nfunc, 0, sizeof(nfunc));
-       memcpy(nfunc, func, sizeof(nfunc) - 1);
 
        va_start(va, fmt);
 
@@ -27,9 +23,9 @@ qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 
        if (likely(qedf) && likely(qedf->pdev))
                pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
-                       nfunc, line, qedf->host_no, &vaf);
+                       func, line, qedf->host_no, &vaf);
        else
-               pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+               pr_err("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
        va_end(va);
 }
@@ -40,10 +36,6 @@ qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 {
        va_list va;
        struct va_format vaf;
-       char nfunc[32];
-
-       memset(nfunc, 0, sizeof(nfunc));
-       memcpy(nfunc, func, sizeof(nfunc) - 1);
 
        va_start(va, fmt);
 
@@ -55,9 +47,9 @@ qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 
        if (likely(qedf) && likely(qedf->pdev))
                pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
-                       nfunc, line, qedf->host_no, &vaf);
+                       func, line, qedf->host_no, &vaf);
        else
-               pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+               pr_warn("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
        va_end(va);
@@ -69,10 +61,6 @@ qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 {
        va_list va;
        struct va_format vaf;
-       char nfunc[32];
-
-       memset(nfunc, 0, sizeof(nfunc));
-       memcpy(nfunc, func, sizeof(nfunc) - 1);
 
        va_start(va, fmt);
 
@@ -84,10 +72,10 @@ qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 
        if (likely(qedf) && likely(qedf->pdev))
                pr_notice("[%s]:[%s:%d]:%d: %pV",
-                         dev_name(&(qedf->pdev->dev)), nfunc, line,
+                         dev_name(&(qedf->pdev->dev)), func, line,
                          qedf->host_no, &vaf);
        else
-               pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+               pr_notice("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
        va_end(va);
@@ -99,10 +87,6 @@ qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 {
        va_list va;
        struct va_format vaf;
-       char nfunc[32];
-
-       memset(nfunc, 0, sizeof(nfunc));
-       memcpy(nfunc, func, sizeof(nfunc) - 1);
 
        va_start(va, fmt);
 
@@ -114,9 +98,9 @@ qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
 
        if (likely(qedf) && likely(qedf->pdev))
                pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
-                       nfunc, line, qedf->host_no, &vaf);
+                       func, line, qedf->host_no, &vaf);
        else
-               pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+               pr_info("[0000:00:00.0]:[%s:%d]: %pV", func, line, &vaf);
 
 ret:
        va_end(va);
index a32d8ee..2353892 100644 (file)
@@ -293,6 +293,33 @@ qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
        return single_open(file, qedf_io_trace_show, qedf);
 }
 
+/* Based on fip_state enum from libfcoe.h */
+static char *fip_state_names[] = {
+       "FIP_ST_DISABLED",
+       "FIP_ST_LINK_WAIT",
+       "FIP_ST_AUTO",
+       "FIP_ST_NON_FIP",
+       "FIP_ST_ENABLED",
+       "FIP_ST_VNMP_START",
+       "FIP_ST_VNMP_PROBE1",
+       "FIP_ST_VNMP_PROBE2",
+       "FIP_ST_VNMP_CLAIM",
+       "FIP_ST_VNMP_UP",
+};
+
+/* Based on fc_rport_state enum from libfc.h */
+static char *fc_rport_state_names[] = {
+       "RPORT_ST_INIT",
+       "RPORT_ST_FLOGI",
+       "RPORT_ST_PLOGI_WAIT",
+       "RPORT_ST_PLOGI",
+       "RPORT_ST_PRLI",
+       "RPORT_ST_RTV",
+       "RPORT_ST_READY",
+       "RPORT_ST_ADISC",
+       "RPORT_ST_DELETE",
+};
+
 static int
 qedf_driver_stats_show(struct seq_file *s, void *unused)
 {
@@ -300,10 +327,28 @@ qedf_driver_stats_show(struct seq_file *s, void *unused)
        struct qedf_rport *fcport;
        struct fc_rport_priv *rdata;
 
+       seq_printf(s, "Host WWNN/WWPN: %016llx/%016llx\n",
+                  qedf->wwnn, qedf->wwpn);
+       seq_printf(s, "Host NPortID: %06x\n", qedf->lport->port_id);
+       seq_printf(s, "Link State: %s\n", atomic_read(&qedf->link_state) ?
+           "Up" : "Down");
+       seq_printf(s, "Logical Link State: %s\n", qedf->lport->link_up ?
+           "Up" : "Down");
+       seq_printf(s, "FIP state: %s\n", fip_state_names[qedf->ctlr.state]);
+       seq_printf(s, "FIP VLAN ID: %d\n", qedf->vlan_id & 0xfff);
+       seq_printf(s, "FIP 802.1Q Priority: %d\n", qedf->prio);
+       if (qedf->ctlr.sel_fcf) {
+               seq_printf(s, "FCF WWPN: %016llx\n",
+                          qedf->ctlr.sel_fcf->switch_name);
+               seq_printf(s, "FCF MAC: %pM\n", qedf->ctlr.sel_fcf->fcf_mac);
+       } else {
+               seq_puts(s, "FCF not selected\n");
+       }
+
+       seq_puts(s, "\nSGE stats:\n\n");
        seq_printf(s, "cmg_mgr free io_reqs: %d\n",
            atomic_read(&qedf->cmd_mgr->free_list_cnt));
        seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
-       seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
        seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
 
        seq_puts(s, "Offloaded ports:\n\n");
@@ -313,9 +358,12 @@ qedf_driver_stats_show(struct seq_file *s, void *unused)
                rdata = fcport->rdata;
                if (rdata == NULL)
                        continue;
-               seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
-                   rdata->ids.port_id, atomic_read(&fcport->free_sqes),
-                   atomic_read(&fcport->num_active_ios));
+               seq_printf(s, "%016llx/%016llx/%06x: state=%s, free_sqes=%d, num_active_ios=%d\n",
+                          rdata->rport->node_name, rdata->rport->port_name,
+                          rdata->ids.port_id,
+                          fc_rport_state_names[rdata->rp_state],
+                          atomic_read(&fcport->free_sqes),
+                          atomic_read(&fcport->num_active_ios));
        }
        rcu_read_unlock();
 
@@ -361,7 +409,6 @@ qedf_dbg_clear_stats_cmd_write(struct file *filp,
 
        /* Clear stat counters exposed by 'stats' node */
        qedf->slow_sge_ios = 0;
-       qedf->single_sge_ios = 0;
        qedf->fast_sge_ios = 0;
 
        return count;
index 04f0c4d..d900c89 100644 (file)
@@ -23,8 +23,6 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        int rc = 0;
        uint32_t did, sid;
        uint16_t xid;
-       uint32_t start_time = jiffies / HZ;
-       uint32_t current_time;
        struct fcoe_wqe *sqe;
        unsigned long flags;
        u16 sqe_idx;
@@ -59,18 +57,12 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
                goto els_err;
        }
 
-retry_els:
        els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
        if (!els_req) {
-               current_time = jiffies / HZ;
-               if ((current_time - start_time) > 10) {
-                       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
-                                  "els: Failed els 0x%x\n", op);
-                       rc = -ENOMEM;
-                       goto els_err;
-               }
-               mdelay(20 * USEC_PER_MSEC);
-               goto retry_els;
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+                         "Failed to alloc ELS request 0x%x\n", op);
+               rc = -ENOMEM;
+               goto els_err;
        }
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
@@ -143,6 +135,8 @@ retry_els:
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
                   "req\n");
        qedf_ring_doorbell(fcport);
+       set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 els_err:
        return rc;
@@ -151,21 +145,16 @@ els_err:
 void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *els_req)
 {
-       struct fcoe_task_context *task_ctx;
-       struct scsi_cmnd *sc_cmd;
-       uint16_t xid;
        struct fcoe_cqe_midpath_info *mp_info;
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
                   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
 
+       clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
        /* Kill the ELS timer */
        cancel_delayed_work(&els_req->timeout_work);
 
-       xid = els_req->xid;
-       task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
-       sc_cmd = els_req->sc_cmd;
-
        /* Get ELS response length from CQE */
        mp_info = &cqe->cqe_info.midpath_info;
        els_req->mp_req.resp_len = mp_info->data_placement_size;
@@ -205,8 +194,12 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
                   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
 
-       /* This should return the aborted io_req to the command pool */
-       if (orig_io_req)
+       /*
+        * This should return the aborted io_req to the command pool. Note that
+        * we need to check the refcount in case the original request was
+        * flushed but we get a completion on this xid.
+        */
+       if (orig_io_req && refcount > 0)
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
 
 out_free:
@@ -233,6 +226,7 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;
+       int refcount;
 
        if (!aborted_io_req) {
                QEDF_ERR(NULL, "abort_io_req is NULL.\n");
@@ -241,6 +235,15 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
 
        fcport = aborted_io_req->fcport;
 
+       if (!fcport) {
+               refcount = kref_read(&aborted_io_req->refcount);
+               QEDF_ERR(NULL,
+                        "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
+                        aborted_io_req->xid, refcount);
+               kref_put(&aborted_io_req->refcount, qedf_release_cmd);
+               return -EINVAL;
+       }
+
        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
@@ -253,6 +256,19 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
        }
 
        qedf = fcport->qedf;
+
+       /*
+        * Sanity check that we can send an RRQ, to make sure that the
+        * refcount isn't 0.
+        */
+       refcount = kref_read(&aborted_io_req->refcount);
+       if (refcount != 1) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+                         "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
+                         aborted_io_req->xid, aborted_io_req, refcount);
+               return -EINVAL;
+       }
+
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;
@@ -335,32 +351,49 @@ void qedf_restart_rport(struct qedf_rport *fcport)
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
        u32 port_id;
+       unsigned long flags;
 
        if (!fcport)
                return;
 
+       spin_lock_irqsave(&fcport->rport_lock, flags);
        if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
            !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
                    fcport);
+               spin_unlock_irqrestore(&fcport->rport_lock, flags);
                return;
        }
 
        /* Set that we are now in reset */
        set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
        rdata = fcport->rdata;
-       if (rdata) {
+       if (rdata && !kref_get_unless_zero(&rdata->kref)) {
+               fcport->rdata = NULL;
+               rdata = NULL;
+       }
+
+       if (rdata && rdata->rp_state == RPORT_ST_READY) {
                lport = fcport->qedf->lport;
                port_id = rdata->ids.port_id;
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                    "LOGO port_id=%x.\n", port_id);
                fc_rport_logoff(rdata);
+               kref_put(&rdata->kref, fc_rport_destroy);
+               mutex_lock(&lport->disc.disc_mutex);
                /* Recreate the rport and log back in */
                rdata = fc_rport_create(lport, port_id);
-               if (rdata)
+               if (rdata) {
+                       mutex_unlock(&lport->disc.disc_mutex);
                        fc_rport_login(rdata);
+                       fcport->rdata = rdata;
+               } else {
+                       mutex_unlock(&lport->disc.disc_mutex);
+                       fcport->rdata = NULL;
+               }
        }
        clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
 }
@@ -569,7 +602,7 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
-       u32 sid, r_a_tov;
+       u32 r_a_tov;
        int rc;
 
        if (!orig_io_req) {
@@ -595,7 +628,6 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
 
        qedf = fcport->qedf;
        lport = qedf->lport;
-       sid = fcport->sid;
        r_a_tov = lport->r_a_tov;
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
index 3fd3af7..d4741f8 100644 (file)
@@ -19,17 +19,16 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
 {
        struct sk_buff *skb;
        char *eth_fr;
-       int fr_len;
        struct fip_vlan *vlan;
 #define MY_FIP_ALL_FCF_MACS        ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
        static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
        unsigned long flags = 0;
+       int rc = -1;
 
        skb = dev_alloc_skb(sizeof(struct fip_vlan));
        if (!skb)
                return;
 
-       fr_len = sizeof(*vlan);
        eth_fr = (char *)skb->data;
        vlan = (struct fip_vlan *)eth_fr;
 
@@ -68,7 +67,13 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
        }
 
        set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags);
-       qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
+       rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
+       if (rc) {
+               QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+               kfree_skb(skb);
+               return;
+       }
+
 }
 
 static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
@@ -95,6 +100,12 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
                rlen -= dlen;
        }
 
+       if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                         "Dropping VLAN response as link is down.\n");
+               return;
+       }
+
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
                   "vid=0x%x.\n", vid);
 
@@ -114,6 +125,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
        struct fip_header *fiph;
        u16 op, vlan_tci = 0;
        u8 sub;
+       int rc = -1;
 
        if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
                QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
@@ -142,9 +154,16 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
                print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
                    skb->data, skb->len, false);
 
-       qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+       rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+       if (rc) {
+               QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+               kfree_skb(skb);
+               return;
+       }
 }
 
+static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
+
 /* Process incoming FIP frames. */
 void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 {
@@ -157,20 +176,37 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
        size_t rlen, dlen;
        u16 op;
        u8 sub;
-       bool do_reset = false;
+       bool fcf_valid = false;
+       /* Default is to handle CVL regardless of fabric id descriptor */
+       bool fabric_id_valid = true;
+       bool fc_wwpn_valid = false;
+       u64 switch_name;
+       u16 vlan = 0;
 
        eth_hdr = (struct ethhdr *)skb_mac_header(skb);
        fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
-           "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph,
-           eth_hdr->h_source, op, sub);
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+                 "FIP frame received: skb=%p fiph=%p source=%pM destn=%pM op=%x sub=%x vlan=%04x",
+                 skb, fiph, eth_hdr->h_source, eth_hdr->h_dest, op,
+                 sub, vlan);
        if (qedf_dump_frames)
                print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
                    skb->data, skb->len, false);
 
+       if (!ether_addr_equal(eth_hdr->h_dest, qedf->mac) &&
+           !ether_addr_equal(eth_hdr->h_dest, fcoe_all_enode) &&
+               !ether_addr_equal(eth_hdr->h_dest, qedf->data_src_addr)) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+                         "Dropping FIP type 0x%x pkt due to destination MAC mismatch dest_mac=%pM ctlr.dest_addr=%pM data_src_addr=%pM.\n",
+                         op, eth_hdr->h_dest, qedf->mac,
+                         qedf->data_src_addr);
+               kfree_skb(skb);
+               return;
+       }
+
        /* Handle FIP VLAN resp in the driver */
        if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
                qedf_fcoe_process_vlan_resp(qedf, skb);
@@ -199,25 +235,36 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
                        switch (desc->fip_dtype) {
                        case FIP_DT_MAC:
                                mp = (struct fip_mac_desc *)desc;
-                               QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-                                   "fd_mac=%pM\n", mp->fd_mac);
+                               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                                         "Switch fd_mac=%pM.\n", mp->fd_mac);
                                if (ether_addr_equal(mp->fd_mac,
                                    qedf->ctlr.sel_fcf->fcf_mac))
-                                       do_reset = true;
+                                       fcf_valid = true;
                                break;
                        case FIP_DT_NAME:
                                wp = (struct fip_wwn_desc *)desc;
-                               QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-                                   "fc_wwpn=%016llx.\n",
-                                   get_unaligned_be64(&wp->fd_wwn));
+                               switch_name = get_unaligned_be64(&wp->fd_wwn);
+                               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                                         "Switch fd_wwn=%016llx fcf_switch_name=%016llx.\n",
+                                         switch_name,
+                                         qedf->ctlr.sel_fcf->switch_name);
+                               if (switch_name ==
+                                   qedf->ctlr.sel_fcf->switch_name)
+                                       fc_wwpn_valid = true;
                                break;
                        case FIP_DT_VN_ID:
                                vp = (struct fip_vn_desc *)desc;
-                               QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-                                   "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
-                               if (ntoh24(vp->fd_fc_id) ==
+                               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                                         "vx_port fd_fc_id=%x fd_mac=%pM.\n",
+                                         ntoh24(vp->fd_fc_id), vp->fd_mac);
+                               /* Check vx_port fabric ID */
+                               if (ntoh24(vp->fd_fc_id) !=
                                    qedf->lport->port_id)
-                                       do_reset = true;
+                                       fabric_id_valid = false;
+                               /* Check vx_port MAC */
+                               if (!ether_addr_equal(vp->fd_mac,
+                                                     qedf->data_src_addr))
+                                       fabric_id_valid = false;
                                break;
                        default:
                                /* Ignore anything else */
@@ -227,13 +274,11 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
                        rlen -= dlen;
                }
 
-               QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-                   "do_reset=%d.\n", do_reset);
-               if (do_reset) {
-                       fcoe_ctlr_link_down(&qedf->ctlr);
-                       qedf_wait_for_upload(qedf);
-                       fcoe_ctlr_link_up(&qedf->ctlr);
-               }
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                         "fcf_valid=%d fabric_id_valid=%d fc_wwpn_valid=%d.\n",
+                         fcf_valid, fabric_id_valid, fc_wwpn_valid);
+               if (fcf_valid && fabric_id_valid && fc_wwpn_valid)
+                       qedf_ctx_soft_reset(qedf->lport);
                kfree_skb(skb);
        } else {
                /* Everything else is handled by libfcoe */
index 53e8221..42f9f2a 100644 (file)
@@ -43,8 +43,9 @@ static void qedf_cmd_timeout(struct work_struct *work)
        switch (io_req->cmd_type) {
        case QEDF_ABTS:
                if (qedf == NULL) {
-                       QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
-                           io_req->xid);
+                       QEDF_INFO(NULL, QEDF_LOG_IO,
+                                 "qedf is NULL for ABTS xid=0x%x.\n",
+                                 io_req->xid);
                        return;
                }
 
@@ -61,6 +62,9 @@ static void qedf_cmd_timeout(struct work_struct *work)
                 */
                kref_put(&io_req->refcount, qedf_release_cmd);
 
+               /* Clear the in-abort bit now that we're done with the command */
+               clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
                /*
                 * Now that the original I/O and the ABTS are complete see
                 * if we need to reconnect to the target.
@@ -68,6 +72,15 @@ static void qedf_cmd_timeout(struct work_struct *work)
                qedf_restart_rport(fcport);
                break;
        case QEDF_ELS:
+               if (!qedf) {
+                       QEDF_INFO(NULL, QEDF_LOG_IO,
+                                 "qedf is NULL for ELS xid=0x%x.\n",
+                                 io_req->xid);
+                       return;
+               }
+               /* ELS request no longer outstanding since it timed out */
+               clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
                kref_get(&io_req->refcount);
                /*
                 * Don't attempt to clean an ELS timeout as any subsequent
@@ -103,7 +116,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
        struct io_bdt *bdt_info;
        struct qedf_ctx *qedf = cmgr->qedf;
        size_t bd_tbl_sz;
-       u16 min_xid = QEDF_MIN_XID;
+       u16 min_xid = 0;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
        int num_ios;
        int i;
@@ -157,6 +170,7 @@ static void qedf_handle_rrq(struct work_struct *work)
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, rrq_work.work);
 
+       atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
        qedf_send_rrq(io_req);
 
 }
@@ -169,7 +183,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
        u16 xid;
        int i;
        int num_ios;
-       u16 min_xid = QEDF_MIN_XID;
+       u16 min_xid = 0;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
 
        /* Make sure num_queues is already set before calling this function */
@@ -201,7 +215,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
        /*
         * Initialize I/O request fields.
         */
-       xid = QEDF_MIN_XID;
+       xid = 0;
 
        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
@@ -329,7 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
                        cmd_mgr->idx = 0;
 
                /* Check to make sure command was previously freed */
-               if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
+               if (!io_req->alloc)
                        break;
        }
 
@@ -338,7 +352,14 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
                goto out_failed;
        }
 
-       set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+       if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "io_req found to be dirty ox_id = 0x%x.\n",
+                        io_req->xid);
+
+       /* Clear any flags now that we've reallocated the xid */
+       io_req->flags = 0;
+       io_req->alloc = 1;
        spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 
        atomic_inc(&fcport->num_active_ios);
@@ -349,8 +370,13 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
        io_req->cmd_mgr = cmd_mgr;
        io_req->fcport = fcport;
 
+       /* Clear any stale sc_cmd back pointer */
+       io_req->sc_cmd = NULL;
+       io_req->lun = -1;
+
        /* Hold the io_req against deletion */
-       kref_init(&io_req->refcount);
+       kref_init(&io_req->refcount);   /* ID: 001 */
+       atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
 
        /* Bind io_bdt for this io_req */
        /* Have a static link between io_req and io_bdt_pool */
@@ -412,6 +438,10 @@ void qedf_release_cmd(struct kref *ref)
            container_of(ref, struct qedf_ioreq, refcount);
        struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
        struct qedf_rport *fcport = io_req->fcport;
+       unsigned long flags;
+
+       if (io_req->cmd_type == QEDF_SCSI_CMD)
+               WARN_ON(io_req->sc_cmd);
 
        if (io_req->cmd_type == QEDF_ELS ||
            io_req->cmd_type == QEDF_TASK_MGMT_CMD)
@@ -419,36 +449,20 @@ void qedf_release_cmd(struct kref *ref)
 
        atomic_inc(&cmd_mgr->free_list_cnt);
        atomic_dec(&fcport->num_active_ios);
+       atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
        if (atomic_read(&fcport->num_active_ios) < 0)
                QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
 
        /* Increment task retry identifier now that the request is released */
        io_req->task_retry_identifier++;
+       io_req->fcport = NULL;
 
-       clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
-}
-
-static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
-       int bd_index)
-{
-       struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
-       int frag_size, sg_frags;
-
-       sg_frags = 0;
-       while (sg_len) {
-               if (sg_len > QEDF_BD_SPLIT_SZ)
-                       frag_size = QEDF_BD_SPLIT_SZ;
-               else
-                       frag_size = sg_len;
-               bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
-               bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
-               bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
-
-               addr += (u64)frag_size;
-               sg_frags++;
-               sg_len -= frag_size;
-       }
-       return sg_frags;
+       clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
+       io_req->cpu = 0;
+       spin_lock_irqsave(&cmd_mgr->lock, flags);
+       io_req->fcport = NULL;
+       io_req->alloc = 0;
+       spin_unlock_irqrestore(&cmd_mgr->lock, flags);
 }
 
 static int qedf_map_sg(struct qedf_ioreq *io_req)
@@ -462,75 +476,45 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
        int byte_count = 0;
        int sg_count = 0;
        int bd_count = 0;
-       int sg_frags;
-       unsigned int sg_len;
+       u32 sg_len;
        u64 addr, end_addr;
-       int i;
+       int i = 0;
 
        sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
            scsi_sg_count(sc), sc->sc_data_direction);
-
        sg = scsi_sglist(sc);
 
-       /*
-        * New condition to send single SGE as cached-SGL with length less
-        * than 64k.
-        */
-       if ((sg_count == 1) && (sg_dma_len(sg) <=
-           QEDF_MAX_SGLEN_FOR_CACHESGL)) {
-               sg_len = sg_dma_len(sg);
-               addr = (u64)sg_dma_address(sg);
-
-               bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
-               bd[bd_count].sge_addr.hi = (addr >> 32);
-               bd[bd_count].sge_len = (u16)sg_len;
+       io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
 
-               return ++bd_count;
-       }
+       if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
+               io_req->sge_type = QEDF_IOREQ_FAST_SGE;
 
        scsi_for_each_sg(sc, sg, sg_count, i) {
-               sg_len = sg_dma_len(sg);
+               sg_len = (u32)sg_dma_len(sg);
                addr = (u64)sg_dma_address(sg);
                end_addr = (u64)(addr + sg_len);
 
-               /*
-                * First s/g element in the list so check if the end_addr
-                * is paged aligned. Also check to make sure the length is
-                * at least page size.
-                */
-               if ((i == 0) && (sg_count > 1) &&
-                   ((end_addr % QEDF_PAGE_SIZE) ||
-                   sg_len < QEDF_PAGE_SIZE))
-                       io_req->use_slowpath = true;
-               /*
-                * Last s/g element so check if the start address is paged
-                * aligned.
-                */
-               else if ((i == (sg_count - 1)) && (sg_count > 1) &&
-                   (addr % QEDF_PAGE_SIZE))
-                       io_req->use_slowpath = true;
                /*
                 * Intermediate s/g element so check if start and end address
-                * is page aligned.
+                * is page aligned.  Only required for writes and only if the
+                * number of scatter/gather elements is 8 or more.
                 */
-               else if ((i != 0) && (i != (sg_count - 1)) &&
-                   ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
-                       io_req->use_slowpath = true;
+               if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
+                   (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
+                       io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 
-               if (sg_len > QEDF_MAX_BD_LEN) {
-                       sg_frags = qedf_split_bd(io_req, addr, sg_len,
-                           bd_count);
-               } else {
-                       sg_frags = 1;
-                       bd[bd_count].sge_addr.lo = U64_LO(addr);
-                       bd[bd_count].sge_addr.hi  = U64_HI(addr);
-                       bd[bd_count].sge_len = (uint16_t)sg_len;
-               }
+               bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
+               bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
+               bd[bd_count].sge_len = cpu_to_le32(sg_len);
 
-               bd_count += sg_frags;
+               bd_count++;
                byte_count += sg_len;
        }
 
+       /* If neither FAST nor SLOW was set above, default to FAST */
+       if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
+               io_req->sge_type = QEDF_IOREQ_FAST_SGE;
+
        if (byte_count != scsi_bufflen(sc))
                QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
                          "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
@@ -655,8 +639,10 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
                io_req->sgl_task_params->num_sges = bd_count;
                io_req->sgl_task_params->total_buffer_size =
                    scsi_bufflen(io_req->sc_cmd);
-               io_req->sgl_task_params->small_mid_sge =
-                       io_req->use_slowpath;
+               if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
+                       io_req->sgl_task_params->small_mid_sge = 1;
+               else
+                       io_req->sgl_task_params->small_mid_sge = 0;
        }
 
        /* Fill in physical address of sense buffer */
@@ -679,16 +665,10 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
                                    io_req->task_retry_identifier, fcp_cmnd);
 
        /* Increment SGL type counters */
-       if (bd_count == 1) {
-               qedf->single_sge_ios++;
-               io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-       } else if (io_req->use_slowpath) {
+       if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
                qedf->slow_sge_ios++;
-               io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
-       } else {
+       else
                qedf->fast_sge_ios++;
-               io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-       }
 }
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
@@ -770,9 +750,6 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
                                                     &task_fc_hdr,
                                                     &tx_sgl_task_params,
                                                     &rx_sgl_task_params, 0);
-
-       /* Midpath requests always consume 1 SGE */
-       qedf->single_sge_ios++;
 }
 
 /* Presumed that fcport->rport_lock is held */
@@ -804,8 +781,17 @@ void qedf_ring_doorbell(struct qedf_rport *fcport)
            FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
 
        dbell.sq_prod = fcport->fw_sq_prod_idx;
+       /* wmb makes sure that the BD data is written before the producer
+        * index is updated, otherwise FW may read stale data from the BDs.
+        */
+       wmb();
+       barrier();
        writel(*(u32 *)&dbell, fcport->p_doorbell);
-       /* Make sure SQ index is updated so f/w prcesses requests in order */
+       /*
+        * Fence required to flush the write-combining buffer, since another
+        * CPU may write to the same doorbell address and data may be lost
+        * due to the relaxed ordering of the write-combined BAR.
+        */
        wmb();
 }
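The reworked doorbell path brackets the MMIO write with two barriers: one so the firmware can never observe a producer index that is ahead of the BD contents, and one to drain the write-combining buffer behind the doorbell BAR. A minimal sketch of that ordering, assuming only standard kernel wmb()/writel() semantics (the patch's extra barrier() is a compiler-only barrier that wmb() arguably already implies, so it is omitted here); the helper name and parameters are invented.

#include <linux/io.h>

static void ring_doorbell_sketch(void __iomem *db_addr, u32 db_data)
{
        /* 1. Make the new BDs/SQ entries visible before the producer index. */
        wmb();

        /* 2. Publish the producer index through the doorbell register. */
        writel(db_data, db_addr);

        /* 3. Flush the write-combining buffer so doorbells written by other
         *    CPUs to the same address cannot be merged or reordered with
         *    this one.
         */
        wmb();
}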
 
@@ -871,7 +857,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
        /* Initialize rest of io_req fields */
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
        sc_cmd->SCp.ptr = (char *)io_req;
-       io_req->use_slowpath = false; /* Assume fast SGL by default */
+       io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
 
        /* Record which cpu this request is associated with */
        io_req->cpu = smp_processor_id();
@@ -894,15 +880,24 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
        /* Build buffer descriptor list for firmware from sg list */
        if (qedf_build_bd_list_from_sg(io_req)) {
                QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
+               /* Release cmd will release io_req, but sc_cmd is assigned */
+               io_req->sc_cmd = NULL;
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EAGAIN;
        }
 
-       if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+       if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+           test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+               /* Release cmd will release io_req, but sc_cmd is assigned */
+               io_req->sc_cmd = NULL;
                kref_put(&io_req->refcount, qedf_release_cmd);
+               return -EINVAL;
        }
 
+       /* Record the LUN number for later use if we need it */
+       io_req->lun = (int)sc_cmd->device->lun;
+
        /* Obtain free SQE */
        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
@@ -913,6 +908,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
        if (!task_ctx) {
                QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
                           xid);
+               /* Release cmd will release io_req, but sc_cmd is assigned */
+               io_req->sc_cmd = NULL;
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }
@@ -922,6 +919,9 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
        /* Ring doorbell */
        qedf_ring_doorbell(fcport);
 
+       /* Set that command is with the firmware now */
+       set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
        if (qedf_io_tracing && io_req->sc_cmd)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
 
@@ -940,7 +940,17 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
        int rc = 0;
        int rval;
        unsigned long flags = 0;
-
+       int num_sgs = 0;
+
+       num_sgs = scsi_sg_count(sc_cmd);
+       if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Number of SG elements %d exceeds the hardware limitation of %d.\n",
+                        num_sgs, QEDF_MAX_BDS_PER_CMD);
+               sc_cmd->result = DID_ERROR;
+               sc_cmd->scsi_done(sc_cmd);
+               return 0;
+       }
 
        if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
            test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
@@ -980,7 +990,8 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
        /* rport and tgt are allocated together, so tgt should be non-NULL */
        fcport = (struct qedf_rport *)&rp[1];
 
-       if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+       if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+           test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                /*
                 * Session is not offloaded yet. Let SCSI-ml retry
                 * the command.
@@ -988,12 +999,16 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
                rc = SCSI_MLQUEUE_TARGET_BUSY;
                goto exit_qcmd;
        }
+
+       atomic_inc(&fcport->ios_to_queue);
+
        if (fcport->retry_delay_timestamp) {
                if (time_after(jiffies, fcport->retry_delay_timestamp)) {
                        fcport->retry_delay_timestamp = 0;
                } else {
                        /* If retry_delay timer is active, flow off the ML */
                        rc = SCSI_MLQUEUE_TARGET_BUSY;
+                       atomic_dec(&fcport->ios_to_queue);
                        goto exit_qcmd;
                }
        }
@@ -1001,6 +1016,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
        io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!io_req) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
+               atomic_dec(&fcport->ios_to_queue);
                goto exit_qcmd;
        }
 
@@ -1015,6 +1031,7 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
                rc = SCSI_MLQUEUE_HOST_BUSY;
        }
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
+       atomic_dec(&fcport->ios_to_queue);
 
 exit_qcmd:
        return rc;
@@ -1091,7 +1108,7 @@ static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
 {
-       u16 xid, rval;
+       u16 xid;
        struct e4_fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        struct fcoe_cqe_rsp_info *fcp_rsp;
@@ -1105,6 +1122,15 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        if (!cqe)
                return;
 
+       if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+           test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+           test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
+                        io_req->xid);
+               return;
+       }
+
        xid = io_req->xid;
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        sc_cmd = io_req->sc_cmd;
@@ -1121,6 +1147,12 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
                return;
        }
 
+       if (!sc_cmd->device) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Device for sc_cmd %p is NULL.\n", sc_cmd);
+               return;
+       }
+
        if (!sc_cmd->request) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
                    "sc_cmd=%p.\n", sc_cmd);
@@ -1135,6 +1167,19 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 
        fcport = io_req->fcport;
 
+       /*
+        * When flush is active, let the cmds be completed from the cleanup
+        * context
+        */
+       if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+           (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
+            sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                         "Dropping good completion xid=0x%x as fcport is flushing",
+                         io_req->xid);
+               return;
+       }
+
        qedf_parse_fcp_rsp(io_req, fcp_rsp);
 
        qedf_unmap_sg_list(qedf, io_req);
@@ -1152,25 +1197,18 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
            FCOE_CQE_RSP_INFO_FW_UNDERRUN);
        if (fw_residual_flag) {
-               QEDF_ERR(&(qedf->dbg_ctx),
-                   "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
-                   "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
-                   fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
-                   cqe->cqe_info.rsp_info.fw_residual);
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
+                        io_req->xid, fcp_rsp->rsp_flags.flags,
+                        io_req->fcp_resid,
+                        cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
+                        sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
 
                if (io_req->cdb_status == 0)
                        sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
                else
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
 
-               /* Abort the command since we did not get all the data */
-               init_completion(&io_req->abts_done);
-               rval = qedf_initiate_abts(io_req, true);
-               if (rval) {
-                       QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
-                       sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
-               }
-
                /*
                 * Set resid to the whole buffer length so we won't try to reuse
                 * any previous data.
@@ -1242,6 +1280,12 @@ out:
        if (qedf_io_tracing)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
 
+       /*
+        * We wait till the end of the function to clear the
+        * outstanding bit in case we need to send an abort
+        */
+       clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr =  NULL;
        sc_cmd->scsi_done(sc_cmd);
@@ -1259,6 +1303,19 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        if (!io_req)
                return;
 
+       if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                         "io_req:%p scsi_done handling already done\n",
+                         io_req);
+               return;
+       }
+
+       /*
+        * We will be done with this command after this call so clear the
+        * outstanding bit.
+        */
+       clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
        xid = io_req->xid;
        sc_cmd = io_req->sc_cmd;
 
@@ -1267,12 +1324,50 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
                return;
        }
 
+       if (!virt_addr_valid(sc_cmd)) {
+               QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
+               goto bad_scsi_ptr;
+       }
+
        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }
 
+       if (!sc_cmd->device) {
+               QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
+                        sc_cmd);
+               goto bad_scsi_ptr;
+       }
+
+       if (!virt_addr_valid(sc_cmd->device)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
+               goto bad_scsi_ptr;
+       }
+
+       if (!sc_cmd->sense_buffer) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
+                        sc_cmd);
+               goto bad_scsi_ptr;
+       }
+
+       if (!virt_addr_valid(sc_cmd->sense_buffer)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
+                        sc_cmd);
+               goto bad_scsi_ptr;
+       }
+
+       if (!sc_cmd->scsi_done) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
+                        sc_cmd);
+               goto bad_scsi_ptr;
+       }
+
        qedf_unmap_sg_list(qedf, io_req);
 
        sc_cmd->result = result << 16;
@@ -1299,6 +1394,15 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
+       return;
+
+bad_scsi_ptr:
+       /*
+        * Clear the io_req->sc_cmd backpointer so we don't try to process
+        * this again
+        */
+       io_req->sc_cmd = NULL;
+       kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
 }
 
 /*
@@ -1437,6 +1541,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
        struct qedf_ctx *qedf;
        struct qedf_cmd_mgr *cmd_mgr;
        int i, rc;
+       unsigned long flags;
+       int flush_cnt = 0;
+       int wait_cnt = 100;
+       int refcount = 0;
 
        if (!fcport)
                return;
@@ -1448,18 +1556,102 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
        }
 
        qedf = fcport->qedf;
+
+       if (!qedf) {
+               QEDF_ERR(NULL, "qedf is NULL.\n");
+               return;
+       }
+
+       /* Only wait for all commands to be queued in the Upload context */
+       if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+           (lun == -1)) {
+               while (atomic_read(&fcport->ios_to_queue)) {
+                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                 "Waiting for %d I/Os to be queued\n",
+                                 atomic_read(&fcport->ios_to_queue));
+                       if (wait_cnt == 0) {
+                               QEDF_ERR(NULL,
+                                        "%d I/O requests could not be queued\n",
+                                        atomic_read(&fcport->ios_to_queue));
+                       }
+                       msleep(20);
+                       wait_cnt--;
+               }
+       }
+
        cmd_mgr = qedf->cmd_mgr;
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
+                 atomic_read(&fcport->num_active_ios), fcport,
+                 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
+
+       mutex_lock(&qedf->flush_mutex);
+       if (lun == -1) {
+               set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+       } else {
+               set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+               fcport->lun_reset_lun = lun;
+       }
 
        for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
                io_req = &cmd_mgr->cmds[i];
 
                if (!io_req)
                        continue;
+               if (!io_req->fcport)
+                       continue;
+
+               spin_lock_irqsave(&cmd_mgr->lock, flags);
+
+               if (io_req->alloc) {
+                       if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+                               if (io_req->cmd_type == QEDF_SCSI_CMD)
+                                       QEDF_ERR(&qedf->dbg_ctx,
+                                                "Allocated but not queued, xid=0x%x\n",
+                                                io_req->xid);
+                       }
+                       spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+               } else {
+                       spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+                       continue;
+               }
+
                if (io_req->fcport != fcport)
                        continue;
-               if (io_req->cmd_type == QEDF_ELS) {
+
+               /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
+                * but RRQ is still pending.
+                * Workaround: Within qedf_send_rrq, we check if the fcport is
+                * NULL, and we drop the ref on the io_req to clean it up.
+                */
+               if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
+                       refcount = kref_read(&io_req->refcount);
+                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
+                                 io_req->xid, io_req->cmd_type, refcount);
+                       /* If RRQ work has been queued, try to cancel it and
+                        * free the io_req
+                        */
+                       if (atomic_read(&io_req->state) ==
+                           QEDFC_CMD_ST_RRQ_WAIT) {
+                               if (cancel_delayed_work_sync
+                                   (&io_req->rrq_work)) {
+                                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                                 "Putting reference for pending RRQ work xid=0x%x.\n",
+                                                 io_req->xid);
+                                       /* ID: 003 */
+                                       kref_put(&io_req->refcount,
+                                                qedf_release_cmd);
+                               }
+                       }
+                       continue;
+               }
+
+               /* Only consider flushing ELS during target reset */
+               if (io_req->cmd_type == QEDF_ELS &&
+                   lun == -1) {
                        rc = kref_get_unless_zero(&io_req->refcount);
                        if (!rc) {
                                QEDF_ERR(&(qedf->dbg_ctx),
@@ -1467,6 +1659,7 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
                                    io_req, io_req->xid);
                                continue;
                        }
+                       flush_cnt++;
                        qedf_flush_els_req(qedf, io_req);
                        /*
                         * Release the kref and go back to the top of the
@@ -1476,6 +1669,7 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
                }
 
                if (io_req->cmd_type == QEDF_ABTS) {
+                       /* ID: 004 */
                        rc = kref_get_unless_zero(&io_req->refcount);
                        if (!rc) {
                                QEDF_ERR(&(qedf->dbg_ctx),
@@ -1483,28 +1677,50 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
                                    io_req, io_req->xid);
                                continue;
                        }
+                       if (lun != -1 && io_req->lun != lun)
+                               goto free_cmd;
+
                        QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                            "Flushing abort xid=0x%x.\n", io_req->xid);
 
-                       clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
-
-                       if (io_req->sc_cmd) {
-                               if (io_req->return_scsi_cmd_on_abts)
-                                       qedf_scsi_done(qedf, io_req, DID_ERROR);
+                       if (cancel_delayed_work_sync(&io_req->rrq_work)) {
+                               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                         "Putting ref for cancelled RRQ work xid=0x%x.\n",
+                                         io_req->xid);
+                               kref_put(&io_req->refcount, qedf_release_cmd);
                        }
 
-                       /* Notify eh_abort handler that ABTS is complete */
-                       complete(&io_req->abts_done);
-                       kref_put(&io_req->refcount, qedf_release_cmd);
-
+                       if (cancel_delayed_work_sync(&io_req->timeout_work)) {
+                               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                         "Putting ref for cancelled tmo work xid=0x%x.\n",
+                                         io_req->xid);
+                               qedf_initiate_cleanup(io_req, true);
+                               /* Notify eh_abort handler that ABTS is
+                                * complete
+                                */
+                               complete(&io_req->abts_done);
+                               clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+                               /* ID: 002 */
+                               kref_put(&io_req->refcount, qedf_release_cmd);
+                       }
+                       flush_cnt++;
                        goto free_cmd;
                }
 
                if (!io_req->sc_cmd)
                        continue;
-               if (lun > 0) {
-                       if (io_req->sc_cmd->device->lun !=
-                           (u64)lun)
+               if (!io_req->sc_cmd->device) {
+                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                 "Device backpointer NULL for sc_cmd=%p.\n",
+                                 io_req->sc_cmd);
+                       /* Put reference for non-existent scsi_cmnd */
+                       io_req->sc_cmd = NULL;
+                       qedf_initiate_cleanup(io_req, false);
+                       kref_put(&io_req->refcount, qedf_release_cmd);
+                       continue;
+               }
+               if (lun > -1) {
+                       if (io_req->lun != lun)
                                continue;
                }
 
@@ -1518,15 +1734,65 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
                            "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
                        continue;
                }
+
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Cleanup xid=0x%x.\n", io_req->xid);
+               flush_cnt++;
 
                /* Cleanup task and return I/O mid-layer */
                qedf_initiate_cleanup(io_req, true);
 
 free_cmd:
-               kref_put(&io_req->refcount, qedf_release_cmd);
+               kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 004 */
+       }
+
+       wait_cnt = 60;
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                 "Flushed 0x%x I/Os, active=0x%x.\n",
+                 flush_cnt, atomic_read(&fcport->num_active_ios));
+       /* Only wait for all commands to complete in the Upload context */
+       if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
+           (lun == -1)) {
+               while (atomic_read(&fcport->num_active_ios)) {
+                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                                 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
+                                 flush_cnt,
+                                 atomic_read(&fcport->num_active_ios),
+                                 wait_cnt);
+                       if (wait_cnt == 0) {
+                               QEDF_ERR(&qedf->dbg_ctx,
+                                        "Flushed %d I/Os, active=%d.\n",
+                                        flush_cnt,
+                                        atomic_read(&fcport->num_active_ios));
+                               for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
+                                       io_req = &cmd_mgr->cmds[i];
+                                       if (io_req->fcport &&
+                                           io_req->fcport == fcport) {
+                                               refcount =
+                                               kref_read(&io_req->refcount);
+                                               set_bit(QEDF_CMD_DIRTY,
+                                                       &io_req->flags);
+                                               QEDF_ERR(&qedf->dbg_ctx,
+                                                        "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
+                                                        io_req, io_req->xid,
+                                                        io_req->flags,
+                                                        io_req->sc_cmd,
+                                                        refcount,
+                                                        io_req->cmd_type);
+                                       }
+                               }
+                               WARN_ON(1);
+                               break;
+                       }
+                       msleep(500);
+                       wait_cnt--;
+               }
        }
+
+       clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
+       clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
+       mutex_unlock(&qedf->flush_mutex);
 }
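A recurring step in the flush loop above is cancelling RRQ work that may still be queued and, when the cancel succeeds, dropping the reference that was taken when the work was queued (the "ID: 003" put). A minimal sketch of that pairing, assuming the driver's struct qedf_ioreq and qedf_release_cmd(); the helper name is invented.

static void flush_pending_rrq_sketch(struct qedf_ioreq *io_req)
{
        /*
         * A reference was taken when rrq_work was queued.  If the cancel
         * succeeds the work will never run, so this context must drop that
         * reference; if it fails, the work is running (or already ran) and
         * drops the reference itself.
         */
        if (cancel_delayed_work_sync(&io_req->rrq_work))
                kref_put(&io_req->refcount, qedf_release_cmd);
}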
 
 /*
@@ -1545,52 +1811,60 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
        unsigned long flags;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;
+       int refcount = 0;
 
        /* Sanity check qedf_rport before dereferencing any pointers */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "tgt not offloaded\n");
                rc = 1;
-               goto abts_err;
+               goto out;
        }
 
+       qedf = fcport->qedf;
        rdata = fcport->rdata;
+
+       if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+               QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
+               rc = 1;
+               goto out;
+       }
+
        r_a_tov = rdata->r_a_tov;
-       qedf = fcport->qedf;
        lport = qedf->lport;
 
        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
                rc = 1;
-               goto abts_err;
+               goto drop_rdata_kref;
        }
 
        if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
                QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
                rc = 1;
-               goto abts_err;
+               goto drop_rdata_kref;
        }
 
        /* Ensure room on SQ */
        if (!atomic_read(&fcport->free_sqes)) {
                QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
                rc = 1;
-               goto abts_err;
+               goto drop_rdata_kref;
        }
 
        if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
                rc = 1;
-               goto out;
+               goto drop_rdata_kref;
        }
 
        if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
            test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
            test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
-               QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
-                         "cleanup or abort processing or already "
-                         "completed.\n", io_req->xid);
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
+                        io_req->xid, io_req->sc_cmd);
                rc = 1;
-               goto out;
+               goto drop_rdata_kref;
        }
 
        kref_get(&io_req->refcount);
@@ -1599,18 +1873,17 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
        qedf->control_requests++;
        qedf->packet_aborts++;
 
-       /* Set the return CPU to be the same as the request one */
-       io_req->cpu = smp_processor_id();
-
        /* Set the command type to abort */
        io_req->cmd_type = QEDF_ABTS;
        io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
        set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
-                  "0x%x\n", xid);
+       refcount = kref_read(&io_req->refcount);
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+                 "ABTS io_req xid = 0x%x refcount=%d\n",
+                 xid, refcount);
 
-       qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
+       qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
 
        spin_lock_irqsave(&fcport->rport_lock, flags);
 
@@ -1624,13 +1897,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
-       return rc;
-abts_err:
-       /*
-        * If the ABTS task fails to queue then we need to cleanup the
-        * task at the firmware.
-        */
-       qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
+drop_rdata_kref:
+       kref_put(&rdata->kref, fc_rport_destroy);
 out:
        return rc;
 }
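qedf_initiate_abts() now pins the remote port data with kref_get_unless_zero() before dereferencing it and drops that reference on every exit path, instead of unwinding through the old abts_err label. A minimal sketch of the pattern; the helper name is invented, while fc_rport_priv, fc_rport_destroy() and the qedf types are the ones used above.

static int abts_with_rdata_ref_sketch(struct qedf_rport *fcport)
{
        struct fc_rport_priv *rdata = fcport->rdata;
        int rc = 1;

        /* Bail out early if the remote port is already being torn down. */
        if (!rdata || !kref_get_unless_zero(&rdata->kref))
                return rc;

        /* ... check link state, SQ space and command state, then queue
         * the ABTS here ...
         */
        rc = 0;

        /* Every successful get is paired with a put on the way out. */
        kref_put(&rdata->kref, fc_rport_destroy);
        return rc;
}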
@@ -1640,27 +1908,62 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 {
        uint32_t r_ctl;
        uint16_t xid;
+       int rc;
+       struct qedf_rport *fcport = io_req->fcport;
 
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
                   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
 
-       cancel_delayed_work(&io_req->timeout_work);
-
        xid = io_req->xid;
        r_ctl = cqe->cqe_info.abts_info.r_ctl;
 
+       /* This was added at a point when we were scheduling abts_compl &
+        * cleanup_compl on different CPUs and there was a possibility of
+        * the io_req being freed from the other context before we got here.
+        */
+       if (!fcport) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                         "Dropping ABTS completion xid=0x%x as fcport is NULL",
+                         io_req->xid);
+               return;
+       }
+
+       /*
+        * When flush is active, let the cmds be completed from the cleanup
+        * context
+        */
+       if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+           test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                         "Dropping ABTS completion xid=0x%x as fcport is flushing",
+                         io_req->xid);
+               return;
+       }
+
+       if (!cancel_delayed_work(&io_req->timeout_work)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Wasn't able to cancel abts timeout work.\n");
+       }
+
        switch (r_ctl) {
        case FC_RCTL_BA_ACC:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
                    "ABTS response - ACC Send RRQ after R_A_TOV\n");
                io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
+               rc = kref_get_unless_zero(&io_req->refcount);   /* ID: 003 */
+               if (!rc) {
+                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+                                 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
+                                 io_req->xid);
+                       return;
+               }
                /*
                 * Don't release this cmd yet. It will be released
                 * after we get the RRQ response
                 */
-               kref_get(&io_req->refcount);
                queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
                    msecs_to_jiffies(qedf->lport->r_a_tov));
+               atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
                break;
        /* For error cases let the cleanup return the command */
        case FC_RCTL_BA_RJT:
@@ -1802,6 +2105,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
        unsigned long flags;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;
+       int refcount = 0;
 
        fcport = io_req->fcport;
        if (!fcport) {
@@ -1823,36 +2127,45 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
        }
 
        if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
-           test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
+           test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
                          "cleanup processing or already completed.\n",
                          io_req->xid);
                return SUCCESS;
        }
+       set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
        /* Ensure room on SQ */
        if (!atomic_read(&fcport->free_sqes)) {
                QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+               /* Need to make sure we clear the flag since it was set */
+               clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
                return FAILED;
        }
 
+       if (io_req->cmd_type == QEDF_CLEANUP) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
+                        io_req->xid, io_req->cmd_type);
+               clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+               return SUCCESS;
+       }
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
-           io_req->xid);
+       refcount = kref_read(&io_req->refcount);
+
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
+                 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
+                 refcount, fcport, fcport->rdata->ids.port_id);
 
        /* Cleanup cmds re-use the same TID as the original I/O */
        xid = io_req->xid;
        io_req->cmd_type = QEDF_CLEANUP;
        io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
-       /* Set the return CPU to be the same as the request one */
-       io_req->cpu = smp_processor_id();
-
-       set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
-
        task = qedf_get_task_mem(&qedf->tasks, xid);
 
-       init_completion(&io_req->tm_done);
+       init_completion(&io_req->cleanup_done);
 
        spin_lock_irqsave(&fcport->rport_lock, flags);
 
@@ -1866,8 +2179,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
-       tmo = wait_for_completion_timeout(&io_req->tm_done,
-           QEDF_CLEANUP_TIMEOUT * HZ);
+       tmo = wait_for_completion_timeout(&io_req->cleanup_done,
+                                         QEDF_CLEANUP_TIMEOUT * HZ);
 
        if (!tmo) {
                rc = FAILED;
@@ -1880,6 +2193,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
                qedf_drain_request(qedf);
        }
 
+       /* If this is a task management command, complete it here; the
+        * reference will be decreased in qedf_execute_tmf()
+        */
+       if (io_req->tm_flags  == FCP_TMF_LUN_RESET ||
+           io_req->tm_flags == FCP_TMF_TGT_RESET) {
+               clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+               io_req->sc_cmd = NULL;
+               complete(&io_req->tm_done);
+       }
+
        if (io_req->sc_cmd) {
                if (io_req->return_scsi_cmd_on_abts)
                        qedf_scsi_done(qedf, io_req, DID_ERROR);
@@ -1902,7 +2225,7 @@ void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
        /* Complete so we can finish cleaning up the I/O */
-       complete(&io_req->tm_done);
+       complete(&io_req->cleanup_done);
 }
 
 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
@@ -1915,6 +2238,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        int rc = 0;
        uint16_t xid;
        int tmo = 0;
+       int lun = 0;
        unsigned long flags;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;
@@ -1924,20 +2248,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
                return FAILED;
        }
 
+       lun = (int)sc_cmd->device->lun;
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
                rc = FAILED;
-               return FAILED;
+               goto no_flush;
        }
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
-                  "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
-
        io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
        if (!io_req) {
                QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
                rc = -EAGAIN;
-               goto reset_tmf_err;
+               goto no_flush;
        }
 
        if (tm_flags == FCP_TMF_LUN_RESET)
@@ -1950,7 +2272,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        io_req->fcport = fcport;
        io_req->cmd_type = QEDF_TASK_MGMT_CMD;
 
-       /* Set the return CPU to be the same as the request one */
+       /* Record which cpu this request is associated with */
        io_req->cpu = smp_processor_id();
 
        /* Set TM flags */
@@ -1959,7 +2281,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        io_req->tm_flags = tm_flags;
 
        /* Default is to return a SCSI command when an error occurs */
-       io_req->return_scsi_cmd_on_abts = true;
+       io_req->return_scsi_cmd_on_abts = false;
 
        /* Obtain exchange id */
        xid = io_req->xid;
@@ -1983,12 +2305,16 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
+       set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
        tmo = wait_for_completion_timeout(&io_req->tm_done,
            QEDF_TM_TIMEOUT * HZ);
 
        if (!tmo) {
                rc = FAILED;
                QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
+               /* Clear outstanding bit since command timed out */
+               clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+               io_req->sc_cmd = NULL;
        } else {
                /* Check TMF response code */
                if (io_req->fcp_rsp_code == 0)
@@ -1996,14 +2322,25 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
                else
                        rc = FAILED;
        }
+       /*
+        * Double check that fcport has not gone into an uploading state before
+        * executing the command flush for the LUN/target.
+        */
+       if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "fcport is uploading, not executing flush.\n");
+               goto no_flush;
+       }
+       /* We do not need this io_req any more */
+       kref_put(&io_req->refcount, qedf_release_cmd);
+
 
        if (tm_flags == FCP_TMF_LUN_RESET)
-               qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
+               qedf_flush_active_ios(fcport, lun);
        else
                qedf_flush_active_ios(fcport, -1);
 
-       kref_put(&io_req->refcount, qedf_release_cmd);
-
+no_flush:
        if (rc != SUCCESS) {
                QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
                rc = FAILED;
@@ -2011,7 +2348,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
                QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
                rc = SUCCESS;
        }
-reset_tmf_err:
        return rc;
 }
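qedf_execute_tmf() keeps the issue-and-wait shape seen above: mark the TMF outstanding, wait on tm_done with a timeout, treat fcp_rsp_code 0 as success, and only then flush the affected LUN or target (skipping the flush when the fcport is uploading). A minimal sketch of the wait step, assuming the driver's io_req fields and QEDF_TM_TIMEOUT (seconds); the helper name is invented.

static int tmf_wait_sketch(struct qedf_ioreq *io_req)
{
        unsigned long tmo;

        /* ... TMF task built, doorbell rung, QEDF_CMD_OUTSTANDING set ... */

        tmo = wait_for_completion_timeout(&io_req->tm_done,
                                          QEDF_TM_TIMEOUT * HZ);
        if (!tmo) {
                /* No response from the firmware: forget the command. */
                clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
                io_req->sc_cmd = NULL;
                return FAILED;
        }

        /* fcp_rsp_code of 0 means the target accepted the TMF. */
        return io_req->fcp_rsp_code == 0 ? SUCCESS : FAILED;
}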
 
@@ -2021,26 +2357,65 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
        struct qedf_ctx *qedf;
-       struct fc_lport *lport;
+       struct fc_lport *lport = shost_priv(sc_cmd->device->host);
        int rc = SUCCESS;
        int rval;
+       struct qedf_ioreq *io_req = NULL;
+       int ref_cnt = 0;
+       struct fc_rport_priv *rdata = fcport->rdata;
 
-       rval = fc_remote_port_chkready(rport);
+       QEDF_ERR(NULL,
+                "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
+                tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
+                (int)sc_cmd->device->lun);
+
+       if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+               QEDF_ERR(NULL, "stale rport\n");
+               return FAILED;
+       }
+
+       QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
+                (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
+                "LUN RESET");
+
+       if (sc_cmd->SCp.ptr) {
+               io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+               ref_cnt = kref_read(&io_req->refcount);
+               QEDF_ERR(NULL,
+                        "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
+                        io_req, io_req->xid, ref_cnt);
+       }
 
+       rval = fc_remote_port_chkready(rport);
        if (rval) {
                QEDF_ERR(NULL, "device_reset rport not ready\n");
                rc = FAILED;
                goto tmf_err;
        }
 
-       if (fcport == NULL) {
+       rc = fc_block_scsi_eh(sc_cmd);
+       if (rc)
+               goto tmf_err;
+
+       if (!fcport) {
                QEDF_ERR(NULL, "device_reset: rport is NULL\n");
                rc = FAILED;
                goto tmf_err;
        }
 
        qedf = fcport->qedf;
-       lport = qedf->lport;
+
+       if (!qedf) {
+               QEDF_ERR(NULL, "qedf is NULL.\n");
+               rc = FAILED;
+               goto tmf_err;
+       }
+
+       if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
+               rc = SUCCESS;
+               goto tmf_err;
+       }
 
        if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
            test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
@@ -2054,9 +2429,22 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
                goto tmf_err;
        }
 
+       if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+               if (!fcport->rdata)
+                       QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
+                                fcport);
+               else
+                       QEDF_ERR(&qedf->dbg_ctx,
+                                "fcport %p port_id=%06x is uploading.\n",
+                                fcport, fcport->rdata->ids.port_id);
+               rc = FAILED;
+               goto tmf_err;
+       }
+
        rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
 
 tmf_err:
+       kref_put(&rdata->kref, fc_rport_destroy);
        return rc;
 }
 
@@ -2065,6 +2453,8 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 {
        struct fcoe_cqe_rsp_info *fcp_rsp;
 
+       clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+
        fcp_rsp = &cqe->cqe_info.rsp_info;
        qedf_parse_fcp_rsp(io_req, fcp_rsp);
 
index 9f9431a..5b07235 100644 (file)
@@ -124,21 +124,24 @@ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
 {
        int rc;
 
-       if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
-               QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
-               return  false;
-       }
-
        while (qedf->fipvlan_retries--) {
+               /* Catch the link going down during FIP VLAN retries */
+               if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+                       QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
+                       return false;
+               }
+
                if (qedf->vlan_id > 0)
                        return true;
+
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                           "Retry %d.\n", qedf->fipvlan_retries);
                init_completion(&qedf->fipvlan_compl);
                qedf_fcoe_send_vlan_req(qedf);
                rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
                    1 * HZ);
-               if (rc > 0) {
+               if (rc > 0 &&
+                   (atomic_read(&qedf->link_state) == QEDF_LINK_UP)) {
                        fcoe_ctlr_link_up(&qedf->ctlr);
                        return true;
                }
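The retry loop above now re-checks the link on every pass and only counts a completed VLAN request as success if the link is still up when the wait returns. A condensed sketch of that control flow; the helper name is invented, while the qedf fields and qedf_fcoe_send_vlan_req() are the driver's own.

static bool fipvlan_retry_sketch(struct qedf_ctx *qedf, int retries)
{
        while (retries--) {
                /* A link-down event aborts the retries immediately. */
                if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN)
                        return false;

                /* VLAN already discovered on an earlier pass. */
                if (qedf->vlan_id > 0)
                        return true;

                init_completion(&qedf->fipvlan_compl);
                qedf_fcoe_send_vlan_req(qedf);

                /* A response only counts if the link is still up. */
                if (wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ) &&
                    atomic_read(&qedf->link_state) == QEDF_LINK_UP)
                        return true;
        }
        return false;
}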
@@ -153,12 +156,19 @@ static void qedf_handle_link_update(struct work_struct *work)
            container_of(work, struct qedf_ctx, link_update.work);
        int rc;
 
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
+                 atomic_read(&qedf->link_state));
 
        if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
                rc = qedf_initiate_fipvlan_req(qedf);
                if (rc)
                        return;
+
+               if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+                       qedf->vlan_id = 0;
+                       return;
+               }
+
                /*
                 * If we get here then we never received a response to our
                 * FIP VLAN request, so set the vlan_id to the default and
@@ -185,7 +195,9 @@ static void qedf_handle_link_update(struct work_struct *work)
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                    "Calling fcoe_ctlr_link_down().\n");
                fcoe_ctlr_link_down(&qedf->ctlr);
-               qedf_wait_for_upload(qedf);
+               if (qedf_wait_for_upload(qedf) == false)
+                       QEDF_ERR(&qedf->dbg_ctx,
+                                "Could not upload all sessions.\n");
                /* Reset the number of FIP VLAN retries */
                qedf->fipvlan_retries = qedf_fipvlan_retries;
        }
@@ -615,50 +627,113 @@ static struct scsi_transport_template *qedf_fc_vport_transport_template;
 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-       struct fc_rport_libfc_priv *rp = rport->dd_data;
-       struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        struct qedf_ioreq *io_req;
+       struct fc_rport_libfc_priv *rp = rport->dd_data;
+       struct fc_rport_priv *rdata;
+       struct qedf_rport *fcport = NULL;
        int rc = FAILED;
+       int wait_count = 100;
+       int refcount = 0;
        int rval;
-
-       if (fc_remote_port_chkready(rport)) {
-               QEDF_ERR(NULL, "rport not ready\n");
-               goto out;
-       }
+       int got_ref = 0;
 
        lport = shost_priv(sc_cmd->device->host);
        qedf = (struct qedf_ctx *)lport_priv(lport);
 
-       if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
-               QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
+       /* rport and tgt are allocated together, so tgt should be non-NULL */
+       fcport = (struct qedf_rport *)&rp[1];
+       rdata = fcport->rdata;
+       if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+               QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
+               rc = 1;
                goto out;
        }
 
-       fcport = (struct qedf_rport *)&rp[1];
 
        io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
        if (!io_req) {
-               QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
+                        sc_cmd, sc_cmd->cmnd[0],
+                        rdata->ids.port_id);
                rc = SUCCESS;
-               goto out;
+               goto drop_rdata_kref;
+       }
+
+       rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
+       if (rval)
+               got_ref = 1;
+
+       /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
+       if (!rval || io_req->sc_cmd != sc_cmd) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
+                        io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
+
+               goto drop_rdata_kref;
+       }
+
+       if (fc_remote_port_chkready(rport)) {
+               refcount = kref_read(&io_req->refcount);
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
+                        io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
+                        refcount, rdata->ids.port_id);
+
+               goto drop_rdata_kref;
+       }
+
+       rc = fc_block_scsi_eh(sc_cmd);
+       if (rc)
+               goto drop_rdata_kref;
+
+       if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "Connection uploading, xid=0x%x., port_id=%06x\n",
+                        io_req->xid, rdata->ids.port_id);
+               while (io_req->sc_cmd && (wait_count != 0)) {
+                       msleep(100);
+                       wait_count--;
+               }
+               if (wait_count) {
+                       QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
+                       rc = SUCCESS;
+               } else {
+                       QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
+                       rc = FAILED;
+               }
+               goto drop_rdata_kref;
        }
 
-       QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
-                 "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
+       if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+               QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
+               goto drop_rdata_kref;
+       }
+
+       QEDF_ERR(&qedf->dbg_ctx,
+                "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
+                io_req, sc_cmd, io_req->xid, io_req->fp_idx,
+                rdata->ids.port_id);
 
        if (qedf->stop_io_on_error) {
                qedf_stop_all_io(qedf);
                rc = SUCCESS;
-               goto out;
+               goto drop_rdata_kref;
        }
 
        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval) {
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
-               goto out;
+               /*
+                * If we fail to queue the ABTS then return this command to
+                * the SCSI layer as it will own and free the xid
+                */
+               rc = SUCCESS;
+               qedf_scsi_done(qedf, io_req, DID_ERROR);
+               goto drop_rdata_kref;
        }
 
        wait_for_completion(&io_req->abts_done);
@@ -684,38 +759,68 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
                QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
                          io_req->xid);
 
+drop_rdata_kref:
+       kref_put(&rdata->kref, fc_rport_destroy);
 out:
+       if (got_ref)
+               kref_put(&io_req->refcount, qedf_release_cmd);
        return rc;
 }
 
 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
 {
-       QEDF_ERR(NULL, "TARGET RESET Issued...");
+       QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
+                sc_cmd->device->host->host_no, sc_cmd->device->id,
+                sc_cmd->device->lun);
        return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
 }
 
 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-       QEDF_ERR(NULL, "LUN RESET Issued...\n");
+       QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
+                sc_cmd->device->host->host_no, sc_cmd->device->id,
+                sc_cmd->device->lun);
        return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-void qedf_wait_for_upload(struct qedf_ctx *qedf)
+bool qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
-       while (1) {
+       struct qedf_rport *fcport = NULL;
+       int wait_cnt = 120;
+
+       while (wait_cnt--) {
                if (atomic_read(&qedf->num_offloads))
-                       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-                           "Waiting for all uploads to complete.\n");
+                       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                                 "Waiting for all uploads to complete num_offloads = 0x%x.\n",
+                                 atomic_read(&qedf->num_offloads));
                else
-                       break;
+                       return true;
                msleep(500);
        }
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+               if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+                                      &fcport->flags)) {
+                       if (fcport->rdata)
+                               QEDF_ERR(&qedf->dbg_ctx,
+                                        "Waiting for fcport %p portid=%06x.\n",
+                                        fcport, fcport->rdata->ids.port_id);
+                       else
+                               QEDF_ERR(&qedf->dbg_ctx,
+                                        "Waiting for fcport %p.\n", fcport);
+               }
+       }
+       rcu_read_unlock();
+       return false;
+
 }
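qedf_wait_for_upload() now polls num_offloads a bounded number of times (120 passes of 500 ms, roughly a minute) and reports failure instead of spinning forever. A minimal sketch of that bounded poll; the helper name is invented and a bare atomic_t stands in for qedf->num_offloads.

static bool wait_for_drain_sketch(atomic_t *num_offloads)
{
        int wait_cnt = 120;

        while (wait_cnt--) {
                if (!atomic_read(num_offloads))
                        return true;    /* all sessions uploaded */
                msleep(500);
        }

        /* Caller logs whichever fcports are still stuck and carries on. */
        return false;
}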
 
 /* Performs soft reset of qedf_ctx by simulating a link down/up */
-static void qedf_ctx_soft_reset(struct fc_lport *lport)
+void qedf_ctx_soft_reset(struct fc_lport *lport)
 {
        struct qedf_ctx *qedf;
+       struct qed_link_output if_link;
 
        if (lport->vport) {
                QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
@@ -726,11 +831,32 @@ static void qedf_ctx_soft_reset(struct fc_lport *lport)
 
        /* For host reset, essentially do a soft link up/down */
        atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                 "Queuing link down work.\n");
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);
-       qedf_wait_for_upload(qedf);
+
+       if (qedf_wait_for_upload(qedf) == false) {
+               QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
+               WARN_ON(atomic_read(&qedf->num_offloads));
+       }
+
+       /* Before setting link up query physical link state */
+       qed_ops->common->get_link(qedf->cdev, &if_link);
+       /* Bail if the physical link is not up */
+       if (!if_link.link_up) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                         "Physical link is not up.\n");
+               return;
+       }
+       /* Flush and wait to make sure link down is processed */
+       flush_delayed_work(&qedf->link_update);
+       msleep(500);
+
        atomic_set(&qedf->link_state, QEDF_LINK_UP);
        qedf->vlan_id  = 0;
+       QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+                 "Queue link up work.\n");
        queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
            0);
 }
@@ -740,22 +866,6 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
 {
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
-       struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-       struct fc_rport_libfc_priv *rp = rport->dd_data;
-       struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
-       int rval;
-
-       rval = fc_remote_port_chkready(rport);
-
-       if (rval) {
-               QEDF_ERR(NULL, "device_reset rport not ready\n");
-               return FAILED;
-       }
-
-       if (fcport == NULL) {
-               QEDF_ERR(NULL, "device_reset: rport is NULL\n");
-               return FAILED;
-       }
 
        lport = shost_priv(sc_cmd->device->host);
        qedf = lport_priv(lport);
@@ -907,8 +1017,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
                    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
                kfree_skb(skb);
                rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
-               if (rdata)
+               if (rdata) {
                        rdata->retries = lport->max_rport_retry_count;
+                       kref_put(&rdata->kref, fc_rport_destroy);
+               }
                return -EINVAL;
        }
        /* End NPIV filtering */
@@ -1031,7 +1143,12 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
        if (qedf_dump_frames)
                print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
                    1, skb->data, skb->len, false);
-       qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+       rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+       if (rc) {
+               QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+               kfree_skb(skb);
+               return rc;
+       }
 
        return 0;
 }
@@ -1224,6 +1341,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
        struct qedf_rport *fcport)
 {
+       struct fc_rport_priv *rdata = fcport->rdata;
+
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
            fcport->rdata->ids.port_id);
 
@@ -1235,6 +1354,7 @@ static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
        qedf_free_sq(qedf, fcport);
        fcport->rdata = NULL;
        fcport->qedf = NULL;
+       kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1310,6 +1430,8 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
                        break;
                }
 
+               /* Initial reference held on entry, so this can't fail */
+               kref_get(&rdata->kref);
                fcport->rdata = rdata;
                fcport->rport = rport;
 
@@ -1369,11 +1491,15 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
                 */
                fcport = (struct qedf_rport *)&rp[1];
 
+               spin_lock_irqsave(&fcport->rport_lock, flags);
                /* Only free this fcport if it is offloaded already */
-               if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-                       set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+               if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
+                   !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+                   &fcport->flags)) {
+                       set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+                               &fcport->flags);
+                       spin_unlock_irqrestore(&fcport->rport_lock, flags);
                        qedf_cleanup_fcport(qedf, fcport);
-
                        /*
                         * Remove fcport to list of qedf_ctx list of offloaded
                         * ports
@@ -1385,8 +1511,9 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
                        clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
                            &fcport->flags);
                        atomic_dec(&qedf->num_offloads);
+               } else {
+                       spin_unlock_irqrestore(&fcport->rport_lock, flags);
                }
-
                break;
 
        case RPORT_EV_NONE:
@@ -1498,11 +1625,15 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
        fc_set_wwnn(lport, qedf->wwnn);
        fc_set_wwpn(lport, qedf->wwpn);
 
-       fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+       if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
+               QEDF_ERR(&qedf->dbg_ctx,
+                        "fcoe_libfc_config failed.\n");
+               return -ENOMEM;
+       }
 
        /* Allocate the exchange manager */
-       fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
-           qedf->max_els_xid, NULL);
+       fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
+                         0xfffe, NULL);
 
        if (fc_lport_init_stats(lport))
                return -ENOMEM;
@@ -1625,14 +1756,15 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
        vport_qedf->wwpn = vn_port->wwpn;
 
        vn_port->host->transportt = qedf_fc_vport_transport_template;
-       vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+       vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
        vn_port->host->max_lun = qedf_max_lun;
        vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
        vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
 
        rc = scsi_add_host(vn_port->host, &vport->dev);
        if (rc) {
-               QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
+               QEDF_WARN(&base_qedf->dbg_ctx,
+                         "Error adding Scsi_Host rc=0x%x.\n", rc);
                goto err2;
        }
 
@@ -2155,7 +2287,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
            QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
        qedf->int_info.used_cnt = 1;
 
-       QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+       QEDF_ERR(&qedf->dbg_ctx,
+                "Cannot load driver due to a lack of MSI-X vectors.\n");
        return -EINVAL;
 }
 
@@ -2356,6 +2489,13 @@ static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
        struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
        struct qedf_skb_work *skb_work;
 
+       if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+                         "Dropping frame as link state is down.\n");
+               kfree_skb(skb);
+               return 0;
+       }
+
        skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
        if (!skb_work) {
                QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
@@ -2990,6 +3130,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
                        goto err0;
                }
 
+               fc_disc_init(lport);
+
                /* Initialize qedf_ctx */
                qedf = lport_priv(lport);
                qedf->lport = lport;
@@ -3005,6 +3147,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
                pci_set_drvdata(pdev, qedf);
                init_completion(&qedf->fipvlan_compl);
                mutex_init(&qedf->stats_mutex);
+               mutex_init(&qedf->flush_mutex);
 
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
                   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3181,11 +3324,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
        sprintf(host_buf, "host_%d", host->host_no);
        qed_ops->common->set_name(qedf->cdev, host_buf);
 
-
-       /* Set xid max values */
-       qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
-       qedf->max_els_xid = QEDF_MAX_ELS_XID;
-
        /* Allocate cmd mgr */
        qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
        if (!qedf->cmd_mgr) {
@@ -3196,12 +3334,15 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
        if (mode != QEDF_MODE_RECOVERY) {
                host->transportt = qedf_fc_transport_template;
-               host->can_queue = QEDF_MAX_ELS_XID;
                host->max_lun = qedf_max_lun;
                host->max_cmd_len = QEDF_MAX_CDB_LEN;
+               host->can_queue = FCOE_PARAMS_NUM_TASKS;
                rc = scsi_add_host(host, &pdev->dev);
-               if (rc)
+               if (rc) {
+                       QEDF_WARN(&qedf->dbg_ctx,
+                                 "Error adding Scsi_Host rc=0x%x.\n", rc);
                        goto err6;
+               }
        }
 
        memset(&params, 0, sizeof(params));
@@ -3377,7 +3518,9 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
                fcoe_ctlr_link_down(&qedf->ctlr);
        else
                fc_fabric_logoff(qedf->lport);
-       qedf_wait_for_upload(qedf);
+
+       if (qedf_wait_for_upload(qedf) == false)
+               QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
 
 #ifdef CONFIG_DEBUG_FS
        qedf_dbg_host_exit(&(qedf->dbg_ctx));
index 9455faa..334a9cd 100644
@@ -7,9 +7,9 @@
  *  this source tree.
  */
 
-#define QEDF_VERSION           "8.33.16.20"
+#define QEDF_VERSION           "8.37.25.20"
 #define QEDF_DRIVER_MAJOR_VER          8
-#define QEDF_DRIVER_MINOR_VER          33
-#define QEDF_DRIVER_REV_VER            16
+#define QEDF_DRIVER_MINOR_VER          37
+#define QEDF_DRIVER_REV_VER            25
 #define QEDF_DRIVER_ENG_VER            20
 
index f8f8677..bd81bbe 100644
@@ -155,12 +155,10 @@ static void qedi_tmf_resp_work(struct work_struct *work)
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
        struct iscsi_tm_rsp *resp_hdr_ptr;
-       struct iscsi_cls_session *cls_sess;
        int rval = 0;
 
        set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
        resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
-       cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
 
        iscsi_block_session(session->cls_session);
        rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
@@ -1366,7 +1364,6 @@ static void qedi_tmf_work(struct work_struct *work)
        struct qedi_conn *qedi_conn = qedi_cmd->conn;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
-       struct iscsi_cls_session *cls_sess;
        struct qedi_work_map *list_work = NULL;
        struct iscsi_task *mtask;
        struct qedi_cmd *cmd;
@@ -1377,7 +1374,6 @@ static void qedi_tmf_work(struct work_struct *work)
 
        mtask = qedi_cmd->task;
        tmf_hdr = (struct iscsi_tm *)mtask->hdr;
-       cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
        set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
 
        ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
index 6d6d601..615cea4 100644
@@ -579,7 +579,7 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
        rval = qedi_iscsi_update_conn(qedi, qedi_conn);
        if (rval) {
                iscsi_conn_printk(KERN_ALERT, conn,
-                                 "conn_start: FW oflload conn failed.\n");
+                                 "conn_start: FW offload conn failed.\n");
                rval = -EINVAL;
                goto start_err;
        }
@@ -590,7 +590,7 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
        rval = iscsi_conn_start(cls_conn);
        if (rval) {
                iscsi_conn_printk(KERN_ALERT, conn,
-                                 "iscsi_conn_start: FW oflload conn failed!!\n");
+                                 "iscsi_conn_start: FW offload conn failed!!\n");
        }
 
 start_err:
@@ -993,13 +993,17 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
        struct iscsi_conn *conn = NULL;
        struct qedi_ctx *qedi;
        int ret = 0;
-       int wait_delay = 20 * HZ;
+       int wait_delay;
        int abrt_conn = 0;
        int count = 10;
 
+       wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
        qedi_ep = ep->dd_data;
        qedi = qedi_ep->qedi;
 
+       if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+               goto ep_exit_recover;
+
        flush_work(&qedi_ep->offload_work);
 
        if (qedi_ep->conn) {
@@ -1163,7 +1167,7 @@ static void qedi_offload_work(struct work_struct *work)
        struct qedi_endpoint *qedi_ep =
                container_of(work, struct qedi_endpoint, offload_work);
        struct qedi_ctx *qedi;
-       int wait_delay = 20 * HZ;
+       int wait_delay = 5 * HZ;
        int ret;
 
        qedi = qedi_ep->qedi;
index f928c4d..8d560c5 100644
@@ -29,24 +29,27 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
        if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
                return 0;
 
+       mutex_lock(&ha->optrom_mutex);
        if (IS_P3P_TYPE(ha)) {
                if (off < ha->md_template_size) {
                        rval = memory_read_from_buffer(buf, count,
                            &off, ha->md_tmplt_hdr, ha->md_template_size);
-                       return rval;
+               } else {
+                       off -= ha->md_template_size;
+                       rval = memory_read_from_buffer(buf, count,
+                           &off, ha->md_dump, ha->md_dump_size);
                }
-               off -= ha->md_template_size;
-               rval = memory_read_from_buffer(buf, count,
-                   &off, ha->md_dump, ha->md_dump_size);
-               return rval;
-       } else if (ha->mctp_dumped && ha->mctp_dump_reading)
-               return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+       } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
+               rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
                    MCTP_DUMP_SIZE);
-       else if (ha->fw_dump_reading)
-               return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+       } else if (ha->fw_dump_reading) {
+               rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
                                        ha->fw_dump_len);
-       else
-               return 0;
+       } else {
+               rval = 0;
+       }
+       mutex_unlock(&ha->optrom_mutex);
+       return rval;
 }
 
 static ssize_t
@@ -154,6 +157,8 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
        struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
            struct device, kobj)));
        struct qla_hw_data *ha = vha->hw;
+       uint32_t faddr;
+       struct active_regions active_regions = { };
 
        if (!capable(CAP_SYS_ADMIN))
                return 0;
@@ -164,11 +169,21 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
                return -EAGAIN;
        }
 
-       if (IS_NOCACHE_VPD_TYPE(ha))
-               ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
-                   ha->nvram_size);
+       if (!IS_NOCACHE_VPD_TYPE(ha)) {
+               mutex_unlock(&ha->optrom_mutex);
+               goto skip;
+       }
+
+       faddr = ha->flt_region_nvram;
+       if (IS_QLA28XX(ha)) {
+               if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+                       faddr = ha->flt_region_nvram_sec;
+       }
+       ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
+
        mutex_unlock(&ha->optrom_mutex);
 
+skip:
        return memory_read_from_buffer(buf, count, &off, ha->nvram,
                                        ha->nvram_size);
 }
@@ -223,9 +238,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
        }
 
        /* Write NVRAM. */
-       ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
-       ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
-            count);
+       ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
+       ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
+           count);
        mutex_unlock(&ha->optrom_mutex);
 
        ql_dbg(ql_dbg_user, vha, 0x7060,
@@ -364,7 +379,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                }
 
                ha->optrom_region_start = start;
-               ha->optrom_region_size = start + size;
+               ha->optrom_region_size = size;
 
                ha->optrom_state = QLA_SREADING;
                ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -418,6 +433,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                 *      0x000000 -> 0x07ffff -- Boot code.
                 *      0x080000 -> 0x0fffff -- Firmware.
                 *      0x120000 -> 0x12ffff -- VPD and HBA parameters.
+                *
+                * > ISP25xx type boards:
+                *
+                *      None -- should go through BSG.
                 */
                valid = 0;
                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
@@ -425,9 +444,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                else if (start == (ha->flt_region_boot * 4) ||
                    start == (ha->flt_region_fw * 4))
                        valid = 1;
-               else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
-                       || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
-                       || IS_QLA27XX(ha))
+               else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                        valid = 1;
                if (!valid) {
                        ql_log(ql_log_warn, vha, 0x7065,
@@ -437,7 +454,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                }
 
                ha->optrom_region_start = start;
-               ha->optrom_region_size = start + size;
+               ha->optrom_region_size = size;
 
                ha->optrom_state = QLA_SWRITING;
                ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -504,6 +521,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
            struct device, kobj)));
        struct qla_hw_data *ha = vha->hw;
        uint32_t faddr;
+       struct active_regions active_regions = { };
 
        if (unlikely(pci_channel_offline(ha->pdev)))
                return -EAGAIN;
@@ -511,22 +529,33 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return -EINVAL;
 
-       if (IS_NOCACHE_VPD_TYPE(ha)) {
-               faddr = ha->flt_region_vpd << 2;
+       if (IS_NOCACHE_VPD_TYPE(ha))
+               goto skip;
+
+       faddr = ha->flt_region_vpd << 2;
 
-               if (IS_QLA27XX(ha) &&
-                   qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+       if (IS_QLA28XX(ha)) {
+               qla28xx_get_aux_images(vha, &active_regions);
+               if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
                        faddr = ha->flt_region_vpd_sec << 2;
 
-               mutex_lock(&ha->optrom_mutex);
-               if (qla2x00_chip_is_down(vha)) {
-                       mutex_unlock(&ha->optrom_mutex);
-                       return -EAGAIN;
-               }
-               ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
-                   ha->vpd_size);
+               ql_dbg(ql_dbg_init, vha, 0x7070,
+                   "Loading %s nvram image.\n",
+                   active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+                   "primary" : "secondary");
+       }
+
+       mutex_lock(&ha->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
                mutex_unlock(&ha->optrom_mutex);
+               return -EAGAIN;
        }
+
+       ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
+       mutex_unlock(&ha->optrom_mutex);
+
+       ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
+skip:
        return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
 }
 
@@ -563,8 +592,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
        }
 
        /* Write NVRAM. */
-       ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
-       ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+       ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
+       ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
 
        /* Update flash version information for 4Gb & above. */
        if (!IS_FWI2_CAPABLE(ha)) {
@@ -645,6 +674,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
        int type;
        uint32_t idc_control;
        uint8_t *tmp_data = NULL;
+
        if (off != 0)
                return -EINVAL;
 
@@ -682,7 +712,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                ql_log(ql_log_info, vha, 0x706f,
                    "Issuing MPI reset.\n");
 
-               if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        uint32_t idc_control;
 
                        qla83xx_idc_lock(vha, 0);
@@ -858,7 +888,7 @@ do_read:
                count = 0;
        }
 
-       count = actual_size > count ? count: actual_size;
+       count = actual_size > count ? count : actual_size;
        memcpy(buf, ha->xgmac_data, count);
 
        return count;
@@ -934,7 +964,7 @@ static struct bin_attribute sysfs_dcbx_tlv_attr = {
 static struct sysfs_entry {
        char *name;
        struct bin_attribute *attr;
-       int is4GBp_only;
+       int type;
 } bin_file_entries[] = {
        { "fw_dump", &sysfs_fw_dump_attr, },
        { "nvram", &sysfs_nvram_attr, },
@@ -957,11 +987,11 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
        int ret;
 
        for (iter = bin_file_entries; iter->name; iter++) {
-               if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
+               if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
                        continue;
-               if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
+               if (iter->type == 2 && !IS_QLA25XX(vha->hw))
                        continue;
-               if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+               if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
                        continue;
 
                ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -985,13 +1015,14 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
        struct qla_hw_data *ha = vha->hw;
 
        for (iter = bin_file_entries; iter->name; iter++) {
-               if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+               if (iter->type && !IS_FWI2_CAPABLE(ha))
                        continue;
-               if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
+               if (iter->type == 2 && !IS_QLA25XX(ha))
                        continue;
-               if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+               if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
                        continue;
-               if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+               if (iter->type == 0x27 &&
+                   (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
                        continue;
 
                sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -1049,6 +1080,7 @@ qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
                      char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
        return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
 }
 
@@ -1082,6 +1114,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
        return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
 }
 
@@ -1294,6 +1327,7 @@ qla2x00_optrom_bios_version_show(struct device *dev,
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
+
        return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
            ha->bios_revision[0]);
 }
@@ -1304,6 +1338,7 @@ qla2x00_optrom_efi_version_show(struct device *dev,
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
+
        return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
            ha->efi_revision[0]);
 }
@@ -1314,6 +1349,7 @@ qla2x00_optrom_fcode_version_show(struct device *dev,
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
+
        return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
            ha->fcode_revision[0]);
 }
@@ -1324,6 +1360,7 @@ qla2x00_optrom_fw_version_show(struct device *dev,
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
+
        return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
            ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
            ha->fw_revision[3]);
@@ -1336,7 +1373,8 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return scnprintf(buf, PAGE_SIZE, "\n");
 
        return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1349,6 +1387,7 @@ qla2x00_total_isp_aborts_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
        return scnprintf(buf, PAGE_SIZE, "%d\n",
            vha->qla_stats.total_isp_aborts);
 }
@@ -1358,23 +1397,39 @@ qla24xx_84xx_fw_version_show(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
        int rval = QLA_SUCCESS;
-       uint16_t status[2] = {0, 0};
+       uint16_t status[2] = { 0 };
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
        if (!IS_QLA84XX(ha))
                return scnprintf(buf, PAGE_SIZE, "\n");
 
-       if (ha->cs84xx->op_fw_version == 0)
+       if (!ha->cs84xx->op_fw_version) {
                rval = qla84xx_verify_chip(vha, status);
 
-       if ((rval == QLA_SUCCESS) && (status[0] == 0))
-               return scnprintf(buf, PAGE_SIZE, "%u\n",
-                       (uint32_t)ha->cs84xx->op_fw_version);
+               if (!rval && !status[0])
+                       return scnprintf(buf, PAGE_SIZE, "%u\n",
+                           (uint32_t)ha->cs84xx->op_fw_version);
+       }
 
        return scnprintf(buf, PAGE_SIZE, "\n");
 }
 
+static ssize_t
+qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               return scnprintf(buf, PAGE_SIZE, "\n");
+
+       return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+           ha->serdes_version[0], ha->serdes_version[1],
+           ha->serdes_version[2]);
+}
+
 static ssize_t
 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
     char *buf)
@@ -1383,7 +1438,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
        struct qla_hw_data *ha = vha->hw;
 
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
-           !IS_QLA27XX(ha))
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return scnprintf(buf, PAGE_SIZE, "\n");
 
        return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1596,7 +1651,7 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA27XX(ha))
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return scnprintf(buf, PAGE_SIZE, "\n");
 
        return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1604,35 +1659,38 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
-qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
-    char *buf)
+qla2x00_min_supported_speed_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA27XX(ha))
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return scnprintf(buf, PAGE_SIZE, "\n");
 
        return scnprintf(buf, PAGE_SIZE, "%s\n",
-           ha->min_link_speed == 5 ? "32Gps" :
-           ha->min_link_speed == 4 ? "16Gps" :
-           ha->min_link_speed == 3 ? "8Gps" :
-           ha->min_link_speed == 2 ? "4Gps" :
-           ha->min_link_speed != 0 ? "unknown" : "");
+           ha->min_supported_speed == 6 ? "64Gps" :
+           ha->min_supported_speed == 5 ? "32Gps" :
+           ha->min_supported_speed == 4 ? "16Gps" :
+           ha->min_supported_speed == 3 ? "8Gps" :
+           ha->min_supported_speed == 2 ? "4Gps" :
+           ha->min_supported_speed != 0 ? "unknown" : "");
 }
 
 static ssize_t
-qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
-    char *buf)
+qla2x00_max_supported_speed_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA27XX(ha))
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return scnprintf(buf, PAGE_SIZE, "\n");
 
        return scnprintf(buf, PAGE_SIZE, "%s\n",
-           ha->max_speed_sup ? "32Gps" : "16Gps");
+           ha->max_supported_speed  == 2 ? "64Gps" :
+           ha->max_supported_speed  == 1 ? "32Gps" :
+           ha->max_supported_speed  == 0 ? "16Gps" : "unknown");
 }
 
 static ssize_t
@@ -1645,7 +1703,7 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
        int mode = QLA_SET_DATA_RATE_LR;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA27XX(vha->hw)) {
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
                ql_log(ql_log_warn, vha, 0x70d8,
                    "Speed setting not supported \n");
                return -EINVAL;
@@ -2164,6 +2222,32 @@ qla2x00_dif_bundle_statistics_show(struct device *dev,
            ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
 }
 
+static ssize_t
+qla2x00_fw_attr_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               return scnprintf(buf, PAGE_SIZE, "\n");
+
+       return scnprintf(buf, PAGE_SIZE, "%llx\n",
+           (uint64_t)ha->fw_attributes_ext[1] << 48 |
+           (uint64_t)ha->fw_attributes_ext[0] << 32 |
+           (uint64_t)ha->fw_attributes_h << 16 |
+           (uint64_t)ha->fw_attributes);
+}
+
+static ssize_t
+qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -2192,6 +2276,7 @@ static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
                   NULL);
 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
                   NULL);
+static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
@@ -2209,8 +2294,10 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
                   qla2x00_allow_cna_fw_dump_show,
                   qla2x00_allow_cna_fw_dump_store);
 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
-static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
-static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(min_supported_speed, 0444,
+                  qla2x00_min_supported_speed_show, NULL);
+static DEVICE_ATTR(max_supported_speed, 0444,
+                  qla2x00_max_supported_speed_show, NULL);
 static DEVICE_ATTR(zio_threshold, 0644,
     qla_zio_threshold_show,
     qla_zio_threshold_store);
@@ -2221,6 +2308,8 @@ static DEVICE_ATTR(dif_bundle_statistics, 0444,
     qla2x00_dif_bundle_statistics_show, NULL);
 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
     qla2x00_port_speed_store);
+static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
+static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
 
 
 struct device_attribute *qla2x00_host_attrs[] = {
@@ -2242,6 +2331,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_optrom_fw_version,
        &dev_attr_84xx_fw_version,
        &dev_attr_total_isp_aborts,
+       &dev_attr_serdes_version,
        &dev_attr_mpi_version,
        &dev_attr_phy_version,
        &dev_attr_flash_block_size,
@@ -2256,11 +2346,13 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_fw_dump_size,
        &dev_attr_allow_cna_fw_dump,
        &dev_attr_pep_version,
-       &dev_attr_min_link_speed,
-       &dev_attr_max_speed_sup,
+       &dev_attr_min_supported_speed,
+       &dev_attr_max_supported_speed,
        &dev_attr_zio_threshold,
        &dev_attr_dif_bundle_statistics,
        &dev_attr_port_speed,
+       &dev_attr_port_no,
+       &dev_attr_fw_attr,
        NULL, /* reserve for qlini_mode */
        NULL, /* reserve for ql2xiniexchg */
        NULL, /* reserve for ql2xexchoffld */
@@ -2296,16 +2388,15 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
 static void
 qla2x00_get_host_speed(struct Scsi_Host *shost)
 {
-       struct qla_hw_data *ha = ((struct scsi_qla_host *)
-                                       (shost_priv(shost)))->hw;
-       u32 speed = FC_PORTSPEED_UNKNOWN;
+       scsi_qla_host_t *vha = shost_priv(shost);
+       u32 speed;
 
-       if (IS_QLAFX00(ha)) {
+       if (IS_QLAFX00(vha->hw)) {
                qlafx00_get_host_speed(shost);
                return;
        }
 
-       switch (ha->link_data_rate) {
+       switch (vha->hw->link_data_rate) {
        case PORT_SPEED_1GB:
                speed = FC_PORTSPEED_1GBIT;
                break;
@@ -2327,7 +2418,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
        case PORT_SPEED_32GB:
                speed = FC_PORTSPEED_32GBIT;
                break;
+       case PORT_SPEED_64GB:
+               speed = FC_PORTSPEED_64GBIT;
+               break;
+       default:
+               speed = FC_PORTSPEED_UNKNOWN;
+               break;
        }
+
        fc_host_speed(shost) = speed;
 }
 
@@ -2335,7 +2433,7 @@ static void
 qla2x00_get_host_port_type(struct Scsi_Host *shost)
 {
        scsi_qla_host_t *vha = shost_priv(shost);
-       uint32_t port_type = FC_PORTTYPE_UNKNOWN;
+       uint32_t port_type;
 
        if (vha->vp_idx) {
                fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
@@ -2354,7 +2452,11 @@ qla2x00_get_host_port_type(struct Scsi_Host *shost)
        case ISP_CFG_F:
                port_type = FC_PORTTYPE_NPORT;
                break;
+       default:
+               port_type = FC_PORTTYPE_UNKNOWN;
+               break;
        }
+
        fc_host_port_type(shost) = port_type;
 }
 
@@ -2416,13 +2518,10 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
        fc_starget_port_id(starget) = port_id;
 }
 
-static void
+static inline void
 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
-       if (timeout)
-               rport->dev_loss_tmo = timeout;
-       else
-               rport->dev_loss_tmo = 1;
+       rport->dev_loss_tmo = timeout ? timeout : 1;
 }
 
 static void
@@ -2632,8 +2731,9 @@ static void
 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
 {
        scsi_qla_host_t *vha = shost_priv(shost);
-       uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
-               0xFF, 0xFF, 0xFF, 0xFF};
+       static const uint8_t node_name[WWN_SIZE] = {
+               0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+       };
        u64 fabric_name = wwn_to_u64(node_name);
 
        if (vha->device_flags & SWITCH_FOUND)
@@ -2711,8 +2811,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
        /* initialized vport states */
        atomic_set(&vha->loop_state, LOOP_DOWN);
-       vha->vp_err_state=  VP_ERR_PORTDWN;
-       vha->vp_prev_err_state=  VP_ERR_UNKWN;
+       vha->vp_err_state = VP_ERR_PORTDWN;
+       vha->vp_prev_err_state = VP_ERR_UNKWN;
        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -2727,6 +2827,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
                        int prot = 0, guard;
+
                        vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_user, vha, 0x7082,
                            "Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -2977,7 +3078,7 @@ void
 qla2x00_init_host_attr(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       u32 speed = FC_PORTSPEED_UNKNOWN;
+       u32 speeds = FC_PORTSPEED_UNKNOWN;
 
        fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
        fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -2988,25 +3089,45 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
        fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
 
        if (IS_CNA_CAPABLE(ha))
-               speed = FC_PORTSPEED_10GBIT;
-       else if (IS_QLA2031(ha))
-               speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
-                   FC_PORTSPEED_4GBIT;
-       else if (IS_QLA25XX(ha))
-               speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
-                   FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+               speeds = FC_PORTSPEED_10GBIT;
+       else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
+               if (ha->max_supported_speed == 2) {
+                       if (ha->min_supported_speed <= 6)
+                               speeds |= FC_PORTSPEED_64GBIT;
+               }
+               if (ha->max_supported_speed == 2 ||
+                   ha->max_supported_speed == 1) {
+                       if (ha->min_supported_speed <= 5)
+                               speeds |= FC_PORTSPEED_32GBIT;
+               }
+               if (ha->max_supported_speed == 2 ||
+                   ha->max_supported_speed == 1 ||
+                   ha->max_supported_speed == 0) {
+                       if (ha->min_supported_speed <= 4)
+                               speeds |= FC_PORTSPEED_16GBIT;
+               }
+               if (ha->max_supported_speed == 1 ||
+                   ha->max_supported_speed == 0) {
+                       if (ha->min_supported_speed <= 3)
+                               speeds |= FC_PORTSPEED_8GBIT;
+               }
+               if (ha->max_supported_speed == 0) {
+                       if (ha->min_supported_speed <= 2)
+                               speeds |= FC_PORTSPEED_4GBIT;
+               }
+       } else if (IS_QLA2031(ha))
+               speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
+                       FC_PORTSPEED_4GBIT;
+       else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
+               speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
+                       FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
        else if (IS_QLA24XX_TYPE(ha))
-               speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
-                   FC_PORTSPEED_1GBIT;
+               speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
+                       FC_PORTSPEED_1GBIT;
        else if (IS_QLA23XX(ha))
-               speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
-       else if (IS_QLAFX00(ha))
-               speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
-                   FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
-       else if (IS_QLA27XX(ha))
-               speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
-                   FC_PORTSPEED_8GBIT;
+               speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
        else
-               speed = FC_PORTSPEED_1GBIT;
-       fc_host_supported_speeds(vha->host) = speed;
+               speeds = FC_PORTSPEED_1GBIT;
+
+       fc_host_supported_speeds(vha->host) = speeds;
 }
index 17d4265..5441557 100644
@@ -1,4 +1,4 @@
-       /*
+/*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c)  2003-2014 QLogic Corporation
  *
@@ -84,8 +84,7 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
                return 0;
        }
 
-       if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
-                       bcode[3] != 'S') {
+       if (memcmp(bcode, "HQOS", 4)) {
                /* Invalid FCP priority data header*/
                ql_dbg(ql_dbg_user, vha, 0x7052,
                    "Invalid FCP Priority data header. bcode=0x%x.\n",
@@ -1044,7 +1043,7 @@ qla84xx_updatefw(struct bsg_job *bsg_job)
        }
 
        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
-       fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
+       fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
 
        mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
        mn->entry_count = 1;
@@ -1057,9 +1056,8 @@ qla84xx_updatefw(struct bsg_job *bsg_job)
        mn->fw_ver =  cpu_to_le32(fw_ver);
        mn->fw_size =  cpu_to_le32(data_len);
        mn->fw_seq_size =  cpu_to_le32(data_len);
-       mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
-       mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
-       mn->dseg_length = cpu_to_le32(data_len);
+       put_unaligned_le64(fw_dma, &mn->dsd.address);
+       mn->dsd.length = cpu_to_le32(data_len);
        mn->data_seg_cnt = cpu_to_le16(1);
 
        rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
@@ -1238,9 +1236,8 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
        if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
                mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
                mn->dseg_count = cpu_to_le16(1);
-               mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
-               mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
-               mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+               put_unaligned_le64(mgmt_dma, &mn->dsd.address);
+               mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
        }
 
        rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
@@ -1354,7 +1351,7 @@ qla24xx_iidma(struct bsg_job *bsg_job)
 
        if (rval) {
                ql_log(ql_log_warn, vha, 0x704c,
-                   "iIDMA cmd failed for %8phN -- "
+                   "iiDMA cmd failed for %8phN -- "
                    "%04x %x %04x %04x.\n", fcport->port_name,
                    rval, fcport->fp_speed, mb[0], mb[1]);
                rval = (DID_ERROR << 16);
@@ -1412,7 +1409,8 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
                    start == (ha->flt_region_fw * 4))
                        valid = 1;
                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
-                   IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
+                   IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+                   IS_QLA28XX(ha))
                        valid = 1;
                if (!valid) {
                        ql_log(ql_log_warn, vha, 0x7058,
@@ -1534,6 +1532,7 @@ qla2x00_update_fru_versions(struct bsg_job *bsg_job)
        uint32_t count;
        dma_addr_t sfp_dma;
        void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
@@ -1584,6 +1583,7 @@ qla2x00_read_fru_status(struct bsg_job *bsg_job)
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
@@ -1634,6 +1634,7 @@ qla2x00_write_fru_status(struct bsg_job *bsg_job)
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
@@ -1680,6 +1681,7 @@ qla2x00_write_i2c(struct bsg_job *bsg_job)
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
@@ -1725,6 +1727,7 @@ qla2x00_read_i2c(struct bsg_job *bsg_job)
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+
        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
@@ -1961,7 +1964,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
 
        /* Dump the vendor information */
        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
-           (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+           piocb_rqst, sizeof(*piocb_rqst));
 
        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70d0,
@@ -2157,7 +2160,7 @@ qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
        struct qla_hw_data *ha = vha->hw;
        struct qla_flash_update_caps cap;
 
-       if (!(IS_QLA27XX(ha)))
+       if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
                return -EPERM;
 
        memset(&cap, 0, sizeof(cap));
@@ -2190,7 +2193,7 @@ qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
        uint64_t online_fw_attr = 0;
        struct qla_flash_update_caps cap;
 
-       if (!(IS_QLA27XX(ha)))
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;
 
        memset(&cap, 0, sizeof(cap));
@@ -2238,7 +2241,7 @@ qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
        uint8_t domain, area, al_pa, state;
        int rval;
 
-       if (!(IS_QLA27XX(ha)))
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;
 
        memset(&bbcr, 0, sizeof(bbcr));
@@ -2323,8 +2326,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
 
        if (rval == QLA_SUCCESS) {
-               ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
-                   (uint8_t *)stats, sizeof(*stats));
+               ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
+                       stats, sizeof(*stats));
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
        }
@@ -2353,7 +2356,8 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
        int rval;
        struct qla_dport_diag *dd;
 
-       if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+       if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+           !IS_QLA28XX(vha->hw))
                return -EPERM;
 
        dd = kmalloc(sizeof(*dd), GFP_KERNEL);
@@ -2387,6 +2391,45 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
        return 0;
 }
 
+static int
+qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
+{
+       scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+       struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_active_regions regions = { };
+       struct active_regions active_regions = { };
+
+       qla28xx_get_aux_images(vha, &active_regions);
+       regions.global_image = active_regions.global;
+
+       if (IS_QLA28XX(ha)) {
+               qla27xx_get_active_image(vha, &active_regions);
+               regions.board_config = active_regions.aux.board_config;
+               regions.vpd_nvram = active_regions.aux.vpd_nvram;
+               regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
+               regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+       }
+
+       ql_dbg(ql_dbg_user, vha, 0x70e1,
+           "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+           __func__, vha->host_no, regions.global_image,
+           regions.board_config, regions.vpd_nvram,
+           regions.npiv_config_0_1, regions.npiv_config_2_3);
+
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
+
+       bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+       bsg_reply->reply_payload_rcv_len = sizeof(regions);
+       bsg_reply->result = DID_OK << 16;
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job_done(bsg_job, bsg_reply->result,
+           bsg_reply->reply_payload_rcv_len);
+
+       return 0;
+}
+
 static int
 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
 {
@@ -2460,6 +2503,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
        case QL_VND_DPORT_DIAGNOSTICS:
                return qla2x00_do_dport_diagnostics(bsg_job);
 
+       case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
+               return qla2x00_get_flash_image_status(bsg_job);
+
        default:
                return -ENOSYS;
        }
index d97dfd5..7594fad 100644
@@ -31,6 +31,7 @@
 #define QL_VND_GET_PRIV_STATS  0x18
 #define QL_VND_DPORT_DIAGNOSTICS       0x19
 #define QL_VND_GET_PRIV_STATS_EX       0x1A
+#define QL_VND_SS_GET_FLASH_IMAGE_STATUS       0x1E
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK                  0
@@ -279,4 +280,14 @@ struct qla_dport_diag {
 #define QLA_DPORT_RESULT       0x0
 #define QLA_DPORT_START                0x2
 
+/* active images in flash */
+struct qla_active_regions {
+       uint8_t global_image;
+       uint8_t board_config;
+       uint8_t vpd_nvram;
+       uint8_t npiv_config_0_1;
+       uint8_t npiv_config_2_3;
+       uint8_t reserved[32];
+} __packed;
+
 #endif
index c7533fa..9e80646 100644
@@ -111,30 +111,25 @@ int
 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
        uint32_t ram_dwords, void **nxt)
 {
-       int rval;
-       uint32_t cnt, stat, timer, dwords, idx;
-       uint16_t mb0;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
        dma_addr_t dump_dma = ha->gid_list_dma;
-       uint32_t *dump = (uint32_t *)ha->gid_list;
-
-       rval = QLA_SUCCESS;
-       mb0 = 0;
+       uint32_t *chunk = (void *)ha->gid_list;
+       uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
+       uint32_t stat;
+       ulong i, j, timer = 6000000;
+       int rval = QLA_FUNCTION_FAILED;
 
-       WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+       for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
+               if (i + dwords > ram_dwords)
+                       dwords = ram_dwords - i;
 
-       dwords = qla2x00_gid_list_size(ha) / 4;
-       for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
-           cnt += dwords, addr += dwords) {
-               if (cnt + dwords > ram_dwords)
-                       dwords = ram_dwords - cnt;
-
+               WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
                WRT_REG_WORD(&reg->mailbox1, LSW(addr));
                WRT_REG_WORD(&reg->mailbox8, MSW(addr));
 
-               WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
-               WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+               WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
+               WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
                WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
                WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
 
@@ -145,76 +140,76 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
                WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
                ha->flags.mbox_int = 0;
-               for (timer = 6000000; timer; timer--) {
-                       /* Check for pending interrupts. */
-                       stat = RD_REG_DWORD(&reg->host_status);
-                       if (stat & HSRX_RISC_INT) {
-                               stat &= 0xff;
-
-                               if (stat == 0x1 || stat == 0x2 ||
-                                   stat == 0x10 || stat == 0x11) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
+               while (timer--) {
+                       udelay(5);
 
-                                       mb0 = RD_REG_WORD(&reg->mailbox0);
-                                       RD_REG_WORD(&reg->mailbox1);
+                       stat = RD_REG_DWORD(&reg->host_status);
+                       /* Check for pending interrupts. */
+                       if (!(stat & HSRX_RISC_INT))
+                               continue;
 
-                                       WRT_REG_DWORD(&reg->hccr,
-                                           HCCRX_CLR_RISC_INT);
-                                       RD_REG_DWORD(&reg->hccr);
-                                       break;
-                               }
+                       stat &= 0xff;
+                       if (stat != 0x1 && stat != 0x2 &&
+                           stat != 0x10 && stat != 0x11) {
 
                                /* Clear this intr; it wasn't a mailbox intr */
                                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
                                RD_REG_DWORD(&reg->hccr);
+                               continue;
                        }
-                       udelay(5);
+
+                       set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+                       rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
+                       WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+                       RD_REG_DWORD(&reg->hccr);
+                       break;
                }
                ha->flags.mbox_int = 1;
+               *nxt = ram + i;
 
-               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb0 & MBS_MASK;
-                       for (idx = 0; idx < dwords; idx++)
-                               ram[cnt + idx] = IS_QLA27XX(ha) ?
-                                   le32_to_cpu(dump[idx]) : swab32(dump[idx]);
-               } else {
-                       rval = QLA_FUNCTION_FAILED;
+               if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+                       /* no interrupt, timed out*/
+                       return rval;
+               }
+               if (rval) {
+                       /* error completion status */
+                       return rval;
+               }
+               for (j = 0; j < dwords; j++) {
+                       ram[i + j] =
+                           (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+                           chunk[j] : swab32(chunk[j]);
                }
        }
 
-       *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
-       return rval;
+       *nxt = ram + i;
+       return QLA_SUCCESS;
 }
 
 int
 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
     uint32_t ram_dwords, void **nxt)
 {
-       int rval;
-       uint32_t cnt, stat, timer, dwords, idx;
-       uint16_t mb0;
+       int rval = QLA_FUNCTION_FAILED;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
        dma_addr_t dump_dma = ha->gid_list_dma;
-       uint32_t *dump = (uint32_t *)ha->gid_list;
+       uint32_t *chunk = (void *)ha->gid_list;
+       uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
+       uint32_t stat;
+       ulong i, j, timer = 6000000;
 
-       rval = QLA_SUCCESS;
-       mb0 = 0;
-
-       WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-       dwords = qla2x00_gid_list_size(ha) / 4;
-       for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
-           cnt += dwords, addr += dwords) {
-               if (cnt + dwords > ram_dwords)
-                       dwords = ram_dwords - cnt;
+       for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
+               if (i + dwords > ram_dwords)
+                       dwords = ram_dwords - i;
 
+               WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
                WRT_REG_WORD(&reg->mailbox1, LSW(addr));
                WRT_REG_WORD(&reg->mailbox8, MSW(addr));
 
-               WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
-               WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+               WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
+               WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
                WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
                WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
 
@@ -223,45 +218,48 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
                WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
                ha->flags.mbox_int = 0;
-               for (timer = 6000000; timer; timer--) {
-                       /* Check for pending interrupts. */
+               while (timer--) {
+                       udelay(5);
                        stat = RD_REG_DWORD(&reg->host_status);
-                       if (stat & HSRX_RISC_INT) {
-                               stat &= 0xff;
 
-                               if (stat == 0x1 || stat == 0x2 ||
-                                   stat == 0x10 || stat == 0x11) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_REG_WORD(&reg->mailbox0);
-
-                                       WRT_REG_DWORD(&reg->hccr,
-                                           HCCRX_CLR_RISC_INT);
-                                       RD_REG_DWORD(&reg->hccr);
-                                       break;
-                               }
+                       /* Check for pending interrupts. */
+                       if (!(stat & HSRX_RISC_INT))
+                               continue;
 
-                               /* Clear this intr; it wasn't a mailbox intr */
+                       stat &= 0xff;
+                       if (stat != 0x1 && stat != 0x2 &&
+                           stat != 0x10 && stat != 0x11) {
                                WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
                                RD_REG_DWORD(&reg->hccr);
+                               continue;
                        }
-                       udelay(5);
+
+                       set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+                       rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
+                       WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+                       RD_REG_DWORD(&reg->hccr);
+                       break;
                }
                ha->flags.mbox_int = 1;
+               *nxt = ram + i;
 
-               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb0 & MBS_MASK;
-                       for (idx = 0; idx < dwords; idx++)
-                               ram[cnt + idx] = IS_QLA27XX(ha) ?
-                                   le32_to_cpu(dump[idx]) : swab32(dump[idx]);
-               } else {
-                       rval = QLA_FUNCTION_FAILED;
+               if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+                       /* no interrupt, timed out */
+                       return rval;
+               }
+               if (rval) {
+                       /* error completion status */
+                       return rval;
+               }
+               for (j = 0; j < dwords; j++) {
+                       ram[i + j] =
+                           (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+                           chunk[j] : swab32(chunk[j]);
                }
        }
 
-       *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
-       return rval;
+       *nxt = ram + i;
+       return QLA_SUCCESS;
 }
 
 static int
@@ -447,7 +445,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
                }
        }
 
-       *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
+       *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
        return rval;
 }
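
Both reworked dump helpers above now rewrite the mailbox command for every chunk, bail out of the polling loop on a timeout or a non-zero completion status, and keep *nxt updated so the caller always knows how far the copy got before giving up. A rough caller sketch (hypothetical wrapper; the driver's qla24xx_fw_dump() does considerably more bookkeeping):

	static int dump_code_and_ext_ram(struct qla_hw_data *ha, uint32_t *buf,
					 uint32_t code_dwords, uint32_t ext_dwords)
	{
		void *nxt = buf;
		int rc;

		/* code RAM lives at 0x20000, external memory at 0x100000 */
		rc = qla24xx_dump_ram(ha, 0x20000, nxt, code_dwords, &nxt);
		if (rc != QLA_SUCCESS)
			return rc;	/* nxt marks where the partial dump ended */

		/* the next region starts exactly where the previous one ended */
		return qla24xx_dump_ram(ha, 0x100000, nxt, ext_dwords, &nxt);
	}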
 
@@ -669,7 +667,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
        struct qla2xxx_mq_chain *mq = ptr;
        device_reg_t *reg;
 
-       if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha))
                return ptr;
 
        mq = ptr;
@@ -2521,7 +2520,7 @@ qla83xx_fw_dump_failed:
 /****************************************************************************/
 
 static inline int
-ql_mask_match(uint32_t level)
+ql_mask_match(uint level)
 {
        return (level & ql2xextended_error_logging) == level;
 }
@@ -2540,7 +2539,7 @@ ql_mask_match(uint32_t level)
  * msg:   The message to be displayed.
  */
 void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
 {
        va_list va;
        struct va_format vaf;
@@ -2583,8 +2582,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
  * msg:   The message to be displayed.
  */
 void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
-          const char *fmt, ...)
+ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
 {
        va_list va;
        struct va_format vaf;
@@ -2620,7 +2618,7 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
  * msg:   The message to be displayed.
  */
 void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
 {
        va_list va;
        struct va_format vaf;
@@ -2678,8 +2676,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
  * msg:   The message to be displayed.
  */
 void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
-          const char *fmt, ...)
+ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
 {
        va_list va;
        struct va_format vaf;
@@ -2719,7 +2716,7 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
 }
 
 void
-ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
 {
        int i;
        struct qla_hw_data *ha = vha->hw;
@@ -2741,13 +2738,12 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
        ql_dbg(level, vha, id, "Mailbox registers:\n");
        for (i = 0; i < 6; i++, mbx_reg++)
                ql_dbg(level, vha, id,
-                   "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
+                   "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
 }
 
 
 void
-ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
-       uint8_t *buf, uint size)
+ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, uint size)
 {
        uint cnt;
 
index 8877aa9..bb01b68 100644 (file)
@@ -318,20 +318,20 @@ struct qla2xxx_fw_dump {
                                   * as compared to other log levels.
                                   */
 
-extern int ql_errlev;
+extern uint ql_errlev;
 
 void __attribute__((format (printf, 4, 5)))
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+ql_dbg(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
 void __attribute__((format (printf, 4, 5)))
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+ql_dbg_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
 void __attribute__((format (printf, 4, 5)))
 ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
 
 
 void __attribute__((format (printf, 4, 5)))
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
 void __attribute__((format (printf, 4, 5)))
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
 
 void __attribute__((format (printf, 4, 5)))
 ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
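
ql_dbg(), ql_log(), their PCI variants and the dump helpers now take plain unsigned ints for the level mask and message id, and ql_dump_buffer() accepts a void * buffer, so callers no longer cast. A typical call, with made-up message ids (ql_dbg_init is one of the existing level masks in this header):

	ql_dbg(ql_dbg_init, vha, 0x00a1, "fw dump length %u\n", ha->fw_dump_len);
	ql_dump_buffer(ql_dbg_init, vha, 0x00a2, ha->fw_dump, 64);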
index 3d46975..1a4095c 100644 (file)
@@ -35,6 +35,7 @@
 #include <scsi/scsi_bsg_fc.h>
 
 #include "qla_bsg.h"
+#include "qla_dsd.h"
 #include "qla_nx.h"
 #include "qla_nx2.h"
 #include "qla_nvme.h"
@@ -545,7 +546,7 @@ typedef struct srb {
        u32 gen2;       /* scratch */
        int rc;
        int retry_count;
-       struct completion comp;
+       struct completion *comp;
        union {
                struct srb_iocb iocb_cmd;
                struct bsg_job *bsg_job;
@@ -1033,6 +1034,7 @@ struct mbx_cmd_32 {
 #define MBC_GET_FIRMWARE_VERSION       8       /* Get firmware revision. */
 #define MBC_LOAD_RISC_RAM              9       /* Load RAM command. */
 #define MBC_DUMP_RISC_RAM              0xa     /* Dump RAM command. */
+#define MBC_SECURE_FLASH_UPDATE                0xa     /* Secure Flash Update(28xx) */
 #define MBC_LOAD_RISC_RAM_EXTENDED     0xb     /* Load RAM extended. */
 #define MBC_DUMP_RISC_RAM_EXTENDED     0xc     /* Dump RAM extended. */
 #define MBC_WRITE_RAM_WORD_EXTENDED    0xd     /* Write RAM word extended */
@@ -1203,6 +1205,9 @@ struct mbx_cmd_32 {
 #define QLA27XX_IMG_STATUS_VER_MAJOR   0x01
 #define QLA27XX_IMG_STATUS_VER_MINOR    0x00
 #define QLA27XX_IMG_STATUS_SIGN   0xFACEFADE
+#define QLA28XX_IMG_STATUS_SIGN                0xFACEFADF
+#define QLA28XX_AUX_IMG_STATUS_SIGN    0xFACEFAED
+#define QLA27XX_DEFAULT_IMAGE          0
 #define QLA27XX_PRIMARY_IMAGE  1
 #define QLA27XX_SECONDARY_IMAGE    2
 
@@ -1323,8 +1329,8 @@ typedef struct {
        uint16_t response_q_inpointer;
        uint16_t request_q_length;
        uint16_t response_q_length;
-       uint32_t request_q_address[2];
-       uint32_t response_q_address[2];
+       __le64   request_q_address __packed;
+       __le64   response_q_address __packed;
 
        uint16_t lun_enables;
        uint8_t  command_resource_count;
@@ -1749,12 +1755,10 @@ typedef struct {
        uint16_t dseg_count;            /* Data segment count. */
        uint8_t scsi_cdb[MAX_CMDSZ];    /* SCSI command words. */
        uint32_t byte_count;            /* Total byte count. */
-       uint32_t dseg_0_address;        /* Data segment 0 address. */
-       uint32_t dseg_0_length;         /* Data segment 0 length. */
-       uint32_t dseg_1_address;        /* Data segment 1 address. */
-       uint32_t dseg_1_length;         /* Data segment 1 length. */
-       uint32_t dseg_2_address;        /* Data segment 2 address. */
-       uint32_t dseg_2_length;         /* Data segment 2 length. */
+       union {
+               struct dsd32 dsd32[3];
+               struct dsd64 dsd64[2];
+       };
 } cmd_entry_t;
 
 /*
@@ -1775,10 +1779,7 @@ typedef struct {
        uint16_t dseg_count;            /* Data segment count. */
        uint8_t scsi_cdb[MAX_CMDSZ];    /* SCSI command words. */
        uint32_t byte_count;            /* Total byte count. */
-       uint32_t dseg_0_address[2];     /* Data segment 0 address. */
-       uint32_t dseg_0_length;         /* Data segment 0 length. */
-       uint32_t dseg_1_address[2];     /* Data segment 1 address. */
-       uint32_t dseg_1_length;         /* Data segment 1 length. */
+       struct dsd64 dsd[2];
 } cmd_a64_entry_t, request_t;
 
 /*
@@ -1791,20 +1792,7 @@ typedef struct {
        uint8_t sys_define;             /* System defined. */
        uint8_t entry_status;           /* Entry Status. */
        uint32_t reserved;
-       uint32_t dseg_0_address;        /* Data segment 0 address. */
-       uint32_t dseg_0_length;         /* Data segment 0 length. */
-       uint32_t dseg_1_address;        /* Data segment 1 address. */
-       uint32_t dseg_1_length;         /* Data segment 1 length. */
-       uint32_t dseg_2_address;        /* Data segment 2 address. */
-       uint32_t dseg_2_length;         /* Data segment 2 length. */
-       uint32_t dseg_3_address;        /* Data segment 3 address. */
-       uint32_t dseg_3_length;         /* Data segment 3 length. */
-       uint32_t dseg_4_address;        /* Data segment 4 address. */
-       uint32_t dseg_4_length;         /* Data segment 4 length. */
-       uint32_t dseg_5_address;        /* Data segment 5 address. */
-       uint32_t dseg_5_length;         /* Data segment 5 length. */
-       uint32_t dseg_6_address;        /* Data segment 6 address. */
-       uint32_t dseg_6_length;         /* Data segment 6 length. */
+       struct dsd32 dsd[7];
 } cont_entry_t;
 
 /*
@@ -1816,16 +1804,7 @@ typedef struct {
        uint8_t entry_count;            /* Entry count. */
        uint8_t sys_define;             /* System defined. */
        uint8_t entry_status;           /* Entry Status. */
-       uint32_t dseg_0_address[2];     /* Data segment 0 address. */
-       uint32_t dseg_0_length;         /* Data segment 0 length. */
-       uint32_t dseg_1_address[2];     /* Data segment 1 address. */
-       uint32_t dseg_1_length;         /* Data segment 1 length. */
-       uint32_t dseg_2_address [2];    /* Data segment 2 address. */
-       uint32_t dseg_2_length;         /* Data segment 2 length. */
-       uint32_t dseg_3_address[2];     /* Data segment 3 address. */
-       uint32_t dseg_3_length;         /* Data segment 3 length. */
-       uint32_t dseg_4_address[2];     /* Data segment 4 address. */
-       uint32_t dseg_4_length;         /* Data segment 4 length. */
+       struct dsd64 dsd[5];
 } cont_a64_entry_t;
 
 #define PO_MODE_DIF_INSERT     0
@@ -1869,8 +1848,7 @@ struct crc_context {
                        uint16_t        reserved_2;
                        uint16_t        reserved_3;
                        uint32_t        reserved_4;
-                       uint32_t        data_address[2];
-                       uint32_t        data_length;
+                       struct dsd64    data_dsd;
                        uint32_t        reserved_5[2];
                        uint32_t        reserved_6;
                } nobundling;
@@ -1880,11 +1858,8 @@ struct crc_context {
                        uint16_t        reserved_1;
                        __le16  dseg_count;     /* Data segment count */
                        uint32_t        reserved_2;
-                       uint32_t        data_address[2];
-                       uint32_t        data_length;
-                       uint32_t        dif_address[2];
-                       uint32_t        dif_length;     /* Data segment 0
-                                                        * length */
+                       struct dsd64    data_dsd;
+                       struct dsd64    dif_dsd;
                } bundling;
        } u;
 
@@ -2083,10 +2058,8 @@ typedef struct {
        uint32_t handle2;
        uint32_t rsp_bytecount;
        uint32_t req_bytecount;
-       uint32_t dseg_req_address[2];   /* Data segment 0 address. */
-       uint32_t dseg_req_length;       /* Data segment 0 length. */
-       uint32_t dseg_rsp_address[2];   /* Data segment 1 address. */
-       uint32_t dseg_rsp_length;       /* Data segment 1 length. */
+       struct dsd64 req_dsd;
+       struct dsd64 rsp_dsd;
 } ms_iocb_entry_t;
 
 
@@ -2258,7 +2231,10 @@ typedef enum {
        FCT_BROADCAST,
        FCT_INITIATOR,
        FCT_TARGET,
-       FCT_NVME
+       FCT_NVME_INITIATOR = 0x10,
+       FCT_NVME_TARGET = 0x20,
+       FCT_NVME_DISCOVERY = 0x40,
+       FCT_NVME = 0xf0,
 } fc_port_type_t;
 
 enum qla_sess_deletion {
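
With FCT_NVME turned into a 0xf0 mask over the new NVMe role bits, checking whether a port has any NVMe role becomes a bitwise test rather than an equality check, e.g. (illustrative):

	if (fcport->port_type & FCT_NVME)
		; /* port advertises at least one NVMe role */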
@@ -2463,13 +2439,7 @@ struct event_arg {
 #define FCS_DEVICE_LOST                3
 #define FCS_ONLINE             4
 
-static const char * const port_state_str[] = {
-       "Unknown",
-       "UNCONFIGURED",
-       "DEAD",
-       "LOST",
-       "ONLINE"
-};
+extern const char *const port_state_str[5];
 
 /*
  * FC port flags.
@@ -2672,6 +2642,7 @@ struct ct_fdmiv2_hba_attributes {
 #define FDMI_PORT_SPEED_8GB            0x10
 #define FDMI_PORT_SPEED_16GB           0x20
 #define FDMI_PORT_SPEED_32GB           0x40
+#define FDMI_PORT_SPEED_64GB           0x80
 #define FDMI_PORT_SPEED_UNKNOWN                0x8000
 
 #define FC_CLASS_2     0x04
@@ -3060,7 +3031,7 @@ struct sns_cmd_pkt {
                struct {
                        uint16_t buffer_length;
                        uint16_t reserved_1;
-                       uint32_t buffer_address[2];
+                       __le64   buffer_address __packed;
                        uint16_t subcommand_length;
                        uint16_t reserved_2;
                        uint16_t subcommand;
@@ -3130,10 +3101,10 @@ struct rsp_que;
 struct isp_operations {
 
        int (*pci_config) (struct scsi_qla_host *);
-       void (*reset_chip) (struct scsi_qla_host *);
+       int (*reset_chip)(struct scsi_qla_host *);
        int (*chip_diag) (struct scsi_qla_host *);
        void (*config_rings) (struct scsi_qla_host *);
-       void (*reset_adapter) (struct scsi_qla_host *);
+       int (*reset_adapter)(struct scsi_qla_host *);
        int (*nvram_config) (struct scsi_qla_host *);
        void (*update_fw_options) (struct scsi_qla_host *);
        int (*load_risc) (struct scsi_qla_host *, uint32_t *);
@@ -3159,9 +3130,9 @@ struct isp_operations {
        void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
            uint32_t);
 
-       uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *,
+       uint8_t *(*read_nvram)(struct scsi_qla_host *, void *,
                uint32_t, uint32_t);
-       int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
+       int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
                uint32_t);
 
        void (*fw_dump) (struct scsi_qla_host *, int);
@@ -3170,16 +3141,16 @@ struct isp_operations {
        int (*beacon_off) (struct scsi_qla_host *);
        void (*beacon_blink) (struct scsi_qla_host *);
 
-       uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
+       void *(*read_optrom)(struct scsi_qla_host *, void *,
                uint32_t, uint32_t);
-       int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
+       int (*write_optrom)(struct scsi_qla_host *, void *, uint32_t,
                uint32_t);
 
        int (*get_flash_version) (struct scsi_qla_host *, void *);
        int (*start_scsi) (srb_t *);
        int (*start_scsi_mq) (srb_t *);
        int (*abort_isp) (struct scsi_qla_host *);
-       int (*iospace_config)(struct qla_hw_data*);
+       int (*iospace_config)(struct qla_hw_data *);
        int (*initialize_adapter)(struct scsi_qla_host *);
 };
 
@@ -3368,7 +3339,8 @@ struct qla_tc_param {
 #define QLA_MQ_SIZE 32
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
-       ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
+       ((ha->mqenable || IS_QLA83XX(ha) || \
+         IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? \
         ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
         ((void __iomem *)ha->iobase))
 #define QLA_REQ_QUE_ID(tag) \
@@ -3621,6 +3593,8 @@ struct qla_hw_data {
                uint32_t        rida_fmt2:1;
                uint32_t        purge_mbox:1;
                uint32_t        n2n_bigger:1;
+               uint32_t        secure_adapter:1;
+               uint32_t        secure_fw:1;
        } flags;
 
        uint16_t max_exchg;
@@ -3703,6 +3677,7 @@ struct qla_hw_data {
 #define PORT_SPEED_8GB  0x04
 #define PORT_SPEED_16GB 0x05
 #define PORT_SPEED_32GB 0x06
+#define PORT_SPEED_64GB 0x07
 #define PORT_SPEED_10GB        0x13
        uint16_t        link_data_rate;         /* F/W operating speed */
        uint16_t        set_data_rate;          /* Set by user */
@@ -3729,6 +3704,11 @@ struct qla_hw_data {
 #define PCI_DEVICE_ID_QLOGIC_ISP2071   0x2071
 #define PCI_DEVICE_ID_QLOGIC_ISP2271   0x2271
 #define PCI_DEVICE_ID_QLOGIC_ISP2261   0x2261
+#define PCI_DEVICE_ID_QLOGIC_ISP2061   0x2061
+#define PCI_DEVICE_ID_QLOGIC_ISP2081   0x2081
+#define PCI_DEVICE_ID_QLOGIC_ISP2089   0x2089
+#define PCI_DEVICE_ID_QLOGIC_ISP2281   0x2281
+#define PCI_DEVICE_ID_QLOGIC_ISP2289   0x2289
 
        uint32_t        isp_type;
 #define DT_ISP2100                      BIT_0
@@ -3753,7 +3733,12 @@ struct qla_hw_data {
 #define DT_ISP2071                     BIT_19
 #define DT_ISP2271                     BIT_20
 #define DT_ISP2261                     BIT_21
-#define DT_ISP_LAST                    (DT_ISP2261 << 1)
+#define DT_ISP2061                     BIT_22
+#define DT_ISP2081                     BIT_23
+#define DT_ISP2089                     BIT_24
+#define DT_ISP2281                     BIT_25
+#define DT_ISP2289                     BIT_26
+#define DT_ISP_LAST                    (DT_ISP2289 << 1)
 
        uint32_t        device_type;
 #define DT_T10_PI                       BIT_25
@@ -3788,6 +3773,8 @@ struct qla_hw_data {
 #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
 #define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
 #define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261)
+#define IS_QLA2081(ha) (DT_MASK(ha) & DT_ISP2081)
+#define IS_QLA2281(ha) (DT_MASK(ha) & DT_ISP2281)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
                        IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -3797,6 +3784,7 @@ struct qla_hw_data {
 #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
 #define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
+#define IS_QLA28XX(ha) (IS_QLA2081(ha) || IS_QLA2281(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
                                IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)     (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3805,14 +3793,15 @@ struct qla_hw_data {
 #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
                                IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
                                IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
-                               IS_QLA8044(ha) || IS_QLA27XX(ha))
+                               IS_QLA8044(ha) || IS_QLA27XX(ha) || \
+                               IS_QLA28XX(ha))
 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
-                               IS_QLA27XX(ha))
+                               IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_NOPOLLING_TYPE(ha)  (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
 #define IS_FAC_REQUIRED(ha)    (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
-                               IS_QLA27XX(ha))
+                               IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_NOCACHE_VPD_TYPE(ha)        (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
-                               IS_QLA27XX(ha))
+                               IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_ALOGIO_CAPABLE(ha)  (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
 #define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
@@ -3823,28 +3812,34 @@ struct qla_hw_data {
 #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
 #define IS_CT6_SUPPORTED(ha)   ((ha)->device_type & DT_CT6_SUPPORTED)
 #define IS_MQUE_CAPABLE(ha)    ((ha)->mqenable || IS_QLA83XX(ha) || \
-                               IS_QLA27XX(ha))
-#define IS_BIDI_CAPABLE(ha)    ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+                               IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_BIDI_CAPABLE(ha) \
+    (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)    (IS_QLA2031(ha) && \
                                ((ha)->fw_attributes_ext[0] & BIT_0))
 #define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_DIFB_DIX0_CAPABLE(ha)    (0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+                                       IS_QLA28XX(ha))
 #define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+                               IS_QLA28XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)        (ha->tgt.atio_q_length)
-#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
-#define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-#define IS_FAWWN_CAPABLE(ha)   (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+                               IS_QLA28XX(ha))
+#define IS_FAWWN_CAPABLE(ha)   (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+                               IS_QLA28XX(ha))
 #define IS_EXCHG_OFFLD_CAPABLE(ha) \
-       (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
-       (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+        IS_QLA27XX(ha) || IS_QLA28XX(ha))
 #define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
-       IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
 
        /* HBA serial number */
        uint8_t         serial0;
@@ -3888,6 +3883,9 @@ struct qla_hw_data {
        void            *sfp_data;
        dma_addr_t      sfp_data_dma;
 
+       void            *flt;
+       dma_addr_t      flt_dma;
+
 #define XGMAC_DATA_SIZE        4096
        void            *xgmac_data;
        dma_addr_t      xgmac_data_dma;
@@ -3999,18 +3997,23 @@ struct qla_hw_data {
        uint8_t         fw_seriallink_options[4];
        uint16_t        fw_seriallink_options24[4];
 
+       uint8_t         serdes_version[3];
        uint8_t         mpi_version[3];
        uint32_t        mpi_capabilities;
        uint8_t         phy_version[3];
        uint8_t         pep_version[3];
 
        /* Firmware dump template */
-       void            *fw_dump_template;
-       uint32_t        fw_dump_template_len;
-       /* Firmware dump information. */
+       struct fwdt {
+               void *template;
+               ulong length;
+               ulong dump_size;
+       } fwdt[2];
        struct qla2xxx_fw_dump *fw_dump;
        uint32_t        fw_dump_len;
-       int             fw_dumped;
+       u32             fw_dump_alloc_len;
+       bool            fw_dumped;
+       bool            fw_dump_mpi;
        unsigned long   fw_dump_cap_flags;
 #define RISC_PAUSE_CMPL                0
 #define DMA_SHUTDOWN_CMPL      1
@@ -4049,7 +4052,6 @@ struct qla_hw_data {
        uint16_t        product_id[4];
 
        uint8_t         model_number[16+1];
-#define BINZERO                "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
        char            model_desc[80];
        uint8_t         adapter_id[16+1];
 
@@ -4089,22 +4091,28 @@ struct qla_hw_data {
        uint32_t        fdt_protect_sec_cmd;
        uint32_t        fdt_wrt_sts_reg_cmd;
 
-       uint32_t        flt_region_flt;
-       uint32_t        flt_region_fdt;
-       uint32_t        flt_region_boot;
-       uint32_t        flt_region_boot_sec;
-       uint32_t        flt_region_fw;
-       uint32_t        flt_region_fw_sec;
-       uint32_t        flt_region_vpd_nvram;
-       uint32_t        flt_region_vpd;
-       uint32_t        flt_region_vpd_sec;
-       uint32_t        flt_region_nvram;
-       uint32_t        flt_region_npiv_conf;
-       uint32_t        flt_region_gold_fw;
-       uint32_t        flt_region_fcp_prio;
-       uint32_t        flt_region_bootload;
-       uint32_t        flt_region_img_status_pri;
-       uint32_t        flt_region_img_status_sec;
+       struct {
+               uint32_t        flt_region_flt;
+               uint32_t        flt_region_fdt;
+               uint32_t        flt_region_boot;
+               uint32_t        flt_region_boot_sec;
+               uint32_t        flt_region_fw;
+               uint32_t        flt_region_fw_sec;
+               uint32_t        flt_region_vpd_nvram;
+               uint32_t        flt_region_vpd_nvram_sec;
+               uint32_t        flt_region_vpd;
+               uint32_t        flt_region_vpd_sec;
+               uint32_t        flt_region_nvram;
+               uint32_t        flt_region_nvram_sec;
+               uint32_t        flt_region_npiv_conf;
+               uint32_t        flt_region_gold_fw;
+               uint32_t        flt_region_fcp_prio;
+               uint32_t        flt_region_bootload;
+               uint32_t        flt_region_img_status_pri;
+               uint32_t        flt_region_img_status_sec;
+               uint32_t        flt_region_aux_img_status_pri;
+               uint32_t        flt_region_aux_img_status_sec;
+       };
        uint8_t         active_image;
 
        /* Needed for BEACON */
@@ -4197,8 +4205,8 @@ struct qla_hw_data {
        struct qlt_hw_data tgt;
        int     allow_cna_fw_dump;
        uint32_t fw_ability_mask;
-       uint16_t min_link_speed;
-       uint16_t max_speed_sup;
+       uint16_t min_supported_speed;
+       uint16_t max_supported_speed;
 
        /* DMA pool for the DIF bundling buffers */
        struct dma_pool *dif_bundl_pool;
@@ -4225,9 +4233,20 @@ struct qla_hw_data {
 
        atomic_t zio_threshold;
        uint16_t last_zio_threshold;
+
 #define DEFAULT_ZIO_THRESHOLD 5
 };
 
+struct active_regions {
+       uint8_t global;
+       struct {
+               uint8_t board_config;
+               uint8_t vpd_nvram;
+               uint8_t npiv_config_0_1;
+               uint8_t npiv_config_2_3;
+       } aux;
+};
+
 #define FW_ABILITY_MAX_SPEED_MASK      0xFUL
 #define FW_ABILITY_MAX_SPEED_16G       0x0
 #define FW_ABILITY_MAX_SPEED_32G       0x1
@@ -4315,6 +4334,7 @@ typedef struct scsi_qla_host {
 #define N2N_LOGIN_NEEDED       30
 #define IOCB_WORK_ACTIVE       31
 #define SET_ZIO_THRESHOLD_NEEDED 32
+#define ISP_ABORT_TO_ROM       33
 
        unsigned long   pci_flags;
 #define PFLG_DISCONNECTED      0       /* PCI device removed */
@@ -4429,7 +4449,7 @@ typedef struct scsi_qla_host {
        int fcport_count;
        wait_queue_head_t fcport_waitQ;
        wait_queue_head_t vref_waitq;
-       uint8_t min_link_speed_feat;
+       uint8_t min_supported_speed;
        uint8_t n2n_node_name[WWN_SIZE];
        uint8_t n2n_port_name[WWN_SIZE];
        uint16_t        n2n_id;
@@ -4441,14 +4461,21 @@ typedef struct scsi_qla_host {
 
 struct qla27xx_image_status {
        uint8_t image_status_mask;
-       uint16_t generation_number;
-       uint8_t reserved[3];
-       uint8_t ver_minor;
+       uint16_t generation;
        uint8_t ver_major;
+       uint8_t ver_minor;
+       uint8_t bitmap;         /* 28xx only */
+       uint8_t reserved[2];
        uint32_t checksum;
        uint32_t signature;
 } __packed;
 
+/* 28xx aux image status bitmap values */
+#define QLA28XX_AUX_IMG_BOARD_CONFIG           BIT_0
+#define QLA28XX_AUX_IMG_VPD_NVRAM              BIT_1
+#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1                BIT_2
+#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3                BIT_3
+
 #define SET_VP_IDX     1
 #define SET_AL_PA      2
 #define RESET_VP_IDX   3
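
For 28xx parts the image status now carries a per-region bitmap (struct qla27xx_image_status.bitmap plus the QLA28XX_AUX_IMG_* bits above), and struct active_regions from a few hunks up records which copy of each auxiliary region is in use. A sketch of the mapping, with a made-up helper name (the driver's real entry point is qla28xx_get_aux_images(), declared in qla_gbl.h below):

	static void set_active_aux(struct active_regions *active,
				   uint8_t bitmap, uint8_t image)
	{
		/* image is QLA27XX_PRIMARY_IMAGE or QLA27XX_SECONDARY_IMAGE */
		if (bitmap & QLA28XX_AUX_IMG_BOARD_CONFIG)
			active->aux.board_config = image;
		if (bitmap & QLA28XX_AUX_IMG_VPD_NVRAM)
			active->aux.vpd_nvram = image;
		if (bitmap & QLA28XX_AUX_IMG_NPIV_CONFIG_0_1)
			active->aux.npiv_config_0_1 = image;
		if (bitmap & QLA28XX_AUX_IMG_NPIV_CONFIG_2_3)
			active->aux.npiv_config_2_3 = image;
	}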
@@ -4495,6 +4522,24 @@ struct qla2_sgx {
        }                                       \
 }
 
+
+#define SFUB_CHECKSUM_SIZE     4
+
+struct secure_flash_update_block {
+       uint32_t        block_info;
+       uint32_t        signature_lo;
+       uint32_t        signature_hi;
+       uint32_t        signature_upper[0x3e];
+};
+
+struct secure_flash_update_block_pk {
+       uint32_t        block_info;
+       uint32_t        signature_lo;
+       uint32_t        signature_hi;
+       uint32_t        signature_upper[0x3e];
+       uint32_t        public_key[0x41];
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
@@ -4595,6 +4640,7 @@ struct qla2_sgx {
 #define OPTROM_SIZE_81XX       0x400000
 #define OPTROM_SIZE_82XX       0x800000
 #define OPTROM_SIZE_83XX       0x1000000
+#define OPTROM_SIZE_28XX       0x2000000
 
 #define OPTROM_BURST_SIZE      0x1000
 #define OPTROM_BURST_DWORDS    (OPTROM_BURST_SIZE / 4)
@@ -4691,10 +4737,13 @@ struct sff_8247_a0 {
 #define AUTO_DETECT_SFP_SUPPORT(_vha)\
        (ql2xautodetectsfp && !_vha->vp_idx &&          \
        (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
-       IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
+       IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
+        IS_QLA28XX(_vha->hw)))
+
+#define FLASH_SEMAPHORE_REGISTER_ADDR   0x00101016
 
 #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
-       (IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+       (IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha)))
 
 #define SAVE_TOPO(_ha) { \
        if (_ha->current_topology)                              \
index 5819a45..a432cae 100644 (file)
@@ -41,6 +41,7 @@ static int
 qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
 {
        scsi_qla_host_t *vha = inode->i_private;
+
        return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
 }
 
@@ -161,6 +162,7 @@ static int
 qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
 {
        struct scsi_qla_host *vha = inode->i_private;
+
        return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
 }
 
@@ -250,6 +252,7 @@ static int
 qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
 {
        struct scsi_qla_host *vha = inode->i_private;
+
        return single_open(file, qla_dfs_tgt_counters_show, vha);
 }
 
@@ -386,7 +389,7 @@ qla_dfs_naqp_write(struct file *file, const char __user *buffer,
        int rc = 0;
        unsigned long num_act_qp;
 
-       if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+       if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
                pr_err("host%ld: this adapter does not support Multi Q.",
                    vha->host_no);
                return -EINVAL;
@@ -438,7 +441,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
 
        if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-           !IS_QLA27XX(ha))
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                goto out;
        if (!ha->fce)
                goto out;
@@ -474,7 +477,7 @@ create_nodes:
        ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
                S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
 
-       if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+       if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
                ha->tgt.dfs_naqp = debugfs_create_file("naqp",
                    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
 out:
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
new file mode 100644 (file)
index 0000000..7479924
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _QLA_DSD_H_
+#define _QLA_DSD_H_
+
+#include <asm/unaligned.h>
+
+/* 32-bit data segment descriptor (8 bytes) */
+struct dsd32 {
+       __le32 address;
+       __le32 length;
+};
+
+static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
+{
+       put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
+       put_unaligned_le32(sg_dma_len(sg),     &(*dsd)->length);
+       (*dsd)++;
+}
+
+/* 64-bit data segment descriptor (12 bytes) */
+struct dsd64 {
+       __le64 address;
+       __le32 length;
+} __packed;
+
+static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
+{
+       put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
+       put_unaligned_le32(sg_dma_len(sg),     &(*dsd)->length);
+       (*dsd)++;
+}
+
+#endif
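
The append_dsd32()/append_dsd64() helpers replace the open-coded dseg_N_address/dseg_N_length stores removed in the hunks above; the put_unaligned_*() accessors are used because a packed 12-byte dsd64 embedded in an IOCB is not guaranteed to be naturally aligned. A sketch of the intended call pattern (local names illustrative; the real IOCB builders also spill into continuation entries once the inline descriptors run out):

	struct dsd64 *cur_dsd = &cmd_pkt->dsd;	/* e.g. a struct cmd_type_7 */
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, tot_dsds, i)
		append_dsd64(&cur_dsd, sg);	/* store LE address + length, advance */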
index 50c1e6c..df079a8 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/nvme.h>
 #include <linux/nvme-fc.h>
 
+#include "qla_dsd.h"
+
 #define MBS_CHECKSUM_ERROR     0x4010
 #define MBS_INVALID_PRODUCT_KEY        0x4020
 
@@ -339,9 +341,9 @@ struct init_cb_24xx {
 
        uint16_t prio_request_q_length;
 
-       uint32_t request_q_address[2];
-       uint32_t response_q_address[2];
-       uint32_t prio_request_q_address[2];
+       __le64   request_q_address __packed;
+       __le64   response_q_address __packed;
+       __le64   prio_request_q_address __packed;
 
        uint16_t msix;
        uint16_t msix_atio;
@@ -349,7 +351,7 @@ struct init_cb_24xx {
 
        uint16_t atio_q_inpointer;
        uint16_t atio_q_length;
-       uint32_t atio_q_address[2];
+       __le64   atio_q_address __packed;
 
        uint16_t interrupt_delay_timer;         /* 100us increments. */
        uint16_t login_timeout;
@@ -453,7 +455,7 @@ struct cmd_bidir {
 #define BD_WRITE_DATA                  BIT_0
 
        uint16_t fcp_cmnd_dseg_len;             /* Data segment length. */
-       uint32_t fcp_cmnd_dseg_address[2];      /* Data segment address. */
+       __le64   fcp_cmnd_dseg_address __packed;/* Data segment address. */
 
        uint16_t reserved[2];                   /* Reserved */
 
@@ -463,8 +465,7 @@ struct cmd_bidir {
        uint8_t port_id[3];                     /* PortID of destination port.*/
        uint8_t vp_index;
 
-       uint32_t fcp_data_dseg_address[2];      /* Data segment address. */
-       uint16_t fcp_data_dseg_len;             /* Data segment length. */
+       struct dsd64 fcp_dsd;
 };
 
 #define COMMAND_TYPE_6 0x48            /* Command Type 6 entry */
@@ -491,18 +492,18 @@ struct cmd_type_6 {
 #define CF_READ_DATA                   BIT_1
 #define CF_WRITE_DATA                  BIT_0
 
-       uint16_t fcp_cmnd_dseg_len;             /* Data segment length. */
-       uint32_t fcp_cmnd_dseg_address[2];      /* Data segment address. */
-
-       uint32_t fcp_rsp_dseg_address[2];       /* Data segment address. */
+       uint16_t fcp_cmnd_dseg_len;     /* Data segment length. */
+                                       /* Data segment address. */
+       __le64   fcp_cmnd_dseg_address __packed;
+                                       /* Data segment address. */
+       __le64   fcp_rsp_dseg_address __packed;
 
        uint32_t byte_count;            /* Total byte count. */
 
        uint8_t port_id[3];             /* PortID of destination port. */
        uint8_t vp_index;
 
-       uint32_t fcp_data_dseg_address[2];      /* Data segment address. */
-       uint32_t fcp_data_dseg_len;             /* Data segment length. */
+       struct dsd64 fcp_dsd;
 };
 
 #define COMMAND_TYPE_7 0x18            /* Command Type 7 entry */
@@ -548,8 +549,7 @@ struct cmd_type_7 {
        uint8_t port_id[3];             /* PortID of destination port. */
        uint8_t vp_index;
 
-       uint32_t dseg_0_address[2];     /* Data segment 0 address. */
-       uint32_t dseg_0_len;            /* Data segment 0 length. */
+       struct dsd64 dsd;
 };
 
 #define COMMAND_TYPE_CRC_2     0x6A    /* Command Type CRC_2 (Type 6)
@@ -573,17 +573,17 @@ struct cmd_type_crc_2 {
 
        uint16_t control_flags;         /* Control flags. */
 
-       uint16_t fcp_cmnd_dseg_len;             /* Data segment length. */
-       uint32_t fcp_cmnd_dseg_address[2];      /* Data segment address. */
-
-       uint32_t fcp_rsp_dseg_address[2];       /* Data segment address. */
+       uint16_t fcp_cmnd_dseg_len;     /* Data segment length. */
+       __le64   fcp_cmnd_dseg_address __packed;
+                                       /* Data segment address. */
+       __le64   fcp_rsp_dseg_address __packed;
 
        uint32_t byte_count;            /* Total byte count. */
 
        uint8_t port_id[3];             /* PortID of destination port. */
        uint8_t vp_index;
 
-       uint32_t crc_context_address[2];        /* Data segment address. */
+       __le64   crc_context_address __packed;  /* Data segment address. */
        uint16_t crc_context_len;               /* Data segment length. */
        uint16_t reserved_1;                    /* MUST be set to 0. */
 };
@@ -717,10 +717,7 @@ struct ct_entry_24xx {
        uint32_t rsp_byte_count;
        uint32_t cmd_byte_count;
 
-       uint32_t dseg_0_address[2];     /* Data segment 0 address. */
-       uint32_t dseg_0_len;            /* Data segment 0 length. */
-       uint32_t dseg_1_address[2];     /* Data segment 1 address. */
-       uint32_t dseg_1_len;            /* Data segment 1 length. */
+       struct dsd64 dsd[2];
 };
 
 /*
@@ -767,9 +764,9 @@ struct els_entry_24xx {
        uint32_t rx_byte_count;
        uint32_t tx_byte_count;
 
-       uint32_t tx_address[2];         /* Data segment 0 address. */
+       __le64   tx_address __packed;   /* Data segment 0 address. */
        uint32_t tx_len;                /* Data segment 0 length. */
-       uint32_t rx_address[2];         /* Data segment 1 address. */
+       __le64   rx_address __packed;   /* Data segment 1 address. */
        uint32_t rx_len;                /* Data segment 1 length. */
 };
 
@@ -1422,9 +1419,9 @@ struct vf_evfp_entry_24xx {
         uint16_t control_flags;
         uint32_t io_parameter_0;
         uint32_t io_parameter_1;
-        uint32_t tx_address[2];         /* Data segment 0 address. */
+       __le64   tx_address __packed;   /* Data segment 0 address. */
         uint32_t tx_len;                /* Data segment 0 length. */
-        uint32_t rx_address[2];         /* Data segment 1 address. */
+       __le64   rx_address __packed;   /* Data segment 1 address. */
         uint32_t rx_len;                /* Data segment 1 length. */
 };
 
@@ -1515,13 +1512,31 @@ struct qla_flt_header {
 #define FLT_REG_VPD_SEC_27XX_2 0xD8
 #define FLT_REG_VPD_SEC_27XX_3 0xDA
 
+/* 28xx */
+#define FLT_REG_AUX_IMG_PRI_28XX       0x125
+#define FLT_REG_AUX_IMG_SEC_28XX       0x126
+#define FLT_REG_VPD_SEC_28XX_0         0x10C
+#define FLT_REG_VPD_SEC_28XX_1         0x10E
+#define FLT_REG_VPD_SEC_28XX_2         0x110
+#define FLT_REG_VPD_SEC_28XX_3         0x112
+#define FLT_REG_NVRAM_SEC_28XX_0       0x10D
+#define FLT_REG_NVRAM_SEC_28XX_1       0x10F
+#define FLT_REG_NVRAM_SEC_28XX_2       0x111
+#define FLT_REG_NVRAM_SEC_28XX_3       0x113
+
 struct qla_flt_region {
-       uint32_t code;
+       uint16_t code;
+       uint8_t attribute;
+       uint8_t reserved;
        uint32_t size;
        uint32_t start;
        uint32_t end;
 };
 
+#define FLT_REGION_SIZE                16
+#define FLT_MAX_REGIONS                0xFF
+#define FLT_REGIONS_SIZE       (FLT_REGION_SIZE * FLT_MAX_REGIONS)
+
 /* Flash NPIV Configuration Table ********************************************/
 
 struct qla_npiv_header {
@@ -1588,8 +1603,7 @@ struct verify_chip_entry_84xx {
        uint32_t fw_seq_size;
        uint32_t relative_offset;
 
-       uint32_t dseg_address[2];
-       uint32_t dseg_length;
+       struct dsd64 dsd;
 };
 
 struct verify_chip_rsp_84xx {
@@ -1646,8 +1660,7 @@ struct access_chip_84xx {
        uint32_t total_byte_cnt;
        uint32_t reserved4;
 
-       uint32_t dseg_address[2];
-       uint32_t dseg_length;
+       struct dsd64 dsd;
 };
 
 struct access_chip_rsp_84xx {
@@ -1711,6 +1724,10 @@ struct access_chip_rsp_84xx {
 #define LR_DIST_FW_SHIFT       (LR_DIST_FW_POS - LR_DIST_NV_POS)
 #define LR_DIST_FW_FIELD(x)    ((x) << LR_DIST_FW_SHIFT & 0xf000)
 
+/* FAC semaphore defines */
+#define FAC_SEMAPHORE_UNLOCK    0
+#define FAC_SEMAPHORE_LOCK      1
+
 struct nvram_81xx {
        /* NVRAM header. */
        uint8_t id[4];
@@ -1757,7 +1774,7 @@ struct nvram_81xx {
        uint16_t reserved_6_3[14];
 
        /* Offset 192. */
-       uint8_t min_link_speed;
+       uint8_t min_supported_speed;
        uint8_t reserved_7_0;
        uint16_t reserved_7[31];
 
@@ -1911,15 +1928,15 @@ struct init_cb_81xx {
 
        uint16_t prio_request_q_length;
 
-       uint32_t request_q_address[2];
-       uint32_t response_q_address[2];
-       uint32_t prio_request_q_address[2];
+       __le64   request_q_address __packed;
+       __le64   response_q_address __packed;
+       __le64   prio_request_q_address __packed;
 
        uint8_t reserved_4[8];
 
        uint16_t atio_q_inpointer;
        uint16_t atio_q_length;
-       uint32_t atio_q_address[2];
+       __le64   atio_q_address __packed;
 
        uint16_t interrupt_delay_timer;         /* 100us increments. */
        uint16_t login_timeout;
@@ -2005,6 +2022,8 @@ struct ex_init_cb_81xx {
 
 #define FARX_ACCESS_FLASH_CONF_81XX    0x7FFD0000
 #define FARX_ACCESS_FLASH_DATA_81XX    0x7F800000
+#define FARX_ACCESS_FLASH_CONF_28XX    0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_28XX    0x7F7D0000
 
 /* FCP priority config defines *************************************/
 /* operations */
@@ -2079,6 +2098,7 @@ struct qla_fcp_prio_cfg {
 #define FA_NPIV_CONF1_ADDR_81  0xD2000
 
 /* 83XX Flash locations -- occupies second 8MB region. */
-#define FA_FLASH_LAYOUT_ADDR_83        0xFC400
+#define FA_FLASH_LAYOUT_ADDR_83        (0x3F1000/4)
+#define FA_FLASH_LAYOUT_ADDR_28        (0x11000/4)
 
 #endif
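
struct qla_flt_region shrinks the region code to 16 bits and adds an attribute byte, and FLT_REGION_SIZE/FLT_MAX_REGIONS bound the table of 16-byte entries. A simplified sketch of walking the table into the new ha->flt_region_* group (assumptions: flt->length is the byte length of the entry array and start offsets are converted to dword addresses; the driver's own FLT parser handles many more region codes, including the secondary-image variants added above):

	struct qla_flt_header *flt = ha->flt;
	struct qla_flt_region *region = (struct qla_flt_region *)&flt[1];
	uint16_t cnt = le16_to_cpu(flt->length) / sizeof(*region);

	if (cnt > FLT_MAX_REGIONS)
		cnt = FLT_MAX_REGIONS;	/* never trust flash contents */

	for ( ; cnt; cnt--, region++) {
		uint32_t start = le32_to_cpu(region->start) >> 2;

		if (le16_to_cpu(region->code) == FLT_REG_FW)
			ha->flt_region_fw = start;
		/* ... remaining region codes ... */
	}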
index 4eefe69..bbe69ab 100644 (file)
@@ -18,14 +18,14 @@ extern int qla2100_pci_config(struct scsi_qla_host *);
 extern int qla2300_pci_config(struct scsi_qla_host *);
 extern int qla24xx_pci_config(scsi_qla_host_t *);
 extern int qla25xx_pci_config(scsi_qla_host_t *);
-extern void qla2x00_reset_chip(struct scsi_qla_host *);
-extern void qla24xx_reset_chip(struct scsi_qla_host *);
+extern int qla2x00_reset_chip(struct scsi_qla_host *);
+extern int qla24xx_reset_chip(struct scsi_qla_host *);
 extern int qla2x00_chip_diag(struct scsi_qla_host *);
 extern int qla24xx_chip_diag(struct scsi_qla_host *);
 extern void qla2x00_config_rings(struct scsi_qla_host *);
 extern void qla24xx_config_rings(struct scsi_qla_host *);
-extern void qla2x00_reset_adapter(struct scsi_qla_host *);
-extern void qla24xx_reset_adapter(struct scsi_qla_host *);
+extern int qla2x00_reset_adapter(struct scsi_qla_host *);
+extern int qla24xx_reset_adapter(struct scsi_qla_host *);
 extern int qla2x00_nvram_config(struct scsi_qla_host *);
 extern int qla24xx_nvram_config(struct scsi_qla_host *);
 extern int qla81xx_nvram_config(struct scsi_qla_host *);
@@ -38,8 +38,7 @@ extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
 
 extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
 extern int qla2x00_loop_resync(scsi_qla_host_t *);
-
-extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
+extern void qla2x00_clear_loop_id(fc_port_t *fcport);
 
 extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
 extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
@@ -80,6 +79,7 @@ int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
 extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
 
+extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
 extern fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
 
@@ -93,7 +93,6 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
 extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
 extern int qla2x00_init_rings(scsi_qla_host_t *);
-extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
 extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
        int, int, bool);
 extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
@@ -108,6 +107,11 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_detect_sfp(scsi_qla_host_t *vha);
 int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
 
+extern void qla28xx_get_aux_images(struct scsi_qla_host *,
+    struct active_regions *);
+extern void qla27xx_get_active_image(struct scsi_qla_host *,
+    struct active_regions *);
+
 void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +122,7 @@ int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
 void qla_rscn_replay(fc_port_t *fcport);
+extern bool qla24xx_risc_firmware_invalid(uint32_t *);
 
 /*
  * Global Data in qla_os.c source file.
@@ -215,7 +220,6 @@ extern void qla24xx_sched_upd_fcport(fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
        uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
-int qla24xx_async_abort_cmd(srb_t *, bool);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
 void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
@@ -238,7 +242,7 @@ extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
     struct vp_rpt_id_entry_24xx *);
 extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
 extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
-extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
+extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
 
 extern void qla2x00_sp_free_dma(void *);
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
@@ -276,21 +280,20 @@ extern int qla2x00_start_sp(srb_t *);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
 extern int qla2xxx_dif_start_scsi_mq(srb_t *);
+extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
 extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
 extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tc_param *);
+       struct dsd64 *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tc_param *);
+       struct dsd64 *, uint16_t, struct qla_tc_param *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
-       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+       struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
-extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
-       struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -466,6 +469,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
 extern int
 qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
 
+extern int qla81xx_fac_semaphore_access(scsi_qla_host_t *, int);
+
 extern int
 qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
 
@@ -511,6 +516,14 @@ extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
 extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
 int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
 
+extern int qla28xx_secure_flash_update(scsi_qla_host_t *, uint16_t, uint16_t,
+    uint32_t, dma_addr_t, uint32_t);
+
+extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t,
+    uint32_t *);
+extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t,
+    uint32_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -542,19 +555,20 @@ fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
  */
 extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
 extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
-                                        uint32_t, uint32_t);
-extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-                                       uint32_t);
-extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-                                       uint32_t);
-extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-                                   uint32_t);
-extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-                                   uint32_t);
-extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-                                       uint32_t);
-extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
-                                   uint32_t);
+    uint32_t, uint32_t);
+extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern int qla2x00_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern int qla24xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t,
+    uint32_t);
+
 extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
 bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
 bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
@@ -574,18 +588,18 @@ extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
 extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
                                  uint32_t, uint16_t *);
 
-extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla2x00_read_optrom_data(struct scsi_qla_host *, void *,
                                         uint32_t, uint32_t);
-extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla2x00_write_optrom_data(struct scsi_qla_host *, void *,
                                     uint32_t, uint32_t);
-extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla24xx_read_optrom_data(struct scsi_qla_host *, void *,
                                         uint32_t, uint32_t);
-extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla24xx_write_optrom_data(struct scsi_qla_host *, void *,
                                     uint32_t, uint32_t);
-extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla25xx_read_optrom_data(struct scsi_qla_host *, void *,
                                         uint32_t, uint32_t);
-extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
-                                        uint8_t *, uint32_t, uint32_t);
+extern void *qla8044_read_optrom_data(struct scsi_qla_host *,
+                                        void *, uint32_t, uint32_t);
 extern void qla8044_watchdog(struct scsi_qla_host *vha);
 
 extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
@@ -610,20 +624,13 @@ extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla8044_fw_dump(scsi_qla_host_t *, int);
 
 extern void qla27xx_fwdump(scsi_qla_host_t *, int);
-extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
 extern int qla27xx_fwdt_template_valid(void *);
 extern ulong qla27xx_fwdt_template_size(void *);
-extern const void *qla27xx_fwdt_template_default(void);
-extern ulong qla27xx_fwdt_template_default_size(void);
-
-extern void qla2x00_dump_regs(scsi_qla_host_t *);
-extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
-extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
-extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
-extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
-                          uint8_t *, uint32_t);
-extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
 
+extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
+extern void ql_dump_regs(uint, scsi_qla_host_t *, uint);
+extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, void *, uint);
 /*
  * Global Function Prototypes in qla_gs.c source file.
  */
@@ -722,7 +729,7 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 /* qlafx00 related functions */
 extern int qlafx00_pci_config(struct scsi_qla_host *);
 extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
-extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_soft_reset(scsi_qla_host_t *);
 extern int qlafx00_chip_diag(scsi_qla_host_t *);
 extern void qlafx00_config_rings(struct scsi_qla_host *);
 extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
@@ -765,16 +772,16 @@ extern int qla82xx_pci_region_offset(struct pci_dev *, int);
 extern int qla82xx_iospace_config(struct qla_hw_data *);
 
 /* Initialization related functions */
-extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern int qla82xx_reset_chip(struct scsi_qla_host *);
 extern void qla82xx_config_rings(struct scsi_qla_host *);
 extern void qla82xx_watchdog(scsi_qla_host_t *);
 extern int qla82xx_start_firmware(scsi_qla_host_t *);
 
 /* Firmware and flash related functions */
 extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
-extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern void *qla82xx_read_optrom_data(struct scsi_qla_host *, void *,
                                         uint32_t, uint32_t);
-extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, void *,
                                     uint32_t, uint32_t);
 
 /* Mailbox related functions */
@@ -870,7 +877,7 @@ extern void qla8044_clear_drv_active(struct qla_hw_data *);
 void qla8044_get_minidump(struct scsi_qla_host *vha);
 int qla8044_collect_md_data(struct scsi_qla_host *vha);
 extern int qla8044_md_get_template(scsi_qla_host_t *);
-extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, void *,
                                     uint32_t, uint32_t);
 extern irqreturn_t qla8044_intr_handler(int, void *);
 extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
index c6fdad1..9f58e59 100644 (file)
@@ -45,13 +45,11 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
        ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
        ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
 
-       ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
-       ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
-       ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+       put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
+       ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
 
-       ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
-       ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
-       ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+       put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
+       ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
 
        vha->qla_stats.control_requests++;
 
@@ -83,13 +81,11 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
        ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
        ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
 
-       ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
-       ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
-       ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+       put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
+       ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
 
-       ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
-       ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
-       ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+       put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
+       ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
        ct_pkt->vp_index = vha->vp_idx;
 
        vha->qla_stats.control_requests++;
@@ -152,8 +148,8 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
                                    vha->d_id.b.area, vha->d_id.b.al_pa,
                                    comp_status, ct_rsp->header.response);
                                ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
-                                   0x2078, (uint8_t *)&ct_rsp->header,
-                                   sizeof(struct ct_rsp_hdr));
+                                   0x2078, ct_rsp,
+                                   offsetof(typeof(*ct_rsp), rsp));
                                rval = QLA_INVALID_COMMAND;
                        } else
                                rval = QLA_SUCCESS;
@@ -1000,8 +996,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
        memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
        wc = data_size / 2;                     /* Size in 16bit words. */
        sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
-       sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
-       sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
+       put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
        sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
        sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
        wc = (data_size - 16) / 4;              /* Size in 32bit words. */
@@ -1385,6 +1380,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
        int ret, rval;
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        struct qla_hw_data *ha = vha->hw;
+
        ret = QLA_SUCCESS;
        if (vha->flags.management_server_logged_in)
                return ret;
@@ -1423,6 +1419,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
 {
        ms_iocb_entry_t *ms_pkt;
        struct qla_hw_data *ha = vha->hw;
+
        ms_pkt = ha->ms_iocb;
        memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
 
@@ -1436,13 +1433,11 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
        ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
        ms_pkt->req_bytecount = cpu_to_le32(req_size);
 
-       ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-       ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-       ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+       put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
+       ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
 
-       ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-       ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-       ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+       put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
+       ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
 
        return ms_pkt;
 }
@@ -1474,13 +1469,11 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
        ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
        ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 
-       ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-       ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-       ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+       put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
+       ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
 
-       ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
-       ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
-       ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+       put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
+       ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
        ct_pkt->vp_index = vha->vp_idx;
 
        return ct_pkt;
@@ -1495,10 +1488,10 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
 
        if (IS_FWI2_CAPABLE(ha)) {
                ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
-               ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+               ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
        } else {
                ms_pkt->req_bytecount = cpu_to_le32(req_size);
-               ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+               ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
        }
 
        return ms_pkt;
@@ -1794,7 +1787,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
        if (IS_CNA_CAPABLE(ha))
                eiter->a.sup_speed = cpu_to_be32(
                    FDMI_PORT_SPEED_10GB);
-       else if (IS_QLA27XX(ha))
+       else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                eiter->a.sup_speed = cpu_to_be32(
                    FDMI_PORT_SPEED_32GB|
                    FDMI_PORT_SPEED_16GB|
@@ -2373,7 +2366,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
        if (IS_CNA_CAPABLE(ha))
                eiter->a.sup_speed = cpu_to_be32(
                    FDMI_PORT_SPEED_10GB);
-       else if (IS_QLA27XX(ha))
+       else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                eiter->a.sup_speed = cpu_to_be32(
                    FDMI_PORT_SPEED_32GB|
                    FDMI_PORT_SPEED_16GB|
@@ -2446,7 +2439,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
        eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
        eiter->len = cpu_to_be16(4 + 4);
        eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
-           le16_to_cpu(icb24->frame_payload_size):
+           le16_to_cpu(icb24->frame_payload_size) :
            le16_to_cpu(ha->init_cb->frame_payload_size);
        eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
        size += 4 + 4;
@@ -2783,6 +2776,31 @@ qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
        return &p->p.req;
 }
 
+static uint16_t
+qla2x00_port_speed_capability(uint16_t speed)
+{
+       switch (speed) {
+       case BIT_15:
+               return PORT_SPEED_1GB;
+       case BIT_14:
+               return PORT_SPEED_2GB;
+       case BIT_13:
+               return PORT_SPEED_4GB;
+       case BIT_12:
+               return PORT_SPEED_10GB;
+       case BIT_11:
+               return PORT_SPEED_8GB;
+       case BIT_10:
+               return PORT_SPEED_16GB;
+       case BIT_8:
+               return PORT_SPEED_32GB;
+       case BIT_7:
+               return PORT_SPEED_64GB;
+       default:
+               return PORT_SPEED_UNKNOWN;
+       }
+}
+
 /**
  * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
  * @vha: HA context
@@ -2855,31 +2873,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
                        }
                        rval = QLA_FUNCTION_FAILED;
                } else {
-                       /* Save port-speed */
-                       switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
-                       case BIT_15:
-                               list[i].fp_speed = PORT_SPEED_1GB;
-                               break;
-                       case BIT_14:
-                               list[i].fp_speed = PORT_SPEED_2GB;
-                               break;
-                       case BIT_13:
-                               list[i].fp_speed = PORT_SPEED_4GB;
-                               break;
-                       case BIT_12:
-                               list[i].fp_speed = PORT_SPEED_10GB;
-                               break;
-                       case BIT_11:
-                               list[i].fp_speed = PORT_SPEED_8GB;
-                               break;
-                       case BIT_10:
-                               list[i].fp_speed = PORT_SPEED_16GB;
-                               break;
-                       case BIT_8:
-                               list[i].fp_speed = PORT_SPEED_32GB;
-                               break;
-                       }
-
+                       list[i].fp_speed = qla2x00_port_speed_capability(
+                           be16_to_cpu(ct_rsp->rsp.gpsc.speed));
                        ql_dbg(ql_dbg_disc, vha, 0x205b,
                            "GPSC ext entry - fpn "
                            "%8phN speeds=%04x speed=%04x.\n",
@@ -3031,6 +3026,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
            "Async done-%s res %x, WWPN %8phC \n",
            sp->name, res, fcport->port_name);
 
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
        if (res == QLA_FUNCTION_TIMEOUT)
                return;
 
@@ -3048,29 +3045,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
                        goto done;
                }
        } else {
-               switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
-               case BIT_15:
-                       fcport->fp_speed = PORT_SPEED_1GB;
-                       break;
-               case BIT_14:
-                       fcport->fp_speed = PORT_SPEED_2GB;
-                       break;
-               case BIT_13:
-                       fcport->fp_speed = PORT_SPEED_4GB;
-                       break;
-               case BIT_12:
-                       fcport->fp_speed = PORT_SPEED_10GB;
-                       break;
-               case BIT_11:
-                       fcport->fp_speed = PORT_SPEED_8GB;
-                       break;
-               case BIT_10:
-                       fcport->fp_speed = PORT_SPEED_16GB;
-                       break;
-               case BIT_8:
-                       fcport->fp_speed = PORT_SPEED_32GB;
-                       break;
-               }
+               fcport->fp_speed = qla2x00_port_speed_capability(
+                   be16_to_cpu(ct_rsp->rsp.gpsc.speed));
 
                ql_dbg(ql_dbg_disc, vha, 0x2054,
                    "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
@@ -4370,6 +4346,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
        sp->free(sp);
+       fcport->flags &= ~FCF_ASYNC_SENT;
 done:
        return rval;
 }
index 0c700b1..54772d4 100644 (file)
@@ -95,6 +95,79 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
        return tmo;
 }
 
+static void qla24xx_abort_iocb_timeout(void *data)
+{
+       srb_t *sp = data;
+       struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+       abt->u.abt.comp_status = CS_TIMEOUT;
+       sp->done(sp, QLA_FUNCTION_TIMEOUT);
+}
+
+static void qla24xx_abort_sp_done(void *ptr, int res)
+{
+       srb_t *sp = ptr;
+       struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+       if (del_timer(&sp->u.iocb_cmd.timer)) {
+               if (sp->flags & SRB_WAKEUP_ON_COMP)
+                       complete(&abt->u.abt.comp);
+               else
+                       sp->free(sp);
+       }
+}
+
+static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+{
+       scsi_qla_host_t *vha = cmd_sp->vha;
+       struct srb_iocb *abt_iocb;
+       srb_t *sp;
+       int rval = QLA_FUNCTION_FAILED;
+
+       sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+                                 GFP_ATOMIC);
+       if (!sp)
+               goto done;
+
+       abt_iocb = &sp->u.iocb_cmd;
+       sp->type = SRB_ABT_CMD;
+       sp->name = "abort";
+       sp->qpair = cmd_sp->qpair;
+       if (wait)
+               sp->flags = SRB_WAKEUP_ON_COMP;
+
+       abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+       init_completion(&abt_iocb->u.abt.comp);
+       /* FW can send 2 x ABTS's timeout/20s */
+       qla2x00_init_timer(sp, 42);
+
+       abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+       abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
+
+       sp->done = qla24xx_abort_sp_done;
+
+       ql_dbg(ql_dbg_async, vha, 0x507c,
+              "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
+              cmd_sp->type);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
+
+       if (wait) {
+               wait_for_completion(&abt_iocb->u.abt.comp);
+               rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+                       QLA_SUCCESS : QLA_FUNCTION_FAILED;
+       } else {
+               goto done;
+       }
+
+done_free_sp:
+       sp->free(sp);
+done:
+       return rval;
+}
+
 void
 qla2x00_async_iocb_timeout(void *data)
 {
@@ -514,6 +587,72 @@ done:
        return rval;
 }
 
+static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (IS_FWI2_CAPABLE(ha))
+               return loop_id > NPH_LAST_HANDLE;
+
+       return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+               loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
+}
+
+/**
+ * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
+ * @vha: adapter state pointer.
+ * @dev: port structure pointer.
+ *
+ * Returns:
+ *     qla2x00 local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
+{
+       int     rval;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
+
+       rval = QLA_SUCCESS;
+
+       spin_lock_irqsave(&ha->vport_slock, flags);
+
+       dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
+       if (dev->loop_id >= LOOPID_MAP_SIZE ||
+           qla2x00_is_reserved_id(vha, dev->loop_id)) {
+               dev->loop_id = FC_NO_LOOP_ID;
+               rval = QLA_FUNCTION_FAILED;
+       } else {
+               set_bit(dev->loop_id, ha->loop_id_map);
+       }
+       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+       if (rval == QLA_SUCCESS)
+               ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+                      "Assigning new loopid=%x, portid=%x.\n",
+                      dev->loop_id, dev->d_id.b24);
+       else
+               ql_log(ql_log_warn, dev->vha, 0x2087,
+                      "No loop_id's available, portid=%x.\n",
+                      dev->d_id.b24);
+
+       return rval;
+}
+
+void qla2x00_clear_loop_id(fc_port_t *fcport)
+{
+       struct qla_hw_data *ha = fcport->vha->hw;
+
+       if (fcport->loop_id == FC_NO_LOOP_ID ||
+           qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+               return;
+
+       clear_bit(fcport->loop_id, ha->loop_id_map);
+       fcport->loop_id = FC_NO_LOOP_ID;
+}
+
 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
        struct event_arg *ea)
 {
@@ -1482,6 +1621,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
     u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
 {
        struct qla_work_evt *e;
+
        e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
        if (!e)
                return QLA_FUNCTION_FAILED;
@@ -1558,6 +1698,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
                        return;
                {
                        unsigned long flags;
+
                        fcport = qla2x00_find_fcport_by_nportid
                                (vha, &ea->id, 1);
                        if (fcport) {
@@ -1620,21 +1761,21 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
  */
 void qla_rscn_replay(fc_port_t *fcport)
 {
-       struct event_arg ea;
+       struct event_arg ea;
 
-       switch (fcport->disc_state) {
-       case DSC_DELETE_PEND:
-               return;
-       default:
-               break;
-       }
+       switch (fcport->disc_state) {
+       case DSC_DELETE_PEND:
+               return;
+       default:
+               break;
+       }
 
-       if (fcport->scan_needed) {
-               memset(&ea, 0, sizeof(ea));
-               ea.event = FCME_RSCN;
-               ea.id = fcport->d_id;
-               ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
-               qla2x00_fcport_event_handler(fcport->vha, &ea);
+       if (fcport->scan_needed) {
+               memset(&ea, 0, sizeof(ea));
+               ea.event = FCME_RSCN;
+               ea.id = fcport->d_id;
+               ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+               qla2x00_fcport_event_handler(fcport->vha, &ea);
        }
 }
 
@@ -1717,82 +1858,6 @@ done:
        return rval;
 }
 
-static void
-qla24xx_abort_iocb_timeout(void *data)
-{
-       srb_t *sp = data;
-       struct srb_iocb *abt = &sp->u.iocb_cmd;
-
-       abt->u.abt.comp_status = CS_TIMEOUT;
-       sp->done(sp, QLA_FUNCTION_TIMEOUT);
-}
-
-static void
-qla24xx_abort_sp_done(void *ptr, int res)
-{
-       srb_t *sp = ptr;
-       struct srb_iocb *abt = &sp->u.iocb_cmd;
-
-       if (del_timer(&sp->u.iocb_cmd.timer)) {
-               if (sp->flags & SRB_WAKEUP_ON_COMP)
-                       complete(&abt->u.abt.comp);
-               else
-                       sp->free(sp);
-       }
-}
-
-int
-qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
-{
-       scsi_qla_host_t *vha = cmd_sp->vha;
-       struct srb_iocb *abt_iocb;
-       srb_t *sp;
-       int rval = QLA_FUNCTION_FAILED;
-
-       sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
-           GFP_ATOMIC);
-       if (!sp)
-               goto done;
-
-       abt_iocb = &sp->u.iocb_cmd;
-       sp->type = SRB_ABT_CMD;
-       sp->name = "abort";
-       sp->qpair = cmd_sp->qpair;
-       if (wait)
-               sp->flags = SRB_WAKEUP_ON_COMP;
-
-       abt_iocb->timeout = qla24xx_abort_iocb_timeout;
-       init_completion(&abt_iocb->u.abt.comp);
-       /* FW can send 2 x ABTS's timeout/20s */
-       qla2x00_init_timer(sp, 42);
-
-       abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-       abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
-
-       sp->done = qla24xx_abort_sp_done;
-
-       ql_dbg(ql_dbg_async, vha, 0x507c,
-           "Abort command issued - hdl=%x, type=%x\n",
-           cmd_sp->handle, cmd_sp->type);
-
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
-       if (wait) {
-               wait_for_completion(&abt_iocb->u.abt.comp);
-               rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
-                       QLA_SUCCESS : QLA_FUNCTION_FAILED;
-       } else {
-               goto done;
-       }
-
-done_free_sp:
-       sp->free(sp);
-done:
-       return rval;
-}
-
 int
 qla24xx_async_abort_command(srb_t *sp)
 {
@@ -2102,6 +2167,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
        int     rval;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
+       struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
        memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
        memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
@@ -2136,6 +2202,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 
        ha->isp_ops->reset_chip(vha);
 
+       /* Check for secure flash support */
+       if (IS_QLA28XX(ha)) {
+               if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
+                       ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
+                       ha->flags.secure_adapter = 1;
+               }
+       }
+
+
        rval = qla2xxx_get_flash_info(vha);
        if (rval) {
                ql_log(ql_log_fatal, vha, 0x004f,
@@ -2452,7 +2527,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
  *
  * Returns 0 on success.
  */
-void
+int
 qla2x00_reset_chip(scsi_qla_host_t *vha)
 {
        unsigned long   flags = 0;
@@ -2460,9 +2535,10 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
        uint32_t        cnt;
        uint16_t        cmd;
+       int rval = QLA_FUNCTION_FAILED;
 
        if (unlikely(pci_channel_offline(ha->pdev)))
-               return;
+               return rval;
 
        ha->isp_ops->disable_intrs(ha);
 
@@ -2588,6 +2664,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
        }
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return QLA_SUCCESS;
 }
 
 /**
@@ -2828,14 +2906,15 @@ acquired:
  *
  * Returns 0 on success.
  */
-void
+int
 qla24xx_reset_chip(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
+       int rval = QLA_FUNCTION_FAILED;
 
        if (pci_channel_offline(ha->pdev) &&
            ha->flags.pci_channel_io_perm_failure) {
-               return;
+               return rval;
        }
 
        ha->isp_ops->disable_intrs(ha);
@@ -2843,7 +2922,9 @@ qla24xx_reset_chip(scsi_qla_host_t *vha)
        qla25xx_manipulate_risc_semaphore(vha);
 
        /* Perform RISC reset. */
-       qla24xx_reset_risc(vha);
+       rval = qla24xx_reset_risc(vha);
+
+       return rval;
 }
 
 /**
@@ -3018,7 +3099,7 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
        if (IS_FWI2_CAPABLE(ha)) {
                /* Allocate memory for Fibre Channel Event Buffer. */
                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-                   !IS_QLA27XX(ha))
+                   !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                        goto try_eft;
 
                if (ha->fce)
@@ -3089,12 +3170,15 @@ eft_err:
 void
 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 {
+       int rval;
        uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
            eft_size, fce_size, mq_size;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
        struct qla2xxx_fw_dump *fw_dump;
+       dma_addr_t tc_dma;
+       void *tc;
 
        dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
        req_q_size = rsp_q_size = 0;
@@ -3106,7 +3190,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                mem_size = (ha->fw_memory_size - 0x11000 + 1) *
                    sizeof(uint16_t);
        } else if (IS_FWI2_CAPABLE(ha)) {
-               if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                        fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
                else if (IS_QLA81XX(ha))
                        fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -3118,40 +3202,72 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                mem_size = (ha->fw_memory_size - 0x100000 + 1) *
                    sizeof(uint32_t);
                if (ha->mqenable) {
-                       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+                       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
+                           !IS_QLA28XX(ha))
                                mq_size = sizeof(struct qla2xxx_mq_chain);
                        /*
-                        * Allocate maximum buffer size for all queues.
+                        * Allocate maximum buffer size for all queues - Q0.
                         * Resizing must be done at end-of-dump processing.
                         */
-                       mq_size += ha->max_req_queues *
+                       mq_size += (ha->max_req_queues - 1) *
                            (req->length * sizeof(request_t));
-                       mq_size += ha->max_rsp_queues *
+                       mq_size += (ha->max_rsp_queues - 1) *
                            (rsp->length * sizeof(response_t));
                }
                if (ha->tgt.atio_ring)
                        mq_size += ha->tgt.atio_q_length * sizeof(request_t);
                /* Allocate memory for Fibre Channel Event Buffer. */
                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-                   !IS_QLA27XX(ha))
+                   !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                        goto try_eft;
 
                fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
 try_eft:
+               if (ha->eft)
+                       dma_free_coherent(&ha->pdev->dev,
+                           EFT_SIZE, ha->eft, ha->eft_dma);
+
+               /* Allocate memory for Extended Trace Buffer. */
+               tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+                                        GFP_KERNEL);
+               if (!tc) {
+                       ql_log(ql_log_warn, vha, 0x00c1,
+                           "Unable to allocate (%d KB) for EFT.\n",
+                           EFT_SIZE / 1024);
+                       goto allocate;
+               }
+
+               rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+               if (rval) {
+                       ql_log(ql_log_warn, vha, 0x00c2,
+                           "Unable to initialize EFT (%d).\n", rval);
+                       dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+                           tc_dma);
+               }
                ql_dbg(ql_dbg_init, vha, 0x00c3,
                    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
                eft_size = EFT_SIZE;
        }
 
-       if (IS_QLA27XX(ha)) {
-               if (!ha->fw_dump_template) {
-                       ql_log(ql_log_warn, vha, 0x00ba,
-                           "Failed missing fwdump template\n");
-                       return;
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+               struct fwdt *fwdt = ha->fwdt;
+               uint j;
+
+               for (j = 0; j < 2; j++, fwdt++) {
+                       if (!fwdt->template) {
+                               ql_log(ql_log_warn, vha, 0x00ba,
+                                   "-> fwdt%u no template\n", j);
+                               continue;
+                       }
+                       ql_dbg(ql_dbg_init, vha, 0x00fa,
+                           "-> fwdt%u calculating fwdump size...\n", j);
+                       fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
+                           vha, fwdt->template);
+                       ql_dbg(ql_dbg_init, vha, 0x00fa,
+                           "-> fwdt%u calculated fwdump size = %#lx bytes\n",
+                           j, fwdt->dump_size);
+                       dump_size += fwdt->dump_size;
                }
-               dump_size = qla27xx_fwdt_calculate_dump_size(vha);
-               ql_dbg(ql_dbg_init, vha, 0x00fa,
-                   "-> allocating fwdump (%x bytes)...\n", dump_size);
                goto allocate;
        }
 
@@ -3170,42 +3286,66 @@ try_eft:
                        ha->exlogin_size;
 
 allocate:
-       if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+       if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
+
+               ql_dbg(ql_dbg_init, vha, 0x00c5,
+                   "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
+                   __func__, dump_size, ha->fw_dump_len,
+                   ha->fw_dump_alloc_len);
+
                fw_dump = vmalloc(dump_size);
                if (!fw_dump) {
                        ql_log(ql_log_warn, vha, 0x00c4,
                            "Unable to allocate (%d KB) for firmware dump.\n",
                            dump_size / 1024);
                } else {
-                       if (ha->fw_dump)
+                       mutex_lock(&ha->optrom_mutex);
+                       if (ha->fw_dumped) {
+                               memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
                                vfree(ha->fw_dump);
-                       ha->fw_dump = fw_dump;
-
-                       ha->fw_dump_len = dump_size;
-                       ql_dbg(ql_dbg_init, vha, 0x00c5,
-                           "Allocated (%d KB) for firmware dump.\n",
-                           dump_size / 1024);
-
-                       if (IS_QLA27XX(ha))
-                               return;
-
-                       ha->fw_dump->signature[0] = 'Q';
-                       ha->fw_dump->signature[1] = 'L';
-                       ha->fw_dump->signature[2] = 'G';
-                       ha->fw_dump->signature[3] = 'C';
-                       ha->fw_dump->version = htonl(1);
-
-                       ha->fw_dump->fixed_size = htonl(fixed_size);
-                       ha->fw_dump->mem_size = htonl(mem_size);
-                       ha->fw_dump->req_q_size = htonl(req_q_size);
-                       ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
-                       ha->fw_dump->eft_size = htonl(eft_size);
-                       ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
-                       ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+                               ha->fw_dump = fw_dump;
+                               ha->fw_dump_alloc_len = dump_size;
+                               ql_dbg(ql_dbg_init, vha, 0x00c5,
+                                   "Re-Allocated (%d KB) and save firmware dump.\n",
+                                   dump_size / 1024);
+                       } else {
+                               if (ha->fw_dump)
+                                       vfree(ha->fw_dump);
+                               ha->fw_dump = fw_dump;
+
+                               ha->fw_dump_len = ha->fw_dump_alloc_len =
+                                   dump_size;
+                               ql_dbg(ql_dbg_init, vha, 0x00c5,
+                                   "Allocated (%d KB) for firmware dump.\n",
+                                   dump_size / 1024);
+
+                               if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+                                       mutex_unlock(&ha->optrom_mutex);
+                                       return;
+                               }
 
-                       ha->fw_dump->header_size =
-                               htonl(offsetof(struct qla2xxx_fw_dump, isp));
+                               ha->fw_dump->signature[0] = 'Q';
+                               ha->fw_dump->signature[1] = 'L';
+                               ha->fw_dump->signature[2] = 'G';
+                               ha->fw_dump->signature[3] = 'C';
+                               ha->fw_dump->version = htonl(1);
+
+                               ha->fw_dump->fixed_size = htonl(fixed_size);
+                               ha->fw_dump->mem_size = htonl(mem_size);
+                               ha->fw_dump->req_q_size = htonl(req_q_size);
+                               ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+                               ha->fw_dump->eft_size = htonl(eft_size);
+                               ha->fw_dump->eft_addr_l =
+                                   htonl(LSD(ha->eft_dma));
+                               ha->fw_dump->eft_addr_h =
+                                   htonl(MSD(ha->eft_dma));
+
+                               ha->fw_dump->header_size =
+                                       htonl(offsetof
+                                           (struct qla2xxx_fw_dump, isp));
+                       }
+                       mutex_unlock(&ha->optrom_mutex);
                }
        }
 }
@@ -3498,7 +3638,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
                        if (rval == QLA_SUCCESS) {
                                qla24xx_detect_sfp(vha);
 
-                               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+                               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+                                   IS_QLA28XX(ha)) &&
                                    (ha->zio_mode == QLA_ZIO_MODE_6))
                                        qla27xx_set_zio_threshold(vha,
                                            ha->last_zio_threshold);
@@ -3570,7 +3711,7 @@ enable_82xx_npiv:
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
        }
 
-       if (IS_QLA27XX(ha))
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                ha->flags.fac_supported = 1;
        else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
                uint32_t size;
@@ -3585,7 +3726,8 @@ enable_82xx_npiv:
                            ha->fw_major_version, ha->fw_minor_version,
                            ha->fw_subminor_version);
 
-                       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+                       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+                           IS_QLA28XX(ha)) {
                                ha->flags.fac_supported = 0;
                                rval = QLA_SUCCESS;
                        }
@@ -3647,8 +3789,7 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
            "Serial link options.\n");
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
-           (uint8_t *)&ha->fw_seriallink_options,
-           sizeof(ha->fw_seriallink_options));
+           ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
 
        ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
        if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -3738,7 +3879,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
 
        /* Move PUREX, ABTS RX & RIDA to ATIOQ */
        if (ql2xmvasynctoatio &&
-           (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+           (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
                if (qla_tgt_mode_enabled(vha) ||
                    qla_dual_mode_enabled(vha))
                        ha->fw_options[2] |= BIT_11;
@@ -3746,7 +3887,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
                        ha->fw_options[2] &= ~BIT_11;
        }
 
-       if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                /*
                 * Tell FW to track each exchange to prevent
                 * driver from using stale exchange.
@@ -3799,10 +3941,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
        ha->init_cb->response_q_inpointer = cpu_to_le16(0);
        ha->init_cb->request_q_length = cpu_to_le16(req->length);
        ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
-       ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-       ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-       ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-       ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+       put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
+       put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
 
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
        WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -3829,21 +3969,19 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
        icb->response_q_inpointer = cpu_to_le16(0);
        icb->request_q_length = cpu_to_le16(req->length);
        icb->response_q_length = cpu_to_le16(rsp->length);
-       icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-       icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-       icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-       icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+       put_unaligned_le64(req->dma, &icb->request_q_address);
+       put_unaligned_le64(rsp->dma, &icb->response_q_address);
 
        /* Setup ATIO queue dma pointers for target mode */
        icb->atio_q_inpointer = cpu_to_le16(0);
        icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
-       icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
-       icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+       put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
 
        if (IS_SHADOW_REG_CAPABLE(ha))
                icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
 
-       if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
                icb->rid = cpu_to_le16(rid);
                if (ha->flags.msix_enabled) {
@@ -4266,11 +4404,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 {
        char *st, *en;
        uint16_t index;
+       uint64_t zero[2] = { 0 };
        struct qla_hw_data *ha = vha->hw;
        int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
            !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
 
-       if (memcmp(model, BINZERO, len) != 0) {
+       if (len > sizeof(zero))
+               len = sizeof(zero);
+       if (memcmp(model, &zero, len) != 0) {
                strncpy(ha->model_number, model, len);
                st = en = ha->model_number;
                en += len - 1;
@@ -4357,7 +4498,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
        rval = QLA_SUCCESS;
 
        /* Determine NVRAM starting address. */
-       ha->nvram_size = sizeof(nvram_t);
+       ha->nvram_size = sizeof(*nv);
        ha->nvram_base = 0;
        if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
                if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
@@ -4371,16 +4512,15 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
            "Contents of NVRAM.\n");
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
-           (uint8_t *)nv, ha->nvram_size);
+           nv, ha->nvram_size);
 
        /* Bad NVRAM data, set defaults parameters. */
-       if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
-           nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
+       if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+           nv->nvram_version < 1) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x0064,
-                   "Inconsistent NVRAM "
-                   "detected: checksum=0x%x id=%c version=0x%x.\n",
-                   chksum, nv->id[0], nv->nvram_version);
+                   "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
+                   chksum, nv->id, nv->nvram_version);
                ql_log(ql_log_warn, vha, 0x0065,
                    "Falling back to "
                    "functioning (yet invalid -- WWPN) defaults.\n");
@@ -4629,7 +4769,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
                        ha->zio_mode = icb->add_firmware_options[0] &
                            (BIT_3 | BIT_2 | BIT_1 | BIT_0);
                        ha->zio_timer = icb->interrupt_delay_timer ?
-                           icb->interrupt_delay_timer: 2;
+                           icb->interrupt_delay_timer : 2;
                }
                icb->add_firmware_options[0] &=
                    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
@@ -4662,7 +4802,7 @@ qla2x00_rport_del(void *data)
        unsigned long flags;
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
-       rport = fcport->drport ? fcport->drport: fcport->rport;
+       rport = fcport->drport ? fcport->drport : fcport->rport;
        fcport->drport = NULL;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
        if (rport) {
@@ -4675,10 +4815,27 @@ qla2x00_rport_del(void *data)
        }
 }
 
-/**
- * qla2x00_alloc_fcport() - Allocate a generic fcport.
- * @vha: HA context
- * @flags: allocation flags
+void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
+{
+       int old_state;
+
+       old_state = atomic_read(&fcport->state);
+       atomic_set(&fcport->state, state);
+
+       /* Don't print state transitions during initial allocation of fcport */
+       if (old_state && old_state != state) {
+               ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+                      "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
+                      fcport->port_name, port_state_str[old_state],
+                      port_state_str[state], fcport->d_id.b.domain,
+                      fcport->d_id.b.area, fcport->d_id.b.al_pa);
+       }
+}
+
+/**
+ * qla2x00_alloc_fcport() - Allocate a generic fcport.
+ * @vha: HA context
+ * @flags: allocation flags
  *
  * Returns a pointer to the allocated fcport, or NULL, if none available.
  */
@@ -4741,6 +4898,8 @@ qla2x00_free_fcport(fc_port_t *fcport)
 
                fcport->ct_desc.ct_sns = NULL;
        }
+       list_del(&fcport->list);
+       qla2x00_clear_loop_id(fcport);
        kfree(fcport);
 }
 
@@ -4762,6 +4921,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
        int  rval;
        unsigned long flags, save_flags;
        struct qla_hw_data *ha = vha->hw;
+
        rval = QLA_SUCCESS;
 
        /* Get Initiator ID */
@@ -4943,8 +5103,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
        ql_dbg(ql_dbg_disc, vha, 0x2011,
            "Entries in ID list (%d).\n", entries);
        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
-           (uint8_t *)ha->gid_list,
-           entries * sizeof(struct gid_list_info));
+           ha->gid_list, entries * sizeof(*ha->gid_list));
 
        if (entries == 0) {
                spin_lock_irqsave(&vha->work_lock, flags);
@@ -5194,16 +5353,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 
        rport->supported_classes = fcport->supported_classes;
 
-       rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+       rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
        if (fcport->port_type == FCT_INITIATOR)
-               rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+               rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
        if (fcport->port_type == FCT_TARGET)
-               rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+               rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
+       if (fcport->port_type & FCT_NVME_INITIATOR)
+               rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+       if (fcport->port_type & FCT_NVME_TARGET)
+               rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+       if (fcport->port_type & FCT_NVME_DISCOVERY)
+               rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
 
        ql_dbg(ql_dbg_disc, vha, 0x20ee,
            "%s %8phN. rport %p is %s mode\n",
            __func__, fcport->port_name, rport,
-           (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+           (fcport->port_type == FCT_TARGET) ? "tgt" :
+           ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
 
        fc_remote_port_rolechg(rport, rport_ids.roles);
 }
@@ -5778,55 +5944,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
        return (rval);
 }
 
-/*
- * qla2x00_find_new_loop_id
- *     Scan through our port list and find a new usable loop ID.
- *
- * Input:
- *     ha:     adapter state pointer.
- *     dev:    port structure pointer.
- *
- * Returns:
- *     qla2x00 local function return status code.
- *
- * Context:
- *     Kernel context.
- */
-int
-qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
-{
-       int     rval;
-       struct qla_hw_data *ha = vha->hw;
-       unsigned long flags = 0;
-
-       rval = QLA_SUCCESS;
-
-       spin_lock_irqsave(&ha->vport_slock, flags);
-
-       dev->loop_id = find_first_zero_bit(ha->loop_id_map,
-           LOOPID_MAP_SIZE);
-       if (dev->loop_id >= LOOPID_MAP_SIZE ||
-           qla2x00_is_reserved_id(vha, dev->loop_id)) {
-               dev->loop_id = FC_NO_LOOP_ID;
-               rval = QLA_FUNCTION_FAILED;
-       } else
-               set_bit(dev->loop_id, ha->loop_id_map);
-
-       spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-       if (rval == QLA_SUCCESS)
-               ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
-                   "Assigning new loopid=%x, portid=%x.\n",
-                   dev->loop_id, dev->d_id.b24);
-       else
-               ql_log(ql_log_warn, dev->vha, 0x2087,
-                   "No loop_id's available, portid=%x.\n",
-                   dev->d_id.b24);
-
-       return (rval);
-}
-
-
 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
 int
 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
@@ -6318,6 +6435,7 @@ qla83xx_initiating_reset(scsi_qla_host_t *vha)
                qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
        } else {
                const char *state = qla83xx_dev_state_to_string(dev_state);
+
                ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
 
                /* SV: XXX: Is timeout required here? */
@@ -6639,6 +6757,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
        if (vha->flags.online) {
                qla2x00_abort_isp_cleanup(vha);
 
+               if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
+                       ha->flags.chip_reset_done = 1;
+                       vha->flags.online = 1;
+                       status = 0;
+                       clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+                       return status;
+               }
+
                if (IS_QLA8031(ha)) {
                        ql_dbg(ql_dbg_p3p, vha, 0xb05c,
                            "Clearing fcoe driver presence.\n");
@@ -6879,7 +7005,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
 * Input:
 *      ha = adapter block pointer.
 */
-void
+int
 qla2x00_reset_adapter(scsi_qla_host_t *vha)
 {
        unsigned long flags = 0;
@@ -6895,17 +7021,20 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
        WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
        RD_REG_WORD(&reg->hccr);                        /* PCI Posting. */
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return QLA_SUCCESS;
 }
 
-void
+int
 qla24xx_reset_adapter(scsi_qla_host_t *vha)
 {
        unsigned long flags = 0;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       int rval = QLA_SUCCESS;
 
        if (IS_P3P_TYPE(ha))
-               return;
+               return rval;
 
        vha->flags.online = 0;
        ha->isp_ops->disable_intrs(ha);
@@ -6919,6 +7048,8 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
 
        if (IS_NOPOLLING_TYPE(ha))
                ha->isp_ops->enable_intrs(ha);
+
+       return rval;
 }
 
 /* On sparc systems, obtain port and node WWN from firmware
@@ -6969,34 +7100,33 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
                ha->vpd_base = FA_NVRAM_VPD1_ADDR;
        }
 
-       ha->nvram_size = sizeof(struct nvram_24xx);
+       ha->nvram_size = sizeof(*nv);
        ha->vpd_size = FA_NVRAM_VPD_SIZE;
 
        /* Get VPD data into cache */
        ha->vpd = ha->nvram + VPD_OFFSET;
-       ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
+       ha->isp_ops->read_nvram(vha, ha->vpd,
            ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
 
        /* Get NVRAM data into cache and calculate checksum. */
        dptr = (uint32_t *)nv;
-       ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
-           ha->nvram_size);
+       ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
                chksum += le32_to_cpu(*dptr);
 
        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
            "Contents of NVRAM\n");
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
-           (uint8_t *)nv, ha->nvram_size);
+           nv, ha->nvram_size);
 
        /* Bad NVRAM data, set defaults parameters. */
-       if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
-           || nv->id[3] != ' ' ||
-           nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+       if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+           le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
                /* Reset NVRAM data. */
                ql_log(ql_log_warn, vha, 0x006b,
-                   "Inconsistent NVRAM detected: checksum=0x%x id=%c "
-                   "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
+                   "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
+                   chksum, nv->id, nv->nvram_version);
+               ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
                ql_log(ql_log_warn, vha, 0x006c,
                    "Falling back to functioning (yet invalid -- WWPN) "
                    "defaults.\n");
@@ -7104,11 +7234,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        ha->flags.disable_risc_code_load = 0;
        ha->flags.enable_lip_reset = 0;
        ha->flags.enable_lip_full_login =
-           le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+           le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
        ha->flags.enable_target_reset =
-           le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+           le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
        ha->flags.enable_led_scheme = 0;
-       ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+       ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
 
        ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
            (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -7182,7 +7312,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
                ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
                    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
                ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
-                   le16_to_cpu(icb->interrupt_delay_timer): 2;
+                   le16_to_cpu(icb->interrupt_delay_timer) : 2;
        }
        icb->firmware_options_2 &= cpu_to_le32(
            ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
@@ -7205,128 +7335,311 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        return (rval);
 }
 
-uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+static void
+qla27xx_print_image(struct scsi_qla_host *vha, char *name,
+    struct qla27xx_image_status *image_status)
+{
+       ql_dbg(ql_dbg_init, vha, 0x018b,
+           "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
+           name, "status",
+           image_status->image_status_mask,
+           le16_to_cpu(image_status->generation),
+           image_status->ver_major,
+           image_status->ver_minor,
+           image_status->bitmap,
+           le32_to_cpu(image_status->checksum),
+           le32_to_cpu(image_status->signature));
+}
+
+static bool
+qla28xx_check_aux_image_status_signature(
+    struct qla27xx_image_status *image_status)
+{
+       ulong signature = le32_to_cpu(image_status->signature);
+
+       return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
+}
+
+static bool
+qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
+{
+       ulong signature = le32_to_cpu(image_status->signature);
+
+       return
+           signature != QLA27XX_IMG_STATUS_SIGN &&
+           signature != QLA28XX_IMG_STATUS_SIGN;
+}
+
+static ulong
+qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
+{
+       uint32_t *p = (void *)image_status;
+       uint n = sizeof(*image_status) / sizeof(*p);
+       uint32_t sum = 0;
+
+       for ( ; n--; p++)
+               sum += le32_to_cpup(p);
+
+       return sum;
+}
+
+static inline uint
+qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
+{
+       return aux->bitmap & bitmask ?
+           QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
+}
+
+static void
+qla28xx_component_status(
+    struct active_regions *active_regions, struct qla27xx_image_status *aux)
+{
+       active_regions->aux.board_config =
+           qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
+
+       active_regions->aux.vpd_nvram =
+           qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
+
+       active_regions->aux.npiv_config_0_1 =
+           qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
+
+       active_regions->aux.npiv_config_2_3 =
+           qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+}
+
+static int
+qla27xx_compare_image_generation(
+    struct qla27xx_image_status *pri_image_status,
+    struct qla27xx_image_status *sec_image_status)
+{
+       /* calculate generation delta as int16 (this accounts for wrap) */
+       int16_t delta =
+           le16_to_cpu(pri_image_status->generation) -
+           le16_to_cpu(sec_image_status->generation);
+
+       ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
+
+       return delta;
+}
+
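The signed 16-bit subtraction keeps the ordering correct across wrap-around of the generation counter: the difference is reduced modulo 2^16 and then read as a signed value, so a counter that has just wrapped still compares as newer. A standalone sketch with two worked cases (names illustrative):

        #include <linux/types.h>

        /* Wrap-safe "how much newer is pri than sec?" for 16-bit counters. */
        static int generation_delta(uint16_t pri, uint16_t sec)
        {
                return (int16_t)(pri - sec);
        }
        /*
         * generation_delta(0x0001, 0xffff) ==  2  -> primary (wrapped) is newer
         * generation_delta(0x7ffe, 0x8001) == -3  -> secondary is newer
         */
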
+void
+qla28xx_get_aux_images(
+       struct scsi_qla_host *vha, struct active_regions *active_regions)
 {
-       struct qla27xx_image_status pri_image_status, sec_image_status;
-       uint8_t valid_pri_image, valid_sec_image;
-       uint32_t *wptr;
-       uint32_t cnt, chksum, size;
        struct qla_hw_data *ha = vha->hw;
+       struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
+       bool valid_pri_image = false, valid_sec_image = false;
+       bool active_pri_image = false, active_sec_image = false;
 
-       valid_pri_image = valid_sec_image = 1;
-       ha->active_image = 0;
-       size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+       if (!ha->flt_region_aux_img_status_pri) {
+               ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
+               goto check_sec_image;
+       }
 
-       if (!ha->flt_region_img_status_pri) {
-               valid_pri_image = 0;
+       qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+           ha->flt_region_aux_img_status_pri,
+           sizeof(pri_aux_image_status) >> 2);
+       qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
+
+       if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
+               ql_dbg(ql_dbg_init, vha, 0x018b,
+                   "Primary aux image signature (%#x) not valid\n",
+                   le32_to_cpu(pri_aux_image_status.signature));
                goto check_sec_image;
        }
 
-       qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
-           ha->flt_region_img_status_pri, size);
+       if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
+               ql_dbg(ql_dbg_init, vha, 0x018c,
+                   "Primary aux image checksum failed\n");
+               goto check_sec_image;
+       }
+
+       valid_pri_image = true;
+
+       if (pri_aux_image_status.image_status_mask & 1) {
+               ql_dbg(ql_dbg_init, vha, 0x018d,
+                   "Primary aux image is active\n");
+               active_pri_image = true;
+       }
+
+check_sec_image:
+       if (!ha->flt_region_aux_img_status_sec) {
+               ql_dbg(ql_dbg_init, vha, 0x018a,
+                   "Secondary aux image not addressed\n");
+               goto check_valid_image;
+       }
 
-       if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+       qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+           ha->flt_region_aux_img_status_sec,
+           sizeof(sec_aux_image_status) >> 2);
+       qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
+
+       if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
                ql_dbg(ql_dbg_init, vha, 0x018b,
-                   "Primary image signature (0x%x) not valid\n",
-                   pri_image_status.signature);
-               valid_pri_image = 0;
+                   "Secondary aux image signature (%#x) not valid\n",
+                   le32_to_cpu(sec_aux_image_status.signature));
+               goto check_valid_image;
+       }
+
+       if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
+               ql_dbg(ql_dbg_init, vha, 0x018c,
+                   "Secondary aux image checksum failed\n");
+               goto check_valid_image;
+       }
+
+       valid_sec_image = true;
+
+       if (sec_aux_image_status.image_status_mask & 1) {
+               ql_dbg(ql_dbg_init, vha, 0x018d,
+                   "Secondary aux image is active\n");
+               active_sec_image = true;
+       }
+
+check_valid_image:
+       if (valid_pri_image && active_pri_image &&
+           valid_sec_image && active_sec_image) {
+               if (qla27xx_compare_image_generation(&pri_aux_image_status,
+                   &sec_aux_image_status) >= 0) {
+                       qla28xx_component_status(active_regions,
+                           &pri_aux_image_status);
+               } else {
+                       qla28xx_component_status(active_regions,
+                           &sec_aux_image_status);
+               }
+       } else if (valid_pri_image && active_pri_image) {
+               qla28xx_component_status(active_regions, &pri_aux_image_status);
+       } else if (valid_sec_image && active_sec_image) {
+               qla28xx_component_status(active_regions, &sec_aux_image_status);
+       }
+
+       ql_dbg(ql_dbg_init, vha, 0x018f,
+           "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+           active_regions->aux.board_config,
+           active_regions->aux.vpd_nvram,
+           active_regions->aux.npiv_config_0_1,
+           active_regions->aux.npiv_config_2_3);
+}
+
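The per-component selections recorded above (board config, VPD/NVRAM, and the two NPIV config regions) let each region be read independently from its primary or secondary flash copy; qla81xx_nvram_config() later in this patch does exactly that for VPD and NVRAM. Condensed usage, following that caller:

        struct active_regions regions = { };

        qla28xx_get_aux_images(vha, &regions);
        faddr = regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE ?
            ha->flt_region_vpd_sec : ha->flt_region_vpd;
        qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
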
+void
+qla27xx_get_active_image(struct scsi_qla_host *vha,
+    struct active_regions *active_regions)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla27xx_image_status pri_image_status, sec_image_status;
+       bool valid_pri_image = false, valid_sec_image = false;
+       bool active_pri_image = false, active_sec_image = false;
+
+       if (!ha->flt_region_img_status_pri) {
+               ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
                goto check_sec_image;
        }
 
-       wptr = (uint32_t *)(&pri_image_status);
-       cnt = size;
+       qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+           ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2);
+       qla27xx_print_image(vha, "Primary image", &pri_image_status);
 
-       for (chksum = 0; cnt--; wptr++)
-               chksum += le32_to_cpu(*wptr);
+       if (qla27xx_check_image_status_signature(&pri_image_status)) {
+               ql_dbg(ql_dbg_init, vha, 0x018b,
+                   "Primary image signature (%#x) not valid\n",
+                   le32_to_cpu(pri_image_status.signature));
+               goto check_sec_image;
+       }
 
-       if (chksum) {
+       if (qla27xx_image_status_checksum(&pri_image_status)) {
                ql_dbg(ql_dbg_init, vha, 0x018c,
-                   "Checksum validation failed for primary image (0x%x)\n",
-                   chksum);
-               valid_pri_image = 0;
+                   "Primary image checksum failed\n");
+               goto check_sec_image;
+       }
+
+       valid_pri_image = true;
+
+       if (pri_image_status.image_status_mask & 1) {
+               ql_dbg(ql_dbg_init, vha, 0x018d,
+                   "Primary image is active\n");
+               active_pri_image = true;
        }
 
 check_sec_image:
        if (!ha->flt_region_img_status_sec) {
-               valid_sec_image = 0;
+               ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
                goto check_valid_image;
        }
 
        qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
-           ha->flt_region_img_status_sec, size);
+           ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
+       qla27xx_print_image(vha, "Secondary image", &sec_image_status);
 
-       if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
-               ql_dbg(ql_dbg_init, vha, 0x018d,
-                   "Secondary image signature(0x%x) not valid\n",
-                   sec_image_status.signature);
-               valid_sec_image = 0;
+       if (qla27xx_check_image_status_signature(&sec_image_status)) {
+               ql_dbg(ql_dbg_init, vha, 0x018b,
+                   "Secondary image signature (%#x) not valid\n",
+                   le32_to_cpu(sec_image_status.signature));
                goto check_valid_image;
        }
 
-       wptr = (uint32_t *)(&sec_image_status);
-       cnt = size;
-       for (chksum = 0; cnt--; wptr++)
-               chksum += le32_to_cpu(*wptr);
-       if (chksum) {
-               ql_dbg(ql_dbg_init, vha, 0x018e,
-                   "Checksum validation failed for secondary image (0x%x)\n",
-                   chksum);
-               valid_sec_image = 0;
+       if (qla27xx_image_status_checksum(&sec_image_status)) {
+               ql_dbg(ql_dbg_init, vha, 0x018c,
+                   "Secondary image checksum failed\n");
+               goto check_valid_image;
+       }
+
+       valid_sec_image = true;
+
+       if (sec_image_status.image_status_mask & 1) {
+               ql_dbg(ql_dbg_init, vha, 0x018d,
+                   "Secondary image is active\n");
+               active_sec_image = true;
        }
 
 check_valid_image:
-       if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
-               ha->active_image = QLA27XX_PRIMARY_IMAGE;
-       if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
-               if (!ha->active_image ||
-                   pri_image_status.generation_number <
-                   sec_image_status.generation_number)
-                       ha->active_image = QLA27XX_SECONDARY_IMAGE;
+       if (valid_pri_image && active_pri_image)
+               active_regions->global = QLA27XX_PRIMARY_IMAGE;
+
+       if (valid_sec_image && active_sec_image) {
+               if (!active_regions->global ||
+                   qla27xx_compare_image_generation(
+                       &pri_image_status, &sec_image_status) < 0) {
+                       active_regions->global = QLA27XX_SECONDARY_IMAGE;
+               }
        }
 
-       ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
-           ha->active_image == 0 ? "default bootld and fw" :
-           ha->active_image == 1 ? "primary" :
-           ha->active_image == 2 ? "secondary" :
-           "Invalid");
+       ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
+           active_regions->global == QLA27XX_DEFAULT_IMAGE ?
+               "default (boot/fw)" :
+           active_regions->global == QLA27XX_PRIMARY_IMAGE ?
+               "primary" :
+           active_regions->global == QLA27XX_SECONDARY_IMAGE ?
+               "secondary" : "invalid",
+           active_regions->global);
+}
 
-       return ha->active_image;
+bool qla24xx_risc_firmware_invalid(uint32_t *dword)
+{
+       return
+           !(dword[4] | dword[5] | dword[6] | dword[7]) ||
+           !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
 }
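qla24xx_risc_firmware_invalid() inspects dwords 4-7 of the buffer it is handed (the loaders below pass the start of the candidate image) and reports the image unusable when those words are all zero or all 0xffffffff, the two blank/erased-flash patterns. Usage sketch with illustrative values:

        uint32_t erased[8] = { [4] = ~0u, ~0u, ~0u, ~0u };     /* dwords 4..7 all ones */
        uint32_t blank[8] = { 0 };                              /* dwords 4..7 all zero */
        bool bad;

        bad = qla24xx_risc_firmware_invalid(erased);    /* true: erased flash */
        bad = qla24xx_risc_firmware_invalid(blank);     /* true: blank image  */
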
 
 static int
 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
     uint32_t faddr)
 {
-       int     rval = QLA_SUCCESS;
-       int     segments, fragment;
-       uint32_t *dcode, dlen;
-       uint32_t risc_addr;
-       uint32_t risc_size;
-       uint32_t i;
+       int rval;
+       uint templates, segments, fragment;
+       ulong i;
+       uint j;
+       ulong dlen;
+       uint32_t *dcode;
+       uint32_t risc_addr, risc_size, risc_attr = 0;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
+       struct fwdt *fwdt = ha->fwdt;
 
        ql_dbg(ql_dbg_init, vha, 0x008b,
            "FW: Loading firmware from flash (%x).\n", faddr);
 
-       rval = QLA_SUCCESS;
-
-       segments = FA_RISC_CODE_SEGMENTS;
-       dcode = (uint32_t *)req->ring;
-       *srisc_addr = 0;
-
-       if (IS_QLA27XX(ha) &&
-           qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
-               faddr = ha->flt_region_fw_sec;
-
-       /* Validate firmware image by checking version. */
-       qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
-       for (i = 0; i < 4; i++)
-               dcode[i] = be32_to_cpu(dcode[i]);
-       if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
-           dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
-           (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
-               dcode[3] == 0)) {
+       dcode = (void *)req->ring;
+       qla24xx_read_flash_data(vha, dcode, faddr, 8);
+       if (qla24xx_risc_firmware_invalid(dcode)) {
                ql_log(ql_log_fatal, vha, 0x008c,
                    "Unable to verify the integrity of flash firmware "
                    "image.\n");
@@ -7337,34 +7650,36 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
                return QLA_FUNCTION_FAILED;
        }
 
-       while (segments && rval == QLA_SUCCESS) {
-               /* Read segment's load information. */
-               qla24xx_read_flash_data(vha, dcode, faddr, 4);
-
+       dcode = (void *)req->ring;
+       *srisc_addr = 0;
+       segments = FA_RISC_CODE_SEGMENTS;
+       for (j = 0; j < segments; j++) {
+               ql_dbg(ql_dbg_init, vha, 0x008d,
+                   "-> Loading segment %u...\n", j);
+               qla24xx_read_flash_data(vha, dcode, faddr, 10);
                risc_addr = be32_to_cpu(dcode[2]);
-               *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
                risc_size = be32_to_cpu(dcode[3]);
+               if (!*srisc_addr) {
+                       *srisc_addr = risc_addr;
+                       risc_attr = be32_to_cpu(dcode[9]);
+               }
 
-               fragment = 0;
-               while (risc_size > 0 && rval == QLA_SUCCESS) {
-                       dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+               dlen = ha->fw_transfer_size >> 2;
+               for (fragment = 0; risc_size; fragment++) {
                        if (dlen > risc_size)
                                dlen = risc_size;
 
                        ql_dbg(ql_dbg_init, vha, 0x008e,
-                           "Loading risc segment@ risc addr %x "
-                           "number of dwords 0x%x offset 0x%x.\n",
-                           risc_addr, dlen, faddr);
-
+                           "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
+                           fragment, risc_addr, faddr, dlen);
                        qla24xx_read_flash_data(vha, dcode, faddr, dlen);
                        for (i = 0; i < dlen; i++)
                                dcode[i] = swab32(dcode[i]);
 
-                       rval = qla2x00_load_ram(vha, req->dma, risc_addr,
-                           dlen);
+                       rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
                        if (rval) {
                                ql_log(ql_log_fatal, vha, 0x008f,
-                                   "Failed to load segment %d of firmware.\n",
+                                   "-> Failed load firmware fragment %u.\n",
                                    fragment);
                                return QLA_FUNCTION_FAILED;
                        }
@@ -7372,107 +7687,82 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
                        faddr += dlen;
                        risc_addr += dlen;
                        risc_size -= dlen;
-                       fragment++;
                }
-
-               /* Next segment. */
-               segments--;
        }
 
-       if (!IS_QLA27XX(ha))
-               return rval;
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               return QLA_SUCCESS;
 
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
-
-       ql_dbg(ql_dbg_init, vha, 0x0161,
-           "Loading fwdump template from %x\n", faddr);
-       qla24xx_read_flash_data(vha, dcode, faddr, 7);
-       risc_size = be32_to_cpu(dcode[2]);
-       ql_dbg(ql_dbg_init, vha, 0x0162,
-           "-> array size %x dwords\n", risc_size);
-       if (risc_size == 0 || risc_size == ~0)
-               goto default_template;
-
-       dlen = (risc_size - 8) * sizeof(*dcode);
-       ql_dbg(ql_dbg_init, vha, 0x0163,
-           "-> template allocating %x bytes...\n", dlen);
-       ha->fw_dump_template = vmalloc(dlen);
-       if (!ha->fw_dump_template) {
-               ql_log(ql_log_warn, vha, 0x0164,
-                   "Failed fwdump template allocate %x bytes.\n", risc_size);
-               goto default_template;
-       }
-
-       faddr += 7;
-       risc_size -= 8;
-       dcode = ha->fw_dump_template;
-       qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
-       for (i = 0; i < risc_size; i++)
-               dcode[i] = le32_to_cpu(dcode[i]);
-
-       if (!qla27xx_fwdt_template_valid(dcode)) {
-               ql_log(ql_log_warn, vha, 0x0165,
-                   "Failed fwdump template validate\n");
-               goto default_template;
-       }
-
-       dlen = qla27xx_fwdt_template_size(dcode);
-       ql_dbg(ql_dbg_init, vha, 0x0166,
-           "-> template size %x bytes\n", dlen);
-       if (dlen > risc_size * sizeof(*dcode)) {
-               ql_log(ql_log_warn, vha, 0x0167,
-                   "Failed fwdump template exceeds array by %zx bytes\n",
-                   (size_t)(dlen - risc_size * sizeof(*dcode)));
-               goto default_template;
-       }
-       ha->fw_dump_template_len = dlen;
-       return rval;
+       templates = (risc_attr & BIT_9) ? 2 : 1;
+       ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
+       for (j = 0; j < templates; j++, fwdt++) {
+               if (fwdt->template)
+                       vfree(fwdt->template);
+               fwdt->template = NULL;
+               fwdt->length = 0;
+
+               dcode = (void *)req->ring;
+               qla24xx_read_flash_data(vha, dcode, faddr, 7);
+               risc_size = be32_to_cpu(dcode[2]);
+               ql_dbg(ql_dbg_init, vha, 0x0161,
+                   "-> fwdt%u template array at %#x (%#x dwords)\n",
+                   j, faddr, risc_size);
+               if (!risc_size || !~risc_size) {
+                       ql_dbg(ql_dbg_init, vha, 0x0162,
+                           "-> fwdt%u failed to read array\n", j);
+                       goto failed;
+               }
 
-default_template:
-       ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
-
-       dlen = qla27xx_fwdt_template_default_size();
-       ql_dbg(ql_dbg_init, vha, 0x0169,
-           "-> template allocating %x bytes...\n", dlen);
-       ha->fw_dump_template = vmalloc(dlen);
-       if (!ha->fw_dump_template) {
-               ql_log(ql_log_warn, vha, 0x016a,
-                   "Failed fwdump template allocate %x bytes.\n", risc_size);
-               goto failed_template;
-       }
-
-       dcode = ha->fw_dump_template;
-       risc_size = dlen / sizeof(*dcode);
-       memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
-       for (i = 0; i < risc_size; i++)
-               dcode[i] = be32_to_cpu(dcode[i]);
-
-       if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
-               ql_log(ql_log_warn, vha, 0x016b,
-                   "Failed fwdump template validate\n");
-               goto failed_template;
-       }
-
-       dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
-       ql_dbg(ql_dbg_init, vha, 0x016c,
-           "-> template size %x bytes\n", dlen);
-       ha->fw_dump_template_len = dlen;
-       return rval;
+               /* skip header and ignore checksum */
+               faddr += 7;
+               risc_size -= 8;
+
+               ql_dbg(ql_dbg_init, vha, 0x0163,
+                   "-> fwdt%u allocating template (%#x dwords)...\n",
+                   j, risc_size);
+               fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+               if (!fwdt->template) {
+                       ql_log(ql_log_warn, vha, 0x0164,
+                           "-> fwdt%u failed allocate template.\n", j);
+                       goto failed;
+               }
 
-failed_template:
-       ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
-       return rval;
+               dcode = fwdt->template;
+               qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+
+               if (!qla27xx_fwdt_template_valid(dcode)) {
+                       ql_log(ql_log_warn, vha, 0x0165,
+                           "-> fwdt%u failed template validate\n", j);
+                       goto failed;
+               }
+
+               dlen = qla27xx_fwdt_template_size(dcode);
+               ql_dbg(ql_dbg_init, vha, 0x0166,
+                   "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+                   j, dlen, dlen / sizeof(*dcode));
+               if (dlen > risc_size * sizeof(*dcode)) {
+                       ql_log(ql_log_warn, vha, 0x0167,
+                           "-> fwdt%u template exceeds array (%-lu bytes)\n",
+                           j, dlen - risc_size * sizeof(*dcode));
+                       goto failed;
+               }
+
+               fwdt->length = dlen;
+               ql_dbg(ql_dbg_init, vha, 0x0168,
+                   "-> fwdt%u loaded template ok\n", j);
+
+               faddr += risc_size + 1;
+       }
+
+       return QLA_SUCCESS;
+
+failed:
+       if (fwdt->template)
+               vfree(fwdt->template);
+       fwdt->template = NULL;
+       fwdt->length = 0;
+
+       return QLA_SUCCESS;
 }
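Both the flash walk above and the request_firmware() walk below step through the dump-template area with the same arithmetic: dword 2 of a 7-dword header gives the total array size in dwords, the header is skipped, the trailing checksum dword is ignored, and a second array follows immediately when BIT_9 of the first segment's attribute word was set. The implied spacing, as a reading aid rather than a specification:

        /*
         * One template array of N dwords (N = dword 2 of its header):
         * 7 header dwords + (N - 8) body dwords + 1 checksum dword.
         */
        static ulong next_template_start(ulong faddr, uint32_t array_dwords)
        {
                return faddr + array_dwords;
        }
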
 
 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
@@ -7580,94 +7870,73 @@ static int
 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
        int     rval;
-       int     segments, fragment;
-       uint32_t *dcode, dlen;
-       uint32_t risc_addr;
-       uint32_t risc_size;
-       uint32_t i;
+       uint templates, segments, fragment;
+       uint32_t *dcode;
+       ulong dlen;
+       uint32_t risc_addr, risc_size, risc_attr = 0;
+       ulong i;
+       uint j;
        struct fw_blob *blob;
-       const uint32_t *fwcode;
-       uint32_t fwclen;
+       uint32_t *fwcode;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
+       struct fwdt *fwdt = ha->fwdt;
+
+       ql_dbg(ql_dbg_init, vha, 0x0090,
+           "-> FW: Loading via request-firmware.\n");
 
-       /* Load firmware blob. */
        blob = qla2x00_request_firmware(vha);
        if (!blob) {
-               ql_log(ql_log_warn, vha, 0x0090,
-                   "Firmware image unavailable.\n");
-               ql_log(ql_log_warn, vha, 0x0091,
-                   "Firmware images can be retrieved from: "
-                   QLA_FW_URL ".\n");
+               ql_log(ql_log_warn, vha, 0x0092,
+                   "-> Firmware file not found.\n");
 
                return QLA_FUNCTION_FAILED;
        }
 
-       ql_dbg(ql_dbg_init, vha, 0x0092,
-           "FW: Loading via request-firmware.\n");
-
-       rval = QLA_SUCCESS;
-
-       segments = FA_RISC_CODE_SEGMENTS;
-       dcode = (uint32_t *)req->ring;
-       *srisc_addr = 0;
-       fwcode = (uint32_t *)blob->fw->data;
-       fwclen = 0;
-
-       /* Validate firmware image by checking version. */
-       if (blob->fw->size < 8 * sizeof(uint32_t)) {
+       fwcode = (void *)blob->fw->data;
+       dcode = fwcode;
+       if (qla24xx_risc_firmware_invalid(dcode)) {
                ql_log(ql_log_fatal, vha, 0x0093,
                    "Unable to verify integrity of firmware image (%zd).\n",
                    blob->fw->size);
-               return QLA_FUNCTION_FAILED;
-       }
-       for (i = 0; i < 4; i++)
-               dcode[i] = be32_to_cpu(fwcode[i + 4]);
-       if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
-           dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
-           (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
-               dcode[3] == 0)) {
-               ql_log(ql_log_fatal, vha, 0x0094,
-                   "Unable to verify integrity of firmware image (%zd).\n",
-                   blob->fw->size);
                ql_log(ql_log_fatal, vha, 0x0095,
                    "Firmware data: %08x %08x %08x %08x.\n",
                    dcode[0], dcode[1], dcode[2], dcode[3]);
                return QLA_FUNCTION_FAILED;
        }
 
-       while (segments && rval == QLA_SUCCESS) {
+       dcode = (void *)req->ring;
+       *srisc_addr = 0;
+       segments = FA_RISC_CODE_SEGMENTS;
+       for (j = 0; j < segments; j++) {
+               ql_dbg(ql_dbg_init, vha, 0x0096,
+                   "-> Loading segment %u...\n", j);
                risc_addr = be32_to_cpu(fwcode[2]);
-               *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
                risc_size = be32_to_cpu(fwcode[3]);
 
-               /* Validate firmware image size. */
-               fwclen += risc_size * sizeof(uint32_t);
-               if (blob->fw->size < fwclen) {
-                       ql_log(ql_log_fatal, vha, 0x0096,
-                           "Unable to verify integrity of firmware image "
-                           "(%zd).\n", blob->fw->size);
-                       return QLA_FUNCTION_FAILED;
+               if (!*srisc_addr) {
+                       *srisc_addr = risc_addr;
+                       risc_attr = be32_to_cpu(fwcode[9]);
                }
 
-               fragment = 0;
-               while (risc_size > 0 && rval == QLA_SUCCESS) {
-                       dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+               dlen = ha->fw_transfer_size >> 2;
+               for (fragment = 0; risc_size; fragment++) {
                        if (dlen > risc_size)
                                dlen = risc_size;
 
                        ql_dbg(ql_dbg_init, vha, 0x0097,
-                           "Loading risc segment@ risc addr %x "
-                           "number of dwords 0x%x.\n", risc_addr, dlen);
+                           "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
+                           fragment, risc_addr,
+                           (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
+                           dlen);
 
                        for (i = 0; i < dlen; i++)
                                dcode[i] = swab32(fwcode[i]);
 
-                       rval = qla2x00_load_ram(vha, req->dma, risc_addr,
-                           dlen);
+                       rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
                        if (rval) {
                                ql_log(ql_log_fatal, vha, 0x0098,
-                                   "Failed to load segment %d of firmware.\n",
+                                   "-> Failed load firmware fragment %u.\n",
                                    fragment);
                                return QLA_FUNCTION_FAILED;
                        }
@@ -7675,106 +7944,82 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
                        fwcode += dlen;
                        risc_addr += dlen;
                        risc_size -= dlen;
-                       fragment++;
                }
-
-               /* Next segment. */
-               segments--;
        }
 
-       if (!IS_QLA27XX(ha))
-               return rval;
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               return QLA_SUCCESS;
 
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
-
-       ql_dbg(ql_dbg_init, vha, 0x171,
-           "Loading fwdump template from %x\n",
-           (uint32_t)((void *)fwcode - (void *)blob->fw->data));
-       risc_size = be32_to_cpu(fwcode[2]);
-       ql_dbg(ql_dbg_init, vha, 0x172,
-           "-> array size %x dwords\n", risc_size);
-       if (risc_size == 0 || risc_size == ~0)
-               goto default_template;
-
-       dlen = (risc_size - 8) * sizeof(*fwcode);
-       ql_dbg(ql_dbg_init, vha, 0x0173,
-           "-> template allocating %x bytes...\n", dlen);
-       ha->fw_dump_template = vmalloc(dlen);
-       if (!ha->fw_dump_template) {
-               ql_log(ql_log_warn, vha, 0x0174,
-                   "Failed fwdump template allocate %x bytes.\n", risc_size);
-               goto default_template;
-       }
-
-       fwcode += 7;
-       risc_size -= 8;
-       dcode = ha->fw_dump_template;
-       for (i = 0; i < risc_size; i++)
-               dcode[i] = le32_to_cpu(fwcode[i]);
-
-       if (!qla27xx_fwdt_template_valid(dcode)) {
-               ql_log(ql_log_warn, vha, 0x0175,
-                   "Failed fwdump template validate\n");
-               goto default_template;
-       }
-
-       dlen = qla27xx_fwdt_template_size(dcode);
-       ql_dbg(ql_dbg_init, vha, 0x0176,
-           "-> template size %x bytes\n", dlen);
-       if (dlen > risc_size * sizeof(*fwcode)) {
-               ql_log(ql_log_warn, vha, 0x0177,
-                   "Failed fwdump template exceeds array by %zx bytes\n",
-                   (size_t)(dlen - risc_size * sizeof(*fwcode)));
-               goto default_template;
-       }
-       ha->fw_dump_template_len = dlen;
-       return rval;
+       templates = (risc_attr & BIT_9) ? 2 : 1;
+       ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
+       for (j = 0; j < templates; j++, fwdt++) {
+               if (fwdt->template)
+                       vfree(fwdt->template);
+               fwdt->template = NULL;
+               fwdt->length = 0;
+
+               risc_size = be32_to_cpu(fwcode[2]);
+               ql_dbg(ql_dbg_init, vha, 0x0171,
+                   "-> fwdt%u template array at %#x (%#x dwords)\n",
+                   j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
+                   risc_size);
+               if (!risc_size || !~risc_size) {
+                       ql_dbg(ql_dbg_init, vha, 0x0172,
+                           "-> fwdt%u failed to read array\n", j);
+                       goto failed;
+               }
 
-default_template:
-       ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
-
-       dlen = qla27xx_fwdt_template_default_size();
-       ql_dbg(ql_dbg_init, vha, 0x0179,
-           "-> template allocating %x bytes...\n", dlen);
-       ha->fw_dump_template = vmalloc(dlen);
-       if (!ha->fw_dump_template) {
-               ql_log(ql_log_warn, vha, 0x017a,
-                   "Failed fwdump template allocate %x bytes.\n", risc_size);
-               goto failed_template;
-       }
-
-       dcode = ha->fw_dump_template;
-       risc_size = dlen / sizeof(*fwcode);
-       fwcode = qla27xx_fwdt_template_default();
-       for (i = 0; i < risc_size; i++)
-               dcode[i] = be32_to_cpu(fwcode[i]);
-
-       if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
-               ql_log(ql_log_warn, vha, 0x017b,
-                   "Failed fwdump template validate\n");
-               goto failed_template;
-       }
-
-       dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
-       ql_dbg(ql_dbg_init, vha, 0x017c,
-           "-> template size %x bytes\n", dlen);
-       ha->fw_dump_template_len = dlen;
-       return rval;
+               /* skip header and ignore checksum */
+               fwcode += 7;
+               risc_size -= 8;
+
+               ql_dbg(ql_dbg_init, vha, 0x0173,
+                   "-> fwdt%u allocating template (%#x dwords)...\n",
+                   j, risc_size);
+               fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+               if (!fwdt->template) {
+                       ql_log(ql_log_warn, vha, 0x0174,
+                           "-> fwdt%u failed allocate template.\n", j);
+                       goto failed;
+               }
 
-failed_template:
-       ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
-       return rval;
+               dcode = fwdt->template;
+               for (i = 0; i < risc_size; i++)
+                       dcode[i] = fwcode[i];
+
+               if (!qla27xx_fwdt_template_valid(dcode)) {
+                       ql_log(ql_log_warn, vha, 0x0175,
+                           "-> fwdt%u failed template validate\n", j);
+                       goto failed;
+               }
+
+               dlen = qla27xx_fwdt_template_size(dcode);
+               ql_dbg(ql_dbg_init, vha, 0x0176,
+                   "-> fwdt%u template size %#lx bytes (%#lx words)\n",
+                   j, dlen, dlen / sizeof(*dcode));
+               if (dlen > risc_size * sizeof(*dcode)) {
+                       ql_log(ql_log_warn, vha, 0x0177,
+                           "-> fwdt%u template exceeds array (%-lu bytes)\n",
+                           j, dlen - risc_size * sizeof(*dcode));
+                       goto failed;
+               }
+
+               fwdt->length = dlen;
+               ql_dbg(ql_dbg_init, vha, 0x0178,
+                   "-> fwdt%u loaded template ok\n", j);
+
+               fwcode += risc_size + 1;
+       }
+
+       return QLA_SUCCESS;
+
+failed:
+       if (fwdt->template)
+               vfree(fwdt->template);
+       fwdt->template = NULL;
+       fwdt->length = 0;
+
+       return QLA_SUCCESS;
 }
 
 int
@@ -7803,32 +8048,50 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
        int rval;
        struct qla_hw_data *ha = vha->hw;
+       struct active_regions active_regions = { };
 
        if (ql2xfwloadbin == 2)
                goto try_blob_fw;
 
-       /*
-        * FW Load priority:
+       /* FW Load priority:
         * 1) Firmware residing in flash.
         * 2) Firmware via request-firmware interface (.bin file).
-        * 3) Golden-Firmware residing in flash -- limited operation.
+        * 3) Golden-Firmware residing in flash -- (limited operation).
         */
+
+       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               goto try_primary_fw;
+
+       qla27xx_get_active_image(vha, &active_regions);
+
+       if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
+               goto try_primary_fw;
+
+       ql_dbg(ql_dbg_init, vha, 0x008b,
+           "Loading secondary firmware image.\n");
+       rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
+       if (!rval)
+               return rval;
+
+try_primary_fw:
+       ql_dbg(ql_dbg_init, vha, 0x008b,
+           "Loading primary firmware image.\n");
        rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
-       if (rval == QLA_SUCCESS)
+       if (!rval)
                return rval;
 
 try_blob_fw:
        rval = qla24xx_load_risc_blob(vha, srisc_addr);
-       if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+       if (!rval || !ha->flt_region_gold_fw)
                return rval;
 
        ql_log(ql_log_info, vha, 0x0099,
            "Attempting to fallback to golden firmware.\n");
        rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
-       if (rval != QLA_SUCCESS)
+       if (rval)
                return rval;
 
-       ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
+       ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
        ha->flags.running_gold_fw = 1;
        return rval;
 }
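With secondary-image support wired in, the effective order in qla81xx_load_risc() is: the secondary flash image when it is the active one, then the primary flash image, then the request_firmware() blob (directly when ql2xfwloadbin == 2), and finally the golden firmware as a limited-operation fallback. Condensed from the function above, with logging and the running_gold_fw flag omitted:

        if (ql2xfwloadbin != 2) {
                if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                        qla27xx_get_active_image(vha, &active_regions);
                if (active_regions.global == QLA27XX_SECONDARY_IMAGE &&
                    !qla24xx_load_risc_flash(vha, srisc_addr,
                                             ha->flt_region_fw_sec))
                        return QLA_SUCCESS;
                if (!qla24xx_load_risc_flash(vha, srisc_addr,
                                             ha->flt_region_fw))
                        return QLA_SUCCESS;
        }
        rval = qla24xx_load_risc_blob(vha, srisc_addr);
        if (!rval || !ha->flt_region_gold_fw)
                return rval;
        return qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
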
@@ -7963,6 +8226,7 @@ void
 qla84xx_put_chip(struct scsi_qla_host *vha)
 {
        struct qla_hw_data *ha = vha->hw;
+
        if (ha->cs84xx)
                kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
 }
@@ -7980,7 +8244,7 @@ qla84xx_init_chip(scsi_qla_host_t *vha)
 
        mutex_unlock(&ha->cs84xx->fw_update_mutex);
 
-       return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
+       return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
            QLA_SUCCESS;
 }
 
@@ -7997,25 +8261,48 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        uint32_t chksum;
        uint16_t cnt;
        struct qla_hw_data *ha = vha->hw;
+       uint32_t faddr;
+       struct active_regions active_regions = { };
 
        rval = QLA_SUCCESS;
        icb = (struct init_cb_81xx *)ha->init_cb;
        nv = ha->nvram;
 
        /* Determine NVRAM starting address. */
-       ha->nvram_size = sizeof(struct nvram_81xx);
+       ha->nvram_size = sizeof(*nv);
        ha->vpd_size = FA_NVRAM_VPD_SIZE;
        if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
                ha->vpd_size = FA_VPD_SIZE_82XX;
 
+       if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
+               qla28xx_get_aux_images(vha, &active_regions);
+
        /* Get VPD data into cache */
        ha->vpd = ha->nvram + VPD_OFFSET;
-       ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
-           ha->vpd_size);
+
+       faddr = ha->flt_region_vpd;
+       if (IS_QLA28XX(ha)) {
+               if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+                       faddr = ha->flt_region_vpd_sec;
+               ql_dbg(ql_dbg_init, vha, 0x0110,
+                   "Loading %s nvram image.\n",
+                   active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+                   "primary" : "secondary");
+       }
+       qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
 
        /* Get NVRAM data into cache and calculate checksum. */
-       ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
-           ha->nvram_size);
+       faddr = ha->flt_region_nvram;
+       if (IS_QLA28XX(ha)) {
+               if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
+                       faddr = ha->flt_region_nvram_sec;
+       }
+       ql_dbg(ql_dbg_init, vha, 0x0110,
+           "Loading %s nvram image.\n",
+           active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
+           "primary" : "secondary");
+       qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2);
+
        dptr = (uint32_t *)nv;
        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
                chksum += le32_to_cpu(*dptr);
@@ -8023,17 +8310,16 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
            "Contents of NVRAM:\n");
        ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
-           (uint8_t *)nv, ha->nvram_size);
+           nv, ha->nvram_size);
 
        /* Bad NVRAM data, set defaults parameters. */
-       if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
-           || nv->id[3] != ' ' ||
-           nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+       if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
+           le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
                /* Reset NVRAM data. */
                ql_log(ql_log_info, vha, 0x0073,
-                   "Inconsistent NVRAM detected: checksum=0x%x id=%c "
-                   "version=0x%x.\n", chksum, nv->id[0],
-                   le16_to_cpu(nv->nvram_version));
+                   "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
+                   chksum, nv->id, le16_to_cpu(nv->nvram_version));
+               ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
                ql_log(ql_log_info, vha, 0x0074,
                    "Falling back to functioning (yet invalid -- WWPN) "
                    "defaults.\n");
@@ -8154,11 +8440,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        ha->flags.disable_risc_code_load = 0;
        ha->flags.enable_lip_reset = 0;
        ha->flags.enable_lip_full_login =
-           le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+           le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
        ha->flags.enable_target_reset =
-           le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+           le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
        ha->flags.enable_led_scheme = 0;
-       ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+       ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
 
        ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
            (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -8222,7 +8508,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
                ha->login_retry_count = ql2xloginretrycount;
 
        /* if not running MSI-X we need handshaking on interrupts */
-       if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
+       if (!vha->hw->flags.msix_enabled &&
+           (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
                icb->firmware_options_2 |= cpu_to_le32(BIT_22);
 
        /* Enable ZIO. */
@@ -8230,7 +8517,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
                ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
                    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
                ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
-                   le16_to_cpu(icb->interrupt_delay_timer): 2;
+                   le16_to_cpu(icb->interrupt_delay_timer) : 2;
        }
        icb->firmware_options_2 &= cpu_to_le32(
            ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
@@ -8255,12 +8542,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        /* N2N: driver will initiate Login instead of FW */
        icb->firmware_options_3 |= BIT_8;
 
-       if (IS_QLA27XX(ha)) {
-               icb->firmware_options_3 |= BIT_8;
-               ql_dbg(ql_log_info, vha, 0x0075,
-                   "Enabling direct connection.\n");
-       }
-
        if (rval) {
                ql_log(ql_log_warn, vha, 0x0076,
                    "NVRAM configuration failed.\n");
@@ -8621,7 +8902,6 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
                            "Failed to allocate memory for queue pair.\n");
                        return NULL;
                }
-               memset(qpair, 0, sizeof(struct qla_qpair));
 
                qpair->hw = vha->hw;
                qpair->vha = vha;
@@ -8668,7 +8948,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
                qpair->msix->in_use = 1;
                list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
                qpair->pdev = ha->pdev;
-               if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+               if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
                        qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
 
                mutex_unlock(&ha->mq_lock);
index 512c3c3..bf063c6 100644 (file)
@@ -90,43 +90,6 @@ host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
                *odest++ = cpu_to_le32(*isrc);
 }
 
-static inline void
-qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
-{
-       int i;
-
-       if (IS_FWI2_CAPABLE(ha))
-               return;
-
-       for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
-               set_bit(i, ha->loop_id_map);
-       set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
-       set_bit(BROADCAST, ha->loop_id_map);
-}
-
-static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
-{
-       struct qla_hw_data *ha = vha->hw;
-       if (IS_FWI2_CAPABLE(ha))
-               return (loop_id > NPH_LAST_HANDLE);
-
-       return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
-           loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-}
-
-static inline void
-qla2x00_clear_loop_id(fc_port_t *fcport) {
-       struct qla_hw_data *ha = fcport->vha->hw;
-
-       if (fcport->loop_id == FC_NO_LOOP_ID ||
-           qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
-               return;
-
-       clear_bit(fcport->loop_id, ha->loop_id_map);
-       fcport->loop_id = FC_NO_LOOP_ID;
-}
-
 static inline void
 qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
 {
@@ -142,25 +105,6 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
        INIT_LIST_HEAD(&ctx->dsd_list);
 }
 
-static inline void
-qla2x00_set_fcport_state(fc_port_t *fcport, int state)
-{
-       int old_state;
-
-       old_state = atomic_read(&fcport->state);
-       atomic_set(&fcport->state, state);
-
-       /* Don't print state transitions during initial allocation of fcport */
-       if (old_state && old_state != state) {
-               ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
-                   "FCPort %8phC state transitioned from %s to %s - "
-                       "portid=%02x%02x%02x.\n", fcport->port_name,
-                   port_state_str[old_state], port_state_str[state],
-                   fcport->d_id.b.domain, fcport->d_id.b.area,
-                   fcport->d_id.b.al_pa);
-       }
-}
-
 static inline int
 qla2x00_hba_err_chk_enabled(srb_t *sp)
 {
@@ -240,6 +184,7 @@ done:
 static inline void
 qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
 {
+       sp->qpair = NULL;
        mempool_free(sp, qpair->srb_mempool);
        QLA_QPAIR_MARK_NOT_BUSY(qpair);
 }
@@ -274,18 +219,6 @@ qla2x00_rel_sp(srb_t *sp)
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
 
-static inline void
-qla2x00_init_timer(srb_t *sp, unsigned long tmo)
-{
-       timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
-       sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
-       sp->free = qla2x00_sp_free;
-       init_completion(&sp->comp);
-       if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
-               init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
-       add_timer(&sp->u.iocb_cmd.timer);
-}
-
 static inline int
 qla2x00_gid_list_size(struct qla_hw_data *ha)
 {
index 456a41d..9312b19 100644 (file)
@@ -107,7 +107,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
        cont_pkt = (cont_entry_t *)req->ring_ptr;
 
        /* Load packet defaults. */
-       *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
+       put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
 
        return (cont_pkt);
 }
@@ -136,9 +136,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
        /* Load packet defaults. */
-       *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
-           cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
-           cpu_to_le32(CONTINUE_A64_TYPE);
+       put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
+                          CONTINUE_A64_TYPE, &cont_pkt->entry_type);
 
        return (cont_pkt);
 }
@@ -193,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
     uint16_t tot_dsds)
 {
        uint16_t        avail_dsds;
-       uint32_t        *cur_dsd;
+       struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
@@ -202,8 +201,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
        cmd = GET_CMD_SP(sp);
 
        /* Update entry type to indicate Command Type 2 IOCB */
-       *((uint32_t *)(&cmd_pkt->entry_type)) =
-           cpu_to_le32(COMMAND_TYPE);
+       put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
 
        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -215,8 +213,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
        /* Three DSDs are available in the Command Type 2 IOCB */
-       avail_dsds = 3;
-       cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+       avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
+       cur_dsd = cmd_pkt->dsd32;
 
        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
@@ -229,12 +227,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
-                       cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
-                       avail_dsds = 7;
+                       cur_dsd = cont_pkt->dsd;
+                       avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }
 
-               *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
-               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
 }
@@ -251,7 +248,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
     uint16_t tot_dsds)
 {
        uint16_t        avail_dsds;
-       uint32_t        *cur_dsd;
+       struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
@@ -260,7 +257,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
        cmd = GET_CMD_SP(sp);
 
        /* Update entry type to indicate Command Type 3 IOCB */
-       *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
+       put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
 
        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -272,12 +269,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
        /* Two DSDs are available in the Command Type 3 IOCB */
-       avail_dsds = 2;
-       cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+       avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
+       cur_dsd = cmd_pkt->dsd64;
 
        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-               dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;
 
                /* Allocate additional continuation packets? */
@@ -287,14 +283,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-                       cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-                       avail_dsds = 5;
+                       cur_dsd = cont_pkt->dsd;
+                       avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
 }
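The scatter/gather changes in these hunks replace open-coded LSD()/MSD() 32-bit stores with put_unaligned_le32()/put_unaligned_le64() writes into dedicated descriptor structures, and append_dsd32()/append_dsd64() fill one descriptor per scatterlist element. Those structures and helpers are not defined in this hunk; a plausible shape, inferred from the call sites and offered only as an assumption, is:

        #include <linux/scatterlist.h>
        #include <asm/unaligned.h>

        struct dsd64 {
                __le64 address;
                __le32 length;
        } __packed;

        static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
        {
                put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
                (*dsd)->length = cpu_to_le32(sg_dma_len(sg));
                (*dsd)++;
        }
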
@@ -467,7 +460,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
                        req->ring_ptr++;
 
                /* Set chip new ring index. */
-               if (ha->mqenable || IS_QLA27XX(ha)) {
+               if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
@@ -580,13 +573,11 @@ static inline int
 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
 {
-       uint32_t *cur_dsd = NULL;
+       struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct  scatterlist *cur_seg;
-       uint32_t *dsd_seg;
-       void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
@@ -596,7 +587,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        cmd = GET_CMD_SP(sp);
 
        /* Update entry type to indicate Command Type 3 IOCB */
-       *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
+       put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
 
        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -638,32 +629,27 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 
                if (first_iocb) {
                        first_iocb = 0;
-                       dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
-                       *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                       *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                       cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+                       put_unaligned_le64(dsd_ptr->dsd_list_dma,
+                                          &cmd_pkt->fcp_dsd.address);
+                       cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
-                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(dsd_list_len);
+                       put_unaligned_le64(dsd_ptr->dsd_list_dma,
+                                          &cur_dsd->address);
+                       cur_dsd->length = cpu_to_le32(dsd_list_len);
+                       cur_dsd++;
                }
-               cur_dsd = (uint32_t *)next_dsd;
+               cur_dsd = next_dsd;
                while (avail_dsds) {
-                       dma_addr_t      sle_dma;
-
-                       sle_dma = sg_dma_address(cur_seg);
-                       *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-                       *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+                       append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }
 
        /* Null termination */
-       *cur_dsd++ =  0;
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
+       cur_dsd->address = 0;
+       cur_dsd->length = 0;
+       cur_dsd++;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
 }
@@ -702,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
 {
        uint16_t        avail_dsds;
-       uint32_t        *cur_dsd;
+       struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
@@ -711,7 +697,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        cmd = GET_CMD_SP(sp);
 
        /* Update entry type to indicate Command Type 3 IOCB */
-       *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
+       put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
 
        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -734,12 +720,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 
        /* One DSD is available in the Command Type 3 IOCB */
        avail_dsds = 1;
-       cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+       cur_dsd = &cmd_pkt->dsd;
 
        /* Load data segments */
 
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-               dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;
 
                /* Allocate additional continuation packets? */
@@ -749,14 +734,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
-                       cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-                       avail_dsds = 5;
+                       cur_dsd = cont_pkt->dsd;
+                       avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
 }
@@ -892,14 +874,14 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
 
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-       uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+       struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
-       uint32_t *cur_dsd = dsd;
+       struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
@@ -973,14 +955,14 @@ alloc_and_fill:
 
 
                        /* add new list to cmd iocb or last list */
-                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = dsd_list_len;
-                       cur_dsd = (uint32_t *)next_dsd;
+                       put_unaligned_le64(dsd_ptr->dsd_list_dma,
+                                          &cur_dsd->address);
+                       cur_dsd->length = cpu_to_le32(dsd_list_len);
+                       cur_dsd = next_dsd;
                }
-               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(sle_dma_len);
+               put_unaligned_le64(sle_dma, &cur_dsd->address);
+               cur_dsd->length = cpu_to_le32(sle_dma_len);
+               cur_dsd++;
                avail_dsds--;
 
                if (partial == 0) {
@@ -999,22 +981,22 @@ alloc_and_fill:
                }
        }
        /* Null termination */
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
+       cur_dsd->address = 0;
+       cur_dsd->length = 0;
+       cur_dsd++;
        return 0;
 }
 
 int
-qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-       uint16_t tot_dsds, struct qla_tc_param *tc)
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
+       struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
-       uint32_t *cur_dsd = dsd;
+       struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;
@@ -1031,8 +1013,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
 
 
        for_each_sg(sgl, sg, tot_dsds, i) {
-               dma_addr_t      sle_dma;
-
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1072,29 +1052,25 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
                        }
 
                        /* add new list to cmd iocb or last list */
-                       *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                       *cur_dsd++ = dsd_list_len;
-                       cur_dsd = (uint32_t *)next_dsd;
+                       put_unaligned_le64(dsd_ptr->dsd_list_dma,
+                                          &cur_dsd->address);
+                       cur_dsd->length = cpu_to_le32(dsd_list_len);
+                       cur_dsd = next_dsd;
                }
-               sle_dma = sg_dma_address(sg);
-
-               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
 
        }
        /* Null termination */
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
+       cur_dsd->address = 0;
+       cur_dsd->length = 0;
+       cur_dsd++;
        return 0;
 }
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-    uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+       struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
@@ -1109,6 +1085,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 
        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.ctx;
@@ -1314,16 +1291,15 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                                }
 
                                /* add new list to cmd iocb or last list */
-                               *cur_dsd++ =
-                                   cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                               *cur_dsd++ =
-                                   cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                               *cur_dsd++ = dsd_list_len;
+                               put_unaligned_le64(dsd_ptr->dsd_list_dma,
+                                                  &cur_dsd->address);
+                               cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
-                       *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
-                       *cur_dsd++ = cpu_to_le32(sglen);
+                       put_unaligned_le64(dif_dsd->dsd_list_dma,
+                                          &cur_dsd->address);
+                       cur_dsd->length = cpu_to_le32(sglen);
+                       cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
@@ -1334,8 +1310,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                        difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
-                       dma_addr_t sle_dma;
-
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1375,24 +1349,19 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                                }
 
                                /* add new list to cmd iocb or last list */
-                               *cur_dsd++ =
-                                   cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-                               *cur_dsd++ =
-                                   cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-                               *cur_dsd++ = dsd_list_len;
+                               put_unaligned_le64(dsd_ptr->dsd_list_dma,
+                                                  &cur_dsd->address);
+                               cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
-                       sle_dma = sg_dma_address(sg);
-                       *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-                       *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-                       *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+                       append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }
        /* Null termination */
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
-       *cur_dsd++ = 0;
+       cur_dsd->address = 0;
+       cur_dsd->length = 0;
+       cur_dsd++;
        return 0;
 }
 /**
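
All of the sg-walking builders above share the same overflow step: when avail_dsds reaches zero, a new descriptor block is taken from the dsd pool, the current slot is written as a link (block DMA address plus dsd_list_len) with put_unaligned_le64(), and cur_dsd jumps to the new block. A stripped-down sketch of that chaining step, with hypothetical names and the same assumed struct layout:

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <asm/unaligned.h>

    struct dsd64 { __le64 address; __le32 length; } __attribute__((packed));

    /* Hypothetical helper illustrating the chaining step; not driver code. */
    static struct dsd64 *chain_dsd_block(struct dsd64 *cur,
                                         struct dsd64 *next_block,
                                         dma_addr_t next_block_dma,
                                         u32 next_block_len)
    {
            put_unaligned_le64(next_block_dma, &cur->address); /* link descriptor */
            cur->length = cpu_to_le32(next_block_len);
            return next_block;      /* caller keeps filling the new block */
    }
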
@@ -1405,11 +1374,12 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
  * @tot_prot_dsds: Total number of segments with protection information
  * @fw_prot_opts: Protection options to be passed to firmware
  */
-inline int
+static inline int
 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
 {
-       uint32_t                *cur_dsd, *fcp_dl;
+       struct dsd64            *cur_dsd;
+       uint32_t                *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
@@ -1427,7 +1397,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        cmd = GET_CMD_SP(sp);
 
        /* Update entry type to indicate Command Type CRC_2 IOCB */
-       *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
+       put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
 
        vha = sp->vha;
        ha = vha->hw;
@@ -1475,8 +1445,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);
 
-       cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
-       cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+       put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
        /* Determine SCSI command length -- align to 4 byte boundary */
@@ -1503,10 +1472,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
-       cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
-           LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
-       cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
-           MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+       put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
+                          &cmd_pkt->fcp_cmnd_dseg_address);
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;
 
@@ -1520,18 +1487,18 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
-           total_bytes = data_bytes;
-           data_bytes += dif_bytes;
-           break;
+               total_bytes = data_bytes;
+               data_bytes += dif_bytes;
+               break;
 
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
-           total_bytes = data_bytes + dif_bytes;
-           break;
+               total_bytes = data_bytes + dif_bytes;
+               break;
        default:
-           BUG();
+               BUG();
        }
 
        if (!qla2x00_hba_err_chk_enabled(sp))
@@ -1548,7 +1515,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        }
 
        if (!bundling) {
-               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+               cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
        } else {
                /*
                 * Configure Bundling if we need to fetch interlaving
@@ -1558,7 +1525,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
-               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+               cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
        }
 
        /* Finish the common fields of CRC pkt */
@@ -1591,7 +1558,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
-               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+               cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
@@ -2325,7 +2292,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
        if (req->cnt < req_cnt + 2) {
                if (qpair->use_shadow_reg)
                        cnt = *req->out_ptr;
-               else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+               else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+                   IS_QLA28XX(ha))
                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
                else if (IS_P3P_TYPE(ha))
                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2494,7 +2462,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
-           cpu_to_le16(sp->fcport->loop_id):
+           cpu_to_le16(sp->fcport->loop_id) :
            cpu_to_le16(sp->fcport->loop_id << 8);
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
@@ -2565,6 +2533,16 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
        }
 }
 
+void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+       timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
+       sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+       sp->free = qla2x00_sp_free;
+       if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
+               init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+       add_timer(&sp->u.iocb_cmd.timer);
+}
+
 static void
 qla2x00_els_dcmd_sp_free(void *data)
 {
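
The qla2x00_init_timer() addition above follows the standard timer_setup()/add_timer() shape: bind the callback, compute expires from jiffies, then arm the timer. A generic sketch of that pattern with hypothetical names, not the driver's srb handling:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_cmd {
            struct timer_list timer;
            /* ... command state ... */
    };

    static void my_cmd_timeout(struct timer_list *t)
    {
            struct my_cmd *cmd = from_timer(cmd, t, timer);

            /* handle the timed-out command */
            (void)cmd;
    }

    static void my_cmd_start_timer(struct my_cmd *cmd, unsigned long tmo_secs)
    {
            timer_setup(&cmd->timer, my_cmd_timeout, 0);
            cmd->timer.expires = jiffies + tmo_secs * HZ;
            add_timer(&cmd->timer);
    }
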
@@ -2726,18 +2704,13 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
                els_iocb->tx_byte_count = els_iocb->tx_len =
                        sizeof(struct els_plogi_payload);
-               els_iocb->tx_address[0] =
-                       cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-               els_iocb->tx_address[1] =
-                       cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-
+               put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
+                                  &els_iocb->tx_address);
                els_iocb->rx_dsd_count = 1;
                els_iocb->rx_byte_count = els_iocb->rx_len =
                        sizeof(struct els_plogi_payload);
-               els_iocb->rx_address[0] =
-                       cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
-               els_iocb->rx_address[1] =
-                       cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
+               put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
+                                  &els_iocb->rx_address);
 
                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
                    "PLOGI ELS IOCB:\n");
@@ -2745,15 +2718,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
                    (uint8_t *)els_iocb, 0x70);
        } else {
                els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
-               els_iocb->tx_address[0] =
-                   cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
-               els_iocb->tx_address[1] =
-                   cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+               put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
+                                  &els_iocb->tx_address);
                els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
 
                els_iocb->rx_byte_count = 0;
-               els_iocb->rx_address[0] = 0;
-               els_iocb->rx_address[1] = 0;
+               els_iocb->rx_address = 0;
                els_iocb->rx_len = 0;
        }
 
@@ -2976,17 +2946,13 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
         els_iocb->tx_byte_count =
             cpu_to_le32(bsg_job->request_payload.payload_len);
 
-        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
-        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
+       put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+                          &els_iocb->tx_address);
         els_iocb->tx_len = cpu_to_le32(sg_dma_len
             (bsg_job->request_payload.sg_list));
 
-        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->reply_payload.sg_list)));
-        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
-            (bsg_job->reply_payload.sg_list)));
+       put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+                          &els_iocb->rx_address);
         els_iocb->rx_len = cpu_to_le32(sg_dma_len
             (bsg_job->reply_payload.sg_list));
 
@@ -2997,14 +2963,13 @@ static void
 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
 {
        uint16_t        avail_dsds;
-       uint32_t        *cur_dsd;
+       struct dsd64    *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
        struct bsg_job *bsg_job = sp->u.bsg_job;
-       int loop_iterartion = 0;
        int entry_count = 1;
 
        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -3024,25 +2989,20 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
        ct_iocb->rsp_bytecount =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
 
-       ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
-           (bsg_job->request_payload.sg_list)));
-       ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
-           (bsg_job->request_payload.sg_list)));
-       ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
+       put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+                          &ct_iocb->req_dsd.address);
+       ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
 
-       ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
-           (bsg_job->reply_payload.sg_list)));
-       ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
-           (bsg_job->reply_payload.sg_list)));
-       ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
+       put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+                          &ct_iocb->rsp_dsd.address);
+       ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
 
        avail_dsds = 1;
-       cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
+       cur_dsd = &ct_iocb->rsp_dsd;
        index = 0;
        tot_dsds = bsg_job->reply_payload.sg_cnt;
 
        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
-               dma_addr_t       sle_dma;
                cont_a64_entry_t *cont_pkt;
 
                /* Allocate additional continuation packets? */
@@ -3053,16 +3013,12 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
                               */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                            vha->hw->req_q_map[0]);
-                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       cur_dsd = cont_pkt->dsd;
                        avail_dsds = 5;
                        entry_count++;
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
-               loop_iterartion++;
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
@@ -3074,7 +3030,7 @@ static void
 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 {
        uint16_t        avail_dsds;
-       uint32_t        *cur_dsd;
+       struct dsd64    *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t cmd_dsds, rsp_dsds;
@@ -3103,12 +3059,10 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
             cpu_to_le32(bsg_job->request_payload.payload_len);
 
        avail_dsds = 2;
-       cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
+       cur_dsd = ct_iocb->dsd;
        index = 0;
 
        for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
-               dma_addr_t       sle_dma;
-
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
@@ -3117,23 +3071,18 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(
                            vha, ha->req_q_map[0]);
-                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       cur_dsd = cont_pkt->dsd;
                        avail_dsds = 5;
                        entry_count++;
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
 
        index = 0;
 
        for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
-               dma_addr_t       sle_dma;
-
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
@@ -3142,15 +3091,12 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
                               */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                            ha->req_q_map[0]);
-                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       cur_dsd = cont_pkt->dsd;
                        avail_dsds = 5;
                        entry_count++;
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
         ct_iocb->entry_count = entry_count;
@@ -3371,10 +3317,8 @@ sufficient_dsds:
                *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
 
                cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-               cmd_pkt->fcp_cmnd_dseg_address[0] =
-                   cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-               cmd_pkt->fcp_cmnd_dseg_address[1] =
-                   cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+               put_unaligned_le64(ctx->fcp_cmnd_dma,
+                                  &cmd_pkt->fcp_cmnd_dseg_address);
 
                sp->flags |= SRB_FCP_CMND_DMA_VALID;
                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
@@ -3386,6 +3330,7 @@ sufficient_dsds:
                cmd_pkt->entry_status = (uint8_t) rsp->id;
        } else {
                struct cmd_type_7 *cmd_pkt;
+
                req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
                if (req->cnt < (req_cnt + 2)) {
                        cnt = (uint16_t)RD_REG_DWORD_RELAXED(
@@ -3590,15 +3535,13 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
 
        cmd_pkt->tx_dseg_count = 1;
        cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
-       cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
-       cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
-       cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+       cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+       put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
 
        cmd_pkt->rx_dseg_count = 1;
        cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
-       cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
-       cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
-       cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+       cmd_pkt->dsd[1].length  = nvme->u.nvme.rsp_len;
+       put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
 
        return rval;
 }
@@ -3737,7 +3680,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
                                struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
 {
        uint16_t avail_dsds;
-       uint32_t *cur_dsd;
+       struct dsd64 *cur_dsd;
        uint32_t req_data_len = 0;
        uint32_t rsp_data_len = 0;
        struct scatterlist *sg;
@@ -3746,8 +3689,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
        struct bsg_job *bsg_job = sp->u.bsg_job;
 
        /*Update entry type to indicate bidir command */
-       *((uint32_t *)(&cmd_pkt->entry_type)) =
-               cpu_to_le32(COMMAND_BIDIRECTIONAL);
+       put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
 
        /* Set the transfer direction, in this set both flags
         * Also set the BD_WRAP_BACK flag, firmware will take care
@@ -3773,13 +3715,12 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
         * are bundled in continuation iocb
         */
        avail_dsds = 1;
-       cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+       cur_dsd = &cmd_pkt->fcp_dsd;
 
        index = 0;
 
        for_each_sg(bsg_job->request_payload.sg_list, sg,
                                bsg_job->request_payload.sg_cnt, index) {
-               dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;
 
                /* Allocate additional continuation packets */
@@ -3788,14 +3729,11 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
                         * 5 DSDS
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       cur_dsd = cont_pkt->dsd;
                        avail_dsds = 5;
                        entry_count++;
                }
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        /* For read request DSD will always goes to continuation IOCB
@@ -3805,7 +3743,6 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
         */
        for_each_sg(bsg_job->reply_payload.sg_list, sg,
                                bsg_job->reply_payload.sg_cnt, index) {
-               dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;
 
                /* Allocate additional continuation packets */
@@ -3814,14 +3751,11 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
                         * 5 DSDS
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-                       cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+                       cur_dsd = cont_pkt->dsd;
                        avail_dsds = 5;
                        entry_count++;
                }
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        /* This value should be same as number of IOCB required for this cmd */
index 69bbea9..78aec50 100644 (file)
@@ -23,6 +23,14 @@ static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
        sts_entry_t *);
 
+const char *const port_state_str[] = {
+       "Unknown",
+       "UNCONFIGURED",
+       "DEAD",
+       "LOST",
+       "ONLINE"
+};
+
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
  * @irq: interrupt number
@@ -41,7 +49,7 @@ qla2100_intr_handler(int irq, void *dev_id)
        int             status;
        unsigned long   iter;
        uint16_t        hccr;
-       uint16_t        mb[4];
+       uint16_t        mb[8];
        struct rsp_que *rsp;
        unsigned long   flags;
 
@@ -160,7 +168,7 @@ qla2300_intr_handler(int irq, void *dev_id)
        unsigned long   iter;
        uint32_t        stat;
        uint16_t        hccr;
-       uint16_t        mb[4];
+       uint16_t        mb[8];
        struct rsp_que *rsp;
        struct qla_hw_data *ha;
        unsigned long   flags;
@@ -366,7 +374,7 @@ qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
        static const char *const link_speeds[] = {
                "1", "2", "?", "4", "8", "16", "32", "10"
        };
-#define        QLA_LAST_SPEED  7
+#define        QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
 
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
                return link_speeds[0];
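
Deriving QLA_LAST_SPEED from ARRAY_SIZE(link_speeds) - 1 ties the bound to the table itself, so growing the table can no longer leave the clamp stale. The same idea in a compilable form; the table contents here are placeholders, not the driver's:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const names[] = { "a", "b", "c" };
    #define LAST_NAME (ARRAY_SIZE(names) - 1)

    static const char *name_str(unsigned int idx)
    {
            /* clamp out-of-range indices to the last entry */
            return names[idx > LAST_NAME ? LAST_NAME : idx];
    }

    int main(void)
    {
            printf("%s %s\n", name_str(1), name_str(42));
            return 0;
    }
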
@@ -708,12 +716,15 @@ skip_rio:
                break;
 
        case MBA_SYSTEM_ERR:            /* System Error */
-               mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
+               mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+                   IS_QLA28XX(ha)) ?
                        RD_REG_WORD(&reg24->mailbox7) : 0;
                ql_log(ql_log_warn, vha, 0x5003,
                    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
                    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
-
+               ha->fw_dump_mpi =
+                   (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+                   RD_REG_WORD(&reg24->mailbox7) & BIT_8;
                ha->isp_ops->fw_dump(vha, 1);
                ha->flags.fw_init_done = 0;
                QLA_FW_STOPPED(ha);
@@ -837,6 +848,7 @@ skip_rio:
                                if (ha->flags.fawwpn_enabled &&
                                    (ha->current_topology == ISP_CFG_F)) {
                                        void *wwpn = ha->init_cb->port_name;
+
                                        memcpy(vha->port_name, wwpn, WWN_SIZE);
                                        fc_host_port_name(vha->host) =
                                            wwn_to_u64(vha->port_name);
@@ -1372,7 +1384,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
                    le16_to_cpu(mbx->status_flags));
 
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
-                   (uint8_t *)mbx, sizeof(*mbx));
+                   mbx, sizeof(*mbx));
 
                goto logio_done;
        }
@@ -1516,7 +1528,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                            bsg_reply->reply_payload_rcv_len = 0;
                    }
                    ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
-                       (uint8_t *)pkt, sizeof(*pkt));
+                       pkt, sizeof(*pkt));
            } else {
                    res = DID_OK << 16;
                    bsg_reply->reply_payload_rcv_len =
@@ -1591,8 +1603,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
        }
 
        comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
-       fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
-       fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+       fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+       fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
 
        if (iocb_type == ELS_IOCB_TYPE) {
                els = &sp->u.iocb_cmd;
@@ -1613,7 +1625,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                                res = DID_ERROR << 16;
                        }
                }
-               ql_log(ql_log_info, vha, 0x503f,
+               ql_dbg(ql_dbg_user, vha, 0x503f,
                    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
                    type, sp->handle, comp_status, fw_status[1], fw_status[2],
                    le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1656,7 +1668,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
                       fw_status, sizeof(fw_status));
                ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
-                               (uint8_t *)pkt, sizeof(*pkt));
+                   pkt, sizeof(*pkt));
        }
        else {
                res =  DID_OK << 16;
@@ -1700,7 +1712,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    logio->entry_status);
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
-                   (uint8_t *)logio, sizeof(*logio));
+                   logio, sizeof(*logio));
 
                goto logio_done;
        }
@@ -1846,8 +1858,8 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
        }
 
        if (iocb->u.tmf.data != QLA_SUCCESS)
-               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
-                   (uint8_t *)sts, sizeof(*sts));
+               ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
+                   sts, sizeof(*sts));
 
        sp->done(sp, 0);
 }
@@ -1969,6 +1981,52 @@ static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
        sp->done(sp, rval);
 }
 
+/* Process a single response queue entry. */
+static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
+                                          struct rsp_que *rsp,
+                                          sts_entry_t *pkt)
+{
+       sts21_entry_t *sts21_entry;
+       sts22_entry_t *sts22_entry;
+       uint16_t handle_cnt;
+       uint16_t cnt;
+
+       switch (pkt->entry_type) {
+       case STATUS_TYPE:
+               qla2x00_status_entry(vha, rsp, pkt);
+               break;
+       case STATUS_TYPE_21:
+               sts21_entry = (sts21_entry_t *)pkt;
+               handle_cnt = sts21_entry->handle_count;
+               for (cnt = 0; cnt < handle_cnt; cnt++)
+                       qla2x00_process_completed_request(vha, rsp->req,
+                                               sts21_entry->handle[cnt]);
+               break;
+       case STATUS_TYPE_22:
+               sts22_entry = (sts22_entry_t *)pkt;
+               handle_cnt = sts22_entry->handle_count;
+               for (cnt = 0; cnt < handle_cnt; cnt++)
+                       qla2x00_process_completed_request(vha, rsp->req,
+                                               sts22_entry->handle[cnt]);
+               break;
+       case STATUS_CONT_TYPE:
+               qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+               break;
+       case MBX_IOCB_TYPE:
+               qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
+               break;
+       case CT_IOCB_TYPE:
+               qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+               break;
+       default:
+               /* Type Not Supported. */
+               ql_log(ql_log_warn, vha, 0x504a,
+                      "Received unknown response pkt type %x entry status=%x.\n",
+                      pkt->entry_type, pkt->entry_status);
+               break;
+       }
+}
+
 /**
  * qla2x00_process_response_queue() - Process response queue entries.
  * @rsp: response queue
@@ -1980,8 +2038,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
        struct qla_hw_data *ha = rsp->hw;
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
        sts_entry_t     *pkt;
-       uint16_t        handle_cnt;
-       uint16_t        cnt;
 
        vha = pci_get_drvdata(ha->pdev);
 
@@ -2006,42 +2062,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
                        continue;
                }
 
-               switch (pkt->entry_type) {
-               case STATUS_TYPE:
-                       qla2x00_status_entry(vha, rsp, pkt);
-                       break;
-               case STATUS_TYPE_21:
-                       handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
-                       for (cnt = 0; cnt < handle_cnt; cnt++) {
-                               qla2x00_process_completed_request(vha, rsp->req,
-                                   ((sts21_entry_t *)pkt)->handle[cnt]);
-                       }
-                       break;
-               case STATUS_TYPE_22:
-                       handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
-                       for (cnt = 0; cnt < handle_cnt; cnt++) {
-                               qla2x00_process_completed_request(vha, rsp->req,
-                                   ((sts22_entry_t *)pkt)->handle[cnt]);
-                       }
-                       break;
-               case STATUS_CONT_TYPE:
-                       qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
-                       break;
-               case MBX_IOCB_TYPE:
-                       qla2x00_mbx_iocb_entry(vha, rsp->req,
-                           (struct mbx_entry *)pkt);
-                       break;
-               case CT_IOCB_TYPE:
-                       qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
-                       break;
-               default:
-                       /* Type Not Supported. */
-                       ql_log(ql_log_warn, vha, 0x504a,
-                           "Received unknown response pkt type %x "
-                           "entry status=%x.\n",
-                           pkt->entry_type, pkt->entry_status);
-                       break;
-               }
+               qla2x00_process_response_entry(vha, rsp, pkt);
                ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                wmb();
        }
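
Hoisting the per-entry switch into qla2x00_process_response_entry() leaves the queue loop with only ring traversal and bookkeeping, and the dispatch logic can be reasoned about one packet at a time. The general shape of that refactor, reduced to hypothetical types:

    /* Before, the loop owned the switch; after, it makes one call per entry. */
    struct pkt { int type; };

    static void process_one(struct pkt *p)
    {
            switch (p->type) {
            case 1:  /* status entry */        break;
            case 2:  /* continuation entry */  break;
            default: /* unknown type */        break;
            }
    }

    static void process_queue(struct pkt *ring, int n)
    {
            for (int i = 0; i < n; i++)
                    process_one(&ring[i]);  /* loop keeps only traversal logic */
    }
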
@@ -2238,6 +2259,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
        struct fc_bsg_reply *bsg_reply;
        sts_entry_t *sts;
        struct sts_entry_24xx *sts24;
+
        sts = (sts_entry_t *) pkt;
        sts24 = (struct sts_entry_24xx *) pkt;
 
@@ -3014,7 +3036,8 @@ process_err:
                        qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
                        break;
                case ABTS_RECV_24XX:
-                       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+                       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+                           IS_QLA28XX(ha)) {
                                /* ensure that the ATIO queue is empty */
                                qlt_handle_abts_recv(vha, rsp,
                                    (response_t *)pkt);
@@ -3072,6 +3095,7 @@ process_err:
        /* Adjust ring index */
        if (IS_P3P_TYPE(ha)) {
                struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
                WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
        } else {
                WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
@@ -3087,7 +3111,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
        if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-           !IS_QLA27XX(ha))
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return;
 
        rval = QLA_SUCCESS;
@@ -3475,7 +3499,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                ql_log(ql_log_fatal, vha, 0x00c8,
                    "Failed to allocate memory for ha->msix_entries.\n");
                ret = -ENOMEM;
-               goto msix_out;
+               goto free_irqs;
        }
        ha->flags.msix_enabled = 1;
 
@@ -3539,7 +3563,7 @@ msix_register_fail:
        }
 
        /* Enable MSI-X vector for response queue update for queue 0 */
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                if (ha->msixbase && ha->mqiobase &&
                    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
                     ql2xmqsupport))
@@ -3558,6 +3582,10 @@ msix_register_fail:
 
 msix_out:
        return ret;
+
+free_irqs:
+       pci_free_irq_vectors(ha->pdev);
+       goto msix_out;
 }
 
 int
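
The new free_irqs label makes the kcalloc() failure path in qla24xx_enable_msix() release the vectors obtained by pci_alloc_irq_vectors() before bailing out, instead of leaking them through the plain msix_out exit. A condensed sketch of that error-path shape, with names and structure simplified rather than copied from the driver:

    #include <linux/pci.h>
    #include <linux/slab.h>

    static int enable_msix_sketch(struct pci_dev *pdev, int nvec, void **table_out)
    {
            void *entries;
            int ret;

            ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSIX);
            if (ret < 0)
                    return ret;

            entries = kcalloc(nvec, sizeof(void *), GFP_KERNEL);
            if (!entries) {
                    ret = -ENOMEM;
                    goto free_irqs; /* undo the vector allocation on failure */
            }

            /* ... request per-vector IRQs, set up queues ... */
            *table_out = entries;   /* caller owns the table on success */
            return 0;

    free_irqs:
            pci_free_irq_vectors(pdev);
            return ret;
    }
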
@@ -3570,7 +3598,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
        /* If possible, enable MSI-X. */
        if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
            !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
-           !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
+           !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
                goto skip_msi;
 
        if (ql2xenablemsix == 2)
@@ -3609,7 +3637,7 @@ skip_msix:
 
        if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
            !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
-           !IS_QLA27XX(ha))
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                goto skip_msi;
 
        ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
index 5400696..133f5f6 100644 (file)
@@ -567,9 +567,9 @@ mbx_done:
                    mcp->mb[0]);
        } else if (rval) {
                if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
-                       pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
+                       pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
                            dev_name(&ha->pdev->dev), 0x1020+0x800,
-                           vha->host_no);
+                           vha->host_no, rval);
                        mboxes = mcp->in_mb;
                        cnt = 4;
                        for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
@@ -634,14 +634,15 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
                mcp->out_mb |= MBX_4;
        }
 
-       mcp->in_mb = MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
 
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1023,
-                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+                   "Failed=%x mb[0]=%x mb[1]=%x.\n",
+                   rval, mcp->mb[0], mcp->mb[1]);
        } else {
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
                    "Done %s.\n", __func__);
@@ -656,7 +657,7 @@ static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
 {
        uint16_t mb4 = BIT_0;
 
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
 
        return mb4;
@@ -666,7 +667,7 @@ static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
 {
        uint16_t mb4 = BIT_0;
 
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                struct nvram_81xx *nv = ha->nvram;
 
                mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
@@ -711,7 +712,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                mcp->mb[4] = 0;
                ha->flags.using_lr_setting = 0;
                if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
-                   IS_QLA27XX(ha)) {
+                   IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        if (ql2xautodetectsfp) {
                                if (ha->flags.detected_lr_sfp) {
                                        mcp->mb[4] |=
@@ -730,19 +731,20 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                        }
                }
 
-               if (ql2xnvmeenable && IS_QLA27XX(ha))
+               if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
                        mcp->mb[4] |= NVME_ENABLE_FLAG;
 
-               if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        struct nvram_81xx *nv = ha->nvram;
                        /* set minimum speed if specified in nvram */
-                       if (nv->min_link_speed >= 2 &&
-                           nv->min_link_speed <= 5) {
+                       if (nv->min_supported_speed >= 2 &&
+                           nv->min_supported_speed <= 5) {
                                mcp->mb[4] |= BIT_4;
-                               mcp->mb[11] = nv->min_link_speed;
+                               mcp->mb[11] |= nv->min_supported_speed & 0xF;
                                mcp->out_mb |= MBX_11;
                                mcp->in_mb |= BIT_5;
-                               vha->min_link_speed_feat = nv->min_link_speed;
+                               vha->min_supported_speed =
+                                   nv->min_supported_speed;
                        }
                }
 
@@ -770,34 +772,39 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1026,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
-       } else {
-               if (IS_FWI2_CAPABLE(ha)) {
-                       ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
-                       ql_dbg(ql_dbg_mbx, vha, 0x119a,
-                           "fw_ability_mask=%x.\n", ha->fw_ability_mask);
-                       ql_dbg(ql_dbg_mbx, vha, 0x1027,
-                           "exchanges=%x.\n", mcp->mb[1]);
-                       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
-                               ha->max_speed_sup = mcp->mb[2] & BIT_0;
-                               ql_dbg(ql_dbg_mbx, vha, 0x119b,
-                                   "Maximum speed supported=%s.\n",
-                                   ha->max_speed_sup ? "32Gps" : "16Gps");
-                               if (vha->min_link_speed_feat) {
-                                       ha->min_link_speed = mcp->mb[5];
-                                       ql_dbg(ql_dbg_mbx, vha, 0x119c,
-                                           "Minimum speed set=%s.\n",
-                                           mcp->mb[5] == 5 ? "32Gps" :
-                                           mcp->mb[5] == 4 ? "16Gps" :
-                                           mcp->mb[5] == 3 ? "8Gps" :
-                                           mcp->mb[5] == 2 ? "4Gps" :
-                                               "unknown");
-                               }
-                       }
+               return rval;
+       }
+
+       if (!IS_FWI2_CAPABLE(ha))
+               goto done;
+
+       ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
+       ql_dbg(ql_dbg_mbx, vha, 0x119a,
+           "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+       ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+               ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
+               ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
+                   ha->max_supported_speed == 0 ? "16Gps" :
+                   ha->max_supported_speed == 1 ? "32Gps" :
+                   ha->max_supported_speed == 2 ? "64Gps" : "unknown");
+               if (vha->min_supported_speed) {
+                       ha->min_supported_speed = mcp->mb[5] &
+                           (BIT_0 | BIT_1 | BIT_2);
+                       ql_dbg(ql_dbg_mbx, vha, 0x119c,
+                           "min_supported_speed=%s.\n",
+                           ha->min_supported_speed == 6 ? "64Gps" :
+                           ha->min_supported_speed == 5 ? "32Gps" :
+                           ha->min_supported_speed == 4 ? "16Gps" :
+                           ha->min_supported_speed == 3 ? "8Gps" :
+                           ha->min_supported_speed == 2 ? "4Gps" : "unknown");
                }
-               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
-                   "Done.\n");
        }
 
+done:
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+           "Done %s.\n", __func__);
+
        return rval;
 }
 
@@ -1053,10 +1060,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
        if (IS_FWI2_CAPABLE(ha))
                mcp->in_mb |= MBX_17|MBX_16|MBX_15;
-       if (IS_QLA27XX(ha))
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mcp->in_mb |=
                    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
-                   MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
+                   MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
 
        mcp->flags = 0;
        mcp->tov = MBX_TOV_SECONDS;
@@ -1122,7 +1129,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                }
        }
 
-       if (IS_QLA27XX(ha)) {
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+               ha->serdes_version[0] = mcp->mb[7] & 0xff;
+               ha->serdes_version[1] = mcp->mb[8] >> 8;
+               ha->serdes_version[2] = mcp->mb[8] & 0xff;
                ha->mpi_version[0] = mcp->mb[10] & 0xff;
                ha->mpi_version[1] = mcp->mb[11] >> 8;
                ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1133,6 +1143,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
                ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
                ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
+               if (IS_QLA28XX(ha)) {
+                       if (mcp->mb[16] & BIT_10) {
+                               ql_log(ql_log_info, vha, 0xffff,
+                                   "FW support secure flash updates\n");
+                               ha->flags.secure_fw = 1;
+                       }
+               }
        }
 
 failed:
@@ -1638,7 +1655,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
        if (IS_FWI2_CAPABLE(vha->hw))
                mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
-       if (IS_QLA27XX(vha->hw))
+       if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
                mcp->in_mb |= MBX_15;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
@@ -1692,7 +1709,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                        }
                }
 
-               if (IS_QLA27XX(vha->hw))
+               if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
                        vha->bbcr = mcp->mb[15];
        }
 
@@ -1808,7 +1825,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
        }
        /* 1 and 2 should normally be captured. */
        mcp->in_mb = MBX_2|MBX_1|MBX_0;
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                /* mb3 is additional info about the installed SFP. */
                mcp->in_mb  |= MBX_3;
        mcp->buf_size = size;
@@ -1819,10 +1836,20 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
        if (rval != QLA_SUCCESS) {
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x104d,
-                   "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+                   "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
                    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
+               if (ha->init_cb) {
+                       ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
+                       ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
+                           0x0104d, ha->init_cb, sizeof(*ha->init_cb));
+               }
+               if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
+                       ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
+                       ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
+                           0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
+               }
        } else {
-               if (IS_QLA27XX(ha)) {
+               if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
                                ql_dbg(ql_dbg_mbx, vha, 0x119d,
                                    "Invalid SFP/Validation Failed\n");
@@ -2006,7 +2033,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
 
                /* Passback COS information. */
                fcport->supported_classes = (pd->options & BIT_4) ?
-                   FC_COS_CLASS2: FC_COS_CLASS3;
+                   FC_COS_CLASS2 : FC_COS_CLASS3;
        }
 
 gpd_error_out:
@@ -2076,7 +2103,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
        } else {
-               if (IS_QLA27XX(ha)) {
+               if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
                                ql_dbg(ql_dbg_mbx, vha, 0x119e,
                                    "Invalid SFP/Validation Failed\n");
@@ -2859,7 +2886,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
        mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
        mcp->out_mb = MBX_0;
        mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-       if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
+       if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+           IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mcp->in_mb |= MBX_12;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
@@ -2884,7 +2912,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
                ha->orig_fw_iocb_count = mcp->mb[10];
                if (ha->flags.npiv_supported)
                        ha->max_npiv_vports = mcp->mb[11];
-               if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+               if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+                   IS_QLA28XX(ha))
                        ha->fw_max_fcf_count = mcp->mb[12];
        }
 
@@ -3248,7 +3277,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 
        /* Issue marker IOCB. */
        rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
-           type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
+           type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
        if (rval2 != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1099,
                    "Failed to issue marker IOCB (%x).\n", rval2);
@@ -3323,7 +3352,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
        mbx_cmd_t *mcp = &mc;
 
        if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
-           !IS_QLA27XX(vha->hw))
+           !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -3362,7 +3391,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
        mbx_cmd_t *mcp = &mc;
 
        if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
-           !IS_QLA27XX(vha->hw))
+           !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -3631,7 +3660,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
            "Entered %s.\n", __func__);
 
        if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
-           !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+           !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+           !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3744,7 +3774,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        rval = qla2x00_mailbox_command(vha, mcp);
 
        /* Return mailbox statuses. */
-       if (mb != NULL) {
+       if (mb) {
                mb[0] = mcp->mb[0];
                mb[1] = mcp->mb[1];
                mb[3] = mcp->mb[3];
@@ -3779,7 +3809,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mcp->mb[0] = MBC_PORT_PARAMS;
        mcp->mb[1] = loop_id;
        mcp->mb[2] = BIT_0;
-       mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+       mcp->mb[3] = port_speed & 0x3F;
        mcp->mb[9] = vha->vp_idx;
        mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_3|MBX_1|MBX_0;
@@ -3788,7 +3818,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        rval = qla2x00_mailbox_command(vha, mcp);
 
        /* Return mailbox statuses. */
-       if (mb != NULL) {
+       if (mb) {
                mb[0] = mcp->mb[0];
                mb[1] = mcp->mb[1];
                mb[3] = mcp->mb[3];
@@ -4230,7 +4260,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
                ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
                    "Dump of Verify Request.\n");
                ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
-                   (uint8_t *)mn, sizeof(*mn));
+                   mn, sizeof(*mn));
 
                rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
                if (rval != QLA_SUCCESS) {
@@ -4242,7 +4272,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
                ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
                    "Dump of Verify Response.\n");
                ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
-                   (uint8_t *)mn, sizeof(*mn));
+                   mn, sizeof(*mn));
 
                status[0] = le16_to_cpu(mn->p.rsp.comp_status);
                status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
@@ -4318,7 +4348,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        mcp->mb[12] = req->qos;
        mcp->mb[11] = req->vp_idx;
        mcp->mb[13] = req->rid;
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mcp->mb[15] = 0;
 
        mcp->mb[4] = req->id;
@@ -4332,9 +4362,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        mcp->flags = MBX_DMA_OUT;
        mcp->tov = MBX_TOV_SECONDS * 2;
 
-       if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha))
                mcp->in_mb |= MBX_1;
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                mcp->out_mb |= MBX_15;
                /* debug q create issue in SR-IOV */
                mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -4343,7 +4374,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!(req->options & BIT_0)) {
                WRT_REG_DWORD(req->req_q_in, 0);
-               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                        WRT_REG_DWORD(req->req_q_out, 0);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4387,7 +4418,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        mcp->mb[5] = rsp->length;
        mcp->mb[14] = rsp->msix->entry;
        mcp->mb[13] = rsp->rid;
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mcp->mb[15] = 0;
 
        mcp->mb[4] = rsp->id;
@@ -4404,7 +4435,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        if (IS_QLA81XX(ha)) {
                mcp->out_mb |= MBX_12|MBX_11|MBX_10;
                mcp->in_mb |= MBX_1;
-       } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
                mcp->in_mb |= MBX_1;
                /* debug q create issue in SR-IOV */
@@ -4414,7 +4445,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!(rsp->options & BIT_0)) {
                WRT_REG_DWORD(rsp->rsp_q_out, 0);
-               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                        WRT_REG_DWORD(rsp->rsp_q_in, 0);
        }
 
@@ -4472,7 +4503,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
            "Entered %s.\n", __func__);
 
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
-           !IS_QLA27XX(vha->hw))
+           !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -4504,7 +4535,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
        mbx_cmd_t *mcp = &mc;
 
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
-           !IS_QLA27XX(vha->hw))
+           !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -4539,7 +4570,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
        mbx_cmd_t *mcp = &mc;
 
        if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
-           !IS_QLA27XX(vha->hw))
+           !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4569,6 +4600,42 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
        return rval;
 }
 
+int
+qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
+{
+       int rval = QLA_SUCCESS;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               return rval;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+       mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
+           FAC_OPT_CMD_UNLOCK_SEMAPHORE);
+       mcp->out_mb = MBX_1|MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+                   "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+                   rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
 int
 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
 {
@@ -4818,10 +4885,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x10e9,
                    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
-               if (mcp->mb[0] == MBS_COMMAND_ERROR &&
-                   mcp->mb[1] == 0x22)
+               if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
                        /* sfp is not there */
                        rval = QLA_INTERFACE_ERROR;
+               }
        } else {
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
                    "Done %s.\n", __func__);
@@ -5161,13 +5228,14 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
        mcp->mb[3] = MSW(data);
        mcp->mb[8] = MSW(risc_addr);
        mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
-       mcp->in_mb = MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
        mcp->tov = 30;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_mbx, vha, 0x1101,
-                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+                   "Failed=%x mb[0]=%x mb[1]=%x.\n",
+                   rval, mcp->mb[0], mcp->mb[1]);
        } else {
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
                    "Done %s.\n", __func__);
@@ -5278,7 +5346,7 @@ qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
 
        mcp->out_mb = MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_2|MBX_1|MBX_0;
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mcp->in_mb |= MBX_4|MBX_3;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
@@ -5316,7 +5384,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
        mcp->mb[1] = QLA_GET_DATA_RATE;
        mcp->out_mb = MBX_1|MBX_0;
        mcp->in_mb = MBX_2|MBX_1|MBX_0;
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                mcp->in_mb |= MBX_3;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
@@ -5346,7 +5414,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
            "Entered %s.\n", __func__);
 
        if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
-           !IS_QLA27XX(ha))
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return QLA_FUNCTION_FAILED;
        mcp->mb[0] = MBC_GET_PORT_CONFIG;
        mcp->out_mb = MBX_0;
@@ -5662,6 +5730,7 @@ qla8044_md_get_template(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        int rval = QLA_FUNCTION_FAILED;
        int offset = 0, size = MINIDUMP_SIZE_36K;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
            "Entered %s.\n", __func__);
 
@@ -5842,7 +5911,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5917,7 +5986,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
        struct qla_hw_data *ha = vha->hw;
        unsigned long retry_max_time = jiffies + (2 * HZ);
 
-       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
@@ -5967,7 +6036,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+       if (!IS_QLA83XX(ha))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
@@ -6101,7 +6170,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
        mbx_cmd_t *mcp = &mc;
        dma_addr_t dd_dma;
 
-       if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+       if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
+           !IS_QLA28XX(vha->hw))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
@@ -6318,7 +6388,13 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
        fcport->d_id.b.rsvd_1 = 0;
 
        if (fcport->fc4f_nvme) {
-               fcport->port_type = FCT_NVME;
+               fcport->port_type = 0;
+               if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
+                       fcport->port_type |= FCT_NVME_INITIATOR;
+               if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+                       fcport->port_type |= FCT_NVME_TARGET;
+               if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
+                       fcport->port_type |= FCT_NVME_DISCOVERY;
        } else {
                /* If not target must be initiator or unknown type. */
                if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -6507,3 +6583,101 @@ int qla24xx_res_count_wait(struct scsi_qla_host *vha,
 done:
        return rval;
 }
+
+int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
+    uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
+    uint32_t sfub_len)
+{
+       int             rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
+       mcp->mb[1] = opts;
+       mcp->mb[2] = region;
+       mcp->mb[3] = MSW(len);
+       mcp->mb[4] = LSW(len);
+       mcp->mb[5] = MSW(sfub_dma_addr);
+       mcp->mb[6] = LSW(sfub_dma_addr);
+       mcp->mb[7] = MSW(MSD(sfub_dma_addr));
+       mcp->mb[8] = LSW(MSD(sfub_dma_addr));
+       mcp->mb[9] = sfub_len;
+       mcp->out_mb =
+           MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_2|MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
+                       __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
+                       mcp->mb[2]);
+       }
+
+       return rval;
+}
+
+int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
+    uint32_t data)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+       mcp->mb[1] = LSW(addr);
+       mcp->mb[2] = MSW(addr);
+       mcp->mb[3] = LSW(data);
+       mcp->mb[4] = MSW(data);
+       mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
+    uint32_t *data)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_READ_REMOTE_REG;
+       mcp->mb[1] = LSW(addr);
+       mcp->mb[2] = MSW(addr);
+       mcp->out_mb = MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
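The three functions added above (qla28xx_secure_flash_update, qla2xxx_write_remote_register, qla2xxx_read_remote_register) all follow the same shape: wide values are split across the 16-bit mailbox registers on submission and reassembled from them on completion. The standalone sketch below restates that packing outside the driver; the LSW/MSW/LSD/MSD definitions are assumptions written to match their use in the hunks above, not copies of the driver's own qla_def.h macros.

#include <stdint.h>

/* Illustrative word/dword-extraction macros (assumed; the driver's
 * authoritative definitions live in qla_def.h). */
#define LSW(x)  ((uint16_t)(x))
#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x)  ((uint32_t)(x))
#define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))

/* Split a 64-bit DMA handle across four 16-bit mailboxes, mirroring how
 * qla28xx_secure_flash_update() fills mb[5]..mb[8] above. */
void pack_dma64(uint16_t mb[16], uint64_t dma)
{
        mb[5] = MSW(dma);       /* bits 31:16 */
        mb[6] = LSW(dma);       /* bits 15:0  */
        mb[7] = MSW(MSD(dma));  /* bits 63:48 */
        mb[8] = LSW(MSD(dma));  /* bits 47:32 */
}

/* Reassemble a 32-bit register value from mb[3]/mb[4], mirroring the
 * completion path of qla2xxx_read_remote_register() above. */
uint32_t unpack_reg32(const uint16_t mb[16])
{
        return ((uint32_t)mb[4] << 16) | mb[3];
}

The same split is why a 64-bit DMA handle occupies four consecutive mailboxes while a 32-bit register value needs only two.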
index 099d8e9..b2977e4 100644
@@ -905,7 +905,8 @@ static void qla_ctrlvp_sp_done(void *s, int res)
 {
        struct srb *sp = s;
 
-       complete(&sp->comp);
+       if (sp->comp)
+               complete(sp->comp);
        /* don't free sp here. Let the caller do the free */
 }
 
@@ -922,6 +923,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
        struct qla_hw_data *ha = vha->hw;
        int     vp_index = vha->vp_idx;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       DECLARE_COMPLETION_ONSTACK(comp);
        srb_t *sp;
 
        ql_dbg(ql_dbg_vport, vha, 0x10c1,
@@ -936,6 +938,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 
        sp->type = SRB_CTRL_VP;
        sp->name = "ctrl_vp";
+       sp->comp = &comp;
        sp->done = qla_ctrlvp_sp_done;
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
@@ -953,7 +956,9 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
        ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
            sp->name, sp->handle);
 
-       wait_for_completion(&sp->comp);
+       wait_for_completion(&comp);
+       sp->comp = NULL;
+
        rval = sp->rc;
        switch (rval) {
        case QLA_FUNCTION_TIMEOUT:
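The qla24xx_control_vp() hunks above move the completion the caller waits on from the srb itself to the caller's stack: sp->comp now points at an on-stack completion, the done callback signals it only if the pointer is still set, and the waiter clears the pointer once woken. Below is a minimal sketch of the same pattern, with a hypothetical request context and a hypothetical submit path standing in for the driver's srb plumbing.

#include <linux/completion.h>

struct request_ctx {
        struct completion *comp;   /* NULL unless someone is waiting */
        int result;
};

/* Completion callback: only signal if a waiter registered itself. */
static void request_done(struct request_ctx *req, int result)
{
        req->result = result;
        if (req->comp)
                complete(req->comp);
}

/* The submitter owns the completion on its own stack, waits on it, and
 * clears the pointer before returning. */
static int submit_and_wait(struct request_ctx *req)
{
        DECLARE_COMPLETION_ONSTACK(comp);

        req->comp = &comp;
        /* ...queue the request to the hardware here... */
        wait_for_completion(&comp);
        req->comp = NULL;

        return req->result;
}

Keeping the completion on the waiter's stack ties its lifetime to the only code that ever sleeps on it, which is what lets the callback get away with a simple NULL check.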
index 60f964c..942ee13 100644
@@ -273,9 +273,9 @@ premature_exit:
 
        if (rval) {
                ql_log(ql_log_warn, base_vha, 0x1163,
-                   "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
-                   "mb[3]=%x, cmd=%x ****.\n",
-                   mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+                      "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+                      rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+                      command);
        } else {
                ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
        }
@@ -629,17 +629,20 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
  *
  * Returns 0 on success.
  */
-void
+int
 qlafx00_soft_reset(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
+       int rval = QLA_FUNCTION_FAILED;
 
        if (unlikely(pci_channel_offline(ha->pdev) &&
            ha->flags.pci_channel_io_perm_failure))
-               return;
+               return rval;
 
        ha->isp_ops->disable_intrs(ha);
        qlafx00_soc_cpu_reset(vha);
+
+       return QLA_SUCCESS;
 }
 
 /**
@@ -1138,8 +1141,8 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
 
        ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
            "Listing Target bit map...\n");
-       ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
-           0x2089, (uint8_t *)ha->gid_list, 32);
+       ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089,
+           ha->gid_list, 32);
 
        /* Allocate temporary rmtport for any new rmtports discovered. */
        new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
@@ -1320,6 +1323,7 @@ qlafx00_configure_devices(scsi_qla_host_t *vha)
 {
        int  rval;
        unsigned long flags;
+
        rval = QLA_SUCCESS;
 
        flags = vha->dpc_flags;
@@ -1913,8 +1917,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
                            phost_info->domainname,
                            phost_info->hostdriver);
                        ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
-                           (uint8_t *)phost_info,
-                           sizeof(struct host_system_info));
+                           phost_info, sizeof(*phost_info));
                }
        }
 
@@ -1968,7 +1971,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
                vha->d_id.b.al_pa = pinfo->port_id[2];
                qlafx00_update_host_attr(vha, pinfo);
                ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
-                   (uint8_t *)pinfo, 16);
+                   pinfo, 16);
        } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
                struct qlafx00_tgt_node_info *pinfo =
                    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
@@ -1976,12 +1979,12 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
                memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
                fcport->port_type = FCT_TARGET;
                ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
-                   (uint8_t *)pinfo, 16);
+                   pinfo, 16);
        } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
                struct qlafx00_tgt_node_info *pinfo =
                    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
                ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
-                   (uint8_t *)pinfo, 16);
+                   pinfo, 16);
                memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
        } else if (fx_type == FXDISC_ABORT_IOCTL)
                fdisc->u.fxiocb.result =
@@ -2248,18 +2251,16 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
 
                fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
 
-               memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
-                   sizeof(struct qla_mt_iocb_rsp_fx00));
+               memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus));
                bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                        sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
 
                ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
-                   sp->fcport->vha, 0x5080,
-                   (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+                   sp->vha, 0x5080, pkt, sizeof(*pkt));
 
                ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
-                   sp->fcport->vha, 0x5074,
-                   (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+                   sp->vha, 0x5074,
+                   fw_sts_ptr, sizeof(fstatus));
 
                res = bsg_reply->result = DID_OK << 16;
                bsg_reply->reply_payload_rcv_len =
@@ -2597,7 +2598,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 
                /* Move sense data. */
                ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
-                   (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+                   pkt, sizeof(*pkt));
                memcpy(sense_ptr, pkt->data, sense_sz);
                ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
                    sense_ptr, sense_sz);
@@ -2992,7 +2993,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
                         uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
 {
        uint16_t        avail_dsds;
-       __le32 *cur_dsd;
+       struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
@@ -3028,12 +3029,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 
        /* One DSD is available in the Command Type 3 IOCB */
        avail_dsds = 1;
-       cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+       cur_dsd = &lcmd_pkt->dsd;
 
        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-               dma_addr_t      sle_dma;
-
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
@@ -3043,26 +3042,23 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
                        memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
                        cont_pkt =
                            qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
-                       cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
+                       cur_dsd = lcont_pkt.dsd;
                        avail_dsds = 5;
                        cont = 1;
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
                if (avail_dsds == 0 && cont == 1) {
                        cont = 0;
                        memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
-                           REQUEST_ENTRY_SIZE);
+                           sizeof(lcont_pkt));
                }
 
        }
        if (avail_dsds != 0 && cont == 1) {
                memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
-                   REQUEST_ENTRY_SIZE);
+                   sizeof(lcont_pkt));
        }
 }
 
@@ -3172,9 +3168,9 @@ qlafx00_start_scsi(srb_t *sp)
        lcmd_pkt.entry_status = (uint8_t) rsp->id;
 
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
-           (uint8_t *)cmd->cmnd, cmd->cmd_len);
+           cmd->cmnd, cmd->cmd_len);
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
-           (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+           &lcmd_pkt, sizeof(lcmd_pkt));
 
        memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
        wmb();
@@ -3282,11 +3278,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        fx_iocb.req_dsdcnt = cpu_to_le16(1);
                        fx_iocb.req_xfrcnt =
                            cpu_to_le16(fxio->u.fxiocb.req_len);
-                       fx_iocb.dseg_rq_address[0] =
-                           cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
-                       fx_iocb.dseg_rq_address[1] =
-                           cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
-                       fx_iocb.dseg_rq_len =
+                       put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
+                                          &fx_iocb.dseg_rq.address);
+                       fx_iocb.dseg_rq.length =
                            cpu_to_le32(fxio->u.fxiocb.req_len);
                }
 
@@ -3294,11 +3288,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
                        fx_iocb.rsp_xfrcnt =
                            cpu_to_le16(fxio->u.fxiocb.rsp_len);
-                       fx_iocb.dseg_rsp_address[0] =
-                           cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
-                       fx_iocb.dseg_rsp_address[1] =
-                           cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
-                       fx_iocb.dseg_rsp_len =
+                       put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
+                                          &fx_iocb.dseg_rsp.address);
+                       fx_iocb.dseg_rsp.length =
                            cpu_to_le32(fxio->u.fxiocb.rsp_len);
                }
 
@@ -3308,6 +3300,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                fx_iocb.flags = fxio->u.fxiocb.flags;
        } else {
                struct scatterlist *sg;
+
                bsg_job = sp->u.bsg_job;
                bsg_request = bsg_job->request;
                piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
@@ -3327,19 +3320,17 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        int avail_dsds, tot_dsds;
                        cont_a64_entry_t lcont_pkt;
                        cont_a64_entry_t *cont_pkt = NULL;
-                       __le32 *cur_dsd;
+                       struct dsd64 *cur_dsd;
                        int index = 0, cont = 0;
 
                        fx_iocb.req_dsdcnt =
                            cpu_to_le16(bsg_job->request_payload.sg_cnt);
                        tot_dsds =
                            bsg_job->request_payload.sg_cnt;
-                       cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
+                       cur_dsd = &fx_iocb.dseg_rq;
                        avail_dsds = 1;
                        for_each_sg(bsg_job->request_payload.sg_list, sg,
                            tot_dsds, index) {
-                               dma_addr_t sle_dma;
-
                                /* Allocate additional continuation packets? */
                                if (avail_dsds == 0) {
                                        /*
@@ -3351,17 +3342,13 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                                        cont_pkt =
                                            qlafx00_prep_cont_type1_iocb(
                                                sp->vha->req, &lcont_pkt);
-                                       cur_dsd = (__le32 *)
-                                           lcont_pkt.dseg_0_address;
+                                       cur_dsd = lcont_pkt.dsd;
                                        avail_dsds = 5;
                                        cont = 1;
                                        entry_cnt++;
                                }
 
-                               sle_dma = sg_dma_address(sg);
-                               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-                               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-                               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+                               append_dsd64(&cur_dsd, sg);
                                avail_dsds--;
 
                                if (avail_dsds == 0 && cont == 1) {
@@ -3389,19 +3376,17 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                        int avail_dsds, tot_dsds;
                        cont_a64_entry_t lcont_pkt;
                        cont_a64_entry_t *cont_pkt = NULL;
-                       __le32 *cur_dsd;
+                       struct dsd64 *cur_dsd;
                        int index = 0, cont = 0;
 
                        fx_iocb.rsp_dsdcnt =
                           cpu_to_le16(bsg_job->reply_payload.sg_cnt);
                        tot_dsds = bsg_job->reply_payload.sg_cnt;
-                       cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
+                       cur_dsd = &fx_iocb.dseg_rsp;
                        avail_dsds = 1;
 
                        for_each_sg(bsg_job->reply_payload.sg_list, sg,
                            tot_dsds, index) {
-                               dma_addr_t sle_dma;
-
                                /* Allocate additional continuation packets? */
                                if (avail_dsds == 0) {
                                        /*
@@ -3413,17 +3398,13 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
                                        cont_pkt =
                                            qlafx00_prep_cont_type1_iocb(
                                                sp->vha->req, &lcont_pkt);
-                                       cur_dsd = (__le32 *)
-                                           lcont_pkt.dseg_0_address;
+                                       cur_dsd = lcont_pkt.dsd;
                                        avail_dsds = 5;
                                        cont = 1;
                                        entry_cnt++;
                                }
 
-                               sle_dma = sg_dma_address(sg);
-                               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-                               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-                               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+                               append_dsd64(&cur_dsd, sg);
                                avail_dsds--;
 
                                if (avail_dsds == 0 && cont == 1) {
@@ -3454,10 +3435,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
        }
 
        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
-           sp->vha, 0x3047,
-           (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+           sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb));
 
-       memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
-           sizeof(struct fxdisc_entry_fx00));
+       memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb));
        wmb();
 }
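Most of the churn in the hunks above replaces hand-rolled data-segment writes (address low word, address high word, length) with struct dsd64 plus the append_dsd64() helper introduced by this series. The sketch below shows what such a helper plausibly does; the struct layout is an assumption inferred from the removed LSD/MSD stores, and the authoritative definitions live in the driver's qla_dsd.h.

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <asm/unaligned.h>

/* Assumed layout: one 64-bit little-endian address plus a 32-bit length,
 * matching the address/address/length triple the old code emitted. */
struct dsd64 {
        __le64 address;
        __le32 length;
} __packed;

/* Fill the current data-segment descriptor from a scatterlist entry and
 * advance the cursor, mirroring how append_dsd64() is used above. */
static inline void append_dsd64_sketch(struct dsd64 **dsd,
                                       struct scatterlist *sg)
{
        put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
        (*dsd)->length = cpu_to_le32(sg_dma_len(sg));
        (*dsd)++;
}

With a helper of that shape, each scatter-gather loop reduces to one call per entry plus an available-descriptor decrement, which is exactly the form of the rewritten loops above.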
index aeaa1b4..4567f0c 100644
@@ -7,6 +7,8 @@
 #ifndef __QLA_MR_H
 #define __QLA_MR_H
 
+#include "qla_dsd.h"
+
 /*
  * The PCI VendorID and DeviceID for our board.
  */
@@ -46,8 +48,7 @@ struct cmd_type_7_fx00 {
        uint8_t fcp_cdb[MAX_CMDSZ];     /* SCSI command words. */
        __le32 byte_count;              /* Total byte count. */
 
-       uint32_t dseg_0_address[2];     /* Data segment 0 address. */
-       uint32_t dseg_0_len;            /* Data segment 0 length. */
+       struct dsd64 dsd;
 };
 
 #define        STATUS_TYPE_FX00        0x01            /* Status entry. */
@@ -176,10 +177,8 @@ struct fxdisc_entry_fx00 {
        uint8_t flags;
        uint8_t reserved_1;
 
-       __le32 dseg_rq_address[2];      /* Data segment 0 address. */
-       __le32 dseg_rq_len;             /* Data segment 0 length. */
-       __le32 dseg_rsp_address[2];     /* Data segment 1 address. */
-       __le32 dseg_rsp_len;            /* Data segment 1 length. */
+       struct dsd64 dseg_rq;
+       struct dsd64 dseg_rsp;
 
        __le32 dataword;
        __le32 adapid;
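The header change above is only safe because struct dsd64 occupies the same bytes as the two address words plus length it replaces, so the IOCB layout seen by the firmware does not move. The compile-time check below is a hedged illustration of that equivalence, using the same assumed struct dsd64 layout as the earlier sketch rather than the driver's real qla_dsd.h definition.

#include <linux/build_bug.h>
#include <linux/types.h>

/* Assumed new layout: packed 64-bit little-endian address plus length. */
struct dsd64 {
        __le64 address;
        __le32 length;
} __packed;

/* The old encoding replaced in the header above. */
struct dsd32_pair_old {
        __le32 address[2];
        __le32 length;
};

/* Both must be 12 bytes, or the firmware-visible IOCB layout would shift. */
static_assert(sizeof(struct dsd64) == sizeof(struct dsd32_pair_old));
static_assert(sizeof(struct dsd64) == 12);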
index 41c85da..22e3fba 100644
@@ -131,14 +131,10 @@ static void qla_nvme_sp_ls_done(void *ptr, int res)
        struct nvmefc_ls_req   *fd;
        struct nvme_private *priv;
 
-       if (atomic_read(&sp->ref_count) == 0) {
-               ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
-                   "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
+       if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;
-       }
 
-       if (!atomic_dec_and_test(&sp->ref_count))
-               return;
+       atomic_dec(&sp->ref_count);
 
        if (res)
                res = -EINVAL;
@@ -161,15 +157,18 @@ static void qla_nvme_sp_done(void *ptr, int res)
        nvme = &sp->u.iocb_cmd;
        fd = nvme->u.nvme.desc;
 
-       if (!atomic_dec_and_test(&sp->ref_count))
+       if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;
 
-       if (res == QLA_SUCCESS)
-               fd->status = 0;
-       else
-               fd->status = NVME_SC_INTERNAL;
+       atomic_dec(&sp->ref_count);
 
-       fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+       if (res == QLA_SUCCESS) {
+               fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+       } else {
+               fd->rcv_rsplen = 0;
+               fd->transferred_length = 0;
+       }
+       fd->status = 0;
        fd->done(fd);
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
 
@@ -185,14 +184,24 @@ static void qla_nvme_abort_work(struct work_struct *work)
        struct qla_hw_data *ha = fcport->vha->hw;
        int rval;
 
-       if (fcport)
-               ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
-                   "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
-                   __func__, sp, sp->handle, fcport, fcport->deleted);
+       ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
+              "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
+              __func__, sp, sp->handle, fcport, fcport->deleted);
 
        if (!ha->flags.fw_started && (fcport && fcport->deleted))
                return;
 
+       if (ha->flags.host_shutting_down) {
+               ql_log(ql_log_info, sp->fcport->vha, 0xffff,
+                   "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
+                   __func__, sp, sp->type, atomic_read(&sp->ref_count));
+               sp->done(sp, 0);
+               return;
+       }
+
+       if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+               return;
+
        rval = ha->isp_ops->abort_command(sp);
 
        ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
@@ -291,7 +300,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        uint16_t        avail_dsds;
-       uint32_t        *cur_dsd;
+       struct dsd64    *cur_dsd;
        struct req_que *req = NULL;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
@@ -340,6 +349,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
 
        if (unlikely(!fd->sqid)) {
                struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+
                if (cmd->sqe.common.opcode == nvme_admin_async_event) {
                        nvme->u.nvme.aen_op = 1;
                        atomic_inc(&ha->nvme_active_aen_cnt);
@@ -395,25 +405,22 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
 
        /* NVME RSP IU */
        cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
-       cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
-       cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+       put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
 
        /* NVME CNMD IU */
        cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
-       cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
-       cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+       cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
 
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
        cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
 
        /* One DSD is available in the Command Type NVME IOCB */
        avail_dsds = 1;
-       cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+       cur_dsd = &cmd_pkt->nvme_dsd;
        sgl = fd->first_sgl;
 
        /* Load data segments */
        for_each_sg(sgl, sg, tot_dsds, i) {
-               dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;
 
                /* Allocate additional continuation packets? */
@@ -432,17 +439,14 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
                                req->ring_ptr++;
                        }
                        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
-                       *((uint32_t *)(&cont_pkt->entry_type)) =
-                           cpu_to_le32(CONTINUE_A64_TYPE);
+                       put_unaligned_le32(CONTINUE_A64_TYPE,
+                                          &cont_pkt->entry_type);
 
-                       cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-                       avail_dsds = 5;
+                       cur_dsd = cont_pkt->dsd;
+                       avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }
 
-               sle_dma = sg_dma_address(sg);
-               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
 
@@ -573,7 +577,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .fcp_io         = qla_nvme_post_cmd,
        .fcp_abort      = qla_nvme_fcp_abort,
        .max_hw_queues  = 8,
-       .max_sgl_segments = 128,
+       .max_sgl_segments = 1024,
        .max_dif_sgl_segments = 64,
        .dma_boundary = 0xFFFFFFFF,
        .local_priv_sz  = 8,
@@ -582,40 +586,11 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
        .fcprqst_priv_sz = sizeof(struct nvme_private),
 };
 
-#define NVME_ABORT_POLLING_PERIOD    2
-static int qla_nvme_wait_on_command(srb_t *sp)
-{
-       int ret = QLA_SUCCESS;
-
-       wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
-           NVME_ABORT_POLLING_PERIOD*HZ);
-
-       if (atomic_read(&sp->ref_count) > 1)
-               ret = QLA_FUNCTION_FAILED;
-
-       return ret;
-}
-
-void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
-{
-       int rval;
-
-       if (ha->flags.fw_started) {
-               rval = ha->isp_ops->abort_command(sp);
-               if (!rval && !qla_nvme_wait_on_command(sp))
-                       ql_log(ql_log_warn, NULL, 0x2112,
-                           "timed out waiting on sp=%p\n", sp);
-       } else {
-               sp->done(sp, res);
-       }
-}
-
 static void qla_nvme_unregister_remote_port(struct work_struct *work)
 {
        struct fc_port *fcport = container_of(work, struct fc_port,
            nvme_del_work);
        struct qla_nvme_rport *qla_rport, *trport;
-       scsi_qla_host_t *base_vha;
 
        if (!IS_ENABLED(CONFIG_NVME_FC))
                return;
@@ -623,23 +598,19 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work)
        ql_log(ql_log_warn, NULL, 0x2112,
            "%s: unregister remoteport on %p\n",__func__, fcport);
 
-       base_vha = pci_get_drvdata(fcport->vha->hw->pdev);
-       if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) {
-               ql_dbg(ql_dbg_disc, fcport->vha, 0x2114,
-                   "%s: Notify FC-NVMe transport, set devloss=0\n",
-                   __func__);
-
-               nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
-       }
-
        list_for_each_entry_safe(qla_rport, trport,
            &fcport->vha->nvme_rport_list, list) {
                if (qla_rport->fcport == fcport) {
                        ql_log(ql_log_info, fcport->vha, 0x2113,
                            "%s: fcport=%p\n", __func__, fcport);
+                       nvme_fc_set_remoteport_devloss
+                               (fcport->nvme_remote_port, 0);
                        init_completion(&fcport->nvme_del_done);
-                       nvme_fc_unregister_remoteport(
-                           fcport->nvme_remote_port);
+                       if (nvme_fc_unregister_remoteport
+                           (fcport->nvme_remote_port))
+                               ql_log(ql_log_info, fcport->vha, 0x2114,
+                                   "%s: Failed to unregister nvme_remote_port\n",
+                                   __func__);
                        wait_for_completion(&fcport->nvme_del_done);
                        break;
                }
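In the completion paths rewritten above, the driver stops gating the completion work on atomic_dec_and_test() and instead warns once if the reference count is already zero, drops a single reference, and always runs the completion work. A simplified sketch of that shape follows, with a hypothetical finish_io() standing in for the driver's fd->done() and queue-pair bookkeeping.

#include <linux/atomic.h>
#include <linux/bug.h>

struct io_ctx {
        atomic_t ref_count;
};

/* Hypothetical stand-in for the driver's upper-layer completion and
 * resource release. */
static void finish_io(struct io_ctx *io, int res)
{
        /* report res to the upper layer and release the request */
}

/* Completion handler: a zero reference count here means the request was
 * already torn down, so warn once and bail instead of underflowing. */
static void io_done(struct io_ctx *io, int res)
{
        if (WARN_ON_ONCE(atomic_read(&io->ref_count) == 0))
                return;

        atomic_dec(&io->ref_count);
        finish_io(io, res);
}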
index da8dad5..d3b8a64 100644
@@ -13,6 +13,7 @@
 #include <linux/nvme-fc-driver.h>
 
 #include "qla_def.h"
+#include "qla_dsd.h"
 
 /* default dev loss time (seconds) before transport tears down ctrl */
 #define NVME_FC_DEV_LOSS_TMO  30
@@ -64,16 +65,15 @@ struct cmd_nvme {
 #define CF_WRITE_DATA                   BIT_0
 
        uint16_t nvme_cmnd_dseg_len;             /* Data segment length. */
-       uint32_t nvme_cmnd_dseg_address[2];      /* Data segment address. */
-       uint32_t nvme_rsp_dseg_address[2];       /* Data segment address. */
+       __le64   nvme_cmnd_dseg_address __packed;/* Data segment address. */
+       __le64   nvme_rsp_dseg_address __packed; /* Data segment address. */
 
        uint32_t byte_count;            /* Total byte count. */
 
        uint8_t port_id[3];             /* PortID of destination port. */
        uint8_t vp_index;
 
-       uint32_t nvme_data_dseg_address[2];      /* Data segment address. */
-       uint32_t nvme_data_dseg_len;             /* Data segment length. */
+       struct dsd64 nvme_dsd;
 };
 
 #define PT_LS4_REQUEST 0x89    /* Link Service pass-through IOCB (request) */
@@ -101,10 +101,7 @@ struct pt_ls4_request {
        uint32_t rsvd3;
        uint32_t rx_byte_count;
        uint32_t tx_byte_count;
-       uint32_t dseg0_address[2];
-       uint32_t dseg0_len;
-       uint32_t dseg1_address[2];
-       uint32_t dseg1_len;
+       struct dsd64 dsd[2];
 };
 
 #define PT_LS4_UNSOL 0x56      /* pass-up unsolicited rec FC-NVMe request */
@@ -145,7 +142,6 @@ struct pt_ls4_rx_unsol {
 int qla_nvme_register_hba(struct scsi_qla_host *);
 int  qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
 void qla_nvme_delete(struct scsi_qla_host *);
-void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res);
 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
     struct req_que *);
 void qla24xx_async_gffid_sp_done(void *, int);
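The header hunks above turn the two-dword address fields of the NVMe command IOCB into __le64 members marked __packed, and the matching C code stores them with put_unaligned_le64() (or cpu_to_le64() where the field is known to be aligned). Below is a small sketch of why the unaligned store is the safe choice, using an illustrative packed fragment whose 64-bit field starts on a 4-byte boundary; the field names are hypothetical, not the driver's.

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative IOCB fragment: the 64-bit address starts at offset 4,
 * so it is not naturally 8-byte aligned. */
struct iocb_fragment {
        __le32 header;
        __le64 dseg_address __packed;   /* may be misaligned in memory */
        __le32 dseg_length;
};

static void set_dseg(struct iocb_fragment *f, u64 dma, u32 len)
{
        /* put_unaligned_le64() is correct regardless of the field's
         * alignment; a plain 64-bit store through a __le64 pointer could
         * fault or be slow on strict-alignment architectures. */
        put_unaligned_le64(dma, &f->dseg_address);
        f->dseg_length = cpu_to_le32(len);
}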
index f2f5480..c760ae3 100644
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include <linux/delay.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/pci.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
@@ -608,6 +609,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
        } else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
                QLA82XX_ADDR_OCM0_MAX)) {
                unsigned int temp1;
+
                if ((addr & 0x00ff800) == 0xff800) {
                        ql_log(ql_log_warn, vha, 0xb004,
                            "%s: QM access not handled.\n", __func__);
@@ -990,6 +992,7 @@ static int
 qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
        qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
        qla82xx_wait_rom_busy(ha);
        if (qla82xx_wait_rom_done(ha)) {
@@ -1030,6 +1033,7 @@ static int
 qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
 {
        uint32_t val;
+
        qla82xx_wait_rom_busy(ha);
        qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
        qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
@@ -1047,6 +1051,7 @@ static int
 qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
        if (qla82xx_flash_set_write_enable(ha))
                return -1;
        qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
@@ -1063,6 +1068,7 @@ static int
 qla82xx_write_disable_flash(struct qla_hw_data *ha)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
        qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
        if (qla82xx_wait_rom_done(ha)) {
                ql_log(ql_log_warn, vha, 0xb00f,
@@ -1435,6 +1441,7 @@ qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
        long memaddr = BOOTLD_START;
        u64 data;
        u32 high, low;
+
        size = (IMAGE_START - BOOTLD_START) / 8;
 
        for (i = 0; i < size; i++) {
@@ -1757,11 +1764,14 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
  *
  * Returns 0 on success.
  */
-void
+int
 qla82xx_reset_chip(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
+
        ha->isp_ops->disable_intrs(ha);
+
+       return QLA_SUCCESS;
 }
 
 void qla82xx_config_rings(struct scsi_qla_host *vha)
@@ -1778,10 +1788,8 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
        icb->response_q_inpointer = cpu_to_le16(0);
        icb->request_q_length = cpu_to_le16(req->length);
        icb->response_q_length = cpu_to_le16(rsp->length);
-       icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
-       icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
-       icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
-       icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+       put_unaligned_le64(req->dma, &icb->request_q_address);
+       put_unaligned_le64(rsp->dma, &icb->response_q_address);
 
        WRT_REG_DWORD(&reg->req_q_out[0], 0);
        WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
@@ -1992,6 +2000,7 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
        uint16_t __iomem *wptr;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+
        wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
 
        /* Load return mailbox registers. */
@@ -2028,7 +2037,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
        unsigned long   flags;
        unsigned long   iter;
        uint32_t        stat = 0;
-       uint16_t        mb[4];
+       uint16_t        mb[8];
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -2112,7 +2121,7 @@ qla82xx_msix_default(int irq, void *dev_id)
        unsigned long flags;
        uint32_t stat = 0;
        uint32_t host_int = 0;
-       uint16_t mb[4];
+       uint16_t mb[8];
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -2208,7 +2217,7 @@ qla82xx_poll(int irq, void *dev_id)
        int status = 0;
        uint32_t stat;
        uint32_t host_int = 0;
-       uint16_t mb[4];
+       uint16_t mb[8];
        unsigned long flags;
 
        rsp = (struct rsp_que *) dev_id;
@@ -2262,6 +2271,7 @@ void
 qla82xx_enable_intrs(struct qla_hw_data *ha)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
        qla82xx_mbx_intr_enable(vha);
        spin_lock_irq(&ha->hardware_lock);
        if (IS_QLA8044(ha))
@@ -2276,6 +2286,7 @@ void
 qla82xx_disable_intrs(struct qla_hw_data *ha)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
        qla82xx_mbx_intr_disable(vha);
        spin_lock_irq(&ha->hardware_lock);
        if (IS_QLA8044(ha))
@@ -2658,8 +2669,8 @@ done:
 /*
  * Address and length are byte address
  */
-uint8_t *
-qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
        uint32_t offset, uint32_t length)
 {
        scsi_block_requests(vha->host);
@@ -2767,15 +2778,14 @@ write_done:
 }
 
 int
-qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
        uint32_t offset, uint32_t length)
 {
        int rval;
 
        /* Suspend HBA. */
        scsi_block_requests(vha->host);
-       rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
-               length >> 2);
+       rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2);
        scsi_unblock_requests(vha->host);
 
        /* Convert return ISP82xx to generic */
@@ -4464,6 +4474,7 @@ qla82xx_beacon_on(struct scsi_qla_host *vha)
 
        int rval;
        struct qla_hw_data *ha = vha->hw;
+
        qla82xx_idc_lock(ha);
        rval = qla82xx_mbx_beacon_ctl(vha, 1);
 
@@ -4484,6 +4495,7 @@ qla82xx_beacon_off(struct scsi_qla_host *vha)
 
        int rval;
        struct qla_hw_data *ha = vha->hw;
+
        qla82xx_idc_lock(ha);
        rval = qla82xx_mbx_beacon_ctl(vha, 0);
 
index 71a4109..3c7beef 100644
@@ -7,7 +7,7 @@
 #ifndef __QLA_NX_H
 #define __QLA_NX_H
 
-#include <linux/io-64-nonatomic-lo-hi.h>
+#include <scsi/scsi.h>
 
 /*
  * Following are the states of the Phantom. Phantom will set them and
index fe856b6..369ac04 100644
@@ -559,12 +559,12 @@ exit_lock_error:
 /*
  * Address and length are byte address
  */
-uint8_t *
-qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf,
        uint32_t offset, uint32_t length)
 {
        scsi_block_requests(vha->host);
-       if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+       if (qla8044_read_flash_data(vha, buf, offset, length / 4)
            != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha,  0xb08d,
                    "%s: Failed to read from flash\n",
@@ -3007,10 +3007,9 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
        uint16_t count;
        uint32_t poll, mask, modify_mask;
        uint32_t wait_count = 0;
-
        uint32_t *data_ptr = *d_ptr;
-
        struct qla8044_minidump_entry_rddfe *rddfe;
+
        rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
 
        addr1 = rddfe->addr_1;
@@ -3797,7 +3796,7 @@ qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
 }
 
 int
-qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf,
                          uint32_t offset, uint32_t length)
 {
        int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
@@ -3896,7 +3895,7 @@ qla8044_intr_handler(int irq, void *dev_id)
        unsigned long   flags;
        unsigned long   iter;
        uint32_t        stat;
-       uint16_t        mb[4];
+       uint16_t        mb[8];
        uint32_t leg_int_ptr = 0, pf_bit;
 
        rsp = (struct rsp_que *) dev_id;
index 91f576d..e1c82a0 100644
@@ -42,7 +42,7 @@ static struct kmem_cache *ctx_cachep;
 /*
  * error level for logging
  */
-int ql_errlev = ql_log_all;
+uint ql_errlev = 0x8001;
 
 static int ql2xenableclass2;
 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
                "Set to control shifting of command type processing "
                "based on total number of SG elements.");
 
-int ql2xfdmienable=1;
+int ql2xfdmienable = 1;
 module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xfdmienable,
@@ -154,7 +154,7 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk,
                "  1 -- Error isolation enabled only for DIX Type 0\n"
                "  2 -- Error isolation enabled for all Types\n");
 
-int ql2xiidmaenable=1;
+int ql2xiidmaenable = 1;
 module_param(ql2xiidmaenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xiidmaenable,
                "Enables iIDMA settings "
@@ -285,14 +285,14 @@ MODULE_PARM_DESC(qla2xuseresexchforels,
                 "Reserve 1/2 of emergency exchanges for ELS.\n"
                 " 0 (default): disabled");
 
-int ql2xprotmask;
+static int ql2xprotmask;
 module_param(ql2xprotmask, int, 0644);
 MODULE_PARM_DESC(ql2xprotmask,
                 "Override DIF/DIX protection capabilities mask\n"
                 "Default is 0 which sets protection mask based on "
                 "capabilities reported by HBA firmware.\n");
 
-int ql2xprotguard;
+static int ql2xprotguard;
 module_param(ql2xprotguard, int, 0644);
 MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
                 "  0 -- Let HBA firmware decide\n"
@@ -306,58 +306,12 @@ MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
     "0 (Default). Based on check.\n"
     "1 Force using internal buffers\n");
 
-/*
- * SCSI host template entry points
- */
-static int qla2xxx_slave_configure(struct scsi_device * device);
-static int qla2xxx_slave_alloc(struct scsi_device *);
-static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
-static void qla2xxx_scan_start(struct Scsi_Host *);
-static void qla2xxx_slave_destroy(struct scsi_device *);
-static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-static int qla2xxx_eh_abort(struct scsi_cmnd *);
-static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
-static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
-
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
 
-struct scsi_host_template qla2xxx_driver_template = {
-       .module                 = THIS_MODULE,
-       .name                   = QLA2XXX_DRIVER_NAME,
-       .queuecommand           = qla2xxx_queuecommand,
-
-       .eh_timed_out           = fc_eh_timed_out,
-       .eh_abort_handler       = qla2xxx_eh_abort,
-       .eh_device_reset_handler = qla2xxx_eh_device_reset,
-       .eh_target_reset_handler = qla2xxx_eh_target_reset,
-       .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
-       .eh_host_reset_handler  = qla2xxx_eh_host_reset,
-
-       .slave_configure        = qla2xxx_slave_configure,
-
-       .slave_alloc            = qla2xxx_slave_alloc,
-       .slave_destroy          = qla2xxx_slave_destroy,
-       .scan_finished          = qla2xxx_scan_finished,
-       .scan_start             = qla2xxx_scan_start,
-       .change_queue_depth     = scsi_change_queue_depth,
-       .map_queues             = qla2xxx_map_queues,
-       .this_id                = -1,
-       .cmd_per_lun            = 3,
-       .sg_tablesize           = SG_ALL,
-
-       .max_sectors            = 0xFFFF,
-       .shost_attrs            = qla2x00_host_attrs,
-
-       .supported_mode         = MODE_INITIATOR,
-       .track_queue_depth      = 1,
-};
-
 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
 
@@ -411,6 +365,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
     struct rsp_que *rsp)
 {
        struct qla_hw_data *ha = vha->hw;
+
        rsp->qpair = ha->base_qpair;
        rsp->req = req;
        ha->base_qpair->hw = ha;
@@ -427,7 +382,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
        qla_cpu_update(rsp->qpair, raw_smp_processor_id());
        ha->base_qpair->pdev = ha->pdev;
 
-       if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+       if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
                ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
 }
 
@@ -435,6 +390,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
                                struct rsp_que *rsp)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
        ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
                                GFP_KERNEL);
        if (!ha->req_q_map) {
@@ -726,7 +682,7 @@ qla2x00_sp_free_dma(void *ptr)
        }
 
        if (!ctx)
-               goto end;
+               return;
 
        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
                /* List assured to be having elements */
@@ -751,12 +707,6 @@ qla2x00_sp_free_dma(void *ptr)
                ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
                mempool_free(ctx1, ha->ctx_mempool);
        }
-
-end:
-       if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
-               CMD_SP(cmd) = NULL;
-               qla2x00_rel_sp(sp);
-       }
 }
 
 void
@@ -764,22 +714,20 @@ qla2x00_sp_compl(void *ptr, int res)
 {
        srb_t *sp = ptr;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct completion *comp = sp->comp;
 
-       cmd->result = res;
-
-       if (atomic_read(&sp->ref_count) == 0) {
-               ql_dbg(ql_dbg_io, sp->vha, 0x3015,
-                   "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
-                   sp, GET_CMD_SP(sp));
-               if (ql2xextended_error_logging & ql_dbg_io)
-                       WARN_ON(atomic_read(&sp->ref_count) == 0);
-               return;
-       }
-       if (!atomic_dec_and_test(&sp->ref_count))
+       if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;
 
+       atomic_dec(&sp->ref_count);
+
        sp->free(sp);
+       cmd->result = res;
+       CMD_SP(cmd) = NULL;
        cmd->scsi_done(cmd);
+       if (comp)
+               complete(comp);
+       qla2x00_rel_sp(sp);
 }
 
 void
@@ -802,7 +750,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
        }
 
        if (!ctx)
-               goto end;
+               return;
 
        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
                /* List assured to be having elements */
@@ -810,25 +758,8 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
        }
 
-       if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-               struct crc_context *ctx0 = ctx;
-
-               dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
-               sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
-       }
-
-       if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-               struct ct6_dsd *ctx1 = ctx;
-               dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
-                   ctx1->fcp_cmnd_dma);
-               list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
-               ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
-               ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
-               mempool_free(ctx1, ha->ctx_mempool);
-               sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
-       }
        if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
-               struct crc_context *difctx = sp->u.scmd.ctx;
+               struct crc_context *difctx = ctx;
                struct dsd_dma *dif_dsd, *nxt_dsd;
 
                list_for_each_entry_safe(dif_dsd, nxt_dsd,
@@ -863,9 +794,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
                sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
        }
 
-end:
-       CMD_SP(cmd) = NULL;
-       qla2xxx_rel_qpair_sp(sp->qpair, sp);
+       if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+               struct ct6_dsd *ctx1 = ctx;
+
+               dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+                   ctx1->fcp_cmnd_dma);
+               list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+               ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+               ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+               mempool_free(ctx1, ha->ctx_mempool);
+               sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+               struct crc_context *ctx0 = ctx;
+
+               dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
+               sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+       }
 }
 
 void
@@ -873,27 +819,22 @@ qla2xxx_qpair_sp_compl(void *ptr, int res)
 {
        srb_t *sp = ptr;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct completion *comp = sp->comp;
 
-       cmd->result = res;
-
-       if (atomic_read(&sp->ref_count) == 0) {
-               ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
-                   "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
-                   sp, GET_CMD_SP(sp));
-               if (ql2xextended_error_logging & ql_dbg_io)
-                       WARN_ON(atomic_read(&sp->ref_count) == 0);
-               return;
-       }
-       if (!atomic_dec_and_test(&sp->ref_count))
+       if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
                return;
 
+       atomic_dec(&sp->ref_count);
+
        sp->free(sp);
+       cmd->result = res;
+       CMD_SP(cmd) = NULL;
        cmd->scsi_done(cmd);
+       if (comp)
+               complete(comp);
+       qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
 
-/* If we are SP1 here, we need to still take and release the host_lock as SP1
- * does not have the changes necessary to avoid taking host->host_lock.
- */
 static int
 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
@@ -908,7 +849,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        uint32_t tag;
        uint16_t hwq;
 
-       if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+       if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
+           WARN_ON_ONCE(!rport)) {
                cmd->result = DID_NO_CONNECT << 16;
                goto qc24_fail_command;
        }
@@ -1031,7 +973,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
        srb_t *sp;
        int rval;
 
-       rval = fc_remote_port_chkready(rport);
+       rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
        if (rval) {
                cmd->result = rval;
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
@@ -1272,7 +1214,7 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
 static int
 sp_get(struct srb *sp)
 {
-       if (!refcount_inc_not_zero((refcount_t*)&sp->ref_count))
+       if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
                /* kref get fail */
                return ENXIO;
        else
@@ -1332,7 +1274,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        unsigned int id;
        uint64_t lun;
        unsigned long flags;
-       int rval, wait = 0;
+       int rval;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair;
 
@@ -1345,7 +1287,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        ret = fc_block_scsi_eh(cmd);
        if (ret != 0)
                return ret;
-       ret = SUCCESS;
 
        sp = (srb_t *) CMD_SP(cmd);
        if (!sp)
@@ -1356,7 +1297,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
                return SUCCESS;
 
        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
-       if (!CMD_SP(cmd)) {
+       if (sp->type != SRB_SCSI_CMD || GET_CMD_SP(sp) != cmd) {
                /* there's a chance an interrupt could clear
                   the ptr as part of done & free */
                spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1377,58 +1318,31 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
            "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
            vha->host_no, id, lun, sp, cmd, sp->handle);
 
-       /* Get a reference to the sp and drop the lock.*/
-
        rval = ha->isp_ops->abort_command(sp);
-       if (rval) {
-               if (rval == QLA_FUNCTION_PARAMETER_ERROR)
-                       ret = SUCCESS;
-               else
-                       ret = FAILED;
-
-               ql_dbg(ql_dbg_taskm, vha, 0x8003,
-                   "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
-       } else {
-               ql_dbg(ql_dbg_taskm, vha, 0x8004,
-                   "Abort command mbx success cmd=%p.\n", cmd);
-               wait = 1;
-       }
-
-       spin_lock_irqsave(qpair->qp_lock_ptr, flags);
-       /*
-        * Clear the slot in the oustanding_cmds array if we can't find the
-        * command to reclaim the resources.
-        */
-       if (rval == QLA_FUNCTION_PARAMETER_ERROR)
-               vha->req->outstanding_cmds[sp->handle] = NULL;
+       ql_dbg(ql_dbg_taskm, vha, 0x8003,
+              "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
 
-       /*
-        * sp->done will do ref_count--
-        * sp_get() took an extra count above
-        */
-       sp->done(sp, DID_RESET << 16);
-
-       /* Did the command return during mailbox execution? */
-       if (ret == FAILED && !CMD_SP(cmd))
+       switch (rval) {
+       case QLA_SUCCESS:
+               /*
+                * The command has been aborted. That means that the firmware
+                * won't report a completion.
+                */
+               sp->done(sp, DID_ABORT << 16);
                ret = SUCCESS;
-
-       if (!CMD_SP(cmd))
-               wait = 0;
-
-       spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
-       /* Wait for the command to be returned. */
-       if (wait) {
-               if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
-                       ql_log(ql_log_warn, vha, 0x8006,
-                           "Abort handler timed out cmd=%p.\n", cmd);
-                       ret = FAILED;
-               }
+               break;
+       default:
+               /*
+                * Either abort failed or abort and completion raced. Let
+                * the SCSI core retry the abort in the former case.
+                */
+               ret = FAILED;
+               break;
        }
 
        ql_log(ql_log_info, vha, 0x801c,
-           "Abort command issued nexus=%ld:%d:%llu --  %d %x.\n",
-           vha->host_no, id, lun, wait, ret);
+           "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
+           vha->host_no, id, lun, ret);
 
        return ret;
 }
@@ -1804,42 +1718,34 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
        __releases(qp->qp_lock_ptr)
        __acquires(qp->qp_lock_ptr)
 {
+       DECLARE_COMPLETION_ONSTACK(comp);
        scsi_qla_host_t *vha = qp->vha;
        struct qla_hw_data *ha = vha->hw;
+       int rval;
 
-       if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS) {
-               if (!sp_get(sp)) {
-                       /* got sp */
-                       spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
-                       qla_nvme_abort(ha, sp, res);
-                       spin_lock_irqsave(qp->qp_lock_ptr, *flags);
-               }
-       } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
-                  !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
-                  !qla2x00_isp_reg_stat(ha) && sp->type == SRB_SCSI_CMD) {
-               /*
-                * Don't abort commands in adapter during EEH recovery as it's
-                * not accessible/responding.
-                *
-                * Get a reference to the sp and drop the lock. The reference
-                * ensures this sp->done() call and not the call in
-                * qla2xxx_eh_abort() ends the SCSI cmd (with result 'res').
-                */
-               if (!sp_get(sp)) {
-                       int status;
+       if (sp_get(sp))
+               return;
 
-                       spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
-                       status = qla2xxx_eh_abort(GET_CMD_SP(sp));
-                       spin_lock_irqsave(qp->qp_lock_ptr, *flags);
-                       /*
-                        * Get rid of extra reference caused
-                        * by early exit from qla2xxx_eh_abort
-                        */
-                       if (status == FAST_IO_FAIL)
-                               atomic_dec(&sp->ref_count);
+       if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
+           (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
+            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+            !qla2x00_isp_reg_stat(ha))) {
+               sp->comp = &comp;
+               rval = ha->isp_ops->abort_command(sp);
+               spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+
+               switch (rval) {
+               case QLA_SUCCESS:
+                       sp->done(sp, res);
+                       break;
+               case QLA_FUNCTION_PARAMETER_ERROR:
+                       wait_for_completion(&comp);
+                       break;
                }
+
+               spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+               sp->comp = NULL;
        }
-       sp->done(sp, res);
 }
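
The hunk above drops the recursive call into the EH abort path and instead parks an on-stack completion in sp->comp, which the completion routines signal once the firmware has finished with the command. A minimal standalone C sketch of that wait/signal pattern, using pthreads in place of the kernel completion API (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

struct completion { pthread_mutex_t lock; pthread_cond_t cond; int done; };

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static void *irq_path(void *arg)
{
        /* Stand-in for the interrupt/completion path that finishes the command. */
        complete(arg);
        return NULL;
}

int main(void)
{
        struct completion comp = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        pthread_t t;

        pthread_create(&t, NULL, irq_path, &comp);
        wait_for_completion(&comp);     /* aborting path blocks until signalled */
        pthread_join(t, NULL);
        printf("command completed\n");
        return 0;
}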
 
 static void
@@ -1875,15 +1781,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
                                        continue;
                                }
                                cmd = (struct qla_tgt_cmd *)sp;
-                               qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+                               cmd->aborted = 1;
                                break;
                        case TYPE_TGT_TMCMD:
-                               /*
-                                * Currently, only ABTS response gets on the
-                                * outstanding_cmds[]
-                                */
-                               ha->tgt.tgt_ops->free_mcmd(
-                                  (struct qla_tgt_mgmt_cmd *)sp);
+                               /* Skip task management functions. */
                                break;
                        default:
                                break;
@@ -2753,6 +2654,24 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->device_type |= DT_T10_PI;
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
+       case PCI_DEVICE_ID_QLOGIC_ISP2081:
+       case PCI_DEVICE_ID_QLOGIC_ISP2089:
+               ha->isp_type |= DT_ISP2081;
+               ha->device_type |= DT_ZIO_SUPPORTED;
+               ha->device_type |= DT_FWI2;
+               ha->device_type |= DT_IIDMA;
+               ha->device_type |= DT_T10_PI;
+               ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+               break;
+       case PCI_DEVICE_ID_QLOGIC_ISP2281:
+       case PCI_DEVICE_ID_QLOGIC_ISP2289:
+               ha->isp_type |= DT_ISP2281;
+               ha->device_type |= DT_ZIO_SUPPORTED;
+               ha->device_type |= DT_FWI2;
+               ha->device_type |= DT_IIDMA;
+               ha->device_type |= DT_T10_PI;
+               ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+               break;
        }
 
        if (IS_QLA82XX(ha))
@@ -2760,7 +2679,8 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
        else {
                /* Get adapter physical port no from interrupt pin register. */
                pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
-               if (IS_QLA27XX(ha))
+               if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
+                   IS_QLA27XX(ha) || IS_QLA28XX(ha))
                        ha->port_no--;
                else
                        ha->port_no = !(ha->port_no & 1);
@@ -2857,7 +2777,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
-           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
                bars = pci_select_bars(pdev, IORESOURCE_MEM);
                mem_only = 1;
                ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2877,6 +2801,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        /* This may fail but that's ok */
        pci_enable_pcie_error_reporting(pdev);
 
+       /* Turn off T10-DIF when FC-NVMe is enabled */
+       if (ql2xnvmeenable)
+               ql2xenabledif = 0;
+
        ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
        if (!ha) {
                ql_log_pci(ql_log_fatal, pdev, 0x0009,
@@ -2906,7 +2834,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        /* Set EEH reset type to fundamental if required by hba */
        if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
-           IS_QLA83XX(ha) || IS_QLA27XX(ha))
+           IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                pdev->needs_freset = 1;
 
        ha->prev_topology = 0;
@@ -3085,6 +3013,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
                ha->nvram_conf_off = ~0;
                ha->nvram_data_off = ~0;
+       } else if (IS_QLA28XX(ha)) {
+               ha->portnum = PCI_FUNC(ha->pdev->devfn);
+               ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+               ha->mbx_count = MAILBOX_REGISTER_COUNT;
+               req_length = REQUEST_ENTRY_CNT_24XX;
+               rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+               ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+               ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+               ha->gid_list_info_size = 8;
+               ha->optrom_size = OPTROM_SIZE_28XX;
+               ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+               ha->isp_ops = &qla27xx_isp_ops;
+               ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
+               ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
+               ha->nvram_conf_off = ~0;
+               ha->nvram_data_off = ~0;
        }
 
        ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -3250,7 +3195,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        req->req_q_out = &ha->iobase->isp24.req_q_out;
        rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
        rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
-       if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
                req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
                rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -3395,6 +3341,7 @@ skip_dpc:
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                if (ha->fw_attributes & BIT_4) {
                        int prot = 0, guard;
+
                        base_vha->flags.difdix_supported = 1;
                        ql_dbg(ql_dbg_init, base_vha, 0x00f1,
                            "Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -3576,7 +3523,8 @@ qla2x00_shutdown(struct pci_dev *pdev)
        if (ha->eft)
                qla2x00_disable_eft_trace(vha);
 
-       if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                if (ha->flags.fw_started)
                        qla2x00_abort_isp_cleanup(vha);
        } else {
@@ -3681,7 +3629,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
                if (ha->mqiobase)
                        iounmap(ha->mqiobase);
 
-               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
+               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+                   ha->msixbase)
                        iounmap(ha->msixbase);
        }
 }
@@ -3732,7 +3681,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
-       if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                if (ha->flags.fw_started)
                        qla2x00_abort_isp_cleanup(base_vha);
        } else if (!IS_QLAFX00(ha)) {
@@ -3770,8 +3720,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        qla2x00_delete_all_vps(ha, base_vha);
 
-       qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
-
        qla2x00_dfs_remove(base_vha);
 
        qla84xx_put_chip(base_vha);
@@ -3860,11 +3808,8 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
 {
        fc_port_t *fcport, *tfcport;
 
-       list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
-               list_del(&fcport->list);
-               qla2x00_clear_loop_id(fcport);
-               kfree(fcport);
-       }
+       list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
+               qla2x00_free_fcport(fcport);
 }
 
 static inline void
@@ -3889,6 +3834,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
                qla2xxx_wake_dpc(base_vha);
        } else {
                int now;
+
                if (rport) {
                        ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
                            "%s %8phN. rport %p roles %x\n",
@@ -3980,6 +3926,19 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
        }
 }
 
+static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+       int i;
+
+       if (IS_FWI2_CAPABLE(ha))
+               return;
+
+       for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+               set_bit(i, ha->loop_id_map);
+       set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+       set_bit(BROADCAST, ha->loop_id_map);
+}
+
 /*
 * qla2x00_mem_alloc
 *      Allocates adapter memory.
@@ -4222,7 +4181,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                ha->npiv_info = NULL;
 
        /* Get consistent memory allocated for EX-INIT-CB. */
-       if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+       if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
                    &ha->ex_init_cb_dma);
                if (!ha->ex_init_cb)
@@ -4265,8 +4225,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                goto fail_sfp_data;
        }
 
+       ha->flt = dma_alloc_coherent(&ha->pdev->dev,
+           sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
+           GFP_KERNEL);
+       if (!ha->flt) {
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+                   "Unable to allocate memory for FLT.\n");
+               goto fail_flt_buffer;
+       }
+
        return 0;
 
+fail_flt_buffer:
+       dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+           ha->sfp_data, ha->sfp_data_dma);
 fail_sfp_data:
        kfree(ha->loop_id_map);
 fail_loop_id_map:
@@ -4602,6 +4574,9 @@ qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
 static void
 qla2x00_free_fw_dump(struct qla_hw_data *ha)
 {
+       struct fwdt *fwdt = ha->fwdt;
+       uint j;
+
        if (ha->fce)
                dma_free_coherent(&ha->pdev->dev,
                    FCE_SIZE, ha->fce, ha->fce_dma);
@@ -4612,8 +4587,6 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
 
        if (ha->fw_dump)
                vfree(ha->fw_dump);
-       if (ha->fw_dump_template)
-               vfree(ha->fw_dump_template);
 
        ha->fce = NULL;
        ha->fce_dma = 0;
@@ -4624,8 +4597,13 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
        ha->fw_dump_reading = 0;
        ha->fw_dump = NULL;
        ha->fw_dump_len = 0;
-       ha->fw_dump_template = NULL;
-       ha->fw_dump_template_len = 0;
+
+       for (j = 0; j < 2; j++, fwdt++) {
+               if (fwdt->template)
+                       vfree(fwdt->template);
+               fwdt->template = NULL;
+               fwdt->length = 0;
+       }
 }
 
 /*
@@ -4643,44 +4621,68 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        if (ha->mctp_dump)
                dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
                    ha->mctp_dump_dma);
+       ha->mctp_dump = NULL;
 
        mempool_destroy(ha->srb_mempool);
+       ha->srb_mempool = NULL;
 
        if (ha->dcbx_tlv)
                dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
                    ha->dcbx_tlv, ha->dcbx_tlv_dma);
+       ha->dcbx_tlv = NULL;
 
        if (ha->xgmac_data)
                dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
                    ha->xgmac_data, ha->xgmac_data_dma);
+       ha->xgmac_data = NULL;
 
        if (ha->sns_cmd)
                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
                ha->sns_cmd, ha->sns_cmd_dma);
+       ha->sns_cmd = NULL;
+       ha->sns_cmd_dma = 0;
 
        if (ha->ct_sns)
                dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
                ha->ct_sns, ha->ct_sns_dma);
+       ha->ct_sns = NULL;
+       ha->ct_sns_dma = 0;
 
        if (ha->sfp_data)
                dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
                    ha->sfp_data_dma);
+       ha->sfp_data = NULL;
+
+       if (ha->flt)
+               dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+                   ha->flt, ha->flt_dma);
+       ha->flt = NULL;
+       ha->flt_dma = 0;
 
        if (ha->ms_iocb)
                dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+       ha->ms_iocb = NULL;
+       ha->ms_iocb_dma = 0;
 
        if (ha->ex_init_cb)
                dma_pool_free(ha->s_dma_pool,
                        ha->ex_init_cb, ha->ex_init_cb_dma);
+       ha->ex_init_cb = NULL;
+       ha->ex_init_cb_dma = 0;
 
        if (ha->async_pd)
                dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+       ha->async_pd = NULL;
+       ha->async_pd_dma = 0;
 
        dma_pool_destroy(ha->s_dma_pool);
+       ha->s_dma_pool = NULL;
 
        if (ha->gid_list)
                dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
                ha->gid_list, ha->gid_list_dma);
+       ha->gid_list = NULL;
+       ha->gid_list_dma = 0;
 
        if (IS_QLA82XX(ha)) {
                if (!list_empty(&ha->gbl_dsd_list)) {
@@ -4698,10 +4700,13 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        }
 
        dma_pool_destroy(ha->dl_dma_pool);
+       ha->dl_dma_pool = NULL;
 
        dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+       ha->fcp_cmnd_dma_pool = NULL;
 
        mempool_destroy(ha->ctx_mempool);
+       ha->ctx_mempool = NULL;
 
        if (ql2xenabledif) {
                struct dsd_dma *dsd, *nxt;
@@ -4728,53 +4733,26 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 
        if (ha->dif_bundl_pool)
                dma_pool_destroy(ha->dif_bundl_pool);
+       ha->dif_bundl_pool = NULL;
 
        qlt_mem_free(ha);
 
        if (ha->init_cb)
                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
                        ha->init_cb, ha->init_cb_dma);
+       ha->init_cb = NULL;
+       ha->init_cb_dma = 0;
 
        vfree(ha->optrom_buffer);
+       ha->optrom_buffer = NULL;
        kfree(ha->nvram);
+       ha->nvram = NULL;
        kfree(ha->npiv_info);
+       ha->npiv_info = NULL;
        kfree(ha->swl);
+       ha->swl = NULL;
        kfree(ha->loop_id_map);
-
-       ha->srb_mempool = NULL;
-       ha->ctx_mempool = NULL;
-       ha->sns_cmd = NULL;
-       ha->sns_cmd_dma = 0;
-       ha->ct_sns = NULL;
-       ha->ct_sns_dma = 0;
-       ha->ms_iocb = NULL;
-       ha->ms_iocb_dma = 0;
-       ha->init_cb = NULL;
-       ha->init_cb_dma = 0;
-       ha->ex_init_cb = NULL;
-       ha->ex_init_cb_dma = 0;
-       ha->async_pd = NULL;
-       ha->async_pd_dma = 0;
        ha->loop_id_map = NULL;
-       ha->npiv_info = NULL;
-       ha->optrom_buffer = NULL;
-       ha->swl = NULL;
-       ha->nvram = NULL;
-       ha->mctp_dump = NULL;
-       ha->dcbx_tlv = NULL;
-       ha->xgmac_data = NULL;
-       ha->sfp_data = NULL;
-
-       ha->s_dma_pool = NULL;
-       ha->dl_dma_pool = NULL;
-       ha->fcp_cmnd_dma_pool = NULL;
-
-       ha->gid_list = NULL;
-       ha->gid_list_dma = 0;
-
-       ha->tgt.atio_ring = NULL;
-       ha->tgt.atio_dma = 0;
-       ha->tgt.tgt_vp_map = NULL;
 }
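
The restructuring above moves each "pointer = NULL" assignment next to the free of that resource instead of batching the resets at the end of the function. A minimal sketch of that free-and-clear teardown style, with invented fields standing in for the hw_data members:

#include <stdlib.h>

struct hw_data {
        void *gid_list;
        void *nvram;
        void *npiv_info;
};

/* Free each resource and clear its pointer immediately, so a repeated call
 * (or a later stray access) sees NULL rather than a stale pointer. */
static void hw_mem_free(struct hw_data *ha)
{
        free(ha->gid_list);
        ha->gid_list = NULL;

        free(ha->nvram);
        ha->nvram = NULL;

        free(ha->npiv_info);
        ha->npiv_info = NULL;
}

int main(void)
{
        struct hw_data ha = {
                .gid_list  = malloc(64),
                .nvram     = malloc(64),
                .npiv_info = malloc(64),
        };

        hw_mem_free(&ha);
        hw_mem_free(&ha);       /* safe: free(NULL) is a no-op */
        return 0;
}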
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -5608,6 +5586,7 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
        uint32_t idc_lck_rcvry_stage_mask = 0x3;
        uint32_t idc_lck_rcvry_owner_mask = 0x3c;
        struct qla_hw_data *ha = base_vha->hw;
+
        ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
            "Trying force recovery of the IDC lock.\n");
 
@@ -6677,8 +6656,10 @@ qla2x00_timer(struct timer_list *t)
         * FC-NVME
         * see if the active AEN count has changed from what was last reported.
         */
-       if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
-           ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
+       if (!vha->vp_idx &&
+           (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+           ha->zio_mode == QLA_ZIO_MODE_6 &&
+           !ha->flags.host_shutting_down) {
                ql_log(ql_log_info, vha, 0x3002,
                    "nvme: Sched: Set ZIO exchange threshold to %d.\n",
                    ha->nvme_last_rptd_aen);
@@ -6690,7 +6671,7 @@ qla2x00_timer(struct timer_list *t)
        if (!vha->vp_idx &&
            (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
            (ha->zio_mode == QLA_ZIO_MODE_6) &&
-           (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+           (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
                ql_log(ql_log_info, vha, 0x3002,
                    "Sched: Set ZIO exchange threshold to %d.\n",
                    ha->last_zio_threshold);
@@ -6736,7 +6717,6 @@ qla2x00_timer(struct timer_list *t)
 
 /* Firmware interface routines. */
 
-#define FW_BLOBS       11
 #define FW_ISP21XX     0
 #define FW_ISP22XX     1
 #define FW_ISP2300     2
@@ -6748,6 +6728,7 @@ qla2x00_timer(struct timer_list *t)
 #define FW_ISP2031     8
 #define FW_ISP8031     9
 #define FW_ISP27XX     10
+#define FW_ISP28XX     11
 
 #define FW_FILE_ISP21XX        "ql2100_fw.bin"
 #define FW_FILE_ISP22XX        "ql2200_fw.bin"
@@ -6760,11 +6741,12 @@ qla2x00_timer(struct timer_list *t)
 #define FW_FILE_ISP2031        "ql2600_fw.bin"
 #define FW_FILE_ISP8031        "ql8300_fw.bin"
 #define FW_FILE_ISP27XX        "ql2700_fw.bin"
+#define FW_FILE_ISP28XX        "ql2800_fw.bin"
 
 
 static DEFINE_MUTEX(qla_fw_lock);
 
-static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
+static struct fw_blob qla_fw_blobs[] = {
        { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
        { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
        { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
@@ -6776,6 +6758,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
        { .name = FW_FILE_ISP2031, },
        { .name = FW_FILE_ISP8031, },
        { .name = FW_FILE_ISP27XX, },
+       { .name = FW_FILE_ISP28XX, },
+       { .name = NULL, },
 };
 
 struct fw_blob *
@@ -6806,10 +6790,15 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
                blob = &qla_fw_blobs[FW_ISP8031];
        } else if (IS_QLA27XX(ha)) {
                blob = &qla_fw_blobs[FW_ISP27XX];
+       } else if (IS_QLA28XX(ha)) {
+               blob = &qla_fw_blobs[FW_ISP28XX];
        } else {
                return NULL;
        }
 
+       if (!blob->name)
+               return NULL;
+
        mutex_lock(&qla_fw_lock);
        if (blob->fw)
                goto out;
@@ -6819,7 +6808,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
                    "Failed to load firmware image (%s).\n", blob->name);
                blob->fw = NULL;
                blob = NULL;
-               goto out;
        }
 
 out:
@@ -6830,11 +6818,11 @@ out:
 static void
 qla2x00_release_firmware(void)
 {
-       int idx;
+       struct fw_blob *blob;
 
        mutex_lock(&qla_fw_lock);
-       for (idx = 0; idx < FW_BLOBS; idx++)
-               release_firmware(qla_fw_blobs[idx].fw);
+       for (blob = qla_fw_blobs; blob->name; blob++)
+               release_firmware(blob->fw);
        mutex_unlock(&qla_fw_lock);
 }
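
The firmware-blob hunks above drop the FW_BLOBS count and instead terminate qla_fw_blobs[] with a { .name = NULL } sentinel, so the release loop walks entries until the name is NULL. A self-contained sketch of the sentinel-terminated table pattern (file names below are only examples):

#include <stdio.h>

struct fw_blob {
        const char *name;
        const void *fw;         /* loaded image, NULL until requested */
};

/* Table terminated by a NULL .name entry; consumers iterate without a
 * separate element count. */
static struct fw_blob blobs[] = {
        { .name = "ql2100_fw.bin" },
        { .name = "ql2700_fw.bin" },
        { .name = "ql2800_fw.bin" },
        { .name = NULL },
};

static void release_all(void)
{
        for (struct fw_blob *blob = blobs; blob->name; blob++)
                printf("releasing %s\n", blob->name);
}

int main(void)
{
        release_all();
        return 0;
}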
 
@@ -7179,7 +7167,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
        int rc;
        scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
-       struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
+       struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
        if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
                rc = blk_mq_map_queues(qmap);
@@ -7188,6 +7176,37 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
        return rc;
 }
 
+struct scsi_host_template qla2xxx_driver_template = {
+       .module                 = THIS_MODULE,
+       .name                   = QLA2XXX_DRIVER_NAME,
+       .queuecommand           = qla2xxx_queuecommand,
+
+       .eh_timed_out           = fc_eh_timed_out,
+       .eh_abort_handler       = qla2xxx_eh_abort,
+       .eh_device_reset_handler = qla2xxx_eh_device_reset,
+       .eh_target_reset_handler = qla2xxx_eh_target_reset,
+       .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
+       .eh_host_reset_handler  = qla2xxx_eh_host_reset,
+
+       .slave_configure        = qla2xxx_slave_configure,
+
+       .slave_alloc            = qla2xxx_slave_alloc,
+       .slave_destroy          = qla2xxx_slave_destroy,
+       .scan_finished          = qla2xxx_scan_finished,
+       .scan_start             = qla2xxx_scan_start,
+       .change_queue_depth     = scsi_change_queue_depth,
+       .map_queues             = qla2xxx_map_queues,
+       .this_id                = -1,
+       .cmd_per_lun            = 3,
+       .sg_tablesize           = SG_ALL,
+
+       .max_sectors            = 0xFFFF,
+       .shost_attrs            = qla2x00_host_attrs,
+
+       .supported_mode         = MODE_INITIATOR,
+       .track_queue_depth      = 1,
+};
+
 static const struct pci_error_handlers qla2xxx_err_handler = {
        .error_detected = qla2xxx_pci_error_detected,
        .mmio_enabled = qla2xxx_pci_mmio_enabled,
@@ -7220,6 +7239,11 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -7249,6 +7273,30 @@ qla2x00_module_init(void)
 {
        int ret = 0;
 
+       BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
+       BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
+       BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
+       BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+       BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
+       BUILD_BUG_ON(sizeof(request_t) != 64);
+       BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+       BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
+       BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
+       BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
+       BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
+       BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
+       BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
+       BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+       BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+       BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+       BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+       BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
+       BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+       BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+       BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+       BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+       BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+
        /* Allocate cache for SRBs. */
        srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
            SLAB_HWCACHE_ALIGN, NULL);
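
The BUILD_BUG_ON() block added above pins the size of every IOCB structure shared with the firmware at compile time, so a layout mistake fails the build instead of corrupting the request ring at runtime. A hedged, standalone C11 equivalent using static_assert; the structure below is a stand-in, not the real cmd_entry_t layout:

#include <assert.h>
#include <stdint.h>

/* Firmware-facing request entries must be exactly 64 bytes. */
struct cmd_entry {
        uint8_t  entry_type;
        uint8_t  entry_count;
        uint8_t  sys_define;
        uint8_t  entry_status;
        uint32_t handle;
        uint8_t  payload[56];
};

static_assert(sizeof(struct cmd_entry) == 64, "cmd_entry must be 64 bytes");

int main(void) { return 0; }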
@@ -7261,8 +7309,7 @@ qla2x00_module_init(void)
        /* Initialize target kmem_cache and mem_pools */
        ret = qlt_init();
        if (ret < 0) {
-               kmem_cache_destroy(srb_cachep);
-               return ret;
+               goto destroy_cache;
        } else if (ret > 0) {
                /*
                 * If initiator mode is explictly disabled by qlt_init(),
                 * If initiator mode is explicitly disabled by qlt_init(),
@@ -7286,11 +7333,10 @@ qla2x00_module_init(void)
        qla2xxx_transport_template =
            fc_attach_transport(&qla2xxx_transport_functions);
        if (!qla2xxx_transport_template) {
-               kmem_cache_destroy(srb_cachep);
                ql_log(ql_log_fatal, NULL, 0x0002,
                    "fc_attach_transport failed...Failing load!.\n");
-               qlt_exit();
-               return -ENODEV;
+               ret = -ENODEV;
+               goto qlt_exit;
        }
 
        apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
@@ -7302,27 +7348,37 @@ qla2x00_module_init(void)
        qla2xxx_transport_vport_template =
            fc_attach_transport(&qla2xxx_transport_vport_functions);
        if (!qla2xxx_transport_vport_template) {
-               kmem_cache_destroy(srb_cachep);
-               qlt_exit();
-               fc_release_transport(qla2xxx_transport_template);
                ql_log(ql_log_fatal, NULL, 0x0004,
                    "fc_attach_transport vport failed...Failing load!.\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto unreg_chrdev;
        }
        ql_log(ql_log_info, NULL, 0x0005,
            "QLogic Fibre Channel HBA Driver: %s.\n",
            qla2x00_version_str);
        ret = pci_register_driver(&qla2xxx_pci_driver);
        if (ret) {
-               kmem_cache_destroy(srb_cachep);
-               qlt_exit();
-               fc_release_transport(qla2xxx_transport_template);
-               fc_release_transport(qla2xxx_transport_vport_template);
                ql_log(ql_log_fatal, NULL, 0x0006,
                    "pci_register_driver failed...ret=%d Failing load!.\n",
                    ret);
+               goto release_vport_transport;
        }
        return ret;
+
+release_vport_transport:
+       fc_release_transport(qla2xxx_transport_vport_template);
+
+unreg_chrdev:
+       if (apidev_major >= 0)
+               unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+       fc_release_transport(qla2xxx_transport_template);
+
+qlt_exit:
+       qlt_exit();
+
+destroy_cache:
+       kmem_cache_destroy(srb_cachep);
+       return ret;
 }
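
The module-init hunk above replaces the repeated cleanup calls on each failure with a single goto-based unwind chain, where every label releases exactly what was set up before the failing step, in reverse order. A compact sketch of that layered-init pattern with made-up resources:

#include <stdio.h>
#include <stdlib.h>

static void *cache, *transport, *vport;

static int module_init(void)
{
        int ret = -1;

        cache = malloc(32);
        if (!cache)
                goto out;

        transport = malloc(32);
        if (!transport)
                goto destroy_cache;

        vport = malloc(32);
        if (!vport)
                goto release_transport;

        return 0;

release_transport:
        free(transport);
destroy_cache:
        free(cache);
out:
        return ret;
}

int main(void)
{
        printf("init: %d\n", module_init());
        return 0;
}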
 
 /**
@@ -7331,14 +7387,15 @@ qla2x00_module_init(void)
 static void __exit
 qla2x00_module_exit(void)
 {
-       unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
        pci_unregister_driver(&qla2xxx_pci_driver);
        qla2x00_release_firmware();
-       kmem_cache_destroy(srb_cachep);
-       qlt_exit();
        kmem_cache_destroy(ctx_cachep);
-       fc_release_transport(qla2xxx_transport_template);
        fc_release_transport(qla2xxx_transport_vport_template);
+       if (apidev_major >= 0)
+               unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+       fc_release_transport(qla2xxx_transport_template);
+       qlt_exit();
+       kmem_cache_destroy(srb_cachep);
 }
 
 module_init(qla2x00_module_init);
index 2a3055c..1eb8238 100644 (file)
@@ -429,66 +429,64 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
 static inline uint32_t
 flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
 {
-       return ha->flash_conf_off | faddr;
+       return ha->flash_conf_off + faddr;
 }
 
 static inline uint32_t
 flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
 {
-       return ha->flash_data_off | faddr;
+       return ha->flash_data_off + faddr;
 }
 
 static inline uint32_t
 nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
 {
-       return ha->nvram_conf_off | naddr;
+       return ha->nvram_conf_off + naddr;
 }
 
 static inline uint32_t
 nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
 {
-       return ha->nvram_data_off | naddr;
+       return ha->nvram_data_off + naddr;
 }
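
The address helpers above switch from bitwise OR to addition when composing the flash/NVRAM base with an offset. A plausible reason, stated here as an assumption rather than something the diff spells out, is that newer base offsets have low bits set, where OR would silently drop the carry. A tiny illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t base   = 0x7F8D00;     /* window whose low bits overlap the offset */
        uint32_t offset = 0x03ab;

        printf("or : 0x%x\n", base | offset);   /* merges bits, loses the carry */
        printf("add: 0x%x\n", base + offset);   /* arithmetic sum of base and offset */
        return 0;
}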
 
-static uint32_t
-qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
+static int
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
 {
-       int rval;
-       uint32_t cnt, data;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       ulong cnt = 30000;
 
        WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
-       /* Wait for READ cycle to complete. */
-       rval = QLA_SUCCESS;
-       for (cnt = 3000;
-           (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) == 0 &&
-           rval == QLA_SUCCESS; cnt--) {
-               if (cnt)
-                       udelay(10);
-               else
-                       rval = QLA_FUNCTION_TIMEOUT;
+
+       while (cnt--) {
+               if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
+                       *data = RD_REG_DWORD(&reg->flash_data);
+                       return QLA_SUCCESS;
+               }
+               udelay(10);
                cond_resched();
        }
 
-       /* TODO: What happens if we time out? */
-       data = 0xDEADDEAD;
-       if (rval == QLA_SUCCESS)
-               data = RD_REG_DWORD(&reg->flash_data);
-
-       return data;
+       ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
+           "Flash read dword at %x timeout.\n", addr);
+       *data = 0xDEADDEAD;
+       return QLA_FUNCTION_TIMEOUT;
 }
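
The rewritten flash read above polls a ready flag a bounded number of times, returns an explicit status, and hands the data back through an out-parameter instead of overloading the 0xDEADDEAD sentinel. A standalone sketch of that bounded-polling shape; the register is faked here and a real driver would use the proper I/O accessors and udelay()/cond_resched():

#include <stdint.h>
#include <stdio.h>

#define POLL_OK      0
#define POLL_TIMEOUT 1
#define READY_FLAG   0x80000000u

/* Stand-in for a memory-mapped status/data register. */
static volatile uint32_t fake_reg = READY_FLAG | 0xABCD;

static int read_when_ready(uint32_t *data)
{
        unsigned long tries = 30000;

        while (tries--) {
                uint32_t v = fake_reg;

                if (v & READY_FLAG) {
                        *data = v & ~READY_FLAG;
                        return POLL_OK;
                }
                /* real code would delay and yield here */
        }
        return POLL_TIMEOUT;
}

int main(void)
{
        uint32_t data;

        if (read_when_ready(&data) == POLL_OK)
                printf("read 0x%x\n", data);
        else
                printf("timed out\n");
        return 0;
}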
 
 uint32_t *
 qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
-       uint32_t i;
+       ulong i;
        struct qla_hw_data *ha = vha->hw;
 
        /* Dword reads to flash. */
-       for (i = 0; i < dwords; i++, faddr++)
-               dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-                   flash_data_addr(ha, faddr)));
+       faddr =  flash_data_addr(ha, faddr);
+       for (i = 0; i < dwords; i++, faddr++, dwptr++) {
+               if (qla24xx_read_flash_dword(ha, faddr, dwptr))
+                       break;
+               cpu_to_le32s(dwptr);
+       }
 
        return dwptr;
 }
@@ -496,35 +494,37 @@ qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 static int
 qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
 {
-       int rval;
-       uint32_t cnt;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       ulong cnt = 500000;
 
        WRT_REG_DWORD(&reg->flash_data, data);
-       RD_REG_DWORD(&reg->flash_data);         /* PCI Posting. */
        WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
-       /* Wait for Write cycle to complete. */
-       rval = QLA_SUCCESS;
-       for (cnt = 500000; (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) &&
-           rval == QLA_SUCCESS; cnt--) {
-               if (cnt)
-                       udelay(10);
-               else
-                       rval = QLA_FUNCTION_TIMEOUT;
+
+       while (cnt--) {
+               if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+                       return QLA_SUCCESS;
+               udelay(10);
                cond_resched();
        }
-       return rval;
+
+       ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090,
+           "Flash write dword at %x timeout.\n", addr);
+       return QLA_FUNCTION_TIMEOUT;
 }
 
 static void
 qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
     uint8_t *flash_id)
 {
-       uint32_t ids;
+       uint32_t faddr, ids = 0;
 
-       ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
-       *man_id = LSB(ids);
-       *flash_id = MSB(ids);
+       *man_id = *flash_id = 0;
+
+       faddr = flash_conf_addr(ha, 0x03ab);
+       if (!qla24xx_read_flash_dword(ha, faddr, &ids)) {
+               *man_id = LSB(ids);
+               *flash_id = MSB(ids);
+       }
 
        /* Check if man_id and flash_id are valid. */
        if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) {
@@ -534,9 +534,11 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
                 * Example: ATMEL 0x00 01 45 1F
                 * Extract MFG and Dev ID from last two bytes.
                 */
-               ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
-               *man_id = LSB(ids);
-               *flash_id = MSB(ids);
+               faddr = flash_conf_addr(ha, 0x009f);
+               if (!qla24xx_read_flash_dword(ha, faddr, &ids)) {
+                       *man_id = LSB(ids);
+                       *flash_id = MSB(ids);
+               }
        }
 }
 
@@ -545,12 +547,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
 {
        const char *loc, *locations[] = { "DEF", "PCI" };
        uint32_t pcihdr, pcids;
-       uint32_t *dcode;
-       uint8_t *buf, *bcode, last_image;
        uint16_t cnt, chksum, *wptr;
-       struct qla_flt_location *fltl;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
+       struct qla_flt_location *fltl = (void *)req->ring;
+       uint32_t *dcode = (void *)req->ring;
+       uint8_t *buf = (void *)req->ring, *bcode,  last_image;
 
        /*
         * FLT-location structure resides after the last PCI region.
@@ -571,12 +573,13 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
        } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                *start = FA_FLASH_LAYOUT_ADDR_83;
                goto end;
+       } else if (IS_QLA28XX(ha)) {
+               *start = FA_FLASH_LAYOUT_ADDR_28;
+               goto end;
        }
+
        /* Begin with first PCI expansion ROM header. */
-       buf = (uint8_t *)req->ring;
-       dcode = (uint32_t *)req->ring;
        pcihdr = 0;
-       last_image = 1;
        do {
                /* Verify PCI expansion ROM header. */
                qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
@@ -601,22 +604,19 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
        } while (!last_image);
 
        /* Now verify FLT-location structure. */
-       fltl = (struct qla_flt_location *)req->ring;
-       qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
-           sizeof(struct qla_flt_location) >> 2);
-       if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
-           fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+       qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2);
+       if (memcmp(fltl->sig, "QFLT", 4))
                goto end;
 
-       wptr = (uint16_t *)req->ring;
-       cnt = sizeof(struct qla_flt_location) >> 1;
+       wptr = (void *)req->ring;
+       cnt = sizeof(*fltl) / sizeof(*wptr);
        for (chksum = 0; cnt--; wptr++)
                chksum += le16_to_cpu(*wptr);
        if (chksum) {
                ql_log(ql_log_fatal, vha, 0x0045,
                    "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
                ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
-                   buf, sizeof(struct qla_flt_location));
+                   fltl, sizeof(*fltl));
                return QLA_FUNCTION_FAILED;
        }
 
@@ -634,7 +634,7 @@ end:
 static void
 qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 {
-       const char *loc, *locations[] = { "DEF", "FLT" };
+       const char *locations[] = { "DEF", "FLT" }, *loc = locations[1];
        const uint32_t def_fw[] =
                { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
        const uint32_t def_boot[] =
@@ -664,20 +664,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
        const uint32_t fcp_prio_cfg1[] =
                { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
                        0 };
-       uint32_t def;
-       uint16_t *wptr;
-       uint16_t cnt, chksum;
-       uint32_t start;
-       struct qla_flt_header *flt;
-       struct qla_flt_region *region;
-       struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = ha->req_q_map[0];
 
-       def = 0;
-       if (IS_QLA25XX(ha))
-               def = 1;
-       else if (IS_QLA81XX(ha))
-               def = 2;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
+       struct qla_flt_header *flt = (void *)ha->flt;
+       struct qla_flt_region *region = (void *)&flt[1];
+       uint16_t *wptr, cnt, chksum;
+       uint32_t start;
 
        /* Assign FCP prio region since older adapters may not have FLT, or
           FCP prio region in it's FLT.
@@ -686,12 +679,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
            fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
 
        ha->flt_region_flt = flt_addr;
-       wptr = (uint16_t *)req->ring;
-       flt = (struct qla_flt_header *)req->ring;
-       region = (struct qla_flt_region *)&flt[1];
-       ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
-           flt_addr << 2, OPTROM_BURST_SIZE);
-       if (*wptr == cpu_to_le16(0xffff))
+       wptr = (uint16_t *)ha->flt;
+       qla24xx_read_flash_data(vha, (void *)flt, flt_addr,
+           (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE) >> 2);
+
+       if (le16_to_cpu(*wptr) == 0xffff)
                goto no_flash_data;
        if (flt->version != cpu_to_le16(1)) {
                ql_log(ql_log_warn, vha, 0x0047,
@@ -701,7 +693,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                goto no_flash_data;
        }
 
-       cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+       cnt = (sizeof(*flt) + le16_to_cpu(flt->length)) / sizeof(*wptr);
        for (chksum = 0; cnt--; wptr++)
                chksum += le16_to_cpu(*wptr);
        if (chksum) {
@@ -712,18 +704,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                goto no_flash_data;
        }
 
-       loc = locations[1];
-       cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+       cnt = le16_to_cpu(flt->length) / sizeof(*region);
        for ( ; cnt; cnt--, region++) {
                /* Store addresses as DWORD offsets. */
                start = le32_to_cpu(region->start) >> 2;
                ql_dbg(ql_dbg_init, vha, 0x0049,
-                   "FLT[%02x]: start=0x%x "
-                   "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
-                   start, le32_to_cpu(region->end) >> 2,
-                   le32_to_cpu(region->size));
-
-               switch (le32_to_cpu(region->code) & 0xff) {
+                   "FLT[%#x]: start=%#x end=%#x size=%#x.\n",
+                   le16_to_cpu(region->code), start,
+                   le32_to_cpu(region->end) >> 2,
+                   le32_to_cpu(region->size) >> 2);
+               if (region->attribute)
+                       ql_log(ql_dbg_init, vha, 0xffff,
+                           "Region %x is secure\n", region->code);
+
+               switch (le16_to_cpu(region->code)) {
                case FLT_REG_FCOE_FW:
                        if (!IS_QLA8031(ha))
                                break;
@@ -753,13 +747,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                                ha->flt_region_vpd = start;
                        break;
                case FLT_REG_VPD_2:
-                       if (!IS_QLA27XX(ha))
+                       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                break;
                        if (ha->port_no == 2)
                                ha->flt_region_vpd = start;
                        break;
                case FLT_REG_VPD_3:
-                       if (!IS_QLA27XX(ha))
+                       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                break;
                        if (ha->port_no == 3)
                                ha->flt_region_vpd = start;
@@ -777,13 +771,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                                ha->flt_region_nvram = start;
                        break;
                case FLT_REG_NVRAM_2:
-                       if (!IS_QLA27XX(ha))
+                       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                break;
                        if (ha->port_no == 2)
                                ha->flt_region_nvram = start;
                        break;
                case FLT_REG_NVRAM_3:
-                       if (!IS_QLA27XX(ha))
+                       if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                break;
                        if (ha->port_no == 3)
                                ha->flt_region_nvram = start;
@@ -847,36 +841,74 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                                ha->flt_region_nvram = start;
                        break;
                case FLT_REG_IMG_PRI_27XX:
-                       if (IS_QLA27XX(ha))
+                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                ha->flt_region_img_status_pri = start;
                        break;
                case FLT_REG_IMG_SEC_27XX:
-                       if (IS_QLA27XX(ha))
+                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                ha->flt_region_img_status_sec = start;
                        break;
                case FLT_REG_FW_SEC_27XX:
-                       if (IS_QLA27XX(ha))
+                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                ha->flt_region_fw_sec = start;
                        break;
                case FLT_REG_BOOTLOAD_SEC_27XX:
-                       if (IS_QLA27XX(ha))
+                       if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                                ha->flt_region_boot_sec = start;
                        break;
+               case FLT_REG_AUX_IMG_PRI_28XX:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               ha->flt_region_aux_img_status_pri = start;
+                       break;
+               case FLT_REG_AUX_IMG_SEC_28XX:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               ha->flt_region_aux_img_status_sec = start;
+                       break;
+               case FLT_REG_NVRAM_SEC_28XX_0:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 0)
+                                       ha->flt_region_nvram_sec = start;
+                       break;
+               case FLT_REG_NVRAM_SEC_28XX_1:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 1)
+                                       ha->flt_region_nvram_sec = start;
+                       break;
+               case FLT_REG_NVRAM_SEC_28XX_2:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 2)
+                                       ha->flt_region_nvram_sec = start;
+                       break;
+               case FLT_REG_NVRAM_SEC_28XX_3:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 3)
+                                       ha->flt_region_nvram_sec = start;
+                       break;
                case FLT_REG_VPD_SEC_27XX_0:
-                       if (IS_QLA27XX(ha))
-                               ha->flt_region_vpd_sec = start;
+               case FLT_REG_VPD_SEC_28XX_0:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+                               ha->flt_region_vpd_nvram_sec = start;
+                               if (ha->port_no == 0)
+                                       ha->flt_region_vpd_sec = start;
+                       }
                        break;
                case FLT_REG_VPD_SEC_27XX_1:
-                       if (IS_QLA27XX(ha))
-                               ha->flt_region_vpd_sec = start;
+               case FLT_REG_VPD_SEC_28XX_1:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 1)
+                                       ha->flt_region_vpd_sec = start;
                        break;
                case FLT_REG_VPD_SEC_27XX_2:
-                       if (IS_QLA27XX(ha))
-                               ha->flt_region_vpd_sec = start;
+               case FLT_REG_VPD_SEC_28XX_2:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 2)
+                                       ha->flt_region_vpd_sec = start;
                        break;
                case FLT_REG_VPD_SEC_27XX_3:
-                       if (IS_QLA27XX(ha))
-                               ha->flt_region_vpd_sec = start;
+               case FLT_REG_VPD_SEC_28XX_3:
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               if (ha->port_no == 3)
+                                       ha->flt_region_vpd_sec = start;
                        break;
                }
        }
@@ -912,22 +944,19 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
 #define FLASH_BLK_SIZE_32K     0x8000
 #define FLASH_BLK_SIZE_64K     0x10000
        const char *loc, *locations[] = { "MID", "FDT" };
+       struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = ha->req_q_map[0];
        uint16_t cnt, chksum;
-       uint16_t *wptr;
-       struct qla_fdt_layout *fdt;
+       uint16_t *wptr = (void *)req->ring;
+       struct qla_fdt_layout *fdt = (void *)req->ring;
        uint8_t man_id, flash_id;
        uint16_t mid = 0, fid = 0;
-       struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = ha->req_q_map[0];
 
-       wptr = (uint16_t *)req->ring;
-       fdt = (struct qla_fdt_layout *)req->ring;
-       ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
-           ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
-       if (*wptr == cpu_to_le16(0xffff))
+       qla24xx_read_flash_data(vha, (void *)fdt, ha->flt_region_fdt,
+           OPTROM_BURST_DWORDS);
+       if (le16_to_cpu(*wptr) == 0xffff)
                goto no_flash_data;
-       if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
-           fdt->sig[3] != 'D')
+       if (memcmp(fdt->sig, "QLID", 4))
                goto no_flash_data;
 
        for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
@@ -938,7 +967,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
                    " checksum=0x%x id=%c version0x%x.\n", chksum,
                    fdt->sig[0], le16_to_cpu(fdt->version));
                ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
-                   (uint8_t *)fdt, sizeof(*fdt));
+                   fdt, sizeof(*fdt));
                goto no_flash_data;
        }
 
@@ -958,7 +987,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
                ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
                    fdt->unprotect_sec_cmd);
                ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
-                   flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
+                   flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd) :
                    flash_conf_addr(ha, 0x0336);
        }
        goto done;
@@ -1019,8 +1048,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
                return;
 
        wptr = (uint32_t *)req->ring;
-       ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
-               QLA82XX_IDC_PARAM_ADDR , 8);
+       ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
 
        if (*wptr == cpu_to_le32(0xffffffff)) {
                ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
@@ -1045,7 +1073,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
 
        if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
-           !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+           !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return QLA_SUCCESS;
 
        ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1081,8 +1110,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
        if (IS_QLA8044(ha))
                return;
 
-       ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
-           ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+       ha->isp_ops->read_optrom(vha, &hdr, ha->flt_region_npiv_conf << 2,
+           sizeof(struct qla_npiv_header));
        if (hdr.version == cpu_to_le16(0xffff))
                return;
        if (hdr.version != cpu_to_le16(1)) {
@@ -1101,8 +1130,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
                return;
        }
 
-       ha->isp_ops->read_optrom(vha, (uint8_t *)data,
-           ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+       ha->isp_ops->read_optrom(vha, data, ha->flt_region_npiv_conf << 2,
+           NPIV_CONFIG_SIZE);
 
        cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
        for (wptr = data, chksum = 0; cnt--; wptr++)
@@ -1139,10 +1168,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
                vid.node_name = wwn_to_u64(entry->node_name);
 
                ql_dbg(ql_dbg_user, vha, 0x7093,
-                   "NPIV[%02x]: wwpn=%llx "
-                   "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
-                   (unsigned long long)vid.port_name,
-                   (unsigned long long)vid.node_name,
+                   "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=%#x Q_qos=%#x F_qos=%#x.\n",
+                   cnt, vid.port_name, vid.node_name,
                    le16_to_cpu(entry->vf_id),
                    entry->q_qos, entry->f_qos);
 
@@ -1150,10 +1177,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
                        vport = fc_vport_create(vha->host, 0, &vid);
                        if (!vport)
                                ql_log(ql_log_warn, vha, 0x7094,
-                                   "NPIV-Config Failed to create vport [%02x]: "
-                                   "wwpn=%llx wwnn=%llx.\n", cnt,
-                                   (unsigned long long)vid.port_name,
-                                   (unsigned long long)vid.node_name);
+                                   "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n",
+                                   cnt, vid.port_name, vid.node_name);
                }
        }
 done:
@@ -1188,9 +1213,10 @@ done:
 static int
 qla24xx_protect_flash(scsi_qla_host_t *vha)
 {
-       uint32_t cnt;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       ulong cnt = 300;
+       uint32_t faddr, dword;
 
        if (ha->flags.fac_supported)
                return qla81xx_fac_do_write_enable(vha, 0);
@@ -1199,11 +1225,14 @@ qla24xx_protect_flash(scsi_qla_host_t *vha)
                goto skip_wrt_protect;
 
        /* Enable flash write-protection and wait for completion. */
-       qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
-           ha->fdt_wrt_disable);
-       for (cnt = 300; cnt &&
-           qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
-           cnt--) {
+       faddr = flash_conf_addr(ha, 0x101);
+       qla24xx_write_flash_dword(ha, faddr, ha->fdt_wrt_disable);
+       faddr = flash_conf_addr(ha, 0x5);
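+       /* Poll flash status until the busy bit (BIT_0) clears or we time out. */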
+       while (cnt--) {
+               if (!qla24xx_read_flash_dword(ha, faddr, &dword)) {
+                       if (!(dword & BIT_0))
+                               break;
+               }
                udelay(10);
        }
 
@@ -1211,7 +1240,6 @@ skip_wrt_protect:
        /* Disable flash write. */
        WRT_REG_DWORD(&reg->ctrl_status,
            RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
-       RD_REG_DWORD(&reg->ctrl_status);        /* PCI Posting. */
 
        return QLA_SUCCESS;
 }
@@ -1239,107 +1267,103 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
        int ret;
-       uint32_t liter;
-       uint32_t sec_mask, rest_addr;
-       uint32_t fdata;
+       ulong liter;
+       ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */
+       uint32_t sec_mask, rest_addr, fdata;
        dma_addr_t optrom_dma;
        void *optrom = NULL;
        struct qla_hw_data *ha = vha->hw;
 
-       /* Prepare burst-capable write on supported ISPs. */
-       if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
-           IS_QLA27XX(ha)) &&
-           !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
-               optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
-                   &optrom_dma, GFP_KERNEL);
-               if (!optrom) {
-                       ql_log(ql_log_warn, vha, 0x7095,
-                           "Unable to allocate "
-                           "memory for optrom burst write (%x KB).\n",
-                           OPTROM_BURST_SIZE / 1024);
-               }
-       }
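+       /* Only ISPs with burst-write support get a DMA buffer; all others use slow dword writes. */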
+       if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+           !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+               goto next;
 
-       rest_addr = (ha->fdt_block_size >> 2) - 1;
-       sec_mask = ~rest_addr;
+       /* Allocate dma buffer for burst write */
+       optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+           &optrom_dma, GFP_KERNEL);
+       if (!optrom) {
+               ql_log(ql_log_warn, vha, 0x7095,
+                   "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE);
+       }
 
+next:
+       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+           "Unprotect flash...\n");
        ret = qla24xx_unprotect_flash(vha);
-       if (ret != QLA_SUCCESS) {
+       if (ret) {
                ql_log(ql_log_warn, vha, 0x7096,
-                   "Unable to unprotect flash for update.\n");
+                   "Failed to unprotect flash.\n");
                goto done;
        }
 
+       rest_addr = (ha->fdt_block_size >> 2) - 1;
+       sec_mask = ~rest_addr;
        for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
                fdata = (faddr & sec_mask) << 2;
 
                /* Are we at the beginning of a sector? */
-               if ((faddr & rest_addr) == 0) {
-                       /* Do sector unprotect. */
-                       if (ha->fdt_unprotect_sec_cmd)
-                               qla24xx_write_flash_dword(ha,
-                                   ha->fdt_unprotect_sec_cmd,
-                                   (fdata & 0xff00) | ((fdata << 16) &
-                                   0xff0000) | ((fdata >> 16) & 0xff));
+               if (!(faddr & rest_addr)) {
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+                           "Erase sector %#x...\n", faddr);
+
                        ret = qla24xx_erase_sector(vha, fdata);
-                       if (ret != QLA_SUCCESS) {
+                       if (ret) {
                                ql_dbg(ql_dbg_user, vha, 0x7007,
-                                   "Unable to erase erase sector: address=%x.\n",
-                                   faddr);
+                                   "Failed to erase sector %x.\n", faddr);
                                break;
                        }
                }
 
-               /* Go with burst-write. */
-               if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
-                       /* Copy data to DMA'ble buffer. */
-                       memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+               if (optrom) {
+                       /* If smaller than a burst remaining */
+                       if (dwords - liter < dburst)
+                               dburst = dwords - liter;
 
+                       /* Copy to dma buffer */
+                       memcpy(optrom, dwptr, dburst << 2);
+
+                       /* Burst write */
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+                           "Write burst (%#lx dwords)...\n", dburst);
                        ret = qla2x00_load_ram(vha, optrom_dma,
-                           flash_data_addr(ha, faddr),
-                           OPTROM_BURST_DWORDS);
-                       if (ret != QLA_SUCCESS) {
-                               ql_log(ql_log_warn, vha, 0x7097,
-                                   "Unable to burst-write optrom segment "
-                                   "(%x/%x/%llx).\n", ret,
-                                   flash_data_addr(ha, faddr),
-                                   (unsigned long long)optrom_dma);
-                               ql_log(ql_log_warn, vha, 0x7098,
-                                   "Reverting to slow-write.\n");
-
-                               dma_free_coherent(&ha->pdev->dev,
-                                   OPTROM_BURST_SIZE, optrom, optrom_dma);
-                               optrom = NULL;
-                       } else {
-                               liter += OPTROM_BURST_DWORDS - 1;
-                               faddr += OPTROM_BURST_DWORDS - 1;
-                               dwptr += OPTROM_BURST_DWORDS - 1;
+                           flash_data_addr(ha, faddr), dburst);
+                       if (!ret) {
+                               liter += dburst - 1;
+                               faddr += dburst - 1;
+                               dwptr += dburst - 1;
                                continue;
                        }
+
+                       ql_log(ql_log_warn, vha, 0x7097,
+                           "Failed burst-write at %x (%p/%#llx)...\n",
+                           flash_data_addr(ha, faddr), optrom,
+                           (u64)optrom_dma);
+
+                       dma_free_coherent(&ha->pdev->dev,
+                           OPTROM_BURST_SIZE, optrom, optrom_dma);
+                       optrom = NULL;
+                       if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+                               break;
+                       ql_log(ql_log_warn, vha, 0x7098,
+                           "Reverting to slow write...\n");
                }
 
+               /* Slow write */
                ret = qla24xx_write_flash_dword(ha,
                    flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
-               if (ret != QLA_SUCCESS) {
+               if (ret) {
                        ql_dbg(ql_dbg_user, vha, 0x7006,
-                           "Unable to program flash address=%x data=%x.\n",
-                           faddr, *dwptr);
+                           "Failed slow write %x (%x)\n", faddr, *dwptr);
                        break;
                }
-
-               /* Do sector protect. */
-               if (ha->fdt_unprotect_sec_cmd &&
-                   ((faddr & rest_addr) == rest_addr))
-                       qla24xx_write_flash_dword(ha,
-                           ha->fdt_protect_sec_cmd,
-                           (fdata & 0xff00) | ((fdata << 16) &
-                           0xff0000) | ((fdata >> 16) & 0xff));
        }
 
+       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+           "Protect flash...\n");
        ret = qla24xx_protect_flash(vha);
-       if (ret != QLA_SUCCESS)
+       if (ret)
                ql_log(ql_log_warn, vha, 0x7099,
-                   "Unable to protect flash after update.\n");
+                   "Failed to protect flash\n");
 done:
        if (optrom)
                dma_free_coherent(&ha->pdev->dev,
@@ -1349,7 +1373,7 @@ done:
 }
 
 uint8_t *
-qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
        uint32_t i;
@@ -1368,27 +1392,30 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 }
 
 uint8_t *
-qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
-       uint32_t i;
-       uint32_t *dwptr;
        struct qla_hw_data *ha = vha->hw;
+       uint32_t *dwptr = buf;
+       uint32_t i;
 
        if (IS_P3P_TYPE(ha))
                return  buf;
 
        /* Dword reads to flash. */
-       dwptr = (uint32_t *)buf;
-       for (i = 0; i < bytes >> 2; i++, naddr++)
-               dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-                   nvram_data_addr(ha, naddr)));
+       naddr = nvram_data_addr(ha, naddr);
+       bytes >>= 2;
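+       /* Stop on the first read failure; store each dword little-endian. */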
+       for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+               if (qla24xx_read_flash_dword(ha, naddr, dwptr))
+                       break;
+               cpu_to_le32s(dwptr);
+       }
 
        return buf;
 }
 
 int
-qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
        int ret, stat;
@@ -1422,14 +1449,14 @@ qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 }
 
 int
-qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
-       int ret;
-       uint32_t i;
-       uint32_t *dwptr;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       uint32_t *dwptr = buf;
+       uint32_t i;
+       int ret;
 
        ret = QLA_SUCCESS;
 
@@ -1446,11 +1473,10 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
        qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
 
        /* Dword writes to flash. */
-       dwptr = (uint32_t *)buf;
-       for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
-               ret = qla24xx_write_flash_dword(ha,
-                   nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
-               if (ret != QLA_SUCCESS) {
+       naddr = nvram_data_addr(ha, naddr);
+       bytes >>= 2;
+       for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+               if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
                        ql_dbg(ql_dbg_user, vha, 0x709a,
                            "Unable to program nvram address=%x data=%x.\n",
                            naddr, *dwptr);
@@ -1470,31 +1496,34 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 }
 
 uint8_t *
-qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
-       uint32_t i;
-       uint32_t *dwptr;
        struct qla_hw_data *ha = vha->hw;
+       uint32_t *dwptr = buf;
+       uint32_t i;
 
        /* Dword reads to flash. */
-       dwptr = (uint32_t *)buf;
-       for (i = 0; i < bytes >> 2; i++, naddr++)
-               dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
-                   flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
+       naddr = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr);
+       bytes >>= 2;
+       for (i = 0; i < bytes; i++, naddr++, dwptr++) {
+               if (qla24xx_read_flash_dword(ha, naddr, dwptr))
+                       break;
+
+               cpu_to_le32s(dwptr);
+       }
 
        return buf;
 }
 
+#define RMW_BUFFER_SIZE        (64 * 1024)
 int
-qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
     uint32_t bytes)
 {
        struct qla_hw_data *ha = vha->hw;
-#define RMW_BUFFER_SIZE        (64 * 1024)
-       uint8_t *dbuf;
+       uint8_t *dbuf = vmalloc(RMW_BUFFER_SIZE);
 
-       dbuf = vmalloc(RMW_BUFFER_SIZE);
        if (!dbuf)
                return QLA_MEMORY_ALLOC_FAILED;
        ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
@@ -1728,7 +1757,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
 {
        uint32_t led_select_value = 0;
 
-       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                goto out;
 
        if (ha->port_no == 0)
@@ -1749,13 +1778,14 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
        uint16_t orig_led_cfg[6];
        uint32_t led_10_value, led_43_value;
 
-       if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) &&
+           !IS_QLA28XX(ha))
                return;
 
        if (!ha->beacon_blink_led)
                return;
 
-       if (IS_QLA27XX(ha)) {
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
                qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
        } else if (IS_QLA2031(ha)) {
@@ -1845,7 +1875,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
                        return QLA_FUNCTION_FAILED;
                }
 
-               if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+               if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                        goto skip_gpio;
 
                spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1885,7 +1915,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
 
        ha->beacon_blink_led = 0;
 
-       if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+       if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
                goto set_fw_options;
 
        if (IS_QLA8031(ha) || IS_QLA81XX(ha))
@@ -2314,8 +2344,8 @@ qla2x00_resume_hba(struct scsi_qla_host *vha)
        scsi_unblock_requests(vha->host);
 }
 
-uint8_t *
-qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
        uint32_t addr, midpoint;
@@ -2349,12 +2379,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 }
 
 int
-qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
 
        int rval;
-       uint8_t man_id, flash_id, sec_number, data;
+       uint8_t man_id, flash_id, sec_number, *data;
        uint16_t wd;
        uint32_t addr, liter, sec_mask, rest_addr;
        struct qla_hw_data *ha = vha->hw;
@@ -2483,7 +2513,7 @@ update_flash:
 
                for (addr = offset, liter = 0; liter < length; liter++,
                    addr++) {
-                       data = buf[liter];
+                       data = buf + liter;
                        /* Are we at the beginning of a sector? */
                        if ((addr & rest_addr) == 0) {
                                if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
@@ -2551,7 +2581,7 @@ update_flash:
                                }
                        }
 
-                       if (qla2x00_program_flash_address(ha, addr, data,
+                       if (qla2x00_program_flash_address(ha, addr, *data,
                            man_id, flash_id)) {
                                rval = QLA_FUNCTION_FAILED;
                                break;
@@ -2567,8 +2597,8 @@ update_flash:
        return rval;
 }
 
-uint8_t *
-qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
        struct qla_hw_data *ha = vha->hw;
@@ -2578,7 +2608,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
        set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
        /* Go with read. */
-       qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
+       qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
 
        /* Resume HBA. */
        clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2587,8 +2617,340 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
        return buf;
 }
 
+static int
+qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf,
+    uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf)
+{
+       uint32_t *p, check_sum = 0;
+       int i;
+
+       p = buf + buf_size_without_sfub;
+
+       /* Extract SFUB from end of file */
+       memcpy(sfub_buf, (uint8_t *)p,
+           sizeof(struct secure_flash_update_block));
+
+       for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++)
+               check_sum += p[i];
+
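+       /*
+        * The expected checksum is the two's complement of the sum of the
+        * SFUB dwords; it is stored in the dword immediately following the
+        * SFUB in the image.
+        */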
+       check_sum = (~check_sum) + 1;
+
+       if (check_sum != p[i]) {
+               ql_log(ql_log_warn, vha, 0x7097,
+                   "SFUB checksum failed, 0x%x, 0x%x\n",
+                   check_sum, p[i]);
+               return QLA_COMMAND_ERROR;
+       }
+
+       return QLA_SUCCESS;
+}
+
+static int
+qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
+    struct qla_flt_region *region)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_flt_header *flt;
+       struct qla_flt_region *flt_reg;
+       uint16_t cnt;
+       int rval = QLA_FUNCTION_FAILED;
+
+       if (!ha->flt)
+               return QLA_FUNCTION_FAILED;
+
+       flt = (struct qla_flt_header *)ha->flt;
+       flt_reg = (struct qla_flt_region *)&flt[1];
+       cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+
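+       /* Scan the FLT for a region whose start address matches. */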
+       for (; cnt; cnt--, flt_reg++) {
+               if (flt_reg->start == start) {
+                       memcpy((uint8_t *)region, flt_reg,
+                           sizeof(struct qla_flt_region));
+                       rval = QLA_SUCCESS;
+                       break;
+               }
+       }
+
+       return rval;
+}
+
+static int
+qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+    uint32_t dwords)
+{
+       struct qla_hw_data *ha = vha->hw;
+       ulong liter;
+       ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */
+       uint32_t sec_mask, rest_addr, fdata;
+       void *optrom = NULL;
+       dma_addr_t optrom_dma;
+       int rval;
+       struct secure_flash_update_block *sfub;
+       dma_addr_t sfub_dma;
+       uint32_t offset = faddr << 2;
+       uint32_t buf_size_without_sfub = 0;
+       struct qla_flt_region region;
+       bool reset_to_rom = false;
+       uint32_t risc_size, risc_attr = 0;
+       uint32_t *fw_array = NULL;
+
+       /* Retrieve region info - must be a start address passed in */
+       rval = qla28xx_get_flash_region(vha, offset, &region);
+
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Invalid address %x - not a region start address\n",
+                   offset);
+               goto done;
+       }
+
+       /* Allocate dma buffer for burst write */
+       optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+           &optrom_dma, GFP_KERNEL);
+       if (!optrom) {
+               ql_log(ql_log_warn, vha, 0x7095,
+                   "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE);
+               rval = QLA_COMMAND_ERROR;
+               goto done;
+       }
+
+       /*
+        * If adapter supports secure flash and region is secure
+        * extract secure flash update block (SFUB) and verify
+        */
+       if (ha->flags.secure_adapter && region.attribute) {
+
+               ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                   "Region %x is secure\n", region.code);
+
+               if (region.code == FLT_REG_FW ||
+                   region.code == FLT_REG_FW_SEC_27XX) {
+                       fw_array = dwptr;
+
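+                       /*
+                        * Walk the image pieces to find where the SFUB
+                        * begins: dword 3 of each fw segment and dword 2
+                        * of each dump template hold that piece's length
+                        * in dwords.
+                        */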
+                       /* 1st fw array */
+                       risc_size = be32_to_cpu(fw_array[3]);
+                       risc_attr = be32_to_cpu(fw_array[9]);
+
+                       buf_size_without_sfub = risc_size;
+                       fw_array += risc_size;
+
+                       /* 2nd fw array */
+                       risc_size = be32_to_cpu(fw_array[3]);
+
+                       buf_size_without_sfub += risc_size;
+                       fw_array += risc_size;
+
+                       /* 1st dump template */
+                       risc_size = be32_to_cpu(fw_array[2]);
+
+                       /* skip header and ignore checksum */
+                       buf_size_without_sfub += risc_size;
+                       fw_array += risc_size;
+
+                       if (risc_attr & BIT_9) {
+                               /* 2nd dump template */
+                               risc_size = be32_to_cpu(fw_array[2]);
+
+                               /* skip header and ignore checksum */
+                               buf_size_without_sfub += risc_size;
+                               fw_array += risc_size;
+                       }
+               } else {
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                           "Secure region %x not supported\n",
+                           region.code);
+                       rval = QLA_COMMAND_ERROR;
+                       goto done;
+               }
+
+               sfub = dma_alloc_coherent(&ha->pdev->dev,
+                       sizeof(struct secure_flash_update_block), &sfub_dma,
+                       GFP_KERNEL);
+               if (!sfub) {
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "Unable to allocate memory for SFUB\n");
+                       rval = QLA_COMMAND_ERROR;
+                       goto done;
+               }
+
+               rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords,
+                       buf_size_without_sfub, (uint8_t *)sfub);
+
+               if (rval != QLA_SUCCESS)
+                       goto done;
+
+               ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                   "SFUB extract and verify successful\n");
+       }
+
+       rest_addr = (ha->fdt_block_size >> 2) - 1;
+       sec_mask = ~rest_addr;
+
+       /* Lock semaphore */
+       rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK);
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Unable to lock flash semaphore.");
+               goto done;
+       }
+
+       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+           "Unprotect flash...\n");
+       rval = qla24xx_unprotect_flash(vha);
+       if (rval) {
+               qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+               ql_log(ql_log_warn, vha, 0x7096, "Failed unprotect flash\n");
+               goto done;
+       }
+
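+       /* First pass: erase the sectors to be programmed. */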
+       for (liter = 0; liter < dwords; liter++, faddr++) {
+               fdata = (faddr & sec_mask) << 2;
+
+               /* If start of sector */
+               if (!(faddr & rest_addr)) {
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+                           "Erase sector %#x...\n", faddr);
+                       rval = qla24xx_erase_sector(vha, fdata);
+                       if (rval) {
+                               ql_dbg(ql_dbg_user, vha, 0x7007,
+                                   "Failed erase sector %#x\n", faddr);
+                               goto write_protect;
+                       }
+               }
+       }
+
+       if (ha->flags.secure_adapter) {
+               /*
+                * If adapter supports secure flash but FW doesn't,
+                * disable write protect, release semaphore and reset
+                * chip to execute ROM code in order to update region securely
+                */
+               if (!ha->flags.secure_fw) {
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                           "Disable Write and Release Semaphore.");
+                       rval = qla24xx_protect_flash(vha);
+                       if (rval != QLA_SUCCESS) {
+                               qla81xx_fac_semaphore_access(vha,
+                                       FAC_SEMAPHORE_UNLOCK);
+                               ql_log(ql_log_warn, vha, 0xffff,
+                                   "Unable to protect flash.");
+                               goto done;
+                       }
+
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                           "Reset chip to ROM.");
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                       set_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+                       rval = qla2x00_wait_for_chip_reset(vha);
+                       if (rval != QLA_SUCCESS) {
+                               ql_log(ql_log_warn, vha, 0xffff,
+                                   "Unable to reset to ROM code.");
+                               goto done;
+                       }
+                       reset_to_rom = true;
+                       ha->flags.fac_supported = 0;
+
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                           "Lock Semaphore");
+                       rval = qla2xxx_write_remote_register(vha,
+                           FLASH_SEMAPHORE_REGISTER_ADDR, 0x00020002);
+                       if (rval != QLA_SUCCESS) {
+                               ql_log(ql_log_warn, vha, 0xffff,
+                                   "Unable to lock flash semaphore.");
+                               goto done;
+                       }
+
+                       /* Unprotect flash */
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                           "Enable Write.");
+                       rval = qla2x00_write_ram_word(vha, 0x7ffd0101, 0);
+                       if (rval) {
+                               ql_log(ql_log_warn, vha, 0x7096,
+                                   "Failed unprotect flash\n");
+                               goto done;
+                       }
+               }
+
+               /* If region is secure, send Secure Flash MB Cmd */
+               if (region.attribute && buf_size_without_sfub) {
+                       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
+                           "Sending Secure Flash MB Cmd\n");
+                       rval = qla28xx_secure_flash_update(vha, 0, region.code,
+                               buf_size_without_sfub, sfub_dma,
+                               sizeof(struct secure_flash_update_block));
+                       if (rval != QLA_SUCCESS) {
+                               ql_log(ql_log_warn, vha, 0xffff,
+                                   "Secure Flash MB Cmd failed %x.", rval);
+                               goto write_protect;
+                       }
+               }
+
+       }
+
+       /* re-init flash offset */
+       faddr = offset >> 2;
+
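+       /* Second pass: program the data in DMA bursts. */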
+       for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
+               fdata = (faddr & sec_mask) << 2;
+
+               /* If smaller than a burst remaining */
+               if (dwords - liter < dburst)
+                       dburst = dwords - liter;
+
+               /* Copy to dma buffer */
+               memcpy(optrom, dwptr, dburst << 2);
+
+               /* Burst write */
+               ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+                   "Write burst (%#lx dwords)...\n", dburst);
+               rval = qla2x00_load_ram(vha, optrom_dma,
+                   flash_data_addr(ha, faddr), dburst);
+               if (rval != QLA_SUCCESS) {
+                       ql_log(ql_log_warn, vha, 0x7097,
+                           "Failed burst write at %x (%p/%#llx)...\n",
+                           flash_data_addr(ha, faddr), optrom,
+                           (u64)optrom_dma);
+                       break;
+               }
+
+               liter += dburst - 1;
+               faddr += dburst - 1;
+               dwptr += dburst - 1;
+               continue;
+       }
+
+write_protect:
+       ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095,
+           "Protect flash...\n");
+       rval = qla24xx_protect_flash(vha);
+       if (rval) {
+               qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
+               ql_log(ql_log_warn, vha, 0x7099,
+                   "Failed protect flash\n");
+       }
+
+       if (reset_to_rom) {
+               /* Schedule DPC to restart the RISC */
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               qla2xxx_wake_dpc(vha);
+
+               rval = qla2x00_wait_for_hba_online(vha);
+               if (rval != QLA_SUCCESS)
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "Adapter did not come out of reset\n");
+       }
+
+done:
+       if (optrom)
+               dma_free_coherent(&ha->pdev->dev,
+                   OPTROM_BURST_SIZE, optrom, optrom_dma);
+
+       return rval;
+}
+
 int
-qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
        int rval;
@@ -2599,8 +2961,12 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
        set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 
        /* Go with write. */
-       rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
-           length >> 2);
+       if (IS_QLA28XX(ha))
+               rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
+                   offset >> 2, length >> 2);
+       else
+               rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
+                   offset >> 2, length >> 2);
 
        clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
        scsi_unblock_requests(vha->host);
@@ -2608,8 +2974,8 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
        return rval;
 }
 
-uint8_t *
-qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+void *
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
     uint32_t offset, uint32_t length)
 {
        int rval;
@@ -2620,7 +2986,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
        struct qla_hw_data *ha = vha->hw;
 
        if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
-           IS_QLA27XX(ha))
+           IS_QLA27XX(ha) || IS_QLA28XX(ha))
                goto try_fast;
        if (offset & 0xfff)
                goto slow_read;
@@ -2628,6 +2994,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
                goto slow_read;
 
 try_fast:
+       if (offset & 0xff)
+               goto slow_read;
        optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
            &optrom_dma, GFP_KERNEL);
        if (!optrom) {
@@ -2874,7 +3242,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
                    "Dumping fw "
                    "ver from flash:.\n");
                ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
-                   (uint8_t *)dbyte, 8);
+                   dbyte, 32);
 
                if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
                    dcode[2] == 0xffff && dcode[3] == 0xffff) ||
@@ -2905,8 +3273,8 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 {
        int ret = QLA_SUCCESS;
        uint32_t pcihdr, pcids;
-       uint32_t *dcode;
-       uint8_t *bcode;
+       uint32_t *dcode = mbuf;
+       uint8_t *bcode = mbuf;
        uint8_t code_type, last_image;
        struct qla_hw_data *ha = vha->hw;
 
@@ -2918,17 +3286,14 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
        memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 
-       dcode = mbuf;
-
        /* Begin with first PCI expansion ROM header. */
        pcihdr = ha->flt_region_boot << 2;
        last_image = 1;
        do {
                /* Verify PCI expansion ROM header. */
-               ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
-                   0x20 * 4);
+               ha->isp_ops->read_optrom(vha, dcode, pcihdr, 0x20 * 4);
                bcode = mbuf + (pcihdr % 4);
-               if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+               if (memcmp(bcode, "\x55\xaa", 2)) {
                        /* No signature */
                        ql_log(ql_log_fatal, vha, 0x0154,
                            "No matching ROM signature.\n");
@@ -2939,13 +3304,11 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
                /* Locate PCI data structure. */
                pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
 
-               ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
-                   0x20 * 4);
+               ha->isp_ops->read_optrom(vha, dcode, pcids, 0x20 * 4);
                bcode = mbuf + (pcihdr % 4);
 
                /* Validate signature of PCI data structure. */
-               if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
-                   bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+               if (memcmp(bcode, "PCIR", 4)) {
                        /* Incorrect header. */
                        ql_log(ql_log_fatal, vha, 0x0155,
                            "PCI data struct not found pcir_adr=%x.\n", pcids);
@@ -2996,8 +3359,7 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        /* Read firmware image information. */
        memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
        dcode = mbuf;
-       ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
-           0x20);
+       ha->isp_ops->read_optrom(vha, dcode, ha->flt_region_fw << 2, 0x20);
        bcode = mbuf + (pcihdr % 4);
 
        /* Validate signature of PCI data structure. */
@@ -3019,15 +3381,14 @@ int
 qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 {
        int ret = QLA_SUCCESS;
-       uint32_t pcihdr, pcids;
-       uint32_t *dcode;
-       uint8_t *bcode;
+       uint32_t pcihdr = 0, pcids = 0;
+       uint32_t *dcode = mbuf;
+       uint8_t *bcode = mbuf;
        uint8_t code_type, last_image;
        int i;
        struct qla_hw_data *ha = vha->hw;
        uint32_t faddr = 0;
-
-       pcihdr = pcids = 0;
+       struct active_regions active_regions = { };
 
        if (IS_P3P_TYPE(ha))
                return ret;
@@ -3040,18 +3401,19 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
        memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
 
-       dcode = mbuf;
        pcihdr = ha->flt_region_boot << 2;
-       if (IS_QLA27XX(ha) &&
-           qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
-               pcihdr = ha->flt_region_boot_sec << 2;
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+               qla27xx_get_active_image(vha, &active_regions);
+               if (active_regions.global == QLA27XX_SECONDARY_IMAGE) {
+                       pcihdr = ha->flt_region_boot_sec << 2;
+               }
+       }
 
-       last_image = 1;
        do {
                /* Verify PCI expansion ROM header. */
                qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
                bcode = mbuf + (pcihdr % 4);
-               if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+               if (memcmp(bcode, "\x55\xaa", 2)) {
                        /* No signature */
                        ql_log(ql_log_fatal, vha, 0x0059,
                            "No matching ROM signature.\n");
@@ -3066,11 +3428,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
                bcode = mbuf + (pcihdr % 4);
 
                /* Validate signature of PCI data structure. */
-               if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
-                   bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+               if (memcmp(bcode, "PCIR", 4)) {
                        /* Incorrect header. */
                        ql_log(ql_log_fatal, vha, 0x005a,
                            "PCI data struct not found pcir_adr=%x.\n", pcids);
+                       ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32);
                        ret = QLA_FUNCTION_FAILED;
                        break;
                }
@@ -3117,30 +3479,24 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 
        /* Read firmware image information. */
        memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
-       dcode = mbuf;
        faddr = ha->flt_region_fw;
-       if (IS_QLA27XX(ha) &&
-           qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
-               faddr = ha->flt_region_fw_sec;
-
-       qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
-       for (i = 0; i < 4; i++)
-               dcode[i] = be32_to_cpu(dcode[i]);
+       if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+               qla27xx_get_active_image(vha, &active_regions);
+               if (active_regions.global == QLA27XX_SECONDARY_IMAGE)
+                       faddr = ha->flt_region_fw_sec;
+       }
 
-       if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
-           dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
-           (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
-           dcode[3] == 0)) {
+       qla24xx_read_flash_data(vha, dcode, faddr, 8);
+       if (qla24xx_risc_firmware_invalid(dcode)) {
                ql_log(ql_log_warn, vha, 0x005f,
                    "Unrecognized fw revision at %x.\n",
                    ha->flt_region_fw * 4);
+               ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
        } else {
-               ha->fw_revision[0] = dcode[0];
-               ha->fw_revision[1] = dcode[1];
-               ha->fw_revision[2] = dcode[2];
-               ha->fw_revision[3] = dcode[3];
+               for (i = 0; i < 4; i++)
+                       ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
                ql_dbg(ql_dbg_init, vha, 0x0060,
-                   "Firmware revision %d.%d.%d (%x).\n",
+                   "Firmware revision (flash) %u.%u.%u (%x).\n",
                    ha->fw_revision[0], ha->fw_revision[1],
                    ha->fw_revision[2], ha->fw_revision[3]);
        }
@@ -3152,20 +3508,17 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
        }
 
        memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
-       dcode = mbuf;
-       ha->isp_ops->read_optrom(vha, (uint8_t *)dcode,
-           ha->flt_region_gold_fw << 2, 32);
-
-       if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
-           dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
+       faddr = ha->flt_region_gold_fw;
+       qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+       if (qla24xx_risc_firmware_invalid(dcode)) {
                ql_log(ql_log_warn, vha, 0x0056,
-                   "Unrecognized golden fw at 0x%x.\n",
-                   ha->flt_region_gold_fw * 4);
+                   "Unrecognized golden fw at %#x.\n", faddr);
+               ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32);
                return ret;
        }
 
-       for (i = 4; i < 8; i++)
-               ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]);
+       for (i = 0; i < 4; i++)
+               ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
 
        return ret;
 }
@@ -3237,7 +3590,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
        fcp_prio_addr = ha->flt_region_fcp_prio;
 
        /* first read the fcp priority data header from flash */
-       ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+       ha->isp_ops->read_optrom(vha, ha->fcp_prio_cfg,
                        fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
 
        if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
@@ -3248,7 +3601,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
        len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
        max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
 
-       ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+       ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
                        fcp_prio_addr << 2, (len < max_len ? len : max_len));
 
        /* revalidate the entire FCP priority config data, including entries */
index 582d166..3eeae72 100644 (file)
@@ -184,6 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
        /* Send marker if required */
        if (unlikely(vha->marker_needed != 0)) {
                int rc = qla2x00_issue_marker(vha, vha_locked);
+
                if (rc != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe03d,
                            "qla_target(%d): issue_marker() failed\n",
@@ -557,6 +558,7 @@ static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
        struct imm_ntfy_from_isp *ntfy, int type)
 {
        struct qla_work_evt *e;
+
        e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
        if (!e)
                return QLA_FUNCTION_FAILED;
@@ -680,7 +682,6 @@ done:
 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
        fc_port_t *t;
-       unsigned long flags;
 
        switch (e->u.nack.type) {
        case SRB_NACK_PRLI:
@@ -693,24 +694,19 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
                if (t) {
                        ql_log(ql_log_info, vha, 0xd034,
                            "%s create sess success %p", __func__, t);
-                       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                        /* create sess has an extra kref */
                        vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
-                       spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
                }
                break;
        }
        qla24xx_async_notify_ack(vha, e->u.nack.fcport,
-           (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
+           (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
 }
 
 void qla24xx_delete_sess_fn(struct work_struct *work)
 {
        fc_port_t *fcport = container_of(work, struct fc_port, del_work);
        struct qla_hw_data *ha = fcport->vha->hw;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
        if (fcport->se_sess) {
                ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -718,7 +714,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
        } else {
                qlt_unreg_sess(fcport);
        }
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 /*
@@ -787,8 +782,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
                    fcport->port_name, sess->loop_id);
                sess->local = 0;
        }
-       ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+       ha->tgt.tgt_ops->put_sess(sess);
 }
 
 /*
@@ -980,6 +976,8 @@ void qlt_free_session_done(struct work_struct *work)
                sess->send_els_logo);
 
        if (!IS_SW_RESV_ADDR(sess->d_id)) {
+               qla2x00_mark_device_lost(vha, sess, 0, 0);
+
                if (sess->send_els_logo) {
                        qlt_port_logo_t logo;
 
@@ -1076,6 +1074,7 @@ void qlt_free_session_done(struct work_struct *work)
                struct qlt_plogi_ack_t *con =
                    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
                struct imm_ntfy_from_isp *iocb;
+
                own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
                if (con) {
@@ -1160,8 +1159,6 @@ void qlt_unreg_sess(struct fc_port *sess)
        if (sess->se_sess)
                vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
-       qla2x00_mark_device_lost(vha, sess, 0, 0);
-
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
        sess->disc_state = DSC_DELETE_PEND;
        sess->last_rscn_gen = sess->rscn_gen;
@@ -1329,6 +1326,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
        res = -ENOENT;
        for (i = 0; i < entries; i++) {
                struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+
                if ((gid->al_pa == s_id[2]) &&
                    (gid->area == s_id[1]) &&
                    (gid->domain == s_id[0])) {
@@ -2331,14 +2329,14 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
                ctio->u.status1.scsi_status |=
                    cpu_to_le16(SS_RESIDUAL_UNDER);
 
-       /* Response code and sense key */
-       put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
-           (&ctio->u.status1.sense_data)[0]);
+       /* Fixed format sense data. */
+       ctio->u.status1.sense_data[0] = 0x70;
+       ctio->u.status1.sense_data[2] = sense_key;
        /* Additional sense length */
-       put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+       ctio->u.status1.sense_data[7] = 0xa;
        /* ASC and ASCQ */
-       put_unaligned_le32(((asc << 24) | (ascq << 16)),
-           (&ctio->u.status1.sense_data)[3]);
+       ctio->u.status1.sense_data[12] = asc;
+       ctio->u.status1.sense_data[13] = ascq;
 
        /* Memory Barrier */
        wmb();
@@ -2387,7 +2385,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
                case ELS_PRLO:
                case ELS_TPRLO:
                        ql_dbg(ql_dbg_disc, vha, 0x2106,
-                           "TM response logo %phC status %#x state %#x",
+                           "TM response logo %8phC status %#x state %#x",
                            mcmd->sess->port_name, mcmd->fc_tm_rsp,
                            mcmd->flags);
                        qlt_schedule_sess_for_deletion(mcmd->sess);
@@ -2485,6 +2483,7 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
 {
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;
+
        if (!cmd->sg_mapped)
                return;
 
@@ -2635,7 +2634,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
 {
        int cnt;
-       uint32_t *dword_ptr;
+       struct dsd64 *cur_dsd;
 
        /* Build continuation packets */
        while (prm->seg_cnt > 0) {
@@ -2656,19 +2655,13 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
                cont_pkt64->sys_define = 0;
 
                cont_pkt64->entry_type = CONTINUE_A64_TYPE;
-               dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
+               cur_dsd = cont_pkt64->dsd;
 
                /* Load continuation entry data segments */
                for (cnt = 0;
                    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
                    cnt++, prm->seg_cnt--) {
-                       *dword_ptr++ =
-                           cpu_to_le32(lower_32_bits
-                               (sg_dma_address(prm->sg)));
-                       *dword_ptr++ = cpu_to_le32(upper_32_bits
-                           (sg_dma_address(prm->sg)));
-                       *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-
+                       append_dsd64(&cur_dsd, prm->sg);
                        prm->sg = sg_next(prm->sg);
                }
        }
@@ -2681,13 +2674,13 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
 {
        int cnt;
-       uint32_t *dword_ptr;
+       struct dsd64 *cur_dsd;
        struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
 
        pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
 
        /* Setup packet address segment pointer */
-       dword_ptr = pkt24->u.status0.dseg_0_address;
+       cur_dsd = &pkt24->u.status0.dsd;
 
        /* Set total data segment count */
        if (prm->seg_cnt)
@@ -2695,8 +2688,8 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
 
        if (prm->seg_cnt == 0) {
                /* No data transfer */
-               *dword_ptr++ = 0;
-               *dword_ptr = 0;
+               cur_dsd->address = 0;
+               cur_dsd->length = 0;
                return;
        }
 
@@ -2706,14 +2699,7 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
        for (cnt = 0;
            (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
            cnt++, prm->seg_cnt--) {
-               *dword_ptr++ =
-                   cpu_to_le32(lower_32_bits(sg_dma_address(prm->sg)));
-
-               *dword_ptr++ = cpu_to_le32(upper_32_bits(
-                       sg_dma_address(prm->sg)));
-
-               *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
-
+               append_dsd64(&cur_dsd, prm->sg);
                prm->sg = sg_next(prm->sg);
        }
 
@@ -3037,7 +3023,7 @@ qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
 static inline int
 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
 {
-       uint32_t                *cur_dsd;
+       struct dsd64            *cur_dsd;
        uint32_t                transfer_length = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
@@ -3183,12 +3169,11 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
 
        qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
 
-       pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
-       pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+       put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
        pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
        if (!bundling) {
-               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+               cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
        } else {
                /*
                 * Configure Bundling if we need to fetch interlaving
@@ -3198,7 +3183,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count =
                        cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
-               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+               cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
        }
 
        /* Finish the common fields of CRC pkt */
@@ -3231,7 +3216,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
                /* Walks dif segments */
                pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
 
-               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+               cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
                if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
                        prm->prot_seg_cnt, cmd))
                        goto crc_queuing_error;
@@ -3263,7 +3248,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                cmd->state = QLA_TGT_STATE_PROCESSED;
-               qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
                return 0;
        }
 
@@ -3292,7 +3276,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                 * previous life, just abort the processing.
                 */
                cmd->state = QLA_TGT_STATE_PROCESSED;
-               qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
                ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
                        "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
                        vha->flags.online, qla2x00_reset_active(vha),
@@ -3384,9 +3367,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 
 
        cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
-       spin_lock(&cmd->cmd_lock);
        cmd->cmd_sent_to_fw = 1;
-       spin_unlock(&cmd->cmd_lock);
        cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
 
        /* Memory Barrier */
@@ -3433,8 +3414,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
                 * Either the port is not online or this request was from
                 * previous life, just abort the processing.
                 */
-               cmd->state = QLA_TGT_STATE_NEED_DATA;
-               qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+               cmd->aborted = 1;
+               cmd->write_data_transferred = 0;
+               cmd->state = QLA_TGT_STATE_DATA_IN;
+               vha->hw->tgt.tgt_ops->handle_data(cmd);
                ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
                        "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
                        vha->flags.online, qla2x00_reset_active(vha),
@@ -3465,9 +3448,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
                qlt_load_data_segments(&prm);
 
        cmd->state = QLA_TGT_STATE_NEED_DATA;
-       spin_lock(&cmd->cmd_lock);
        cmd->cmd_sent_to_fw = 1;
-       spin_unlock(&cmd->cmd_lock);
        cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
 
        /* Memory Barrier */
@@ -3646,33 +3627,11 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *imm, int ha_locked)
 {
-       unsigned long flags = 0;
        int rc;
 
-       if (ha_locked) {
-               rc = __qlt_send_term_imm_notif(vha, imm);
-
-#if 0  /* Todo  */
-               if (rc == -ENOMEM)
-                       qlt_alloc_qfull_cmd(vha, imm, 0, 0);
-#else
-               if (rc) {
-               }
-#endif
-               goto done;
-       }
-
-       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       WARN_ON_ONCE(!ha_locked);
        rc = __qlt_send_term_imm_notif(vha, imm);
-
-#if 0  /* Todo */
-       if (rc == -ENOMEM)
-               qlt_alloc_qfull_cmd(vha, imm, 0, 0);
-#endif
-
-done:
-       if (!ha_locked)
-               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+       pr_debug("rc = %d\n", rc);
 }
 
 /*
@@ -3913,6 +3872,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
 
        if (ctio != NULL) {
                struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+
                term = !(c->flags &
                    cpu_to_le16(OF_TERM_EXCH));
        } else
@@ -3977,39 +3937,6 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
        return cmd;
 }
 
-/* hardware_lock should be held by caller. */
-void
-qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
-{
-       struct qla_hw_data *ha = vha->hw;
-
-       if (cmd->sg_mapped)
-               qlt_unmap_sg(vha, cmd);
-
-       /* TODO: fix debug message type and ids. */
-       if (cmd->state == QLA_TGT_STATE_PROCESSED) {
-               ql_dbg(ql_dbg_io, vha, 0xff00,
-                   "HOST-ABORT: state=PROCESSED.\n");
-       } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-               cmd->write_data_transferred = 0;
-               cmd->state = QLA_TGT_STATE_DATA_IN;
-
-               ql_dbg(ql_dbg_io, vha, 0xff01,
-                   "HOST-ABORT: state=DATA_IN.\n");
-
-               ha->tgt.tgt_ops->handle_data(cmd);
-               return;
-       } else {
-               ql_dbg(ql_dbg_io, vha, 0xff03,
-                   "HOST-ABORT: state=BAD(%d).\n",
-                   cmd->state);
-               dump_stack();
-       }
-
-       cmd->trc_flags |= TRC_FLUSH;
-       ha->tgt.tgt_ops->free_cmd(cmd);
-}
-
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  */
@@ -4031,7 +3958,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
                return;
        }
 
-       cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+       cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
        if (cmd == NULL)
                return;
 
@@ -4240,11 +4167,9 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
        if (ret != 0)
                goto out_term;
        /*
-        * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
+        * Drop extra session reference from qlt_handle_cmd_for_atio().
         */
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        return;
 
 out_term:
@@ -4261,9 +4186,7 @@ out_term:
        target_free_tag(sess->se_sess, &cmd->se_cmd);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_do_work(struct work_struct *work)
@@ -4472,9 +4395,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
        if (!cmd) {
                ql_dbg(ql_dbg_io, vha, 0x3062,
                    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
-               spin_lock_irqsave(&ha->tgt.sess_lock, flags);
                ha->tgt.tgt_ops->put_sess(sess);
-               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
                return -EBUSY;
        }
 
@@ -4773,6 +4694,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
 
        list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
                uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
                if (op_key == key) {
                        op->aborted = true;
                        count++;
@@ -4781,6 +4703,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
 
        list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
                uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+
                if (cmd_key == key) {
                        cmd->aborted = 1;
                        count++;
@@ -5051,6 +4974,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                if (sess != NULL) {
                        bool delete = false;
                        int sec;
+
                        spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
                        switch (sess->fw_login_state) {
                        case DSC_LS_PLOGI_PEND:
@@ -5203,6 +5127,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
        case ELS_ADISC:
        {
                struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
                if (tgt->link_reinit_iocb_pending) {
                        qlt_send_notify_ack(ha->base_qpair,
                            &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
@@ -5266,6 +5191,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
        case IMM_NTFY_LIP_LINK_REINIT:
        {
                struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
                    "qla_target(%d): LINK REINIT (loop %#x, "
                    "subcode %x)\n", vha->vp_idx,
@@ -5492,11 +5418,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
        se_sess = sess->se_sess;
 
        tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
-       if (tag < 0)
-               return;
-
-       cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
-       if (!cmd) {
+       if (tag < 0) {
                ql_dbg(ql_dbg_io, vha, 0x3009,
                        "qla_target(%d): %s: Allocation of cmd failed\n",
                        vha->vp_idx, __func__);
@@ -5511,6 +5433,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
                return;
        }
 
+       cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
        memset(cmd, 0, sizeof(struct qla_tgt_cmd));
 
        qlt_incr_num_pend_cmds(vha);
@@ -5820,8 +5743,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
        struct qla_tgt_mgmt_cmd *mcmd;
        struct qla_hw_data *ha = vha->hw;
 
-       mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
-           pkt->handle, pkt);
+       mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
        if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
                ql_dbg(ql_dbg_async, vha, 0xe064,
                    "qla_target(%d): ABTS Comp without mcmd\n",
@@ -5883,6 +5805,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+
                qlt_do_ctio_completion(vha, rsp, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
@@ -5893,6 +5816,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
        {
                struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
                int rc;
+
                if (atio->u.isp2x.status !=
                    cpu_to_le16(ATIO_CDB_VALID)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -5941,6 +5865,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
        case CONTINUE_TGT_IO_TYPE:
        {
                struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
                qlt_do_ctio_completion(vha, rsp, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
@@ -5950,6 +5875,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
        case CTIO_A64_TYPE:
        {
                struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+
                qlt_do_ctio_completion(vha, rsp, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
@@ -5964,6 +5890,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
        case NOTIFY_ACK_TYPE:
                if (tgt->notify_ack_expected > 0) {
                        struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
                        ql_dbg(ql_dbg_tgt, vha, 0xe036,
                            "NOTIFY_ACK seq %08x status %x\n",
                            le16_to_cpu(entry->u.isp2x.seq_id),
@@ -6239,6 +6166,7 @@ retry:
 
                if (rc == -ENOENT) {
                        qlt_port_logo_t logo;
+
                        sid_to_portid(s_id, &logo.id);
                        logo.cmd_count = 1;
                        qlt_send_first_logo(vha, &logo);
@@ -6318,17 +6246,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
        }
 
        rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
-       ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
+       ha->tgt.tgt_ops->put_sess(sess);
+
        if (rc != 0)
                goto out_term;
        return;
 
 out_term2:
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
        if (sess)
                ha->tgt.tgt_ops->put_sess(sess);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
        spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -6386,9 +6316,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
            scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
 
        rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-       ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 
+       ha->tgt.tgt_ops->put_sess(sess);
+
        if (rc != 0)
                goto out_term;
        return;
@@ -6499,6 +6430,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
                unsigned long flags;
 
                struct qla_qpair *qpair = ha->queue_pair_map[i];
+
                h = &tgt->qphints[i + 1];
                INIT_LIST_HEAD(&h->hint_elem);
                if (qpair) {
@@ -6937,7 +6869,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
        RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
 
        if (ha->flags.msix_enabled) {
-               if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        if (IS_QLA2071(ha)) {
                                /* 4 ports Baker: Enable Interrupt Handshake */
                                icb->msix_atio = 0;
@@ -6952,7 +6884,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
                }
        } else {
                /* INTx|MSI */
-               if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        icb->msix_atio = 0;
                        icb->firmware_options_2 |= BIT_26;
                        ql_dbg(ql_dbg_init, vha, 0xf072,
@@ -7201,7 +7133,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
-       if  ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if  ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+           IS_QLA28XX(ha)) {
                ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
                ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
        } else {
@@ -7329,7 +7262,10 @@ qlt_mem_free(struct qla_hw_data *ha)
                    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
                    ha->tgt.atio_dma);
        }
+       ha->tgt.atio_ring = NULL;
+       ha->tgt.atio_dma = 0;
        kfree(ha->tgt.tgt_vp_map);
+       ha->tgt.tgt_vp_map = NULL;
 }
 
 /* vport_slock to be held by the caller */
@@ -7413,6 +7349,9 @@ int __init qlt_init(void)
 {
        int ret;
 
+       BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+       BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+
        if (!qlt_parse_ini_mode()) {
                ql_log(ql_log_fatal, NULL, 0xe06b,
                    "qlt_parse_ini_mode() failed\n");
index f3de750..89ceffa 100644
@@ -29,6 +29,7 @@
 #define __QLA_TARGET_H
 
 #include "qla_def.h"
+#include "qla_dsd.h"
 
 /*
  * Must be changed on any change in any initiator visible interfaces or
@@ -224,12 +225,7 @@ struct ctio_to_2xxx {
        uint16_t reserved_1[3];
        uint16_t scsi_status;
        uint32_t transfer_length;
-       uint32_t dseg_0_address;        /* Data segment 0 address. */
-       uint32_t dseg_0_length;         /* Data segment 0 length. */
-       uint32_t dseg_1_address;        /* Data segment 1 address. */
-       uint32_t dseg_1_length;         /* Data segment 1 length. */
-       uint32_t dseg_2_address;        /* Data segment 2 address. */
-       uint32_t dseg_2_length;         /* Data segment 2 length. */
+       struct dsd32 dsd[3];
 } __packed;
 #define ATIO_PATH_INVALID       0x07
 #define ATIO_CANT_PROV_CAP      0x16
@@ -429,10 +425,7 @@ struct ctio7_to_24xx {
                        uint32_t reserved2;
                        uint32_t transfer_length;
                        uint32_t reserved3;
-                       /* Data segment 0 address. */
-                       uint32_t dseg_0_address[2];
-                       /* Data segment 0 length. */
-                       uint32_t dseg_0_length;
+                       struct dsd64 dsd;
                } status0;
                struct {
                        uint16_t sense_length;
@@ -526,10 +519,10 @@ struct ctio_crc2_to_fw {
        uint32_t reserved5;
        __le32 transfer_length;         /* total fc transfer length */
        uint32_t reserved6;
-       __le32 crc_context_address[2];/* Data segment address. */
+       __le64   crc_context_address __packed; /* Data segment address. */
        uint16_t crc_context_len;       /* Data segment length. */
        uint16_t reserved_1;            /* MUST be set to 0. */
-} __packed;
+};
 
 /* CTIO Type CRC_x Status IOCB */
 struct ctio_crc_from_fw {
@@ -855,7 +848,7 @@ enum trace_flags {
        TRC_CTIO_ERR = BIT_11,
        TRC_CTIO_DONE = BIT_12,
        TRC_CTIO_ABORTED =  BIT_13,
-       TRC_CTIO_STRANGE= BIT_14,
+       TRC_CTIO_STRANGE = BIT_14,
        TRC_CMD_DONE = BIT_15,
        TRC_CMD_CHK_STOP = BIT_16,
        TRC_CMD_FREE = BIT_17,
@@ -889,10 +882,14 @@ struct qla_tgt_cmd {
        unsigned int term_exchg:1;
        unsigned int cmd_sent_to_fw:1;
        unsigned int cmd_in_wq:1;
-       unsigned int aborted:1;
-       unsigned int data_work:1;
-       unsigned int data_work_free:1;
-       unsigned int released:1;
+
+       /*
+        * This variable may be set from outside the LIO and I/O completion
+        * callback functions. Do not declare this member variable as a
+        * bitfield to avoid a read-modify-write operation when this variable
+        * is set.
+        */
+       unsigned int aborted;
 
        struct scatterlist *sg; /* cmd data buffer SG vector */
        int sg_cnt;             /* SG segments count */
@@ -1103,7 +1100,5 @@ extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
 
 void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
     uint8_t, uint8_t, uint8_t);
-extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
-    struct qla_tgt_cmd *);
 
 #endif /* __QLA_TARGET_H */
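The dsd conversion above (dsd32[3] replacing six dseg dwords in ctio_to_2xxx, dsd64 replacing the split 64-bit address plus length in ctio7_to_24xx) relies on the new qla_dsd.h header, which is not part of the hunks shown here. The sketch below is an assumed shape for those definitions, inferred from the removed open-coded writes in qlt_load_data_segments() and qlt_load_cont_data_segments(); it is illustrative, not a quote of the real header:

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Assumed layouts: same byte sizes as the fields they replace. */
struct dsd32 {
	__le32 address;
	__le32 length;
};

struct dsd64 {
	__le64 address;
	__le32 length;
} __packed;

/*
 * Assumed helper: emit one 64-bit data segment descriptor for a
 * scatterlist element and advance the cursor, mirroring the removed
 * lower_32_bits()/upper_32_bits()/sg_dma_len() sequence.
 */
static inline void append_dsd64(struct dsd64 **dsd, struct scatterlist *sg)
{
	put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
	(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
	(*dsd)++;
}

Since three dsd32 entries occupy the same 24 bytes as the six removed dwords, and one dsd64 the same 12 bytes as the removed address pair plus length, the IOCB sizes are unchanged, which is what the new BUILD_BUG_ON(sizeof(...) != 64) checks in qlt_init() assert at compile time.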
index 9e52500..de696a0 100644
@@ -7,103 +7,9 @@
 #include "qla_def.h"
 #include "qla_tmpl.h"
 
-/* note default template is in big endian */
-static const uint32_t ql27xx_fwdt_default_template[] = {
-       0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
-       0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x00000000, 0x00000000, 0x00000000,
-       0x00000000, 0x04010000, 0x14000000, 0x00000000,
-       0x02000000, 0x44000000, 0x09010000, 0x10000000,
-       0x00000000, 0x02000000, 0x01010000, 0x1c000000,
-       0x00000000, 0x02000000, 0x00600000, 0x00000000,
-       0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
-       0x02000000, 0x00600000, 0x00000000, 0xcc000000,
-       0x01010000, 0x1c000000, 0x00000000, 0x02000000,
-       0x10600000, 0x00000000, 0xd4000000, 0x01010000,
-       0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
-       0x00000060, 0xf0000000, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x00700000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x10700000, 0x041000c0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x40700000, 0x041000c0,
-       0x01010000, 0x1c000000, 0x00000000, 0x02000000,
-       0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
-       0x18000000, 0x00000000, 0x02000000, 0x007c0000,
-       0x040300c4, 0x00010000, 0x18000000, 0x00000000,
-       0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
-       0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
-       0x00000000, 0xc0000000, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x007c0000, 0x04200000,
-       0x0b010000, 0x18000000, 0x00000000, 0x02000000,
-       0x0c000000, 0x00000000, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
-       0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
-       0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x0a000000, 0x04200080, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x00300000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x10300000, 0x041000c0, 0x00010000, 0x18000000,
-       0x00000000, 0x02000000, 0x20300000, 0x041000c0,
-       0x00010000, 0x18000000, 0x00000000, 0x02000000,
-       0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
-       0x00000000, 0x02000000, 0x06010000, 0x1c000000,
-       0x00000000, 0x02000000, 0x01000000, 0x00000200,
-       0xff230200, 0x06010000, 0x1c000000, 0x00000000,
-       0x02000000, 0x02000000, 0x00001000, 0x00000000,
-       0x07010000, 0x18000000, 0x00000000, 0x02000000,
-       0x00000000, 0x01000000, 0x07010000, 0x18000000,
-       0x00000000, 0x02000000, 0x00000000, 0x02000000,
-       0x07010000, 0x18000000, 0x00000000, 0x02000000,
-       0x00000000, 0x03000000, 0x0d010000, 0x14000000,
-       0x00000000, 0x02000000, 0x00000000, 0xff000000,
-       0x10000000, 0x00000000, 0x00000080,
-};
-
-static inline void __iomem *
-qla27xx_isp_reg(struct scsi_qla_host *vha)
-{
-       return &vha->hw->iobase->isp24;
-}
+#define ISPREG(vha)    (&(vha)->hw->iobase->isp24)
+#define IOBAR(reg)     offsetof(typeof(*(reg)), iobase_addr)
+#define IOBASE(vha)    IOBAR(ISPREG(vha))
 
 static inline void
 qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -128,7 +34,6 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len)
 static inline void
 qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
 {
-
        if (buf && mem && size) {
                buf += *len;
                memcpy(buf, mem, size);
@@ -190,9 +95,9 @@ static inline void
 qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
        uint offset, uint32_t data, void *buf)
 {
-       __iomem void *window = (void __iomem *)reg + offset;
-
        if (buf) {
+               void __iomem *window = (void __iomem *)reg + offset;
+
                WRT_REG_DWORD(window, data);
        }
 }
@@ -205,7 +110,7 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
        void __iomem *window = (void __iomem *)reg + offset;
        void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
 
-       qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
+       qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
        while (count--) {
                qla27xx_insert32(addr, buf, len);
                readn(window, buf, len);
@@ -224,7 +129,7 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
 static inline struct qla27xx_fwdt_entry *
 qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
 {
-       return (void *)ent + ent->hdr.size;
+       return (void *)ent + le32_to_cpu(ent->hdr.size);
 }
 
 static struct qla27xx_fwdt_entry *
@@ -254,12 +159,14 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       ulong addr = le32_to_cpu(ent->t256.base_addr);
+       uint offset = ent->t256.pci_offset;
+       ulong count = le16_to_cpu(ent->t256.reg_count);
+       uint width = ent->t256.reg_width;
 
        ql_dbg(ql_dbg_misc, vha, 0xd200,
            "%s: rdio t1 [%lx]\n", __func__, *len);
-       qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
-           ent->t256.reg_count, ent->t256.reg_width, buf, len);
+       qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
 
        return qla27xx_next_entry(ent);
 }
@@ -268,12 +175,14 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       ulong addr = le32_to_cpu(ent->t257.base_addr);
+       uint offset = ent->t257.pci_offset;
+       ulong data = le32_to_cpu(ent->t257.write_data);
 
        ql_dbg(ql_dbg_misc, vha, 0xd201,
            "%s: wrio t1 [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
-       qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+       qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
+       qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
        return qla27xx_next_entry(ent);
 }
@@ -282,13 +191,17 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       uint banksel = ent->t258.banksel_offset;
+       ulong bank = le32_to_cpu(ent->t258.bank);
+       ulong addr = le32_to_cpu(ent->t258.base_addr);
+       uint offset = ent->t258.pci_offset;
+       uint count = le16_to_cpu(ent->t258.reg_count);
+       uint width = ent->t258.reg_width;
 
        ql_dbg(ql_dbg_misc, vha, 0xd202,
            "%s: rdio t2 [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
-       qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
-           ent->t258.reg_count, ent->t258.reg_width, buf, len);
+       qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
+       qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);
 
        return qla27xx_next_entry(ent);
 }
@@ -297,13 +210,17 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       ulong addr = le32_to_cpu(ent->t259.base_addr);
+       uint banksel = ent->t259.banksel_offset;
+       ulong bank = le32_to_cpu(ent->t259.bank);
+       uint offset = ent->t259.pci_offset;
+       ulong data = le32_to_cpu(ent->t259.write_data);
 
        ql_dbg(ql_dbg_misc, vha, 0xd203,
            "%s: wrio t2 [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
-       qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
-       qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+       qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
+       qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
+       qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
        return qla27xx_next_entry(ent);
 }
@@ -312,12 +229,12 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       uint offset = ent->t260.pci_offset;
 
        ql_dbg(ql_dbg_misc, vha, 0xd204,
            "%s: rdpci [%lx]\n", __func__, *len);
-       qla27xx_insert32(ent->t260.pci_offset, buf, len);
-       qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
+       qla27xx_insert32(offset, buf, len);
+       qla27xx_read_reg(ISPREG(vha), offset, buf, len);
 
        return qla27xx_next_entry(ent);
 }
@@ -326,11 +243,12 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       uint offset = ent->t261.pci_offset;
+       ulong data = le32_to_cpu(ent->t261.write_data);
 
        ql_dbg(ql_dbg_misc, vha, 0xd205,
            "%s: wrpci [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
+       qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
        return qla27xx_next_entry(ent);
 }
@@ -339,51 +257,50 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+       uint area = ent->t262.ram_area;
+       ulong start = le32_to_cpu(ent->t262.start_addr);
+       ulong end = le32_to_cpu(ent->t262.end_addr);
        ulong dwords;
-       ulong start;
-       ulong end;
 
        ql_dbg(ql_dbg_misc, vha, 0xd206,
            "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
-       start = ent->t262.start_addr;
-       end = ent->t262.end_addr;
 
-       if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+       if (area == T262_RAM_AREA_CRITICAL_RAM) {
                ;
-       } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+       } else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
                end = vha->hw->fw_memory_size;
                if (buf)
-                       ent->t262.end_addr = end;
-       } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+                       ent->t262.end_addr = cpu_to_le32(end);
+       } else if (area == T262_RAM_AREA_SHARED_RAM) {
                start = vha->hw->fw_shared_ram_start;
                end = vha->hw->fw_shared_ram_end;
                if (buf) {
-                       ent->t262.start_addr = start;
-                       ent->t262.end_addr = end;
+                       ent->t262.start_addr = cpu_to_le32(start);
+                       ent->t262.end_addr = cpu_to_le32(end);
                }
-       } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+       } else if (area == T262_RAM_AREA_DDR_RAM) {
                start = vha->hw->fw_ddr_ram_start;
                end = vha->hw->fw_ddr_ram_end;
                if (buf) {
-                       ent->t262.start_addr = start;
-                       ent->t262.end_addr = end;
+                       ent->t262.start_addr = cpu_to_le32(start);
+                       ent->t262.end_addr = cpu_to_le32(end);
                }
-       } else if (ent->t262.ram_area == T262_RAM_AREA_MISC) {
+       } else if (area == T262_RAM_AREA_MISC) {
                if (buf) {
-                       ent->t262.start_addr = start;
-                       ent->t262.end_addr = end;
+                       ent->t262.start_addr = cpu_to_le32(start);
+                       ent->t262.end_addr = cpu_to_le32(end);
                }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd022,
-                   "%s: unknown area %x\n", __func__, ent->t262.ram_area);
+                   "%s: unknown area %x\n", __func__, area);
                qla27xx_skip_entry(ent, buf);
                goto done;
        }
 
        if (end < start || start == 0 || end == 0) {
                ql_dbg(ql_dbg_misc, vha, 0xd023,
-                   "%s: unusable range (start=%x end=%x)\n", __func__,
-                   ent->t262.end_addr, ent->t262.start_addr);
+                   "%s: unusable range (start=%lx end=%lx)\n",
+                   __func__, start, end);
                qla27xx_skip_entry(ent, buf);
                goto done;
        }
@@ -402,13 +319,14 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+       uint type = ent->t263.queue_type;
        uint count = 0;
        uint i;
        uint length;
 
-       ql_dbg(ql_dbg_misc, vha, 0xd207,
-           "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
-       if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+       ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
+           "%s: getq(%x) [%lx]\n", __func__, type, *len);
+       if (type == T263_QUEUE_TYPE_REQ) {
                for (i = 0; i < vha->hw->max_req_queues; i++) {
                        struct req_que *req = vha->hw->req_q_map[i];
 
@@ -422,7 +340,7 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                                count++;
                        }
                }
-       } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+       } else if (type == T263_QUEUE_TYPE_RSP) {
                for (i = 0; i < vha->hw->max_rsp_queues; i++) {
                        struct rsp_que *rsp = vha->hw->rsp_q_map[i];
 
@@ -450,7 +368,7 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd026,
-                   "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
+                   "%s: unknown queue %x\n", __func__, type);
                qla27xx_skip_entry(ent, buf);
        }
 
@@ -496,12 +414,10 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-
-       ql_dbg(ql_dbg_misc, vha, 0xd209,
+       ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
            "%s: pause risc [%lx]\n", __func__, *len);
        if (buf)
-               qla24xx_pause_risc(reg, vha->hw);
+               qla24xx_pause_risc(ISPREG(vha), vha->hw);
 
        return qla27xx_next_entry(ent);
 }
@@ -522,11 +438,12 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       uint offset = ent->t267.pci_offset;
+       ulong data = le32_to_cpu(ent->t267.data);
 
        ql_dbg(ql_dbg_misc, vha, 0xd20b,
            "%s: dis intr [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+       qla27xx_write_reg(ISPREG(vha), offset, data, buf);
 
        return qla27xx_next_entry(ent);
 }
@@ -622,17 +539,16 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-       ulong dwords = ent->t270.count;
-       ulong addr = ent->t270.addr;
+       ulong addr = le32_to_cpu(ent->t270.addr);
+       ulong dwords = le32_to_cpu(ent->t270.count);
 
        ql_dbg(ql_dbg_misc, vha, 0xd20e,
            "%s: rdremreg [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+       qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf);
        while (dwords--) {
-               qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+               qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf);
                qla27xx_insert32(addr, buf, len);
-               qla27xx_read_reg(reg, 0xc4, buf, len);
+               qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
                addr += sizeof(uint32_t);
        }
 
@@ -643,15 +559,14 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-       ulong addr = ent->t271.addr;
-       ulong data = ent->t271.data;
+       ulong addr = le32_to_cpu(ent->t271.addr);
+       ulong data = le32_to_cpu(ent->t271.data);
 
        ql_dbg(ql_dbg_misc, vha, 0xd20f,
            "%s: wrremreg [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
-       qla27xx_write_reg(reg, 0xc4, data, buf);
-       qla27xx_write_reg(reg, 0xc0, addr, buf);
+       qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
+       qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
+       qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);
 
        return qla27xx_next_entry(ent);
 }
@@ -660,8 +575,8 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       ulong dwords = ent->t272.count;
-       ulong start = ent->t272.addr;
+       ulong dwords = le32_to_cpu(ent->t272.count);
+       ulong start = le32_to_cpu(ent->t272.addr);
 
        ql_dbg(ql_dbg_misc, vha, 0xd210,
            "%s: rdremram [%lx]\n", __func__, *len);
@@ -680,8 +595,8 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       ulong dwords = ent->t273.count;
-       ulong addr = ent->t273.addr;
+       ulong dwords = le32_to_cpu(ent->t273.count);
+       ulong addr = le32_to_cpu(ent->t273.addr);
        uint32_t value;
 
        ql_dbg(ql_dbg_misc, vha, 0xd211,
@@ -703,12 +618,13 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+       ulong type = ent->t274.queue_type;
        uint count = 0;
        uint i;
 
-       ql_dbg(ql_dbg_misc, vha, 0xd212,
-           "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
-       if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+       ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
+           "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
+       if (type == T274_QUEUE_TYPE_REQ_SHAD) {
                for (i = 0; i < vha->hw->max_req_queues; i++) {
                        struct req_que *req = vha->hw->req_q_map[i];
 
@@ -720,7 +636,7 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                                count++;
                        }
                }
-       } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+       } else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
                for (i = 0; i < vha->hw->max_rsp_queues; i++) {
                        struct rsp_que *rsp = vha->hw->rsp_q_map[i];
 
@@ -746,7 +662,7 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd02f,
-                   "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
+                   "%s: unknown queue %lx\n", __func__, type);
                qla27xx_skip_entry(ent, buf);
        }
 
@@ -765,23 +681,26 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
        ulong offset = offsetof(typeof(*ent), t275.buffer);
+       ulong length = le32_to_cpu(ent->t275.length);
+       ulong size = le32_to_cpu(ent->hdr.size);
+       void *buffer = ent->t275.buffer;
 
-       ql_dbg(ql_dbg_misc, vha, 0xd213,
-           "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
-       if (!ent->t275.length) {
+       ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
+           "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
+       if (!length) {
                ql_dbg(ql_dbg_misc, vha, 0xd020,
                    "%s: buffer zero length\n", __func__);
                qla27xx_skip_entry(ent, buf);
                goto done;
        }
-       if (offset + ent->t275.length > ent->hdr.size) {
+       if (offset + length > size) {
+               length = size - offset;
                ql_dbg(ql_dbg_misc, vha, 0xd030,
-                   "%s: buffer overflow\n", __func__);
-               qla27xx_skip_entry(ent, buf);
-               goto done;
+                   "%s: buffer overflow, truncate [%lx]\n", __func__, length);
+               ent->t275.length = cpu_to_le32(length);
        }
 
-       qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+       qla27xx_insertbuf(buffer, length, buf, len);
 done:
        return qla27xx_next_entry(ent);
 }
@@ -790,15 +709,22 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
     struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       uint type = vha->hw->pdev->device >> 4 & 0xf;
-       uint func = vha->hw->port_no & 0x3;
-
        ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
            "%s: cond [%lx]\n", __func__, *len);
 
-       if (type != ent->t276.cond1 || func != ent->t276.cond2) {
-               ent = qla27xx_next_entry(ent);
-               qla27xx_skip_entry(ent, buf);
+       if (buf) {
+               ulong cond1 = le32_to_cpu(ent->t276.cond1);
+               ulong cond2 = le32_to_cpu(ent->t276.cond2);
+               uint type = vha->hw->pdev->device >> 4 & 0xf;
+               uint func = vha->hw->port_no & 0x3;
+
+               if (type != cond1 || func != cond2) {
+                       struct qla27xx_fwdt_template *tmp = buf;
+
+                       tmp->count--;
+                       ent = qla27xx_next_entry(ent);
+                       qla27xx_skip_entry(ent, buf);
+               }
        }
 
        return qla27xx_next_entry(ent);
@@ -808,13 +734,15 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
     struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
+       ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
+       ulong data_addr = le32_to_cpu(ent->t277.data_addr);
 
        ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
            "%s: rdpep [%lx]\n", __func__, *len);
-       qla27xx_insert32(ent->t277.wr_cmd_data, buf, len);
-       qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf);
-       qla27xx_read_reg(reg, ent->t277.data_addr, buf, len);
+       qla27xx_insert32(wr_cmd_data, buf, len);
+       qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
+       qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);
 
        return qla27xx_next_entry(ent);
 }
@@ -823,12 +751,15 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
     struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
-       struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+       ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
+       ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
+       ulong data_addr = le32_to_cpu(ent->t278.data_addr);
+       ulong wr_data = le32_to_cpu(ent->t278.wr_data);
 
        ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
            "%s: wrpep [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf);
-       qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf);
+       qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
+       qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
 
        return qla27xx_next_entry(ent);
 }
@@ -837,8 +768,10 @@ static struct qla27xx_fwdt_entry *
 qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
+       ulong type = le32_to_cpu(ent->hdr.type);
+
        ql_dbg(ql_dbg_misc, vha, 0xd2ff,
-           "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len);
+           "%s: other %lx [%lx]\n", __func__, type, *len);
        qla27xx_skip_entry(ent, buf);
 
        return qla27xx_next_entry(ent);
@@ -893,36 +826,27 @@ static void
 qla27xx_walk_template(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
 {
-       struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
-       ulong count = tmp->entry_count;
+       struct qla27xx_fwdt_entry *ent = (void *)tmp +
+           le32_to_cpu(tmp->entry_offset);
+       ulong type;
 
+       tmp->count = le32_to_cpu(tmp->entry_count);
        ql_dbg(ql_dbg_misc, vha, 0xd01a,
-           "%s: entry count %lx\n", __func__, count);
-       while (count--) {
-               ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len);
+           "%s: entry count %u\n", __func__, tmp->count);
+       while (ent && tmp->count--) {
+               type = le32_to_cpu(ent->hdr.type);
+               ent = qla27xx_find_entry(type)(vha, ent, buf, len);
                if (!ent)
                        break;
        }
 
-       if (count)
+       if (tmp->count)
                ql_dbg(ql_dbg_misc, vha, 0xd018,
-                   "%s: entry residual count (%lx)\n", __func__, count);
+                   "%s: entry count residual=+%u\n", __func__, tmp->count);
 
        if (ent)
                ql_dbg(ql_dbg_misc, vha, 0xd019,
-                   "%s: missing end entry (%lx)\n", __func__, count);
-
-       if (buf && *len != vha->hw->fw_dump_len)
-               ql_dbg(ql_dbg_misc, vha, 0xd01b,
-                   "%s: length=%#lx residual=%+ld\n",
-                   __func__, *len, vha->hw->fw_dump_len - *len);
-
-       if (buf) {
-               ql_log(ql_log_warn, vha, 0xd015,
-                   "Firmware dump saved to temp buffer (%lu/%p)\n",
-                   vha->host_no, vha->hw->fw_dump);
-               qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
-       }
+                   "%s: missing end entry\n", __func__);
 }
 
 static void
@@ -945,8 +869,8 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
 }
 
 static void
-qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
-       struct scsi_qla_host *vha)
+qla27xx_firmware_info(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_template *tmp)
 {
        tmp->firmware_version[0] = vha->hw->fw_major_version;
        tmp->firmware_version[1] = vha->hw->fw_minor_version;
@@ -963,19 +887,19 @@ ql27xx_edit_template(struct scsi_qla_host *vha,
 {
        qla27xx_time_stamp(tmp);
        qla27xx_driver_info(tmp);
-       qla27xx_firmware_info(tmp, vha);
+       qla27xx_firmware_info(vha, tmp);
 }
 
 static inline uint32_t
 qla27xx_template_checksum(void *p, ulong size)
 {
-       uint32_t *buf = p;
+       __le32 *buf = p;
        uint64_t sum = 0;
 
        size /= sizeof(*buf);
 
-       while (size--)
-               sum += *buf++;
+       for ( ; size--; buf++)
+               sum += le32_to_cpu(*buf);
 
        sum = (sum & 0xffffffff) + (sum >> 32);
 
@@ -991,29 +915,29 @@ qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
 static inline int
 qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
 {
-       return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+       return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
 }
 
-static void
-qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+static ulong
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
+    struct qla27xx_fwdt_template *tmp, void *buf)
 {
-       struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
-       ulong len;
+       ulong len = 0;
 
        if (qla27xx_fwdt_template_valid(tmp)) {
                len = tmp->template_size;
-               tmp = memcpy(vha->hw->fw_dump, tmp, len);
+               tmp = memcpy(buf, tmp, len);
                ql27xx_edit_template(vha, tmp);
-               qla27xx_walk_template(vha, tmp, tmp, &len);
-               vha->hw->fw_dump_len = len;
-               vha->hw->fw_dumped = 1;
+               qla27xx_walk_template(vha, tmp, buf, &len);
        }
+
+       return len;
 }
 
 ulong
-qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
 {
-       struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+       struct qla27xx_fwdt_template *tmp = p;
        ulong len = 0;
 
        if (qla27xx_fwdt_template_valid(tmp)) {
@@ -1032,18 +956,6 @@ qla27xx_fwdt_template_size(void *p)
        return tmp->template_size;
 }
 
-ulong
-qla27xx_fwdt_template_default_size(void)
-{
-       return sizeof(ql27xx_fwdt_default_template);
-}
-
-const void *
-qla27xx_fwdt_template_default(void)
-{
-       return ql27xx_fwdt_default_template;
-}
-
 int
 qla27xx_fwdt_template_valid(void *p)
 {
@@ -1051,7 +963,8 @@ qla27xx_fwdt_template_valid(void *p)
 
        if (!qla27xx_verify_template_header(tmp)) {
                ql_log(ql_log_warn, NULL, 0xd01c,
-                   "%s: template type %x\n", __func__, tmp->template_type);
+                   "%s: template type %x\n", __func__,
+                   le32_to_cpu(tmp->template_type));
                return false;
        }
 
@@ -1074,17 +987,41 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
                spin_lock_irqsave(&vha->hw->hardware_lock, flags);
 #endif
 
-       if (!vha->hw->fw_dump)
-               ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
-       else if (!vha->hw->fw_dump_template)
-               ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
-       else if (vha->hw->fw_dumped)
-               ql_log(ql_log_warn, vha, 0xd300,
-                   "Firmware has been previously dumped (%p),"
-                   " -- ignoring request\n", vha->hw->fw_dump);
-       else {
-               QLA_FW_STOPPED(vha->hw);
-               qla27xx_execute_fwdt_template(vha);
+       if (!vha->hw->fw_dump) {
+               ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
+       } else if (vha->hw->fw_dumped) {
+               ql_log(ql_log_warn, vha, 0xd01f,
+                   "-> Firmware already dumped (%p) -- ignoring request\n",
+                   vha->hw->fw_dump);
+       } else {
+               struct fwdt *fwdt = vha->hw->fwdt;
+               uint j;
+               ulong len;
+               void *buf = vha->hw->fw_dump;
+
+               for (j = 0; j < 2; j++, fwdt++, buf += len) {
+                       ql_log(ql_log_warn, vha, 0xd011,
+                           "-> fwdt%u running...\n", j);
+                       if (!fwdt->template) {
+                               ql_log(ql_log_warn, vha, 0xd012,
+                                   "-> fwdt%u no template\n", j);
+                               break;
+                       }
+                       len = qla27xx_execute_fwdt_template(vha,
+                           fwdt->template, buf);
+                       if (len != fwdt->dump_size) {
+                               ql_log(ql_log_warn, vha, 0xd013,
+                                   "-> fwdt%u fwdump residual=%+ld\n",
+                                   j, fwdt->dump_size - len);
+                       }
+               }
+               vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
+               vha->hw->fw_dumped = 1;
+
+               ql_log(ql_log_warn, vha, 0xd015,
+                   "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
+                   vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
+               qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
        }
 
 #ifndef __CHECKER__
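Most of the qla_tmpl.c rework above consists of reading template fields through le32_to_cpu() instead of treating them as native uint32_t, including the checksum loop that now walks explicit __le32 words. A standalone rendering of that folding-sum pattern, with illustrative names rather than the driver's own:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Sum little-endian 32-bit words into a 64-bit accumulator, then fold the
 * carries back into the low 32 bits, as in the qla27xx_template_checksum()
 * loop shown above.
 */
static u32 le32_folded_sum(const void *p, ulong size)
{
	const __le32 *buf = p;
	u64 sum = 0;

	for (size /= sizeof(*buf); size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return sum;
}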
index 5c2c2a8..d2a0014 100644
 #define IOBASE_ADDR    offsetof(struct device_reg_24xx, iobase_addr)
 
 struct __packed qla27xx_fwdt_template {
-       uint32_t template_type;
-       uint32_t entry_offset;
+       __le32 template_type;
+       __le32 entry_offset;
        uint32_t template_size;
-       uint32_t reserved_1;
+       uint32_t count;         /* borrow field for running/residual count */
 
-       uint32_t entry_count;
+       __le32 entry_count;
        uint32_t template_version;
        uint32_t capture_timestamp;
        uint32_t template_checksum;
@@ -65,8 +65,8 @@ struct __packed qla27xx_fwdt_template {
 
 struct __packed qla27xx_fwdt_entry {
        struct __packed {
-               uint32_t type;
-               uint32_t size;
+               __le32 type;
+               __le32 size;
                uint32_t reserved_1;
 
                uint8_t  capture_flags;
@@ -81,36 +81,36 @@ struct __packed qla27xx_fwdt_entry {
                } t255;
 
                struct __packed {
-                       uint32_t base_addr;
+                       __le32 base_addr;
                        uint8_t  reg_width;
-                       uint16_t reg_count;
+                       __le16 reg_count;
                        uint8_t  pci_offset;
                } t256;
 
                struct __packed {
-                       uint32_t base_addr;
-                       uint32_t write_data;
+                       __le32 base_addr;
+                       __le32 write_data;
                        uint8_t  pci_offset;
                        uint8_t  reserved[3];
                } t257;
 
                struct __packed {
-                       uint32_t base_addr;
+                       __le32 base_addr;
                        uint8_t  reg_width;
-                       uint16_t reg_count;
+                       __le16 reg_count;
                        uint8_t  pci_offset;
                        uint8_t  banksel_offset;
                        uint8_t  reserved[3];
-                       uint32_t bank;
+                       __le32 bank;
                } t258;
 
                struct __packed {
-                       uint32_t base_addr;
-                       uint32_t write_data;
+                       __le32 base_addr;
+                       __le32 write_data;
                        uint8_t  reserved[2];
                        uint8_t  pci_offset;
                        uint8_t  banksel_offset;
-                       uint32_t bank;
+                       __le32 bank;
                } t259;
 
                struct __packed {
@@ -121,14 +121,14 @@ struct __packed qla27xx_fwdt_entry {
                struct __packed {
                        uint8_t pci_offset;
                        uint8_t reserved[3];
-                       uint32_t write_data;
+                       __le32 write_data;
                } t261;
 
                struct __packed {
                        uint8_t  ram_area;
                        uint8_t  reserved[3];
-                       uint32_t start_addr;
-                       uint32_t end_addr;
+                       __le32 start_addr;
+                       __le32 end_addr;
                } t262;
 
                struct __packed {
@@ -158,7 +158,7 @@ struct __packed qla27xx_fwdt_entry {
                struct __packed {
                        uint8_t  pci_offset;
                        uint8_t  reserved[3];
-                       uint32_t data;
+                       __le32 data;
                } t267;
 
                struct __packed {
@@ -173,23 +173,23 @@ struct __packed qla27xx_fwdt_entry {
                } t269;
 
                struct __packed {
-                       uint32_t addr;
-                       uint32_t count;
+                       __le32 addr;
+                       __le32 count;
                } t270;
 
                struct __packed {
-                       uint32_t addr;
-                       uint32_t data;
+                       __le32 addr;
+                       __le32 data;
                } t271;
 
                struct __packed {
-                       uint32_t addr;
-                       uint32_t count;
+                       __le32 addr;
+                       __le32 count;
                } t272;
 
                struct __packed {
-                       uint32_t addr;
-                       uint32_t count;
+                       __le32 addr;
+                       __le32 count;
                } t273;
 
                struct __packed {
@@ -199,26 +199,26 @@ struct __packed qla27xx_fwdt_entry {
                } t274;
 
                struct __packed {
-                       uint32_t length;
+                       __le32 length;
                        uint8_t  buffer[];
                } t275;
 
                struct __packed {
-                       uint32_t cond1;
-                       uint32_t cond2;
+                       __le32 cond1;
+                       __le32 cond2;
                } t276;
 
                struct __packed {
-                       uint32_t cmd_addr;
-                       uint32_t wr_cmd_data;
-                       uint32_t data_addr;
+                       __le32 cmd_addr;
+                       __le32 wr_cmd_data;
+                       __le32 data_addr;
                } t277;
 
                struct __packed {
-                       uint32_t cmd_addr;
-                       uint32_t wr_cmd_data;
-                       uint32_t data_addr;
-                       uint32_t wr_data;
+                       __le32 cmd_addr;
+                       __le32 wr_cmd_data;
+                       __le32 data_addr;
+                       __le32 wr_data;
                } t278;
        };
 };
index 0690dac..cd6bdf7 100644 (file)
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.00.00.14-k"
+#define QLA2XXX_VERSION      "10.01.00.16-k"
 
 #define QLA_DRIVER_MAJOR_VER   10
-#define QLA_DRIVER_MINOR_VER   0
+#define QLA_DRIVER_MINOR_VER   1
 #define QLA_DRIVER_PATCH_VER   0
 #define QLA_DRIVER_BETA_VER    0
index 8a3075d..ec9f199 100644 (file)
 
 
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/utsname.h>
 #include <linux/vmalloc.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/configfs.h>
 #include <linux/ctype.h>
 #include <asm/unaligned.h>
-#include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
@@ -267,25 +261,17 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
 static void tcm_qla2xxx_complete_free(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-       bool released = false;
-       unsigned long flags;
 
        cmd->cmd_in_wq = 0;
 
        WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
 
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
+       /* To do: protect all tgt_counters manipulations with proper locking. */
        cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
        cmd->trc_flags |= TRC_CMD_FREE;
        cmd->cmd_sent_to_fw = 0;
-       if (cmd->released)
-               released = true;
-       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 
-       if (released)
-               qlt_free_cmd(cmd);
-       else
-               transport_generic_free_cmd(&cmd->se_cmd, 0);
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
 /*
@@ -326,7 +312,6 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
 static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd;
-       unsigned long flags;
 
        if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
                struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -336,14 +321,10 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
        }
        cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
 
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
-       if (cmd->cmd_sent_to_fw) {
-               cmd->released = 1;
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-       } else {
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-               qlt_free_cmd(cmd);
-       }
+       if (WARN_ON(cmd->cmd_sent_to_fw))
+               return;
+
+       qlt_free_cmd(cmd);
 }
 
 static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -359,7 +340,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
        if (!sess)
                return;
 
-       assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
        kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
 }
 
@@ -374,8 +354,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
 
        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        target_sess_cmd_list_set_waiting(se_sess);
-       tcm_qla2xxx_put_sess(sess);
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+       tcm_qla2xxx_put_sess(sess);
 }
 
 static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
@@ -399,6 +380,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
                        cmd->se_cmd.transport_state,
                        cmd->se_cmd.t_state,
                        cmd->se_cmd.se_cmd_flags);
+               transport_generic_request_failure(&cmd->se_cmd,
+                       TCM_CHECK_CONDITION_ABORT_CMD);
                return 0;
        }
        cmd->trc_flags |= TRC_XFR_RDY;
@@ -488,32 +471,18 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-       unsigned long flags;
 
        /*
         * Ensure that the complete FCP WRITE payload has been received.
         * Otherwise return an exception via CHECK_CONDITION status.
         */
        cmd->cmd_in_wq = 0;
-
-       spin_lock_irqsave(&cmd->cmd_lock, flags);
        cmd->cmd_sent_to_fw = 0;
-
-       if (cmd->released) {
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-               qlt_free_cmd(cmd);
-               return;
-       }
-
-       cmd->data_work = 1;
        if (cmd->aborted) {
-               cmd->data_work_free = 1;
-               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
-               tcm_qla2xxx_free_cmd(cmd);
+               transport_generic_request_failure(&cmd->se_cmd,
+                       TCM_CHECK_CONDITION_ABORT_CMD);
                return;
        }
-       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
 
        cmd->qpair->tgt_counters.qla_core_ret_ctio++;
        if (!cmd->write_data_transferred) {
@@ -829,7 +798,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
 
 static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
 {
-       assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
        target_sess_cmd_list_set_waiting(sess->se_sess);
 }
 
@@ -1489,7 +1457,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
         */
        tpg = lport->tpg_1;
        if (!tpg) {
-               pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+               pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
                return -EINVAL;
        }
        /*
index 6e4f493..8c674ec 100644 (file)
@@ -5930,7 +5930,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
                val = rd_nvram_byte(ha, sec_addr);
                if (val & BIT_7)
                        ddb_index[1] = (val & 0x7f);
-
+               goto exit_boot_info;
        } else if (is_qla80XX(ha)) {
                buf = dma_alloc_coherent(&ha->pdev->dev, size,
                                         &buf_dma, GFP_KERNEL);
index 8b471a9..136681a 100644 (file)
@@ -139,7 +139,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
        } else {                /* out */
 #if QL_TURBO_PDMA
                rtrc(4)
-                   if (reqlen >= 128 && inb(qbase + 8) & 0x10) {       /* empty */
+               if (reqlen >= 128 && inb(qbase + 8) & 0x10) {   /* empty */
                        outsl(qbase + 4, request, 32);
                        reqlen -= 128;
                        request += 128;
@@ -240,7 +240,7 @@ static void ql_icmd(struct scsi_cmnd *cmd)
        outb(0x40 | qlcfg8 | priv->qinitid, qbase + 8);
        outb(qlcfg7, qbase + 7);
        outb(qlcfg6, qbase + 6);
-        /**/ outb(qlcfg5, qbase + 5);  /* select timer */
+       outb(qlcfg5, qbase + 5);        /* select timer */
        outb(qlcfg9 & 7, qbase + 9);    /* prescaler */
 /*     outb(0x99, qbase + 5);  */
        outb(scmd_id(cmd), qbase + 4);
index 1b8378f..8e96805 100644 (file)
@@ -2393,7 +2393,6 @@ out_put_autopm_host:
        scsi_autopm_put_host(shost);
        return error;
 }
-EXPORT_SYMBOL(scsi_ioctl_reset);
 
 bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
                                  struct scsi_sense_hdr *sshdr)
index 07dfc17..0916bd6 100644 (file)
@@ -141,8 +141,6 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
 
 static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 {
-       struct scsi_device *sdev = cmd->device;
-
        if (cmd->request->rq_flags & RQF_DONTPREP) {
                cmd->request->rq_flags &= ~RQF_DONTPREP;
                scsi_mq_uninit_cmd(cmd);
@@ -150,7 +148,6 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
                WARN_ON_ONCE(true);
        }
        blk_mq_requeue_request(cmd->request, true);
-       put_device(&sdev->sdev_gendev);
 }
 
 /**
@@ -189,19 +186,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
         */
        cmd->result = 0;
 
-       /*
-        * Before a SCSI command is dispatched,
-        * get_device(&sdev->sdev_gendev) is called and the host,
-        * target and device busy counters are increased. Since
-        * requeuing a request causes these actions to be repeated and
-        * since scsi_device_unbusy() has already been called,
-        * put_device(&device->sdev_gendev) must still be called. Call
-        * put_device() after blk_mq_requeue_request() to avoid that
-        * removal of the SCSI device can start before requeueing has
-        * happened.
-        */
        blk_mq_requeue_request(cmd->request, true);
-       put_device(&device->sdev_gendev);
 }
 
 /*
@@ -619,7 +604,6 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                blk_mq_run_hw_queues(q, true);
 
        percpu_ref_put(&q->q_usage_counter);
-       put_device(&sdev->sdev_gendev);
        return false;
 }
 
@@ -1613,7 +1597,6 @@ static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
        struct scsi_device *sdev = q->queuedata;
 
        atomic_dec(&sdev->device_busy);
-       put_device(&sdev->sdev_gendev);
 }
 
 static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
@@ -1621,16 +1604,9 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
        struct request_queue *q = hctx->queue;
        struct scsi_device *sdev = q->queuedata;
 
-       if (!get_device(&sdev->sdev_gendev))
-               goto out;
-       if (!scsi_dev_queue_ready(q, sdev))
-               goto out_put_device;
-
-       return true;
+       if (scsi_dev_queue_ready(q, sdev))
+               return true;
 
-out_put_device:
-       put_device(&sdev->sdev_gendev);
-out:
        if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
                blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
        return false;
@@ -1770,7 +1746,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
 
        if (shost->hostt->map_queues)
                return shost->hostt->map_queues(shost);
-       return blk_mq_map_queues(&set->map[0]);
+       return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
index 53380e0..058079f 100644 (file)
@@ -1129,7 +1129,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
         * that no LUN is present, so don't add sdev in these cases.
         * Two specific examples are:
         * 1) NetApp targets: return PQ=1, PDT=0x1f
-        * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+        * 2) IBM/2145 targets: return PQ=1, PDT=0
+        * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
         *    in the UFI 1.0 spec (we cannot rely on reserved bits).
         *
         * References:
@@ -1143,8 +1144,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
         * PDT=00h Direct-access device (floppy)
         * PDT=1Fh none (no FDD connected to the requested logical unit)
         */
-       if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
-           (result[0] & 0x1f) == 0x1f &&
+       if (((result[0] >> 5) == 1 ||
+           (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
            !scsi_is_wlun(lun)) {
                SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
                                        "scsi scan: peripheral device type"
index d703527..d9e3cf3 100644 (file)
@@ -147,6 +147,7 @@ static const struct {
        { FCH_EVT_PORT_OFFLINE,         "port_offline" },
        { FCH_EVT_PORT_FABRIC,          "port_fabric" },
        { FCH_EVT_LINK_UNKNOWN,         "link_unknown" },
+       { FCH_EVT_LINK_FPIN,            "link_FPIN" },
        { FCH_EVT_VENDOR_UNIQUE,        "vendor_unique" },
 };
 fc_enum_name_search(host_event_code, fc_host_event_code,
@@ -295,6 +296,9 @@ static const struct {
        { FC_PORT_ROLE_FCP_INITIATOR,           "FCP Initiator" },
        { FC_PORT_ROLE_IP_PORT,                 "IP Port" },
        { FC_PORT_ROLE_FCP_DUMMY_INITIATOR,     "FCP Dummy Initiator" },
+       { FC_PORT_ROLE_NVME_INITIATOR,          "NVMe Initiator" },
+       { FC_PORT_ROLE_NVME_TARGET,             "NVMe Target" },
+       { FC_PORT_ROLE_NVME_DISCOVERY,          "NVMe Discovery" },
 };
 fc_bitfield_name_search(port_roles, fc_port_role_names)
 
@@ -523,20 +527,23 @@ fc_get_event_number(void)
 }
 EXPORT_SYMBOL(fc_get_event_number);
 
-
 /**
- * fc_host_post_event - called to post an even on an fc_host.
+ * fc_host_post_fc_event - routine to do the work of posting an event
+ *                      on an fc_host.
  * @shost:             host the event occurred on
  * @event_number:      fc event number obtained from get_fc_event_number()
  * @event_code:                fc_host event being posted
- * @event_data:                32bits of data for the event being posted
+ * @data_len:          amount, in bytes, of event data
+ * @data_buf:          pointer to event data
+ * @vendor_id:          value for Vendor id
  *
  * Notes:
  *     This routine assumes no locks are held on entry.
  */
 void
-fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
-               enum fc_host_event_code event_code, u32 event_data)
+fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
+               enum fc_host_event_code event_code,
+               u32 data_len, char *data_buf, u64 vendor_id)
 {
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
@@ -545,12 +552,15 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
        u32 len;
        int err;
 
+       if (!data_buf || data_len < 4)
+               data_len = 0;
+
        if (!scsi_nl_sock) {
                err = -ENOENT;
                goto send_fail;
        }
 
-       len = FC_NL_MSGALIGN(sizeof(*event));
+       len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
 
        skb = nlmsg_new(len, GFP_KERNEL);
        if (!skb) {
@@ -568,12 +578,13 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
        INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
                                FC_NL_ASYNC_EVENT, len);
        event->seconds = ktime_get_real_seconds();
-       event->vendor_id = 0;
+       event->vendor_id = vendor_id;
        event->host_no = shost->host_no;
-       event->event_datalen = sizeof(u32);     /* bytes */
+       event->event_datalen = data_len;        /* bytes */
        event->event_num = event_number;
        event->event_code = event_code;
-       event->event_data = event_data;
+       if (data_len)
+               memcpy(&event->event_data, data_buf, data_len);
 
        nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
                        GFP_KERNEL);
@@ -586,14 +597,35 @@ send_fail:
        printk(KERN_WARNING
                "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
                __func__, shost->host_no,
-               (name) ? name : "<unknown>", event_data, err);
+               (name) ? name : "<unknown>",
+               (data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
        return;
 }
+EXPORT_SYMBOL(fc_host_post_fc_event);
+
+/**
+ * fc_host_post_event - called to post an event on an fc_host.
+ * @shost:             host the event occurred on
+ * @event_number:      fc event number obtained from get_fc_event_number()
+ * @event_code:                fc_host event being posted
+ * @event_data:                32bits of data for the event being posted
+ *
+ * Notes:
+ *     This routine assumes no locks are held on entry.
+ */
+void
+fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
+               enum fc_host_event_code event_code, u32 event_data)
+{
+       fc_host_post_fc_event(shost, event_number, event_code,
+               (u32)sizeof(u32), (char *)&event_data, 0);
+}
 EXPORT_SYMBOL(fc_host_post_event);
 
 
 /**
- * fc_host_post_vendor_event - called to post a vendor unique event on an fc_host
+ * fc_host_post_vendor_event - called to post a vendor unique event
+ *                      on an fc_host
  * @shost:             host the event occurred on
  * @event_number:      fc event number obtained from get_fc_event_number()
  * @data_len:          amount, in bytes, of vendor unique data
@@ -607,56 +639,27 @@ void
 fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
                u32 data_len, char * data_buf, u64 vendor_id)
 {
-       struct sk_buff *skb;
-       struct nlmsghdr *nlh;
-       struct fc_nl_event *event;
-       u32 len;
-       int err;
-
-       if (!scsi_nl_sock) {
-               err = -ENOENT;
-               goto send_vendor_fail;
-       }
-
-       len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
-
-       skb = nlmsg_new(len, GFP_KERNEL);
-       if (!skb) {
-               err = -ENOBUFS;
-               goto send_vendor_fail;
-       }
-
-       nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
-       if (!nlh) {
-               err = -ENOBUFS;
-               goto send_vendor_fail_skb;
-       }
-       event = nlmsg_data(nlh);
-
-       INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
-                               FC_NL_ASYNC_EVENT, len);
-       event->seconds = ktime_get_real_seconds();
-       event->vendor_id = vendor_id;
-       event->host_no = shost->host_no;
-       event->event_datalen = data_len;        /* bytes */
-       event->event_num = event_number;
-       event->event_code = FCH_EVT_VENDOR_UNIQUE;
-       memcpy(&event->event_data, data_buf, data_len);
-
-       nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
-                       GFP_KERNEL);
-       return;
-
-send_vendor_fail_skb:
-       kfree_skb(skb);
-send_vendor_fail:
-       printk(KERN_WARNING
-               "%s: Dropped Event : host %d vendor_unique - err %d\n",
-               __func__, shost->host_no, err);
-       return;
+       fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
+               data_len, data_buf, vendor_id);
 }
 EXPORT_SYMBOL(fc_host_post_vendor_event);
 
+/**
+ * fc_host_fpin_rcv - routine to process a received FPIN.
+ * @shost:             host the FPIN was received on
+ * @fpin_len:          length of FPIN payload, in bytes
+ * @fpin_buf:          pointer to FPIN payload
+ *
+ * Notes:
+ *     This routine assumes no locks are held on entry.
+ */
+void
+fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
+{
+       fc_host_post_fc_event(shost, fc_get_event_number(),
+                               FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
+}
+EXPORT_SYMBOL(fc_host_fpin_rcv);
 
 
 static __init int fc_transport_init(void)
index a03a6ed..28985e5 100644 (file)
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
 smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
index af96236..e8e7688 100644 (file)
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
index 75ec43a..c26cac8 100644 (file)
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION         "1.2.4-070"
+#define DRIVER_VERSION         "1.2.6-015"
 #define DRIVER_MAJOR           1
 #define DRIVER_MINOR           2
-#define DRIVER_RELEASE         4
-#define DRIVER_REVISION                70
+#define DRIVER_RELEASE         6
+#define DRIVER_REVISION                15
 
 #define DRIVER_NAME            "Microsemi PQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -5660,9 +5653,11 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
        return rc;
 }
 
+/* Performs a reset at the LUN level. */
+
 #define PQI_LUN_RESET_RETRIES                  3
 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS     10000
-/* Performs a reset at the LUN level. */
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS  120
 
 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
@@ -5673,12 +5668,12 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
 
        for (retries = 0;;) {
                rc = pqi_lun_reset(ctrl_info, device);
-               if (rc != -EAGAIN ||
-                   ++retries > PQI_LUN_RESET_RETRIES)
+               if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
                        break;
                msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
        }
-       timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
+
+       timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
 
        rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
 
@@ -5707,6 +5702,7 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
        pqi_device_reset_done(device);
 
        mutex_unlock(&ctrl_info->lun_reset_mutex);
+
        return rc;
 }
 
@@ -5737,6 +5733,7 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
        pqi_wait_until_ofa_finished(ctrl_info);
 
        rc = pqi_device_reset(ctrl_info, device);
+
 out:
        dev_err(&ctrl_info->pci_dev->dev,
                "reset of scsi %d:%d:%d:%d: %s\n",
@@ -5795,7 +5792,7 @@ static int pqi_map_queues(struct Scsi_Host *shost)
 {
        struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-       return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
                                        ctrl_info->pci_dev, 0);
 }
 
@@ -7946,6 +7943,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x152d, 0x8a37)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x1104)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x1105)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x1106)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x1107)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x193d, 0x8460)
index 0e4ef21..5cca1b9 100644 (file)
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
index dcd11c6..f0d6e88 100644 (file)
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
index d018cb9..86b0e48 100644 (file)
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
index 179bda3..0b845ab 100644 (file)
@@ -109,6 +109,20 @@ config SCSI_UFS_QCOM
          Select this if you have UFS controller on QCOM chipset.
          If unsure, say N.
 
+config SCSI_UFS_MEDIATEK
+       tristate "Mediatek specific hooks to UFS controller platform driver"
+       depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
+       select PHY_MTK_UFS
+       help
+         This selects the Mediatek specific additions to UFSHCD platform driver.
+         UFS host on Mediatek needs some vendor specific configuration before
+         accessing the hardware which includes PHY configuration and vendor
+         specific registers.
+
+         Select this if you have UFS controller on Mediatek chipset.
+
+         If unsure, say N.
+
 config SCSI_UFS_HISI
        tristate "Hisilicon specific hooks to UFS controller platform driver"
        depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
index a3bd70c..2a90979 100644 (file)
@@ -10,3 +10,4 @@ ufshcd-core-$(CONFIG_SCSI_UFS_BSG)    += ufs_bsg.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
 obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
+obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
index 4a37b4f..86dbb72 100644 (file)
@@ -17,7 +17,8 @@
 
 #include "ufshcd-pltfrm.h"
 
-#define CDNS_UFS_REG_HCLKDIV 0xFC
+#define CDNS_UFS_REG_HCLKDIV   0xFC
+#define CDNS_UFS_REG_PHY_XCFGD1        0x113C
 
 /**
  * Sets HCLKDIV register value based on the core_clk
@@ -77,11 +78,66 @@ static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on,
        return cdns_ufs_set_hclkdiv(hba);
 }
 
-static struct ufs_hba_variant_ops cdns_pltfm_hba_vops = {
+/**
+ * cdns_ufs_init - performs additional ufs initialization
+ * @hba: host controller instance
+ *
+ * Returns status of initialization
+ */
+static int cdns_ufs_init(struct ufs_hba *hba)
+{
+       int status = 0;
+
+       if (hba->vops && hba->vops->phy_initialization)
+               status = hba->vops->phy_initialization(hba);
+
+       return status;
+}
+
+/**
+ * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
+ * @hba: host controller instance
+ *
+ * Always returns 0
+ */
+static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
+{
+       u32 data;
+
+       /* Increase RX_Advanced_Min_ActivateTime_Capability */
+       data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
+       data |= BIT(24);
+       ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
+
+       return 0;
+}
+
+static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
+       .name = "cdns-ufs-pltfm",
+       .setup_clocks = cdns_ufs_setup_clocks,
+};
+
+static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
        .name = "cdns-ufs-pltfm",
+       .init = cdns_ufs_init,
        .setup_clocks = cdns_ufs_setup_clocks,
+       .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+};
+
+static const struct of_device_id cdns_ufs_of_match[] = {
+       {
+               .compatible = "cdns,ufshc",
+               .data =  &cdns_ufs_pltfm_hba_vops,
+       },
+       {
+               .compatible = "cdns,ufshc-m31-16nm",
+               .data =  &cdns_ufs_m31_16nm_pltfm_hba_vops,
+       },
+       { },
 };
 
+MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
+
 /**
  * cdns_ufs_pltfrm_probe - probe routine of the driver
  * @pdev: pointer to platform device handle
@@ -91,10 +147,15 @@ static struct ufs_hba_variant_ops cdns_pltfm_hba_vops = {
 static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
 {
        int err;
+       const struct of_device_id *of_id;
+       struct ufs_hba_variant_ops *vops;
        struct device *dev = &pdev->dev;
 
+       of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
+       vops = (struct ufs_hba_variant_ops *)of_id->data;
+
        /* Perform generic probe */
-       err = ufshcd_pltfrm_init(pdev, &cdns_pltfm_hba_vops);
+       err = ufshcd_pltfrm_init(pdev, vops);
        if (err)
                dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
 
@@ -115,13 +176,6 @@ static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id cdns_ufs_of_match[] = {
-       { .compatible = "cdns,ufshc" },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
-
 static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
        .suspend         = ufshcd_pltfrm_suspend,
        .resume          = ufshcd_pltfrm_resume,
index 0e855b5..7aed0a1 100644 (file)
@@ -293,108 +293,7 @@ static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
        return err;
 }
 
-struct ufs_hisi_dev_params {
-       u32 pwm_rx_gear; /* pwm rx gear to work in */
-       u32 pwm_tx_gear; /* pwm tx gear to work in */
-       u32 hs_rx_gear;  /* hs rx gear to work in */
-       u32 hs_tx_gear;  /* hs tx gear to work in */
-       u32 rx_lanes;    /* number of rx lanes */
-       u32 tx_lanes;    /* number of tx lanes */
-       u32 rx_pwr_pwm;  /* rx pwm working pwr */
-       u32 tx_pwr_pwm;  /* tx pwm working pwr */
-       u32 rx_pwr_hs;   /* rx hs working pwr */
-       u32 tx_pwr_hs;   /* tx hs working pwr */
-       u32 hs_rate;     /* rate A/B to work in HS */
-       u32 desired_working_mode;
-};
-
-static int ufs_hisi_get_pwr_dev_param(
-                                   struct ufs_hisi_dev_params *hisi_param,
-                                   struct ufs_pa_layer_attr *dev_max,
-                                   struct ufs_pa_layer_attr *agreed_pwr)
-{
-       int min_hisi_gear;
-       int min_dev_gear;
-       bool is_dev_sup_hs = false;
-       bool is_hisi_max_hs = false;
-
-       if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
-               is_dev_sup_hs = true;
-
-       if (hisi_param->desired_working_mode == FAST) {
-               is_hisi_max_hs = true;
-               min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
-                                      hisi_param->hs_tx_gear);
-       } else {
-               min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
-                                      hisi_param->pwm_tx_gear);
-       }
-
-       /*
-        * device doesn't support HS but
-        * hisi_param->desired_working_mode is HS,
-        * thus device and hisi_param don't agree
-        */
-       if (!is_dev_sup_hs && is_hisi_max_hs) {
-               pr_err("%s: device not support HS\n", __func__);
-               return -ENOTSUPP;
-       } else if (is_dev_sup_hs && is_hisi_max_hs) {
-               /*
-                * since device supports HS, it supports FAST_MODE.
-                * since hisi_param->desired_working_mode is also HS
-                * then final decision (FAST/FASTAUTO) is done according
-                * to hisi_params as it is the restricting factor
-                */
-               agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-                       hisi_param->rx_pwr_hs;
-       } else {
-               /*
-                * here hisi_param->desired_working_mode is PWM.
-                * it doesn't matter whether device supports HS or PWM,
-                * in both cases hisi_param->desired_working_mode will
-                * determine the mode
-                */
-               agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-                       hisi_param->rx_pwr_pwm;
-       }
-
-       /*
-        * we would like tx to work in the minimum number of lanes
-        * between device capability and vendor preferences.
-        * the same decision will be made for rx
-        */
-       agreed_pwr->lane_tx =
-               min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
-       agreed_pwr->lane_rx =
-               min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
-
-       /* device maximum gear is the minimum between device rx and tx gears */
-       min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
-       /*
-        * if both device capabilities and vendor pre-defined preferences are
-        * both HS or both PWM then set the minimum gear to be the chosen
-        * working gear.
-        * if one is PWM and one is HS then the one that is PWM get to decide
-        * what is the gear, as it is the one that also decided previously what
-        * pwr the device will be configured to.
-        */
-       if ((is_dev_sup_hs && is_hisi_max_hs) ||
-           (!is_dev_sup_hs && !is_hisi_max_hs))
-               agreed_pwr->gear_rx = agreed_pwr->gear_tx =
-                       min_t(u32, min_dev_gear, min_hisi_gear);
-       else
-               agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
-
-       agreed_pwr->hs_rate = hisi_param->hs_rate;
-
-       pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
-               agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
-               agreed_pwr->hs_rate);
-       return 0;
-}
-
-static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
 {
        hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
        hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
@@ -477,7 +376,7 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
                                       struct ufs_pa_layer_attr *dev_max_params,
                                       struct ufs_pa_layer_attr *dev_req_params)
 {
-       struct ufs_hisi_dev_params ufs_hisi_cap;
+       struct ufs_dev_params ufs_hisi_cap;
        int ret = 0;
 
        if (!dev_req_params) {
@@ -490,8 +389,8 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
        switch (status) {
        case PRE_CHANGE:
                ufs_hisi_set_dev_cap(&ufs_hisi_cap);
-               ret = ufs_hisi_get_pwr_dev_param(
-                       &ufs_hisi_cap, dev_max_params, dev_req_params);
+               ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
+                                              dev_max_params, dev_req_params);
                if (ret) {
                        dev_err(hba->dev,
                            "%s: failed to determine capabilities\n", __func__);
@@ -587,6 +486,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
        ufshcd_set_variant(hba, host);
 
        host->rst  = devm_reset_control_get(dev, "rst");
+       if (IS_ERR(host->rst)) {
+               dev_err(dev, "%s: failed to get reset control\n", __func__);
+               return PTR_ERR(host->rst);
+       }
 
        ufs_hisi_set_pm_lvl(hba);
 
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
new file mode 100644 (file)
index 0000000..0f6ff33
--- /dev/null
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Authors:
+ *     Stanley Chu <stanley.chu@mediatek.com>
+ *     Peter Wang <peter.wang@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include "ufshcd.h"
+#include "ufshcd-pltfrm.h"
+#include "unipro.h"
+#include "ufs-mediatek.h"
+
+static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
+{
+       u32 tmp;
+
+       if (enable) {
+               ufshcd_dme_get(hba,
+                              UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+               tmp = tmp |
+                     (1 << RX_SYMBOL_CLK_GATE_EN) |
+                     (1 << SYS_CLK_GATE_EN) |
+                     (1 << TX_CLK_GATE_EN);
+               ufshcd_dme_set(hba,
+                              UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+               ufshcd_dme_get(hba,
+                              UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+               tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
+               ufshcd_dme_set(hba,
+                              UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+       } else {
+               ufshcd_dme_get(hba,
+                              UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+               tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
+                             (1 << SYS_CLK_GATE_EN) |
+                             (1 << TX_CLK_GATE_EN));
+               ufshcd_dme_set(hba,
+                              UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+               ufshcd_dme_get(hba,
+                              UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
+               tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
+               ufshcd_dme_set(hba,
+                              UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
+       }
+}
+
+static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+       struct device *dev = hba->dev;
+       struct device_node *np = dev->of_node;
+       int err = 0;
+
+       host->mphy = devm_of_phy_get_by_index(dev, np, 0);
+
+       if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
+               /*
+                * UFS driver might be probed before the phy driver does.
+                * In that case we would like to return EPROBE_DEFER code.
+                */
+               err = -EPROBE_DEFER;
+               dev_info(dev,
+                        "%s: required phy hasn't probed yet. err = %d\n",
+                       __func__, err);
+       } else if (IS_ERR(host->mphy)) {
+               err = PTR_ERR(host->mphy);
+               dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
+       }
+
+       if (err)
+               host->mphy = NULL;
+
+       return err;
+}
+
+/**
+ * ufs_mtk_setup_clocks - enables/disables clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
+                               enum ufs_notify_change_status status)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+       int ret = -EINVAL;
+
+       /*
+        * In case ufs_mtk_init() is not yet done, simply ignore.
+        * This ufs_mtk_setup_clocks() shall be called from
+        * ufs_mtk_init() after init is done.
+        */
+       if (!host)
+               return 0;
+
+       switch (status) {
+       case PRE_CHANGE:
+               if (!on)
+                       ret = phy_power_off(host->mphy);
+               break;
+       case POST_CHANGE:
+               if (on)
+                       ret = phy_power_on(host->mphy);
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * ufs_mtk_init - find other essential mmio bases
+ * @hba: host controller instance
+ *
+ * Binds PHY with controller and powers up PHY enabling clocks
+ * and regulators.
+ *
+ * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * on phy power up failure and returns zero on success.
+ */
+static int ufs_mtk_init(struct ufs_hba *hba)
+{
+       struct ufs_mtk_host *host;
+       struct device *dev = hba->dev;
+       int err = 0;
+
+       host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+       if (!host) {
+               err = -ENOMEM;
+               dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
+               goto out;
+       }
+
+       host->hba = hba;
+       ufshcd_set_variant(hba, host);
+
+       err = ufs_mtk_bind_mphy(hba);
+       if (err)
+               goto out_variant_clear;
+
+       /*
+        * ufshcd_vops_init() is invoked after
+        * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
+        * phy clock setup is skipped.
+        *
+        * Enable phy clocks specifically here.
+        */
+       ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
+
+       goto out;
+
+out_variant_clear:
+       ufshcd_set_variant(hba, NULL);
+out:
+       return err;
+}
+
+static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
+                                 struct ufs_pa_layer_attr *dev_max_params,
+                                 struct ufs_pa_layer_attr *dev_req_params)
+{
+       struct ufs_dev_params host_cap;
+       int ret;
+
+       host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
+       host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
+       host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
+       host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
+       host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
+       host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
+       host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
+       host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
+       host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
+       host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
+       host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
+       host_cap.desired_working_mode =
+                               UFS_MTK_LIMIT_DESIRED_MODE;
+
+       ret = ufshcd_get_pwr_dev_param(&host_cap,
+                                      dev_max_params,
+                                      dev_req_params);
+       if (ret) {
+               pr_info("%s: failed to determine capabilities\n",
+                       __func__);
+       }
+
+       return ret;
+}
+
+static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
+                                    enum ufs_notify_change_status stage,
+                                    struct ufs_pa_layer_attr *dev_max_params,
+                                    struct ufs_pa_layer_attr *dev_req_params)
+{
+       int ret = 0;
+
+       switch (stage) {
+       case PRE_CHANGE:
+               ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
+                                            dev_req_params);
+               break;
+       case POST_CHANGE:
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int ufs_mtk_pre_link(struct ufs_hba *hba)
+{
+       int ret;
+       u32 tmp;
+
+       /* disable deep stall */
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
+       if (ret)
+               return ret;
+
+       tmp &= ~(1 << 6);
+
+       ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
+
+       return ret;
+}
+
+static int ufs_mtk_post_link(struct ufs_hba *hba)
+{
+       /* disable device LCC */
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+
+       /* enable unipro clock gating feature */
+       ufs_mtk_cfg_unipro_cg(hba, true);
+
+       return 0;
+}
+
+static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
+                                      enum ufs_notify_change_status stage)
+{
+       int ret = 0;
+
+       switch (stage) {
+       case PRE_CHANGE:
+               ret = ufs_mtk_pre_link(hba);
+               break;
+       case POST_CHANGE:
+               ret = ufs_mtk_post_link(hba);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+       if (ufshcd_is_link_hibern8(hba))
+               phy_power_off(host->mphy);
+
+       return 0;
+}
+
+static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+       if (ufshcd_is_link_hibern8(hba))
+               phy_power_on(host->mphy);
+
+       return 0;
+}
+
+/**
+ * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
+ *
+ * The variant operations configure the necessary controller and PHY
+ * handshake during initialization.
+ */
+static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+       .name                = "mediatek.ufshci",
+       .init                = ufs_mtk_init,
+       .setup_clocks        = ufs_mtk_setup_clocks,
+       .link_startup_notify = ufs_mtk_link_startup_notify,
+       .pwr_change_notify   = ufs_mtk_pwr_change_notify,
+       .suspend             = ufs_mtk_suspend,
+       .resume              = ufs_mtk_resume,
+};
+
+/**
+ * ufs_mtk_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int ufs_mtk_probe(struct platform_device *pdev)
+{
+       int err;
+       struct device *dev = &pdev->dev;
+
+       /* perform generic probe */
+       err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+       if (err)
+               dev_info(dev, "probe failed %d\n", err);
+
+       return err;
+}
+
+/**
+ * ufs_mtk_remove - set driver_data of the device to NULL
+ * @pdev: pointer to platform device handle
+ *
+ * Always returns 0
+ */
+static int ufs_mtk_remove(struct platform_device *pdev)
+{
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       pm_runtime_get_sync(&(pdev)->dev);
+       ufshcd_remove(hba);
+       return 0;
+}
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+       { .compatible = "mediatek,mt8183-ufshci"},
+       {},
+};
+
+static const struct dev_pm_ops ufs_mtk_pm_ops = {
+       .suspend         = ufshcd_pltfrm_suspend,
+       .resume          = ufshcd_pltfrm_resume,
+       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
+       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver ufs_mtk_pltform = {
+       .probe      = ufs_mtk_probe,
+       .remove     = ufs_mtk_remove,
+       .shutdown   = ufshcd_pltfrm_shutdown,
+       .driver = {
+               .name   = "ufshcd-mtk",
+               .pm     = &ufs_mtk_pm_ops,
+               .of_match_table = ufs_mtk_of_match,
+       },
+};
+
+MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
+MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek UFS Host Driver");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(ufs_mtk_pltform);
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
new file mode 100644 (file)
index 0000000..19f8c42
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+
+#ifndef _UFS_MEDIATEK_H
+#define _UFS_MEDIATEK_H
+
+/*
+ * Vendor specific pre-defined parameters
+ */
+#define UFS_MTK_LIMIT_NUM_LANES_RX  1
+#define UFS_MTK_LIMIT_NUM_LANES_TX  1
+#define UFS_MTK_LIMIT_HSGEAR_RX     UFS_HS_G3
+#define UFS_MTK_LIMIT_HSGEAR_TX     UFS_HS_G3
+#define UFS_MTK_LIMIT_PWMGEAR_RX    UFS_PWM_G4
+#define UFS_MTK_LIMIT_PWMGEAR_TX    UFS_PWM_G4
+#define UFS_MTK_LIMIT_RX_PWR_PWM    SLOW_MODE
+#define UFS_MTK_LIMIT_TX_PWR_PWM    SLOW_MODE
+#define UFS_MTK_LIMIT_RX_PWR_HS     FAST_MODE
+#define UFS_MTK_LIMIT_TX_PWR_HS     FAST_MODE
+#define UFS_MTK_LIMIT_HS_RATE       PA_HS_MODE_B
+#define UFS_MTK_LIMIT_DESIRED_MODE  UFS_HS_MODE
+
+/*
+ * Other attributes
+ */
+#define VS_DEBUGCLOCKENABLE         0xD0A1
+#define VS_SAVEPOWERCONTROL         0xD0A6
+#define VS_UNIPROPOWERDOWNCONTROL   0xD0A8
+
+/*
+ * VS_DEBUGCLOCKENABLE
+ */
+enum {
+       TX_SYMBOL_CLK_REQ_FORCE = 5,
+};
+
+/*
+ * VS_SAVEPOWERCONTROL
+ */
+enum {
+       RX_SYMBOL_CLK_GATE_EN   = 0,
+       SYS_CLK_GATE_EN         = 2,
+       TX_CLK_GATE_EN          = 3,
+};
+
+struct ufs_mtk_host {
+       struct ufs_hba *hba;
+       struct phy *mphy;
+};
+
+#endif /* !_UFS_MEDIATEK_H */
index de9d3f5..ea72194 100644 (file)
@@ -580,104 +580,6 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
        return 0;
 }
 
-struct ufs_qcom_dev_params {
-       u32 pwm_rx_gear;        /* pwm rx gear to work in */
-       u32 pwm_tx_gear;        /* pwm tx gear to work in */
-       u32 hs_rx_gear;         /* hs rx gear to work in */
-       u32 hs_tx_gear;         /* hs tx gear to work in */
-       u32 rx_lanes;           /* number of rx lanes */
-       u32 tx_lanes;           /* number of tx lanes */
-       u32 rx_pwr_pwm;         /* rx pwm working pwr */
-       u32 tx_pwr_pwm;         /* tx pwm working pwr */
-       u32 rx_pwr_hs;          /* rx hs working pwr */
-       u32 tx_pwr_hs;          /* tx hs working pwr */
-       u32 hs_rate;            /* rate A/B to work in HS */
-       u32 desired_working_mode;
-};
-
-static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
-                                     struct ufs_pa_layer_attr *dev_max,
-                                     struct ufs_pa_layer_attr *agreed_pwr)
-{
-       int min_qcom_gear;
-       int min_dev_gear;
-       bool is_dev_sup_hs = false;
-       bool is_qcom_max_hs = false;
-
-       if (dev_max->pwr_rx == FAST_MODE)
-               is_dev_sup_hs = true;
-
-       if (qcom_param->desired_working_mode == FAST) {
-               is_qcom_max_hs = true;
-               min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
-                                     qcom_param->hs_tx_gear);
-       } else {
-               min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
-                                     qcom_param->pwm_tx_gear);
-       }
-
-       /*
-        * device doesn't support HS but qcom_param->desired_working_mode is
-        * HS, thus device and qcom_param don't agree
-        */
-       if (!is_dev_sup_hs && is_qcom_max_hs) {
-               pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
-                       __func__);
-               return -ENOTSUPP;
-       } else if (is_dev_sup_hs && is_qcom_max_hs) {
-               /*
-                * since device supports HS, it supports FAST_MODE.
-                * since qcom_param->desired_working_mode is also HS
-                * then final decision (FAST/FASTAUTO) is done according
-                * to qcom_params as it is the restricting factor
-                */
-               agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-                                               qcom_param->rx_pwr_hs;
-       } else {
-               /*
-                * here qcom_param->desired_working_mode is PWM.
-                * it doesn't matter whether device supports HS or PWM,
-                * in both cases qcom_param->desired_working_mode will
-                * determine the mode
-                */
-                agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-                                               qcom_param->rx_pwr_pwm;
-       }
-
-       /*
-        * we would like tx to work in the minimum number of lanes
-        * between device capability and vendor preferences.
-        * the same decision will be made for rx
-        */
-       agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
-                                               qcom_param->tx_lanes);
-       agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
-                                               qcom_param->rx_lanes);
-
-       /* device maximum gear is the minimum between device rx and tx gears */
-       min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
-       /*
-        * if both device capabilities and vendor pre-defined preferences are
-        * both HS or both PWM then set the minimum gear to be the chosen
-        * working gear.
-        * if one is PWM and one is HS then the one that is PWM get to decide
-        * what is the gear, as it is the one that also decided previously what
-        * pwr the device will be configured to.
-        */
-       if ((is_dev_sup_hs && is_qcom_max_hs) ||
-           (!is_dev_sup_hs && !is_qcom_max_hs))
-               agreed_pwr->gear_rx = agreed_pwr->gear_tx =
-                       min_t(u32, min_dev_gear, min_qcom_gear);
-       else if (!is_dev_sup_hs)
-               agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
-       else
-               agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
-
-       agreed_pwr->hs_rate = qcom_param->hs_rate;
-       return 0;
-}
-
 #ifdef CONFIG_MSM_BUS_SCALING
 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
                const char *speed_mode)
@@ -905,7 +807,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 {
        u32 val;
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       struct ufs_qcom_dev_params ufs_qcom_cap;
+       struct ufs_dev_params ufs_qcom_cap;
        int ret = 0;
 
        if (!dev_req_params) {
@@ -944,9 +846,9 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                                ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
                }
 
-               ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
-                                                dev_max_params,
-                                                dev_req_params);
+               ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
+                                              dev_max_params,
+                                              dev_req_params);
                if (ret) {
                        pr_err("%s: failed to determine capabilities\n",
                                        __func__);
index 21e4ccb..99a9c4d 100644 (file)
@@ -516,7 +516,6 @@ struct ufs_vreg {
        bool enabled;
        int min_uV;
        int max_uV;
-       int min_uA;
        int max_uA;
 };
 
index 2721367..8a74ec3 100644 (file)
@@ -39,6 +39,7 @@
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
+#include "unipro.h"
 
 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION             2
 
@@ -151,20 +152,12 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 
        vreg->name = kstrdup(name, GFP_KERNEL);
 
-       /* if fixed regulator no need further initialization */
-       snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
-       if (of_property_read_bool(np, prop_name))
-               goto out;
-
        snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
-       ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
-       if (ret) {
-               dev_err(dev, "%s: unable to find %s err %d\n",
-                               __func__, prop_name, ret);
-               goto out;
+       if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
+               dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
+               vreg->max_uA = 0;
        }
 
-       vreg->min_uA = 0;
        if (!strcmp(name, "vcc")) {
                if (of_property_read_bool(np, "vcc-supply-1p8")) {
                        vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
@@ -289,6 +282,103 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
        }
 }
 
+/**
+ * ufshcd_get_pwr_dev_param - get finally agreed attributes for
+ *                            power mode change
+ * @pltfrm_param: pointer to platform parameters
+ * @dev_max: pointer to device attributes
+ * @agreed_pwr: returned agreed attributes
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
+                            struct ufs_pa_layer_attr *dev_max,
+                            struct ufs_pa_layer_attr *agreed_pwr)
+{
+       int min_pltfrm_gear;
+       int min_dev_gear;
+       bool is_dev_sup_hs = false;
+       bool is_pltfrm_max_hs = false;
+
+       if (dev_max->pwr_rx == FAST_MODE)
+               is_dev_sup_hs = true;
+
+       if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
+               is_pltfrm_max_hs = true;
+               min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
+                                       pltfrm_param->hs_tx_gear);
+       } else {
+               min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
+                                       pltfrm_param->pwm_tx_gear);
+       }
+
+       /*
+        * device doesn't support HS but
+        * pltfrm_param->desired_working_mode is HS,
+        * thus device and pltfrm_param don't agree
+        */
+       if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+               pr_info("%s: device doesn't support HS\n",
+                       __func__);
+               return -ENOTSUPP;
+       } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+               /*
+                * since device supports HS, it supports FAST_MODE.
+                * since pltfrm_param->desired_working_mode is also HS
+                * then final decision (FAST/FASTAUTO) is done according
+                * to pltfrm_params as it is the restricting factor
+                */
+               agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+               agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+       } else {
+               /*
+                * here pltfrm_param->desired_working_mode is PWM.
+                * it doesn't matter whether device supports HS or PWM,
+                * in both cases pltfrm_param->desired_working_mode will
+                * determine the mode
+                */
+               agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+               agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
+       }
+
+       /*
+        * we would like tx to work in the minimum number of lanes
+        * between device capability and vendor preferences.
+        * the same decision will be made for rx
+        */
+       agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
+                                   pltfrm_param->tx_lanes);
+       agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
+                                   pltfrm_param->rx_lanes);
+
+       /* device maximum gear is the minimum between device rx and tx gears */
+       min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
+
+       /*
+        * if both device capabilities and vendor pre-defined preferences are
+        * both HS or both PWM then set the minimum gear to be the chosen
+        * working gear.
+        * if one is PWM and one is HS then the one that is PWM get to decide
+        * what is the gear, as it is the one that also decided previously what
+        * pwr the device will be configured to.
+        */
+       if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
+           (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+               agreed_pwr->gear_rx =
+                       min_t(u32, min_dev_gear, min_pltfrm_gear);
+       } else if (!is_dev_sup_hs) {
+               agreed_pwr->gear_rx = min_dev_gear;
+       } else {
+               agreed_pwr->gear_rx = min_pltfrm_gear;
+       }
+       agreed_pwr->gear_tx = agreed_pwr->gear_rx;
+
+       agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
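
As a worked example of the agreement rule implemented by ufshcd_get_pwr_dev_param() above (the numbers are hypothetical): if the device advertises gear_rx = gear_tx = 4 with pwr_rx = FAST_MODE, and the platform limits are hs_rx_gear = hs_tx_gear = 3, rx_pwr_hs = FAST_MODE and desired_working_mode = UFS_HS_MODE, then both sides support HS, so the agreed power mode is FAST_MODE and the agreed gear is min(min(4, 4), min(3, 3)) = 3 on both RX and TX; lane counts are likewise the per-direction minimum of device and platform values. Had the device been PWM-only while the platform still asked for HS, the helper would return -ENOTSUPP instead.
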
index 1f29e1f..0919ebb 100644 (file)
 
 #include "ufshcd.h"
 
+#define UFS_PWM_MODE 1
+#define UFS_HS_MODE  2
+
+struct ufs_dev_params {
+       u32 pwm_rx_gear;        /* pwm rx gear to work in */
+       u32 pwm_tx_gear;        /* pwm tx gear to work in */
+       u32 hs_rx_gear;         /* hs rx gear to work in */
+       u32 hs_tx_gear;         /* hs tx gear to work in */
+       u32 rx_lanes;           /* number of rx lanes */
+       u32 tx_lanes;           /* number of tx lanes */
+       u32 rx_pwr_pwm;         /* rx pwm working pwr */
+       u32 tx_pwr_pwm;         /* tx pwm working pwr */
+       u32 rx_pwr_hs;          /* rx hs working pwr */
+       u32 tx_pwr_hs;          /* tx hs working pwr */
+       u32 hs_rate;            /* rate A/B to work in HS */
+       u32 desired_working_mode;
+};
+
+int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
+                            struct ufs_pa_layer_attr *dev_max,
+                            struct ufs_pa_layer_attr *agreed_pwr);
 int ufshcd_pltfrm_init(struct platform_device *pdev,
                       const struct ufs_hba_variant_ops *vops);
 void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
index e040f9d..8c1c551 100644 (file)
@@ -4704,10 +4704,10 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                                "Reject UPIU not fully implemented\n");
                        break;
                default:
-                       result = DID_ERROR << 16;
                        dev_err(hba->dev,
                                "Unexpected request response code = %x\n",
                                result);
+                       result = DID_ERROR << 16;
                        break;
                }
                break;
@@ -6294,19 +6294,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
                goto out;
        }
 
-       if (hba->vreg_info.vcc)
+       if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
                icc_level = ufshcd_get_max_icc_level(
                                hba->vreg_info.vcc->max_uA,
                                POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
 
-       if (hba->vreg_info.vccq)
+       if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
                icc_level = ufshcd_get_max_icc_level(
                                hba->vreg_info.vccq->max_uA,
                                icc_level,
                                &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
 
-       if (hba->vreg_info.vccq2)
+       if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
                icc_level = ufshcd_get_max_icc_level(
                                hba->vreg_info.vccq2->max_uA,
                                icc_level,
@@ -7004,6 +7004,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
        if (!vreg)
                return 0;
 
+       /*
+        * "set_load" operation shall be required on those regulators
+        * which specifically configured current limitation. Otherwise
+        * zero max_uA may cause unexpected behavior when regulator is
+        * enabled or set as high power mode.
+        */
+       if (!vreg->max_uA)
+               return 0;
+
        ret = regulator_set_load(vreg->reg, ua);
        if (ret < 0) {
                dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
@@ -7039,12 +7048,15 @@ static int ufshcd_config_vreg(struct device *dev,
        name = vreg->name;
 
        if (regulator_count_voltages(reg) > 0) {
-               min_uV = on ? vreg->min_uV : 0;
-               ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
-               if (ret) {
-                       dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+               if (vreg->min_uV && vreg->max_uV) {
+                       min_uV = on ? vreg->min_uV : 0;
+                       ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+                       if (ret) {
+                               dev_err(dev,
+                                       "%s: %s set voltage failed, err=%d\n",
                                        __func__, name, ret);
-                       goto out;
+                               goto out;
+                       }
                }
 
                uA_load = on ? vreg->max_uA : 0;
@@ -7103,9 +7115,6 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
        struct device *dev = hba->dev;
        struct ufs_vreg_info *info = &hba->vreg_info;
 
-       if (!info)
-               goto out;
-
        ret = ufshcd_toggle_vreg(dev, info->vcc, on);
        if (ret)
                goto out;
@@ -7131,10 +7140,7 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 {
        struct ufs_vreg_info *info = &hba->vreg_info;
 
-       if (info)
-               return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
-
-       return 0;
+       return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 }
 
 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -7160,9 +7166,6 @@ static int ufshcd_init_vreg(struct ufs_hba *hba)
        struct device *dev = hba->dev;
        struct ufs_vreg_info *info = &hba->vreg_info;
 
-       if (!info)
-               goto out;
-
        ret = ufshcd_get_vreg(dev, info->vcc);
        if (ret)
                goto out;
index 23129d7..c77e365 100644 (file)
@@ -52,7 +52,7 @@
 #define RX_HS_UNTERMINATED_ENABLE              0x00A6
 #define RX_ENTER_HIBERN8                       0x00A7
 #define RX_BYPASS_8B10B_ENABLE                 0x00A8
-#define RX_TERMINATION_FORCE_ENABLE            0x0089
+#define RX_TERMINATION_FORCE_ENABLE            0x00A9
 #define RX_MIN_ACTIVATETIME_CAPABILITY         0x008F
 #define RX_HIBERN8TIME_CAPABILITY              0x0092
 #define RX_REFCLKFREQ                          0x00EB
index f8cb7c2..c47d38b 100644 (file)
@@ -659,7 +659,7 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
        struct virtio_scsi *vscsi = shost_priv(shost);
-       struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
+       struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
        return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
 }
index 5ce6e2a..59d3245 100644 (file)
@@ -573,7 +573,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        return 0;
 }
 
-static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
+static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
+                           u32 data_offset, u32 data_length);
 static void iscsit_unmap_iovec(struct iscsi_cmd *);
 static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
                                    u32, u32, u32, u8 *);
@@ -604,7 +605,8 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                         *header_digest);
        }
 
-       iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+       iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
+                                  cmd->orig_iov_data_count - (iov_count + 2),
                                   datain->offset, datain->length);
        if (iov_ret < 0)
                return -1;
@@ -886,13 +888,10 @@ EXPORT_SYMBOL(iscsit_reject_cmd);
  * Map some portion of the allocated scatterlist to an iovec, suitable for
  * kernel sockets to copy data in/out.
  */
-static int iscsit_map_iovec(
-       struct iscsi_cmd *cmd,
-       struct kvec *iov,
-       u32 data_offset,
-       u32 data_length)
+static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
+                           u32 data_offset, u32 data_length)
 {
-       u32 i = 0;
+       u32 i = 0, orig_data_length = data_length;
        struct scatterlist *sg;
        unsigned int page_off;
 
@@ -901,9 +900,12 @@ static int iscsit_map_iovec(
         */
        u32 ent = data_offset / PAGE_SIZE;
 
+       if (!data_length)
+               return 0;
+
        if (ent >= cmd->se_cmd.t_data_nents) {
                pr_err("Initial page entry out-of-bounds\n");
-               return -1;
+               goto overflow;
        }
 
        sg = &cmd->se_cmd.t_data_sg[ent];
@@ -913,7 +915,12 @@ static int iscsit_map_iovec(
        cmd->first_data_sg_off = page_off;
 
        while (data_length) {
-               u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+               u32 cur_len;
+
+               if (WARN_ON_ONCE(!sg || i >= nvec))
+                       goto overflow;
+
+               cur_len = min_t(u32, data_length, sg->length - page_off);
 
                iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
                iov[i].iov_len = cur_len;
@@ -927,6 +934,16 @@ static int iscsit_map_iovec(
        cmd->kmapped_nents = i;
 
        return i;
+
+overflow:
+       pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
+              data_offset, orig_data_length, i, nvec);
+       for_each_sg(cmd->se_cmd.t_data_sg, sg,
+                   cmd->se_cmd.t_data_nents, i) {
+               pr_err("[%d] off %d len %d\n",
+                      i, sg->offset, sg->length);
+       }
+       return -1;
 }
 
 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
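
iscsit_map_iovec() now takes the size of the destination iovec array and bails out instead of running past it. Below is a standalone sketch of the same chunk walk, including the new bounds checks, under the assumption that every chunk is one full page; the names and types are illustrative, not kernel API:

	/*
	 * Standalone sketch, not kernel code: map a byte range (off, len) of
	 * a buffer split into fixed-size chunks onto an iovec-like array,
	 * mirroring the walk and the new bounds checks in iscsit_map_iovec().
	 */
	#define CHUNK_SIZE 4096u

	struct iovec_entry {
		void *iov_base;
		unsigned int iov_len;
	};

	int map_range(char **chunks, unsigned int nchunks,
		      struct iovec_entry *iov, unsigned int nvec,
		      unsigned int off, unsigned int len)
	{
		unsigned int ent = off / CHUNK_SIZE;	/* first chunk touched */
		unsigned int chunk_off = off % CHUNK_SIZE;
		unsigned int i = 0;

		if (!len)
			return 0;

		while (len) {
			unsigned int cur = len < CHUNK_SIZE - chunk_off ?
					   len : CHUNK_SIZE - chunk_off;

			if (ent >= nchunks || i >= nvec)
				return -1;	/* range overflows buffer or iovec */

			iov[i].iov_base = chunks[ent] + chunk_off;
			iov[i].iov_len = cur;
			len -= cur;
			chunk_off = 0;
			ent++;
			i++;
		}

		return i;	/* number of iovec entries consumed */
	}

As in the kernel helper, the return value is the number of iovec entries filled, and -1 reports a range that does not fit either the buffer or the iovec array (the kernel version additionally dumps the sg-list before failing).
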
@@ -1268,27 +1285,27 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
                          bool dump_payload)
 {
        int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+       int rc;
+
        /*
         * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
         */
-       if (dump_payload)
-               goto after_immediate_data;
-       /*
-        * Check for underflow case where both EDTL and immediate data payload
-        * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
-        * already been set in target_cmd_size_check() as se_cmd->data_length.
-        *
-        * For this special case, fail the command and dump the immediate data
-        * payload.
-        */
-       if (cmd->first_burst_len > cmd->se_cmd.data_length) {
-               cmd->sense_reason = TCM_INVALID_CDB_FIELD;
-               goto after_immediate_data;
+       if (dump_payload) {
+               u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
+                                cmd->first_burst_len);
+
+               pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
+                        cmd->se_cmd.data_length, cmd->write_data_done,
+                        cmd->first_burst_len, length);
+               rc = iscsit_dump_data_payload(cmd->conn, length, 1);
+               pr_debug("Finished dumping immediate data\n");
+               if (rc < 0)
+                       immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
+       } else {
+               immed_ret = iscsit_handle_immediate_data(cmd, hdr,
+                                                        cmd->first_burst_len);
        }
 
-       immed_ret = iscsit_handle_immediate_data(cmd, hdr,
-                                       cmd->first_burst_len);
-after_immediate_data:
        if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
                /*
                 * A PDU/CmdSN carrying Immediate Data passed
@@ -1301,12 +1318,9 @@ after_immediate_data:
                        return -1;
 
                if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
-                       int rc;
-
-                       rc = iscsit_dump_data_payload(cmd->conn,
-                                                     cmd->first_burst_len, 1);
                        target_put_sess_cmd(&cmd->se_cmd);
-                       return rc;
+
+                       return 0;
                } else if (cmd->unsolicited_data)
                        iscsit_set_unsolicited_dataout(cmd);
 
@@ -1568,14 +1582,16 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
        struct kvec *iov;
        u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
-       u32 payload_length = ntoh24(hdr->dlength);
+       u32 payload_length;
        int iov_ret, data_crc_failed = 0;
 
+       payload_length = min_t(u32, cmd->se_cmd.data_length,
+                              ntoh24(hdr->dlength));
        rx_size += payload_length;
        iov = &cmd->iov_data[0];
 
-       iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
-                                  payload_length);
+       iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
+                                  be32_to_cpu(hdr->offset), payload_length);
        if (iov_ret < 0)
                return -1;
 
@@ -1595,6 +1611,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                rx_size += ISCSI_CRC_LEN;
        }
 
+       WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
        rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
 
        iscsit_unmap_iovec(cmd);
@@ -1860,6 +1877,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        rx_size += ISCSI_CRC_LEN;
                }
 
+               WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
                rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
                if (rx_got != rx_size) {
                        ret = -1;
@@ -2265,6 +2283,7 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        rx_size += ISCSI_CRC_LEN;
                }
 
+               WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
                rx_got = rx_data(conn, &iov[0], niov, rx_size);
                if (rx_got != rx_size)
                        goto reject;
@@ -2575,14 +2594,34 @@ static int iscsit_handle_immediate_data(
        u32 checksum, iov_count = 0, padding = 0;
        struct iscsi_conn *conn = cmd->conn;
        struct kvec *iov;
+       void *overflow_buf = NULL;
 
-       iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+       BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
+       rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
+       iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
+                                  cmd->orig_iov_data_count - 2,
+                                  cmd->write_data_done, rx_size);
        if (iov_ret < 0)
                return IMMEDIATE_DATA_CANNOT_RECOVER;
 
-       rx_size = length;
        iov_count = iov_ret;
        iov = &cmd->iov_data[0];
+       if (rx_size < length) {
+               /*
+                * Special case: length of immediate data exceeds the data
+                * buffer size derived from the CDB.
+                */
+               overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
+               if (!overflow_buf) {
+                       iscsit_unmap_iovec(cmd);
+                       return IMMEDIATE_DATA_CANNOT_RECOVER;
+               }
+               cmd->overflow_buf = overflow_buf;
+               iov[iov_count].iov_base = overflow_buf;
+               iov[iov_count].iov_len = length - rx_size;
+               iov_count++;
+               rx_size = length;
+       }
 
        padding = ((-length) & 3);
        if (padding != 0) {
@@ -2597,6 +2636,7 @@ static int iscsit_handle_immediate_data(
                rx_size += ISCSI_CRC_LEN;
        }
 
+       WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
        rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
 
        iscsit_unmap_iovec(cmd);
@@ -3121,6 +3161,12 @@ int iscsit_build_r2ts_for_cmd(
                                else
                                        xfer_len = conn->sess->sess_ops->MaxBurstLength;
                        }
+
+                       if ((s32)xfer_len < 0) {
+                               cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+                               break;
+                       }
+
                        cmd->r2t_offset += xfer_len;
 
                        if (cmd->r2t_offset == cmd->se_cmd.data_length)
index ae3209e..683d045 100644 (file)
@@ -883,9 +883,6 @@ int iscsit_setup_np(
                return -EINVAL;
        }
 
-       np->np_ip_proto = IPPROTO_TCP;
-       np->np_sock_type = SOCK_STREAM;
-
        ret = sock_create(sockaddr->ss_family, np->np_sock_type,
                        np->np_ip_proto, &sock);
        if (ret < 0) {
@@ -1159,13 +1156,13 @@ static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
 
        if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
                pr_err("Unable to allocate conn->conn_cpumask\n");
-               goto free_mask;
+               goto free_conn_ops;
        }
 
        return conn;
 
-free_mask:
-       free_cpumask_var(conn->conn_cpumask);
+free_conn_ops:
+       kfree(conn->conn_ops);
 put_transport:
        iscsit_put_transport(conn->conn_transport);
 free_conn:
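
The error-path change above matters because the old free_mask label called free_cpumask_var() on the very allocation that had just failed and then fell through without releasing conn->conn_ops, leaking it; the renamed free_conn_ops label unwinds the previous successful allocation instead before dropping the transport reference.
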
index 3ac494f..fae85bf 100644 (file)
@@ -67,6 +67,8 @@ int iscsit_add_r2t_to_list(
 
        lockdep_assert_held(&cmd->r2t_lock);
 
+       WARN_ON_ONCE((s32)xfer_len < 0);
+
        r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
        if (!r2t) {
                pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
@@ -735,6 +737,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
        kfree(cmd->pdu_list);
        kfree(cmd->seq_list);
        kfree(cmd->tmr_req);
+       kfree(cmd->overflow_buf);
        kfree(cmd->iov_data);
        kfree(cmd->text_in_ptr);
 
@@ -769,6 +772,8 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
        struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
        int rc;
 
+       WARN_ON(!list_empty(&cmd->i_conn_node));
+
        __iscsit_free_cmd(cmd, shutdown);
        if (se_cmd) {
                rc = transport_generic_free_cmd(se_cmd, shutdown);
index e09f0cf..893f1fe 100644 (file)
@@ -1760,8 +1760,10 @@ void core_alua_free_tg_pt_gp(
         * can be made while we are releasing struct t10_alua_tg_pt_gp.
         */
        spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       list_del(&tg_pt_gp->tg_pt_gp_list);
-       dev->t10_alua.alua_tg_pt_gps_count--;
+       if (tg_pt_gp->tg_pt_gp_valid_id) {
+               list_del(&tg_pt_gp->tg_pt_gp_list);
+               dev->t10_alua.alua_tg_pt_gps_count--;
+       }
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
        /*
index fc5ef31..db2558f 100644 (file)
@@ -1227,6 +1227,29 @@ static struct t10_wwn *to_t10_wwn(struct config_item *item)
        return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
 }
 
+static ssize_t target_check_inquiry_data(char *buf)
+{
+       size_t len;
+       int i;
+
+       len = strlen(buf);
+
+       /*
+        * SPC 4.3.1:
+        * ASCII data fields shall contain only ASCII printable characters
+        * (i.e., code values 20h to 7Eh) and may be terminated with one or
+        * more ASCII null (00h) characters.
+        */
+       for (i = 0; i < len; i++) {
+               if (buf[i] < 0x20 || buf[i] > 0x7E) {
+                       pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
+                       return -EINVAL;
+               }
+       }
+
+       return len;
+}
+
 /*
  * STANDARD and VPD page 0x83 T10 Vendor Identification
  */
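
Under the SPC 4.3.1 rule enforced by target_check_inquiry_data() above, a value such as "LIO-ORG" is accepted because every byte falls within 0x20–0x7E, whereas a string containing a tab, a control character or any byte above 0x7E is rejected with -EINVAL; on success the helper returns the string length so callers can reuse it for further bounds checks.
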
@@ -1245,7 +1268,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
        unsigned char buf[INQUIRY_VENDOR_LEN + 2];
        char *stripped = NULL;
        size_t len;
-       int i;
+       ssize_t ret;
 
        len = strlcpy(buf, page, sizeof(buf));
        if (len < sizeof(buf)) {
@@ -1260,19 +1283,10 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
                return -EOVERFLOW;
        }
 
-       /*
-        * SPC 4.3.1:
-        * ASCII data fields shall contain only ASCII printable characters (i.e.,
-        * code values 20h to 7Eh) and may be terminated with one or more ASCII
-        * null (00h) characters.
-        */
-       for (i = 0; i < len; i++) {
-               if ((stripped[i] < 0x20) || (stripped[i] > 0x7E)) {
-                       pr_err("Emulated T10 Vendor Identification contains"
-                               " non-ASCII-printable characters\n");
-                       return -EINVAL;
-               }
-       }
+       ret = target_check_inquiry_data(stripped);
+
+       if (ret < 0)
+               return ret;
 
        /*
         * Check to see if any active exports exist.  If they do exist, fail
@@ -1295,6 +1309,118 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
        return count;
 }
 
+static ssize_t target_wwn_product_id_show(struct config_item *item,
+               char *page)
+{
+       return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
+}
+
+static ssize_t target_wwn_product_id_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct t10_wwn *t10_wwn = to_t10_wwn(item);
+       struct se_device *dev = t10_wwn->t10_dev;
+       /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+       unsigned char buf[INQUIRY_MODEL_LEN + 2];
+       char *stripped = NULL;
+       size_t len;
+       ssize_t ret;
+
+       len = strlcpy(buf, page, sizeof(buf));
+       if (len < sizeof(buf)) {
+               /* Strip any newline added from userspace. */
+               stripped = strstrip(buf);
+               len = strlen(stripped);
+       }
+       if (len > INQUIRY_MODEL_LEN) {
+               pr_err("Emulated T10 Model exceeds INQUIRY_MODEL_LEN: "
+                        __stringify(INQUIRY_MODEL_LEN)
+                       "\n");
+               return -EOVERFLOW;
+       }
+
+       ret = target_check_inquiry_data(stripped);
+
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Check to see if any active exports exist.  If they do exist, fail
+        * here as changing this information on the fly (underneath the
+        * initiator side OS dependent multipath code) could cause negative
+        * effects.
+        */
+       if (dev->export_count) {
+               pr_err("Unable to set T10 Model while active %d exports exist\n",
+                       dev->export_count);
+               return -EINVAL;
+       }
+
+       BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
+       strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
+
+       pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
+                dev->t10_wwn.model);
+
+       return count;
+}
+
+static ssize_t target_wwn_revision_show(struct config_item *item,
+               char *page)
+{
+       return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
+}
+
+static ssize_t target_wwn_revision_store(struct config_item *item,
+               const char *page, size_t count)
+{
+       struct t10_wwn *t10_wwn = to_t10_wwn(item);
+       struct se_device *dev = t10_wwn->t10_dev;
+       /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+       unsigned char buf[INQUIRY_REVISION_LEN + 2];
+       char *stripped = NULL;
+       size_t len;
+       ssize_t ret;
+
+       len = strlcpy(buf, page, sizeof(buf));
+       if (len < sizeof(buf)) {
+               /* Strip any newline added from userspace. */
+               stripped = strstrip(buf);
+               len = strlen(stripped);
+       }
+       if (len > INQUIRY_REVISION_LEN) {
+               pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
+                        __stringify(INQUIRY_REVISION_LEN)
+                       "\n");
+               return -EOVERFLOW;
+       }
+
+       ret = target_check_inquiry_data(stripped);
+
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Check to see if any active exports exist.  If they do exist, fail
+        * here as changing this information on the fly (underneath the
+        * initiator side OS dependent multipath code) could cause negative
+        * effects.
+        */
+       if (dev->export_count) {
+               pr_err("Unable to set T10 Revision while active %d exports exist\n",
+                       dev->export_count);
+               return -EINVAL;
+       }
+
+       BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
+       strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
+
+       pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
+                dev->t10_wwn.revision);
+
+       return count;
+}
+
 /*
  * VPD page 0x80 Unit serial
  */
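
The new product_id and revision attributes follow the same rules as the existing vendor_id one: for example, a model string longer than INQUIRY_MODEL_LEN is refused with -EOVERFLOW, non-printable characters fail the target_check_inquiry_data() check with -EINVAL, and any store is rejected with -EINVAL while the device still has active exports, since changing INQUIRY data underneath a logged-in initiator's multipath stack is unsafe.
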
@@ -1442,6 +1568,8 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
 
 CONFIGFS_ATTR(target_wwn_, vendor_id);
+CONFIGFS_ATTR(target_wwn_, product_id);
+CONFIGFS_ATTR(target_wwn_, revision);
 CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1450,6 +1578,8 @@ CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
 
 static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
        &target_wwn_attr_vendor_id,
+       &target_wwn_attr_product_id,
+       &target_wwn_attr_revision,
        &target_wwn_attr_vpd_unit_serial,
        &target_wwn_attr_vpd_protocol_identifier,
        &target_wwn_attr_vpd_assoc_logical_unit,
@@ -1494,11 +1624,12 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
 static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
                char *page)
 {
+       struct se_session *sess = dev->reservation_holder;
        struct se_node_acl *se_nacl;
        ssize_t len;
 
-       se_nacl = dev->dev_reserved_node_acl;
-       if (se_nacl) {
+       if (sess) {
+               se_nacl = sess->se_node_acl;
                len = sprintf(page,
                              "SPC-2 Reservation: %s Initiator: %s\n",
                              se_nacl->se_tpg->se_tpg_tfo->fabric_name,
index 1f8482b..7eae1c8 100644 (file)
@@ -85,7 +85,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                        goto out_unlock;
                }
 
-               se_cmd->se_lun = rcu_dereference(deve->se_lun);
+               se_cmd->se_lun = se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
@@ -176,7 +176,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                        goto out_unlock;
                }
 
-               se_cmd->se_lun = rcu_dereference(deve->se_lun);
+               se_cmd->se_lun = se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
index 1597a9e..0376769 100644 (file)
@@ -111,10 +111,10 @@ target_scsi2_reservation_check(struct se_cmd *cmd)
                break;
        }
 
-       if (!dev->dev_reserved_node_acl || !sess)
+       if (!dev->reservation_holder || !sess)
                return 0;
 
-       if (dev->dev_reserved_node_acl != sess->se_node_acl)
+       if (dev->reservation_holder->se_node_acl != sess->se_node_acl)
                return TCM_RESERVATION_CONFLICT;
 
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
@@ -200,6 +200,16 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
        return 0;
 }
 
+void target_release_reservation(struct se_device *dev)
+{
+       dev->reservation_holder = NULL;
+       dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+               dev->dev_res_bin_isid = 0;
+               dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
+       }
+}
+
 sense_reason_t
 target_scsi2_reservation_release(struct se_cmd *cmd)
 {
@@ -217,21 +227,16 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
                return TCM_RESERVATION_CONFLICT;
 
        spin_lock(&dev->dev_reservation_lock);
-       if (!dev->dev_reserved_node_acl || !sess)
+       if (!dev->reservation_holder || !sess)
                goto out_unlock;
 
-       if (dev->dev_reserved_node_acl != sess->se_node_acl)
+       if (dev->reservation_holder->se_node_acl != sess->se_node_acl)
                goto out_unlock;
 
        if (dev->dev_res_bin_isid != sess->sess_bin_isid)
                goto out_unlock;
 
-       dev->dev_reserved_node_acl = NULL;
-       dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
-       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
-               dev->dev_res_bin_isid = 0;
-               dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
-       }
+       target_release_reservation(dev);
        tpg = sess->se_tpg;
        pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
                " MAPPED LUN: %llu for %s\n",
@@ -275,13 +280,13 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
 
        tpg = sess->se_tpg;
        spin_lock(&dev->dev_reservation_lock);
-       if (dev->dev_reserved_node_acl &&
-          (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+       if (dev->reservation_holder &&
+           dev->reservation_holder->se_node_acl != sess->se_node_acl) {
                pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
                        tpg->se_tpg_tfo->fabric_name);
                pr_err("Original reserver LUN: %llu %s\n",
                        cmd->se_lun->unpacked_lun,
-                       dev->dev_reserved_node_acl->initiatorname);
+                       dev->reservation_holder->se_node_acl->initiatorname);
                pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu"
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->orig_fe_lun,
@@ -290,7 +295,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
                goto out_unlock;
        }
 
-       dev->dev_reserved_node_acl = sess->se_node_acl;
+       dev->reservation_holder = sess;
        dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
        if (sess->sess_bin_isid != 0) {
                dev->dev_res_bin_isid = sess->sess_bin_isid;
index 198fad5..a31c93e 100644 (file)
@@ -58,6 +58,7 @@ extern struct kmem_cache *t10_pr_reg_cache;
 
 extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
                        char *, u32);
+extern void target_release_reservation(struct se_device *dev);
 extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
 extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
 extern int core_scsi3_alloc_aptpl_registration(
index 3a1bb79..344df73 100644 (file)
@@ -390,7 +390,7 @@ int core_tmr_lun_reset(
        if (!preempt_and_abort_list &&
             (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
                spin_lock(&dev->dev_reservation_lock);
-               dev->dev_reserved_node_acl = NULL;
+               dev->reservation_holder = NULL;
                dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
                spin_unlock(&dev->dev_reservation_lock);
                pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
index 9be1418..e59a896 100644 (file)
@@ -389,7 +389,6 @@ out:
  */
 
 struct xcopy_pt_cmd {
-       bool remote_port;
        struct se_cmd se_cmd;
        struct completion xpt_passthrough_sem;
        unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
@@ -506,72 +505,20 @@ void target_xcopy_release_pt(void)
                destroy_workqueue(xcopy_wq);
 }
 
-static void target_xcopy_setup_pt_port(
-       struct xcopy_pt_cmd *xpt_cmd,
-       struct xcopy_op *xop,
-       bool remote_port)
-{
-       struct se_cmd *ec_cmd = xop->xop_se_cmd;
-       struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;
-
-       if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
-               /*
-                * Honor destination port reservations for X-COPY PUSH emulation
-                * when CDB is received on local source port, and READs blocks to
-                * WRITE on remote destination port.
-                */
-               if (remote_port) {
-                       xpt_cmd->remote_port = remote_port;
-               } else {
-                       pt_cmd->se_lun = ec_cmd->se_lun;
-                       pt_cmd->se_dev = ec_cmd->se_dev;
-
-                       pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
-                               " %p\n", pt_cmd->se_dev);
-                       pt_cmd->se_lun = ec_cmd->se_lun;
-                       pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
-                               pt_cmd->se_lun);
-               }
-       } else {
-               /*
-                * Honor source port reservation for X-COPY PULL emulation
-                * when CDB is received on local desintation port, and READs
-                * blocks from the remote source port to WRITE on local
-                * destination port.
-                */
-               if (remote_port) {
-                       xpt_cmd->remote_port = remote_port;
-               } else {
-                       pt_cmd->se_lun = ec_cmd->se_lun;
-                       pt_cmd->se_dev = ec_cmd->se_dev;
-
-                       pr_debug("Honoring local DST port from ec_cmd->se_dev:"
-                               " %p\n", pt_cmd->se_dev);
-                       pt_cmd->se_lun = ec_cmd->se_lun;
-                       pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
-                               pt_cmd->se_lun);
-               }
-       }
-}
-
-static void target_xcopy_init_pt_lun(struct se_device *se_dev,
-               struct se_cmd *pt_cmd, bool remote_port)
-{
-       /*
-        * Don't allocate + init an pt_cmd->se_lun if honoring local port for
-        * reservations.  The pt_cmd->se_lun pointer will be setup from within
-        * target_xcopy_setup_pt_port()
-        */
-       if (remote_port) {
-               pr_debug("Setup emulated se_dev: %p from se_dev\n",
-                       pt_cmd->se_dev);
-               pt_cmd->se_lun = &se_dev->xcopy_lun;
-               pt_cmd->se_dev = se_dev;
-       }
-
-       pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
-}
-
+/*
+ * target_xcopy_setup_pt_cmd - set up a pass-through command
+ * @xpt_cmd:    Data structure to initialize.
+ * @xop:        Describes the XCOPY operation received from an initiator.
+ * @se_dev:     Backend device to associate with @xpt_cmd if
+ *              @remote_port == true.
+ * @cdb:        SCSI CDB to be copied into @xpt_cmd.
+ * @remote_port: If false, use the LUN through which the XCOPY command has
+ *              been received. If true, use @se_dev->xcopy_lun.
+ * @alloc_mem:  Whether or not to allocate an SGL list.
+ *
+ * Set up a SCSI command (READ or WRITE) that will be used to execute an
+ * XCOPY command.
+ */
 static int target_xcopy_setup_pt_cmd(
        struct xcopy_pt_cmd *xpt_cmd,
        struct xcopy_op *xop,
@@ -583,12 +530,19 @@ static int target_xcopy_setup_pt_cmd(
        struct se_cmd *cmd = &xpt_cmd->se_cmd;
        sense_reason_t sense_rc;
        int ret = 0, rc;
+
        /*
         * Setup LUN+port to honor reservations based upon xop->op_origin for
         * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
         */
-       target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-       target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
+       if (remote_port) {
+               cmd->se_lun = &se_dev->xcopy_lun;
+               cmd->se_dev = se_dev;
+       } else {
+               cmd->se_lun = xop->xop_se_cmd->se_lun;
+               cmd->se_dev = xop->xop_se_cmd->se_dev;
+       }
+       cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 
        cmd->tag = 0;
        sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
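
To make the @remote_port semantics documented above concrete (the scenario is illustrative): for an X-COPY PUSH, where xop->op_origin == XCOL_SOURCE_RECV_OP, the command arrives on the source LUN, so the internal READ is built with remote_port == false and reuses the receiving command's se_lun and se_dev, while the internal WRITE toward the destination backend is built with remote_port == true and is routed through that device's xcopy_lun; a PULL is the mirror image, with the READ going through xcopy_lun and the WRITE staying on the local port.
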
index 2bb3490..c48e964 100644 (file)
 
 
 
-/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
-#define FC_PORT_ROLE_NVME_INITIATOR    0x10
-#define FC_PORT_ROLE_NVME_TARGET       0x20
-#define FC_PORT_ROLE_NVME_DISCOVERY    0x40
-
-
 /**
  * struct nvme_fc_port_info - port-specific ids and FC connection-specific
  *                            data element used during NVME Host role
index 56b2dba..b08febe 100644 (file)
@@ -224,6 +224,13 @@ struct sas_work {
        struct work_struct work;
 };
 
+/* Lots of code duplicates this in the SCSI tree, which can be factored out */
+static inline bool sas_dev_type_is_expander(enum sas_device_type type)
+{
+       return type == SAS_EDGE_EXPANDER_DEVICE ||
+              type == SAS_FANOUT_EXPANDER_DEVICE;
+}
+
 static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
 {
        INIT_WORK(&sw->work, fn);
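
The sas_dev_type_is_expander() helper added above is meant to replace the open-coded two-way comparison repeated across the SCSI tree; an illustrative before/after (the surrounding caller and handle_expander() are hypothetical):

	/* before */
	if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
	    dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
		handle_expander(dev);

	/* after */
	if (sas_dev_type_is_expander(dev->dev_type))
		handle_expander(dev);
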
@@ -245,9 +252,9 @@ static inline struct sas_discovery_event *to_sas_discovery_event(struct work_str
 struct sas_discovery {
        struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
        unsigned long    pending;
-       u8     fanout_sas_addr[8];
-       u8     eeds_a[8];
-       u8     eeds_b[8];
+       u8     fanout_sas_addr[SAS_ADDR_SIZE];
+       u8     eeds_a[SAS_ADDR_SIZE];
+       u8     eeds_b[SAS_ADDR_SIZE];
        int    max_level;
 };
 
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h
deleted file mode 100644 (file)
index 8a6acd0..0000000
+++ /dev/null
@@ -1,398 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __OSD_ATTRIBUTES_H__
-#define __OSD_ATTRIBUTES_H__
-
-#include <scsi/osd_protocol.h>
-
-/*
- * Contains types and constants that define attribute pages and attribute
- * numbers and their data types.
- */
-
-#define ATTR_SET(pg, id, l, ptr) \
-       { .attr_page = pg, .attr_id = id, .len = l, .val_ptr = ptr }
-
-#define ATTR_DEF(pg, id, l) ATTR_SET(pg, id, l, NULL)
-
-/* osd-r10 4.7.3 Attributes pages */
-enum {
-       OSD_APAGE_OBJECT_FIRST          = 0x0,
-       OSD_APAGE_OBJECT_DIRECTORY      = 0,
-       OSD_APAGE_OBJECT_INFORMATION    = 1,
-       OSD_APAGE_OBJECT_QUOTAS         = 2,
-       OSD_APAGE_OBJECT_TIMESTAMP      = 3,
-       OSD_APAGE_OBJECT_COLLECTIONS    = 4,
-       OSD_APAGE_OBJECT_SECURITY       = 5,
-       OSD_APAGE_OBJECT_LAST           = 0x2fffffff,
-
-       OSD_APAGE_PARTITION_FIRST       = 0x30000000,
-       OSD_APAGE_PARTITION_DIRECTORY   = OSD_APAGE_PARTITION_FIRST + 0,
-       OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
-       OSD_APAGE_PARTITION_QUOTAS      = OSD_APAGE_PARTITION_FIRST + 2,
-       OSD_APAGE_PARTITION_TIMESTAMP   = OSD_APAGE_PARTITION_FIRST + 3,
-       OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4,
-       OSD_APAGE_PARTITION_SECURITY    = OSD_APAGE_PARTITION_FIRST + 5,
-       OSD_APAGE_PARTITION_LAST        = 0x5FFFFFFF,
-
-       OSD_APAGE_COLLECTION_FIRST      = 0x60000000,
-       OSD_APAGE_COLLECTION_DIRECTORY  = OSD_APAGE_COLLECTION_FIRST + 0,
-       OSD_APAGE_COLLECTION_INFORMATION = OSD_APAGE_COLLECTION_FIRST + 1,
-       OSD_APAGE_COLLECTION_TIMESTAMP  = OSD_APAGE_COLLECTION_FIRST + 3,
-       OSD_APAGE_COLLECTION_SECURITY   = OSD_APAGE_COLLECTION_FIRST + 5,
-       OSD_APAGE_COLLECTION_LAST       = 0x8FFFFFFF,
-
-       OSD_APAGE_ROOT_FIRST            = 0x90000000,
-       OSD_APAGE_ROOT_DIRECTORY        = OSD_APAGE_ROOT_FIRST + 0,
-       OSD_APAGE_ROOT_INFORMATION      = OSD_APAGE_ROOT_FIRST + 1,
-       OSD_APAGE_ROOT_QUOTAS           = OSD_APAGE_ROOT_FIRST + 2,
-       OSD_APAGE_ROOT_TIMESTAMP        = OSD_APAGE_ROOT_FIRST + 3,
-       OSD_APAGE_ROOT_SECURITY         = OSD_APAGE_ROOT_FIRST + 5,
-       OSD_APAGE_ROOT_LAST             = 0xBFFFFFFF,
-
-       OSD_APAGE_RESERVED_TYPE_FIRST   = 0xC0000000,
-       OSD_APAGE_RESERVED_TYPE_LAST    = 0xEFFFFFFF,
-
-       OSD_APAGE_COMMON_FIRST          = 0xF0000000,
-       OSD_APAGE_COMMON_LAST           = 0xFFFFFFFD,
-
-       OSD_APAGE_CURRENT_COMMAND       = 0xFFFFFFFE,
-
-       OSD_APAGE_REQUEST_ALL           = 0xFFFFFFFF,
-};
-
-/* subcategories of attr pages within each range above */
-enum {
-       OSD_APAGE_STD_FIRST             = 0x0,
-       OSD_APAGE_STD_DIRECTORY         = 0,
-       OSD_APAGE_STD_INFORMATION       = 1,
-       OSD_APAGE_STD_QUOTAS            = 2,
-       OSD_APAGE_STD_TIMESTAMP         = 3,
-       OSD_APAGE_STD_COLLECTIONS       = 4,
-       OSD_APAGE_STD_POLICY_SECURITY   = 5,
-       OSD_APAGE_STD_LAST              = 0x0000007F,
-
-       OSD_APAGE_RESERVED_FIRST        = 0x00000080,
-       OSD_APAGE_RESERVED_LAST         = 0x00007FFF,
-
-       OSD_APAGE_OTHER_STD_FIRST       = 0x00008000,
-       OSD_APAGE_OTHER_STD_LAST        = 0x0000EFFF,
-
-       OSD_APAGE_PUBLIC_FIRST          = 0x0000F000,
-       OSD_APAGE_PUBLIC_LAST           = 0x0000FFFF,
-
-       OSD_APAGE_APP_DEFINED_FIRST     = 0x00010000,
-       OSD_APAGE_APP_DEFINED_LAST      = 0x1FFFFFFF,
-
-       OSD_APAGE_VENDOR_SPECIFIC_FIRST = 0x20000000,
-       OSD_APAGE_VENDOR_SPECIFIC_LAST  = 0x2FFFFFFF,
-};
-
-enum {
-       OSD_ATTR_PAGE_IDENTIFICATION = 0, /* in all pages 40 bytes */
-};
-
-struct page_identification {
-       u8 vendor_identification[8];
-       u8 page_identification[32];
-}  __packed;
-
-struct osd_attr_page_header {
-       __be32 page_number;
-       __be32 page_length;
-} __packed;
-
-/* 7.1.2.8 Root Information attributes page (OSD_APAGE_ROOT_INFORMATION) */
-enum {
-       OSD_ATTR_RI_OSD_SYSTEM_ID            = 0x3,   /* 20       */
-       OSD_ATTR_RI_VENDOR_IDENTIFICATION    = 0x4,   /* 8        */
-       OSD_ATTR_RI_PRODUCT_IDENTIFICATION   = 0x5,   /* 16       */
-       OSD_ATTR_RI_PRODUCT_MODEL            = 0x6,   /* 32       */
-       OSD_ATTR_RI_PRODUCT_REVISION_LEVEL   = 0x7,   /* 4        */
-       OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER    = 0x8,   /* variable */
-       OSD_ATTR_RI_OSD_NAME                 = 0x9,   /* variable */
-       OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA,   /* 4        */
-       OSD_ATTR_RI_TOTAL_CAPACITY           = 0x80,  /* 8        */
-       OSD_ATTR_RI_USED_CAPACITY            = 0x81,  /* 8        */
-       OSD_ATTR_RI_NUMBER_OF_PARTITIONS     = 0xC0,  /* 8        */
-       OSD_ATTR_RI_CLOCK                    = 0x100, /* 6        */
-       OARI_DEFAULT_ISOLATION_METHOD        = 0X110, /* 1        */
-       OARI_SUPPORTED_ISOLATION_METHODS     = 0X111, /* 32       */
-
-       OARI_DATA_ATOMICITY_GUARANTEE                   = 0X120,   /* 8       */
-       OARI_DATA_ATOMICITY_ALIGNMENT                   = 0X121,   /* 8       */
-       OARI_ATTRIBUTES_ATOMICITY_GUARANTEE             = 0X122,   /* 8       */
-       OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER       = 0X123,   /* 1       */
-
-       OARI_MAXIMUM_SNAPSHOTS_COUNT                    = 0X1C1,    /* 0 or 4 */
-       OARI_MAXIMUM_CLONES_COUNT                       = 0X1C2,    /* 0 or 4 */
-       OARI_MAXIMUM_BRANCH_DEPTH                       = 0X1CC,    /* 0 or 4 */
-       OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST  = 0X200,    /* 0 or 4 */
-       OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST   = 0X2ff,    /* 0 or 4 */
-       OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300,    /* 0 or 4 */
-       OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST  = 0X30F,    /* 0 or 4 */
-       OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING     = 0X310,    /* 0 or 4 */
-       OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING            = 0X311,    /* 0 or 1 */
-       OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001,/* 0 or 4 */
-       OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST  = 0X700FFFF,/* 0 or 4 */
-};
-/* Root_Information_attributes_page does not have a get_page structure */
-
-/* 7.1.2.9 Partition Information attributes page
- * (OSD_APAGE_PARTITION_INFORMATION)
- */
-enum {
-       OSD_ATTR_PI_PARTITION_ID            = 0x1,     /* 8        */
-       OSD_ATTR_PI_USERNAME                = 0x9,     /* variable */
-       OSD_ATTR_PI_USED_CAPACITY           = 0x81,    /* 8        */
-       OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84,    /* 0 or 8   */
-       OSD_ATTR_PI_NUMBER_OF_OBJECTS       = 0xC1,    /* 8        */
-
-       OSD_ATTR_PI_ACTUAL_DATA_SPACE                      = 0xD1, /* 0 or 8 */
-       OSD_ATTR_PI_RESERVED_DATA_SPACE                    = 0xD2, /* 0 or 8 */
-       OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD    = 0x200,/* 0 or 4 */
-       OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD       = 0x201,/* 0 or 4 */
-       OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION         = 0x300,/* 0 or 4 */
-       OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION      = 0x301,/* 0 or 4 */
-};
-/* Partition Information attributes page does not have a get_page structure */
-
-/* 7.1.2.10 Collection Information attributes page
- * (OSD_APAGE_COLLECTION_INFORMATION)
- */
-enum {
-       OSD_ATTR_CI_PARTITION_ID           = 0x1,       /* 8        */
-       OSD_ATTR_CI_COLLECTION_OBJECT_ID   = 0x2,       /* 8        */
-       OSD_ATTR_CI_USERNAME               = 0x9,       /* variable */
-       OSD_ATTR_CI_COLLECTION_TYPE        = 0xA,       /* 1        */
-       OSD_ATTR_CI_USED_CAPACITY          = 0x81,      /* 8        */
-};
-/* Collection Information attributes page does not have a get_page structure */
-
-/* 7.1.2.11 User Object Information attributes page
- * (OSD_APAGE_OBJECT_INFORMATION)
- */
-enum {
-       OSD_ATTR_OI_PARTITION_ID         = 0x1,       /* 8        */
-       OSD_ATTR_OI_OBJECT_ID            = 0x2,       /* 8        */
-       OSD_ATTR_OI_USERNAME             = 0x9,       /* variable */
-       OSD_ATTR_OI_USED_CAPACITY        = 0x81,      /* 8        */
-       OSD_ATTR_OI_LOGICAL_LENGTH       = 0x82,      /* 8        */
-       SD_ATTR_OI_ACTUAL_DATA_SPACE     = 0XD1,      /* 0 OR 8   */
-       SD_ATTR_OI_RESERVED_DATA_SPACE   = 0XD2,      /* 0 OR 8   */
-};
-/* Object Information attributes page does not have a get_page structure */
-
-/* 7.1.2.12 Root Quotas attributes page (OSD_APAGE_ROOT_QUOTAS) */
-enum {
-       OSD_ATTR_RQ_DEFAULT_MAXIMUM_USER_OBJECT_LENGTH     = 0x1,      /* 8  */
-       OSD_ATTR_RQ_PARTITION_CAPACITY_QUOTA               = 0x10001,  /* 8  */
-       OSD_ATTR_RQ_PARTITION_OBJECT_COUNT                 = 0x10002,  /* 8  */
-       OSD_ATTR_RQ_PARTITION_COLLECTIONS_PER_USER_OBJECT  = 0x10081,  /* 4  */
-       OSD_ATTR_RQ_PARTITION_COUNT                        = 0x20002,  /* 8  */
-};
-
-struct Root_Quotas_attributes_page {
-       struct osd_attr_page_header hdr; /* id=R+2, size=0x24 */
-       __be64 default_maximum_user_object_length;
-       __be64 partition_capacity_quota;
-       __be64 partition_object_count;
-       __be64 partition_collections_per_user_object;
-       __be64 partition_count;
-}  __packed;
-
-/* 7.1.2.13 Partition Quotas attributes page (OSD_APAGE_PARTITION_QUOTAS)*/
-enum {
-       OSD_ATTR_PQ_DEFAULT_MAXIMUM_USER_OBJECT_LENGTH  = 0x1,        /* 8 */
-       OSD_ATTR_PQ_CAPACITY_QUOTA                      = 0x10001,    /* 8 */
-       OSD_ATTR_PQ_OBJECT_COUNT                        = 0x10002,    /* 8 */
-       OSD_ATTR_PQ_COLLECTIONS_PER_USER_OBJECT         = 0x10081,    /* 4 */
-};
-
-struct Partition_Quotas_attributes_page {
-       struct osd_attr_page_header hdr; /* id=P+2, size=0x1C */
-       __be64 default_maximum_user_object_length;
-       __be64 capacity_quota;
-       __be64 object_count;
-       __be64 collections_per_user_object;
-}  __packed;
-
-/* 7.1.2.14 User Object Quotas attributes page (OSD_APAGE_OBJECT_QUOTAS) */
-enum {
-       OSD_ATTR_OQ_MAXIMUM_LENGTH  = 0x1,        /* 8 */
-};
-
-struct Object_Quotas_attributes_page {
-       struct osd_attr_page_header hdr; /* id=U+2, size=0x8 */
-       __be64 maximum_length;
-}  __packed;
-
-/* 7.1.2.15 Root Timestamps attributes page (OSD_APAGE_ROOT_TIMESTAMP) */
-enum {
-       OSD_ATTR_RT_ATTRIBUTES_ACCESSED_TIME  = 0x2,        /* 6 */
-       OSD_ATTR_RT_ATTRIBUTES_MODIFIED_TIME  = 0x3,        /* 6 */
-       OSD_ATTR_RT_TIMESTAMP_BYPASS          = 0xFFFFFFFE, /* 1 */
-};
-
-struct root_timestamps_attributes_page {
-       struct osd_attr_page_header hdr; /* id=R+3, size=0xD */
-       struct osd_timestamp attributes_accessed_time;
-       struct osd_timestamp attributes_modified_time;
-       u8 timestamp_bypass;
-}  __packed;
-
-/* 7.1.2.16 Partition Timestamps attributes page
- * (OSD_APAGE_PARTITION_TIMESTAMP)
- */
-enum {
-       OSD_ATTR_PT_CREATED_TIME              = 0x1,        /* 6 */
-       OSD_ATTR_PT_ATTRIBUTES_ACCESSED_TIME  = 0x2,        /* 6 */
-       OSD_ATTR_PT_ATTRIBUTES_MODIFIED_TIME  = 0x3,        /* 6 */
-       OSD_ATTR_PT_DATA_ACCESSED_TIME        = 0x4,        /* 6 */
-       OSD_ATTR_PT_DATA_MODIFIED_TIME        = 0x5,        /* 6 */
-       OSD_ATTR_PT_TIMESTAMP_BYPASS          = 0xFFFFFFFE, /* 1 */
-};
-
-struct partition_timestamps_attributes_page {
-       struct osd_attr_page_header hdr; /* id=P+3, size=0x1F */
-       struct osd_timestamp created_time;
-       struct osd_timestamp attributes_accessed_time;
-       struct osd_timestamp attributes_modified_time;
-       struct osd_timestamp data_accessed_time;
-       struct osd_timestamp data_modified_time;
-       u8 timestamp_bypass;
-}  __packed;
-
-/* 7.1.2.17/18 Collection/Object Timestamps attributes page
- * (OSD_APAGE_COLLECTION_TIMESTAMP/OSD_APAGE_OBJECT_TIMESTAMP)
- */
-enum {
-       OSD_ATTR_OT_CREATED_TIME              = 0x1,        /* 6 */
-       OSD_ATTR_OT_ATTRIBUTES_ACCESSED_TIME  = 0x2,        /* 6 */
-       OSD_ATTR_OT_ATTRIBUTES_MODIFIED_TIME  = 0x3,        /* 6 */
-       OSD_ATTR_OT_DATA_ACCESSED_TIME        = 0x4,        /* 6 */
-       OSD_ATTR_OT_DATA_MODIFIED_TIME        = 0x5,        /* 6 */
-};
-
-/* same for collection */
-struct object_timestamps_attributes_page {
-       struct osd_attr_page_header hdr; /* id=C+3/3, size=0x1E */
-       struct osd_timestamp created_time;
-       struct osd_timestamp attributes_accessed_time;
-       struct osd_timestamp attributes_modified_time;
-       struct osd_timestamp data_accessed_time;
-       struct osd_timestamp data_modified_time;
-}  __packed;
-
-/* OSD2r05: 7.1.3.19 Attributes Access attributes page
- * (OSD_APAGE_PARTITION_ATTR_ACCESS)
- *
- * each attribute is of the form below. Total array length is deduced
- * from the attribute's length
- * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
- */
-struct attributes_access_attr {
-       struct osd_attributes_list_attrid attr_list[0];
-} __packed;
-
-/* OSD2r05: 7.1.2.21 Collections attributes page */
-/* TBD */
-
-/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
-enum {
-       OSD_ATTR_RS_DEFAULT_SECURITY_METHOD           = 0x1,       /* 1      */
-       OSD_ATTR_RS_OLDEST_VALID_NONCE_LIMIT          = 0x2,       /* 6      */
-       OSD_ATTR_RS_NEWEST_VALID_NONCE_LIMIT          = 0x3,       /* 6      */
-       OSD_ATTR_RS_PARTITION_DEFAULT_SECURITY_METHOD = 0x6,       /* 1      */
-       OSD_ATTR_RS_SUPPORTED_SECURITY_METHODS        = 0x7,       /* 2      */
-       OSD_ATTR_RS_ADJUSTABLE_CLOCK                  = 0x9,       /* 6      */
-       OSD_ATTR_RS_MASTER_KEY_IDENTIFIER             = 0x7FFD,    /* 0 or 7 */
-       OSD_ATTR_RS_ROOT_KEY_IDENTIFIER               = 0x7FFE,    /* 0 or 7 */
-       OSD_ATTR_RS_SUPPORTED_INTEGRITY_ALGORITHM_0   = 0x80000000,/* 1,(x16)*/
-       OSD_ATTR_RS_SUPPORTED_DH_GROUP_0              = 0x80000010,/* 1,(x16)*/
-};
-
-struct root_security_attributes_page {
-       struct osd_attr_page_header hdr; /* id=R+5, size=0x3F */
-       u8 default_security_method;
-       u8 partition_default_security_method;
-       __be16 supported_security_methods;
-       u8 mki_valid_rki_valid;
-       struct osd_timestamp oldest_valid_nonce_limit;
-       struct osd_timestamp newest_valid_nonce_limit;
-       struct osd_timestamp adjustable_clock;
-       u8 master_key_identifier[32-25];
-       u8 root_key_identifier[39-32];
-       u8 supported_integrity_algorithm[16];
-       u8 supported_dh_group[16];
-}  __packed;
-
-/* 7.1.2.21 Partition Policy/Security attributes page
- * (OSD_APAGE_PARTITION_SECURITY)
- */
-enum {
-       OSD_ATTR_PS_DEFAULT_SECURITY_METHOD        = 0x1,        /* 1      */
-       OSD_ATTR_PS_OLDEST_VALID_NONCE             = 0x2,        /* 6      */
-       OSD_ATTR_PS_NEWEST_VALID_NONCE             = 0x3,        /* 6      */
-       OSD_ATTR_PS_REQUEST_NONCE_LIST_DEPTH       = 0x4,        /* 2      */
-       OSD_ATTR_PS_FROZEN_WORKING_KEY_BIT_MASK    = 0x5,        /* 2      */
-       OSD_ATTR_PS_PARTITION_KEY_IDENTIFIER       = 0x7FFF,     /* 0 or 7 */
-       OSD_ATTR_PS_WORKING_KEY_IDENTIFIER_FIRST   = 0x8000,     /* 0 or 7 */
-       OSD_ATTR_PS_WORKING_KEY_IDENTIFIER_LAST    = 0x800F,     /* 0 or 7 */
-       OSD_ATTR_PS_POLICY_ACCESS_TAG              = 0x40000001, /* 4      */
-       OSD_ATTR_PS_USER_OBJECT_POLICY_ACCESS_TAG  = 0x40000002, /* 4      */
-};
-
-struct partition_security_attributes_page {
-       struct osd_attr_page_header hdr; /* id=p+5, size=0x8f */
-       u8 reserved[3];
-       u8 default_security_method;
-       struct osd_timestamp oldest_valid_nonce;
-       struct osd_timestamp newest_valid_nonce;
-       __be16 request_nonce_list_depth;
-       __be16 frozen_working_key_bit_mask;
-       __be32 policy_access_tag;
-       __be32 user_object_policy_access_tag;
-       u8 pki_valid;
-       __be16 wki_00_0f_vld;
-       struct osd_key_identifier partition_key_identifier;
-       struct osd_key_identifier working_key_identifiers[16];
-}  __packed;
-
-/* 7.1.2.22/23 Collection/Object Policy-Security attributes page
- * (OSD_APAGE_COLLECTION_SECURITY/OSD_APAGE_OBJECT_SECURITY)
- */
-enum {
-       OSD_ATTR_OS_POLICY_ACCESS_TAG              = 0x40000001, /* 4      */
-};
-
-struct object_security_attributes_page {
-       struct osd_attr_page_header hdr; /* id=C+5/5, size=4 */
-       __be32 policy_access_tag;
-}  __packed;
-
-/* OSD2r05: 7.1.3.31 Current Command attributes page
- * (OSD_APAGE_CURRENT_COMMAND)
- */
-enum {
-       OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE     = 0x1, /* 32  */
-       OSD_ATTR_CC_OBJECT_TYPE                        = 0x2, /* 1   */
-       OSD_ATTR_CC_PARTITION_ID                       = 0x3, /* 8   */
-       OSD_ATTR_CC_OBJECT_ID                          = 0x4, /* 8   */
-       OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND    = 0x5, /* 8   */
-       OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY            = 0x6, /* 8   */
-};
-
-/*TBD: osdv1_current_command_attributes_page */
-
-struct osdv2_current_command_attributes_page {
-       struct osd_attr_page_header hdr;  /* id=0xFFFFFFFE, size=0x44 */
-       u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
-       u8 object_type;
-       u8 reserved[3];
-       __be64 partition_id;
-       __be64 object_id;
-       __be64 starting_byte_address_of_append;
-       __be64 change_in_used_capacity;
-};
-
-#endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
deleted file mode 100644 (file)
index e0ca835..0000000
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * osd_protocol.h - OSD T10 standard C definitions.
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * This file contains types and constants that are defined by the protocol
- * Note: All names and symbols are taken from the OSD standard's text.
- */
-#ifndef __OSD_PROTOCOL_H__
-#define __OSD_PROTOCOL_H__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-
-enum {
-       OSDv1_ADDITIONAL_CDB_LENGTH = 192,
-       OSDv1_TOTAL_CDB_LEN = OSDv1_ADDITIONAL_CDB_LENGTH + 8,
-       OSDv1_CAP_LEN = 80,
-
-       /* Latest supported version */
-       OSDv2_ADDITIONAL_CDB_LENGTH = 228,
-       OSD_ADDITIONAL_CDB_LENGTH =
-               OSDv2_ADDITIONAL_CDB_LENGTH,
-       OSD_TOTAL_CDB_LEN = OSD_ADDITIONAL_CDB_LENGTH + 8,
-       OSD_CAP_LEN = 104,
-
-       OSD_SYSTEMID_LEN = 20,
-       OSDv1_CRYPTO_KEYID_SIZE = 20,
-       OSDv2_CRYPTO_KEYID_SIZE = 32,
-       OSD_CRYPTO_KEYID_SIZE = OSDv2_CRYPTO_KEYID_SIZE,
-       OSD_CRYPTO_SEED_SIZE = 4,
-       OSD_CRYPTO_NONCE_SIZE = 12,
-       OSD_MAX_SENSE_LEN = 252, /* from SPC-3 */
-
-       OSD_PARTITION_FIRST_ID = 0x10000,
-       OSD_OBJECT_FIRST_ID = 0x10000,
-};
-
-/* (osd-r10 5.2.4)
- * osd2r03: 5.2.3 Caching control bits
- */
-enum osd_options_byte {
-       OSD_CDB_FUA = 0x08,     /* Force Unit Access */
-       OSD_CDB_DPO = 0x10,     /* Disable Page Out */
-};
-
-/*
- * osd2r03: 5.2.5 Isolation.
- * First 3 bits, V2-only.
- * Also for attr 110h "default isolation method" at Root Information page
- */
-enum osd_options_byte_isolation {
-       OSD_ISOLATION_DEFAULT = 0,
-       OSD_ISOLATION_NONE = 1,
-       OSD_ISOLATION_STRICT = 2,
-       OSD_ISOLATION_RANGE = 4,
-       OSD_ISOLATION_FUNCTIONAL = 5,
-       OSD_ISOLATION_VENDOR = 7,
-};
-
-/* (osd-r10: 6.7)
- * osd2r03: 6.8 FLUSH, FLUSH COLLECTION, FLUSH OSD, FLUSH PARTITION
- */
-enum osd_options_flush_scope_values {
-       OSD_CDB_FLUSH_ALL = 0,
-       OSD_CDB_FLUSH_ATTR_ONLY = 1,
-
-       OSD_CDB_FLUSH_ALL_RECURSIVE = 2,
-       /* V2-only */
-       OSD_CDB_FLUSH_ALL_RANGE = 2,
-};
-
-/* osd2r03: 5.2.10 Timestamps control */
-enum {
-       OSD_CDB_NORMAL_TIMESTAMPS = 0,
-       OSD_CDB_BYPASS_TIMESTAMPS = 0x7f,
-};
-
-/* (osd-r10: 5.2.2.1)
- * osd2r03: 5.2.4.1 Get and set attributes CDB format selection
- *     2 bits at second nibble of command_specific_options byte
- */
-enum osd_attributes_mode {
-       /* V2-only */
-       OSD_CDB_SET_ONE_ATTR = 0x10,
-
-       OSD_CDB_GET_ATTR_PAGE_SET_ONE = 0x20,
-       OSD_CDB_GET_SET_ATTR_LISTS = 0x30,
-
-       OSD_CDB_GET_SET_ATTR_MASK = 0x30,
-};
-
-/* (osd-r10: 4.12.5)
- * osd2r03: 4.14.5 Data-In and Data-Out buffer offsets
- *     byte offset = mantissa * (2^(exponent+8))
- *     struct {
- *             unsigned mantissa: 28;
- *             int exponent: 04;
- *     }
- */
-typedef __be32 osd_cdb_offset;
-
-enum {
-       OSD_OFFSET_UNUSED = 0xFFFFFFFF,
-       OSD_OFFSET_MAX_BITS = 28,
-
-       OSDv1_OFFSET_MIN_SHIFT = 8,
-       OSD_OFFSET_MIN_SHIFT = 3,
-       OSD_OFFSET_MAX_SHIFT = 16,
-};
-
-/* Return the smallest allowed encoded offset that contains @offset.
- *
- * The actual encoded offset returned is @offset + *padding.
- * (up to max_shift, non-inclusive)
- */
-osd_cdb_offset __osd_encode_offset(u64 offset, unsigned *padding,
-       int min_shift, int max_shift);
-
-/* Minimum alignment is 256 bytes
- * Note: It seems from the v1 standard that the exponent can be from 0+8 to
- * 0xE+8 (inclusive), which is 8 to 23, but the IBM code restricts it to 16,
- * so be it.
- */
-static inline osd_cdb_offset osd_encode_offset_v1(u64 offset, unsigned *padding)
-{
-       return __osd_encode_offset(offset, padding,
-                               OSDv1_OFFSET_MIN_SHIFT, OSD_OFFSET_MAX_SHIFT);
-}
-
-/* Minimum 8-byte alignment
- * Same as v1, but since the exponent can be signed, an alignment of less
- * than 256 can be reached with small offsets (<2GB)
- */
-static inline osd_cdb_offset osd_encode_offset_v2(u64 offset, unsigned *padding)
-{
-       return __osd_encode_offset(offset, padding,
-                                  OSD_OFFSET_MIN_SHIFT, OSD_OFFSET_MAX_SHIFT);
-}
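To make the offset arithmetic above concrete, here is a minimal sketch (illustrative only, helper name hypothetical) of splitting a byte offset into a 28-bit mantissa and a shift, where shift == exponent + 8 as in the formula above. The packing of mantissa and exponent into the __be32 osd_cdb_offset follows the bit-field comment and is not reproduced here.

/*
 * Illustrative only: round @offset up and split it into a 28-bit
 * mantissa and a shift (shift == exponent + 8), scanning shifts in
 * [min_shift, max_shift) in the spirit of the helpers above.
 */
static int ex_split_offset(u64 offset, int min_shift, int max_shift,
                           u32 *mantissa, unsigned *padding)
{
        int shift;

        for (shift = min_shift; shift < max_shift; ++shift) {
                u64 align = 1ULL << shift;
                u64 rounded = ALIGN(offset, align);

                if ((rounded >> shift) < (1ULL << OSD_OFFSET_MAX_BITS)) {
                        *mantissa = rounded >> shift;
                        *padding = rounded - offset;
                        return shift;
                }
        }
        return -1;      /* offset too large to encode */
}

/*
 * Example: offset = 1000000 with the v2 range (3..16) yields
 * shift = 3, mantissa = 125000, padding = 0.
 */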
-
-/* osd2r03: 5.2.1 Overview */
-struct osd_cdb_head {
-       struct scsi_varlen_cdb_hdr varlen_cdb;
-/*10*/ u8              options;
-       u8              command_specific_options;
-       u8              timestamp_control;
-/*13*/ u8              reserved1[3];
-/*16*/ __be64          partition;
-/*24*/ __be64          object;
-/*32*/ union { /* V1 vs V2 alignment differences */
-               struct __osdv1_cdb_addr_len {
-/*32*/                 __be32          list_identifier;/* Rarely used */
-/*36*/                 __be64          length;
-/*44*/                 __be64          start_address;
-               } __packed v1;
-
-               struct __osdv2_cdb_addr_len {
-                       /* called allocation_length in some commands */
-/*32*/                 __be64  length;
-/*40*/                 __be64  start_address;
-                       union {
-/*48*/                         __be32 list_identifier;/* Rarely used */
-                               /* OSD2r05 5.2.5 CDB continuation length */
-/*48*/                         __be32 cdb_continuation_length;
-                       };
-               } __packed v2;
-       };
-/*52*/ union { /* selected attributes mode Page/List/Single */
-               struct osd_attributes_page_mode {
-/*52*/                 __be32          get_attr_page;
-/*56*/                 __be32          get_attr_alloc_length;
-/*60*/                 osd_cdb_offset  get_attr_offset;
-
-/*64*/                 __be32          set_attr_page;
-/*68*/                 __be32          set_attr_id;
-/*72*/                 __be32          set_attr_length;
-/*76*/                 osd_cdb_offset  set_attr_offset;
-/*80*/         } __packed attrs_page;
-
-               struct osd_attributes_list_mode {
-/*52*/                 __be32          get_attr_desc_bytes;
-/*56*/                 osd_cdb_offset  get_attr_desc_offset;
-
-/*60*/                 __be32          get_attr_alloc_length;
-/*64*/                 osd_cdb_offset  get_attr_offset;
-
-/*68*/                 __be32          set_attr_bytes;
-/*72*/                 osd_cdb_offset  set_attr_offset;
-                       __be32 not_used;
-/*80*/         } __packed attrs_list;
-
-               /* osd2r03:5.2.4.2 Set one attribute value using CDB fields */
-               struct osd_attributes_cdb_mode {
-/*52*/                 __be32          set_attr_page;
-/*56*/                 __be32          set_attr_id;
-/*60*/                 __be16          set_attr_len;
-/*62*/                 u8              set_attr_val[18];
-/*80*/         } __packed attrs_cdb;
-/*52*/         u8 get_set_attributes_parameters[28];
-       };
-} __packed;
-/*80*/
-
-/*160 v1*/
-struct osdv1_security_parameters {
-/*160*/u8      integrity_check_value[OSDv1_CRYPTO_KEYID_SIZE];
-/*180*/u8      request_nonce[OSD_CRYPTO_NONCE_SIZE];
-/*192*/osd_cdb_offset  data_in_integrity_check_offset;
-/*196*/osd_cdb_offset  data_out_integrity_check_offset;
-} __packed;
-/*200 v1*/
-
-/*184 v2*/
-struct osdv2_security_parameters {
-/*184*/u8      integrity_check_value[OSDv2_CRYPTO_KEYID_SIZE];
-/*216*/u8      request_nonce[OSD_CRYPTO_NONCE_SIZE];
-/*228*/osd_cdb_offset  data_in_integrity_check_offset;
-/*232*/osd_cdb_offset  data_out_integrity_check_offset;
-} __packed;
-/*236 v2*/
-
-struct osd_security_parameters {
-       union {
-               struct osdv1_security_parameters v1;
-               struct osdv2_security_parameters v2;
-       };
-};
-
-struct osdv1_cdb {
-       struct osd_cdb_head h;
-       u8 caps[OSDv1_CAP_LEN];
-       struct osdv1_security_parameters sec_params;
-} __packed;
-
-struct osdv2_cdb {
-       struct osd_cdb_head h;
-       u8 caps[OSD_CAP_LEN];
-       struct osdv2_security_parameters sec_params;
-} __packed;
-
-struct osd_cdb {
-       union {
-               struct osdv1_cdb v1;
-               struct osdv2_cdb v2;
-               u8 buff[OSD_TOTAL_CDB_LEN];
-       };
-} __packed;
-
-static inline struct osd_cdb_head *osd_cdb_head(struct osd_cdb *ocdb)
-{
-       return (struct osd_cdb_head *)ocdb->buff;
-}
-
-/* Define actions for both versions.
- * E.g. for Name = FORMAT_OSD we get both OSD_ACT_FORMAT_OSD and OSDv1_ACT_FORMAT_OSD
- */
-#define OSD_ACT___(Name, Num) \
-       OSD_ACT_##Name = cpu_to_be16(0x8880 + Num), \
-       OSDv1_ACT_##Name = cpu_to_be16(0x8800 + Num),
-
-/* V2 only actions */
-#define OSD_ACT_V2(Name, Num) \
-       OSD_ACT_##Name = cpu_to_be16(0x8880 + Num),
-
-#define OSD_ACT_V1_V2(Name, Num1, Num2) \
-       OSD_ACT_##Name = cpu_to_be16(Num2), \
-       OSDv1_ACT_##Name = cpu_to_be16(Num1),
-
-enum osd_service_actions {
-       OSD_ACT_V2(OBJECT_STRUCTURE_CHECK,      0x00)
-       OSD_ACT___(FORMAT_OSD,                  0x01)
-       OSD_ACT___(CREATE,                      0x02)
-       OSD_ACT___(LIST,                        0x03)
-       OSD_ACT_V2(PUNCH,                       0x04)
-       OSD_ACT___(READ,                        0x05)
-       OSD_ACT___(WRITE,                       0x06)
-       OSD_ACT___(APPEND,                      0x07)
-       OSD_ACT___(FLUSH,                       0x08)
-       OSD_ACT_V2(CLEAR,                       0x09)
-       OSD_ACT___(REMOVE,                      0x0A)
-       OSD_ACT___(CREATE_PARTITION,            0x0B)
-       OSD_ACT___(REMOVE_PARTITION,            0x0C)
-       OSD_ACT___(GET_ATTRIBUTES,              0x0E)
-       OSD_ACT___(SET_ATTRIBUTES,              0x0F)
-       OSD_ACT___(CREATE_AND_WRITE,            0x12)
-       OSD_ACT___(CREATE_COLLECTION,           0x15)
-       OSD_ACT___(REMOVE_COLLECTION,           0x16)
-       OSD_ACT___(LIST_COLLECTION,             0x17)
-       OSD_ACT___(SET_KEY,                     0x18)
-       OSD_ACT___(SET_MASTER_KEY,              0x19)
-       OSD_ACT___(FLUSH_COLLECTION,            0x1A)
-       OSD_ACT___(FLUSH_PARTITION,             0x1B)
-       OSD_ACT___(FLUSH_OSD,                   0x1C)
-
-       OSD_ACT_V2(QUERY,                       0x20)
-       OSD_ACT_V2(REMOVE_MEMBER_OBJECTS,       0x21)
-       OSD_ACT_V2(GET_MEMBER_ATTRIBUTES,       0x22)
-       OSD_ACT_V2(SET_MEMBER_ATTRIBUTES,       0x23)
-
-       OSD_ACT_V2(CREATE_CLONE,                0x28)
-       OSD_ACT_V2(CREATE_SNAPSHOT,             0x29)
-       OSD_ACT_V2(DETACH_CLONE,                0x2A)
-       OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE,      0x2B)
-       OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
-
-       OSD_ACT_V2(READ_MAP,                    0x31)
-       OSD_ACT_V2(READ_MAPS_COMPARE,           0x32)
-
-       OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND,     0x8F7E, 0x8F7C)
-       OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT,     0x8F7F, 0x8F7D)
-       /* 0x8F80 to 0x8FFF are Vendor specific */
-};
-
-/* osd2r03: 7.1.3.2 List entry format for retrieving attributes */
-struct osd_attributes_list_attrid {
-       __be32 attr_page;
-       __be32 attr_id;
-} __packed;
-
-/*
- * NOTE: v1 is not aligned.
- */
-struct osdv1_attributes_list_element {
-       __be32 attr_page;
-       __be32 attr_id;
-       __be16 attr_bytes; /* valid bytes at attr_val without padding */
-       u8 attr_val[0];
-} __packed;
-
-/*
- * osd2r03: 7.1.3.3 List entry format for retrieved attributes and
- *                  for setting attributes
- * NOTE: v2 is 8-bytes aligned
- */
-struct osdv2_attributes_list_element {
-       __be32 attr_page;
-       __be32 attr_id;
-       u8 reserved[6];
-       __be16 attr_bytes; /* valid bytes at attr_val without padding */
-       u8 attr_val[0];
-} __packed;
-
-enum {
-       OSDv1_ATTRIBUTES_ELEM_ALIGN = 1,
-       OSD_ATTRIBUTES_ELEM_ALIGN = 8,
-};
-
-enum {
-       OSD_ATTR_LIST_ALL_PAGES = 0xFFFFFFFF,
-       OSD_ATTR_LIST_ALL_IN_PAGE = 0xFFFFFFFF,
-};
-
-static inline unsigned osdv1_attr_list_elem_size(unsigned len)
-{
-       return ALIGN(len + sizeof(struct osdv1_attributes_list_element),
-                    OSDv1_ATTRIBUTES_ELEM_ALIGN);
-}
-
-static inline unsigned osdv2_attr_list_elem_size(unsigned len)
-{
-       return ALIGN(len + sizeof(struct osdv2_attributes_list_element),
-                    OSD_ATTRIBUTES_ELEM_ALIGN);
-}
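For example, the space taken by a batch of v2 list elements can be summed with the helper above (an illustrative sketch with a hypothetical helper name; a real caller would also account for the list header defined further below):

/* Illustrative only: total bytes of v2 list elements for @nr values. */
static unsigned ex_v2_elems_bytes(const unsigned *val_len, int nr)
{
        unsigned total = 0;
        int i;

        for (i = 0; i < nr; i++)
                total += osdv2_attr_list_elem_size(val_len[i]);

        return total;
}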
-
-/*
- * osd2r03: 7.1.3 OSD attributes lists (Table 184) — List type values
- */
-enum osd_attr_list_types {
-       OSD_ATTR_LIST_GET = 0x1,        /* descriptors only */
-       OSD_ATTR_LIST_SET_RETRIEVE = 0x9, /*descriptors/values variable-length*/
-       OSD_V2_ATTR_LIST_MULTIPLE = 0xE,  /* ver2, Multiple Objects lists*/
-       OSD_V1_ATTR_LIST_CREATE_MULTIPLE = 0xF,/*ver1, used by create_multiple*/
-};
-
-/* osd2r03: 7.1.3.4 Multi-object retrieved attributes format */
-struct osd_attributes_list_multi_header {
-       __be64 object_id;
-       u8 object_type; /* object_type enum below */
-       u8 reserved[5];
-       __be16 list_bytes;
-       /* followed by struct osd_attributes_list_element's */
-};
-
-struct osdv1_attributes_list_header {
-       u8 type;        /* low 4-bit only */
-       u8 pad;
-       __be16 list_bytes; /* Initiator shall set to Zero. Only set by target */
-       /*
-        * type=9 followed by struct osd_attributes_list_element's
-        * type=E followed by struct osd_attributes_list_multi_header's
-        */
-} __packed;
-
-static inline unsigned osdv1_list_size(struct osdv1_attributes_list_header *h)
-{
-       return be16_to_cpu(h->list_bytes);
-}
-
-struct osdv2_attributes_list_header {
-       u8 type;        /* lower 4-bits only */
-       u8 pad[3];
-/*4*/  __be32 list_bytes; /* Initiator shall set to zero. Only set by target */
-       /*
-        * type=9 followed by struct osd_attributes_list_element's
-        * type=E followed by struct osd_attributes_list_multi_header's
-        */
-} __packed;
-
-static inline unsigned osdv2_list_size(struct osdv2_attributes_list_header *h)
-{
-       return be32_to_cpu(h->list_bytes);
-}
-
-/* (osd-r10 6.13)
- * osd2r03: 6.15 LIST (Table 79) LIST command parameter data.
- *     for root_lstchg below
- */
-enum {
-       OSD_OBJ_ID_LIST_PAR = 0x1, /* V1-only. Not used in V2 */
-       OSD_OBJ_ID_LIST_LSTCHG = 0x2,
-};
-
-/*
- * osd2r03: 6.15.2 LIST command parameter data
- * (Also for LIST COLLECTION)
- */
-struct osd_obj_id_list {
-       __be64 list_bytes; /* bytes in list excluding list_bytes (-8) */
-       __be64 continuation_id;
-       __be32 list_identifier;
-       u8 pad[3];
-       u8 root_lstchg;
-       __be64 object_ids[0];
-} __packed;
-
-static inline bool osd_is_obj_list_done(struct osd_obj_id_list *list,
-       bool *is_changed)
-{
-       *is_changed = (0 != (list->root_lstchg & OSD_OBJ_ID_LIST_LSTCHG));
-       return 0 != list->continuation_id;
-}
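A rough usage sketch follows (hypothetical helper, not taken from the original initiator code); the 16 bytes of fixed fields after list_bytes are read off the layout above:

/*
 * Illustrative only: iterate a LIST response.  list_bytes excludes
 * itself, so the object ids start after the remaining 16 bytes of
 * header (continuation_id + list_identifier + pad + root_lstchg).
 */
static void ex_walk_obj_list(const struct osd_obj_id_list *list)
{
        u64 nr_ids = (be64_to_cpu(list->list_bytes) - 16) / sizeof(__be64);
        u64 i;

        for (i = 0; i < nr_ids; i++)
                pr_debug("object 0x%llx\n", (unsigned long long)
                         be64_to_cpu(list->object_ids[i]));

        if (list->continuation_id) {
                /* incomplete: reissue LIST starting at continuation_id */
        }
}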
-
-/*
- * osd2r03: 4.12.4.5 The ALLDATA security method
- */
-struct osd_data_out_integrity_info {
-       __be64 data_bytes;
-       __be64 set_attributes_bytes;
-       __be64 get_attributes_bytes;
-       __u8 integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
-} __packed;
-
-/* The same osd_data_out_integrity_info is used for OSD2/OSD1. The only
- * difference is the size of the structure, since in OSD1 the last array is
- * smaller. Use the helper below for version-independent handling of this
- * structure.
- */
-static inline int osd_data_out_integrity_info_sizeof(bool is_ver1)
-{
-       return sizeof(struct osd_data_out_integrity_info) -
-               (is_ver1 * (OSDv2_CRYPTO_KEYID_SIZE - OSDv1_CRYPTO_KEYID_SIZE));
-}
-
-struct osd_data_in_integrity_info {
-       __be64 data_bytes;
-       __be64 retrieved_attributes_bytes;
-       __u8 integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
-} __packed;
-
-/* The same osd_data_in_integrity_info is used for OSD2/OSD1. The only
- * difference is the size of the structure, since in OSD1 the last array is
- * smaller. Use the helper below for version-independent handling of this
- * structure.
- */
-static inline int osd_data_in_integrity_info_sizeof(bool is_ver1)
-{
-       return sizeof(struct osd_data_in_integrity_info) -
-               (is_ver1 * (OSDv2_CRYPTO_KEYID_SIZE - OSDv1_CRYPTO_KEYID_SIZE));
-}
-
-struct osd_timestamp {
-       u8 time[6]; /* number of milliseconds since 1/1/1970 UT (big endian) */
-} __packed;
-/* FIXME: define helper functions to convert to/from osd time format */
-
-/*
- * Capability & Security definitions
- * osd2r03: 4.11.2.2 Capability format
- * osd2r03: 5.2.8 Security parameters
- */
-
-struct osd_key_identifier {
-       u8 id[7]; /* if you know why 7 please email ooo@electrozaur.com */
-} __packed;
-
-/* for osd_capability.format */
-enum {
-       OSD_SEC_CAP_FORMAT_NO_CAPS = 0,
-       OSD_SEC_CAP_FORMAT_VER1 = 1,
-       OSD_SEC_CAP_FORMAT_VER2 = 2,
-};
-
-/* security_method */
-enum {
-       OSD_SEC_NOSEC = 0,
-       OSD_SEC_CAPKEY = 1,
-       OSD_SEC_CMDRSP = 2,
-       OSD_SEC_ALLDATA = 3,
-};
-
-enum object_type {
-       OSD_SEC_OBJ_ROOT = 0x1,
-       OSD_SEC_OBJ_PARTITION = 0x2,
-       OSD_SEC_OBJ_COLLECTION = 0x40,
-       OSD_SEC_OBJ_USER = 0x80,
-};
-
-enum osd_capability_bit_masks {
-       OSD_SEC_CAP_APPEND      = BIT(0),
-       OSD_SEC_CAP_OBJ_MGMT    = BIT(1),
-       OSD_SEC_CAP_REMOVE      = BIT(2),
-       OSD_SEC_CAP_CREATE      = BIT(3),
-       OSD_SEC_CAP_SET_ATTR    = BIT(4),
-       OSD_SEC_CAP_GET_ATTR    = BIT(5),
-       OSD_SEC_CAP_WRITE       = BIT(6),
-       OSD_SEC_CAP_READ        = BIT(7),
-
-       OSD_SEC_CAP_NONE1       = BIT(8),
-       OSD_SEC_CAP_NONE2       = BIT(9),
-       OSD_SEC_GBL_REM         = BIT(10), /*v2 only*/
-       OSD_SEC_CAP_QUERY       = BIT(11), /*v2 only*/
-       OSD_SEC_CAP_M_OBJECT    = BIT(12), /*v2 only*/
-       OSD_SEC_CAP_POL_SEC     = BIT(13),
-       OSD_SEC_CAP_GLOBAL      = BIT(14),
-       OSD_SEC_CAP_DEV_MGMT    = BIT(15),
-};
-
-/* for object_descriptor_type (hi nibble used) */
-enum {
-       OSD_SEC_OBJ_DESC_NONE = 0,     /* Not allowed */
-       OSD_SEC_OBJ_DESC_OBJ = 1 << 4, /* v1: also collection */
-       OSD_SEC_OBJ_DESC_PAR = 2 << 4, /* also root */
-       OSD_SEC_OBJ_DESC_COL = 3 << 4, /* v2 only */
-};
-
-/* (osd-r10:4.9.2.2)
- * osd2r03:4.11.2.2 Capability format
- */
-struct osd_capability_head {
-       u8 format; /* low nibble */
-       u8 integrity_algorithm__key_version; /* MAKE_BYTE(integ_alg, key_ver) */
-       u8 security_method;
-       u8 reserved1;
-/*04*/ struct osd_timestamp expiration_time;
-/*10*/ u8 audit[20];
-/*30*/ u8 discriminator[12];
-/*42*/ struct osd_timestamp object_created_time;
-/*48*/ u8 object_type;
-/*49*/ u8 permissions_bit_mask[5];
-/*54*/ u8 reserved2;
-/*55*/ u8 object_descriptor_type; /* high nibble */
-} __packed;
-
-/*56 v1*/
-struct osdv1_cap_object_descriptor {
-       union {
-               struct {
-/*56*/                 __be32 policy_access_tag;
-/*60*/                 __be64 allowed_partition_id;
-/*68*/                 __be64 allowed_object_id;
-/*76*/                 __be32 reserved;
-               } __packed obj_desc;
-
-/*56*/         u8 object_descriptor[24];
-       };
-} __packed;
-/*80 v1*/
-
-/*56 v2*/
-struct osd_cap_object_descriptor {
-       union {
-               struct {
-/*56*/                 __be32 allowed_attributes_access;
-/*60*/                 __be32 policy_access_tag;
-/*64*/                 __be16 boot_epoch;
-/*66*/                 u8 reserved[6];
-/*72*/                 __be64 allowed_partition_id;
-/*80*/                 __be64 allowed_object_id;
-/*88*/                 __be64 allowed_range_length;
-/*96*/                 __be64 allowed_range_start;
-               } __packed obj_desc;
-
-/*56*/         u8 object_descriptor[48];
-       };
-} __packed;
-/*104 v2*/
-
-struct osdv1_capability {
-       struct osd_capability_head h;
-       struct osdv1_cap_object_descriptor od;
-} __packed;
-
-struct osd_capability {
-       struct osd_capability_head h;
-       struct osd_cap_object_descriptor od;
-} __packed;
-
-/**
- * osd_sec_set_caps - set cap-bits into the capabilities header
- *
- * @cap:       The osd_capability_head to set cap bits to.
- * @bit_mask:  Use an ORed list of enum osd_capability_bit_masks values
- *
- * permissions_bit_mask is unaligned; use this helper to set it into caps
- * in a version-independent way.
- */
-static inline void osd_sec_set_caps(struct osd_capability_head *cap,
-       u16 bit_mask)
-{
-       /*
-        * Note: The bits above are defined in LE order so that they can grow
-        *       in the future to more than 16 and still retain their constant
-        *       values.
-        */
-       put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
-}
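A short usage sketch, assuming a caller that has already built a struct osd_capability and only wants to grant read and write access (hypothetical helper name):

static void ex_fill_rw_caps(struct osd_capability *cap)
{
        /* grant READ and WRITE only; other header fields are set elsewhere */
        osd_sec_set_caps(&cap->h, OSD_SEC_CAP_READ | OSD_SEC_CAP_WRITE);
}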
-
-/* osd2r05a sec 5.3: CDB continuation segment formats */
-enum osd_continuation_segment_format {
-       CDB_CONTINUATION_FORMAT_V2 = 0x01,
-};
-
-struct osd_continuation_segment_header {
-       u8      format;
-       u8      reserved1;
-       __be16  service_action;
-       __be32  reserved2;
-       u8      integrity_check[OSDv2_CRYPTO_KEYID_SIZE];
-} __packed;
-
-/* osd2r05a sec 5.4.1: CDB continuation descriptors */
-enum osd_continuation_descriptor_type {
-       NO_MORE_DESCRIPTORS = 0x0000,
-       SCATTER_GATHER_LIST = 0x0001,
-       QUERY_LIST = 0x0002,
-       USER_OBJECT = 0x0003,
-       COPY_USER_OBJECT_SOURCE = 0x0101,
-       EXTENSION_CAPABILITIES = 0xFFEE
-};
-
-struct osd_continuation_descriptor_header {
-       __be16  type;
-       u8      reserved;
-       u8      pad_length;
-       __be32  length;
-} __packed;
-
-
-/* osd2r05a sec 5.4.2: Scatter/gather list */
-struct osd_sg_list_entry {
-       __be64 offset;
-       __be64 len;
-};
-
-struct osd_sg_continuation_descriptor {
-       struct osd_continuation_descriptor_header hdr;
-       struct osd_sg_list_entry entries[];
-};
-
-#endif /* ndef __OSD_PROTOCOL_H__ */
diff --git a/include/scsi/osd_sec.h b/include/scsi/osd_sec.h
deleted file mode 100644 (file)
index 7abeb0f..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * osd_sec.h - OSD security manager API
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- */
-#ifndef __OSD_SEC_H__
-#define __OSD_SEC_H__
-
-#include <scsi/osd_protocol.h>
-#include <scsi/osd_types.h>
-
-/*
- * Contains types and constants of osd capabilities and security
- * encoding/decoding.
- * The API tries to keep security abstract so that the initiator of an
- * object-based pNFS client knows as little as possible about security and
- * capabilities. It is the server's osd-initiator's place to know more.
- * It can also be used by an osd-target.
- */
-void osd_sec_encode_caps(void *caps, ...);/* NI */
-void osd_sec_init_nosec_doall_caps(void *caps,
-       const struct osd_obj_id *obj, bool is_collection, const bool is_v1);
-
-bool osd_is_sec_alldata(struct osd_security_parameters *sec_params);
-
-/* Conditionally sign the CDB according to security setting in ocdb
- * with cap_key */
-void osd_sec_sign_cdb(struct osd_cdb *ocdb, const u8 *cap_key);
-
-/* Unconditionally sign the BIO data with cap_key.
- * Check for osd_is_sec_alldata() was done prior to calling this. */
-void osd_sec_sign_data(void *data_integ, struct bio *bio, const u8 *cap_key);
-
-/* Version independent copy of caps into the cdb */
-void osd_set_caps(struct osd_cdb *cdb, const void *caps);
-
-#endif /* ndef __OSD_SEC_H__ */
diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h
deleted file mode 100644 (file)
index d52aa93..0000000
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * osd_sense.h - OSD Related sense handling definitions.
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * This file contains types and constants that are defined by the protocol
- * Note: All names and symbols are taken from the OSD standard's text.
- */
-#ifndef __OSD_SENSE_H__
-#define __OSD_SENSE_H__
-
-#include <scsi/osd_protocol.h>
-
-/* SPC3r23 4.5.6 Sense key and sense code definitions table 27 */
-enum scsi_sense_keys {
-       scsi_sk_no_sense        = 0x0,
-       scsi_sk_recovered_error = 0x1,
-       scsi_sk_not_ready       = 0x2,
-       scsi_sk_medium_error    = 0x3,
-       scsi_sk_hardware_error  = 0x4,
-       scsi_sk_illegal_request = 0x5,
-       scsi_sk_unit_attention  = 0x6,
-       scsi_sk_data_protect    = 0x7,
-       scsi_sk_blank_check     = 0x8,
-       scsi_sk_vendor_specific = 0x9,
-       scsi_sk_copy_aborted    = 0xa,
-       scsi_sk_aborted_command = 0xb,
-       scsi_sk_volume_overflow = 0xd,
-       scsi_sk_miscompare      = 0xe,
-       scsi_sk_reserved        = 0xf,
-};
-
-/* SPC3r23 4.5.6 Sense key and sense code definitions table 28 */
-/* Note: only those which can be returned by an OSD target. Most of
- *       these errors are taken care of by the generic scsi layer.
- */
-enum osd_additional_sense_codes {
-       scsi_no_additional_sense_information                    = 0x0000,
-       scsi_operation_in_progress                              = 0x0016,
-       scsi_cleaning_requested                                 = 0x0017,
-       scsi_lunr_cause_not_reportable                          = 0x0400,
-       scsi_logical_unit_is_in_process_of_becoming_ready       = 0x0401,
-       scsi_lunr_initializing_command_required                 = 0x0402,
-       scsi_lunr_manual_intervention_required                  = 0x0403,
-       scsi_lunr_operation_in_progress                         = 0x0407,
-       scsi_lunr_selftest_in_progress                          = 0x0409,
-       scsi_luna_asymmetric_access_state_transition            = 0x040a,
-       scsi_luna_target_port_in_standby_state                  = 0x040b,
-       scsi_luna_target_port_in_unavailable_state              = 0x040c,
-       scsi_lunr_notify_enable_spinup_required                 = 0x0411,
-       scsi_logical_unit_does_not_respond_to_selection         = 0x0500,
-       scsi_logical_unit_communication_failure                 = 0x0800,
-       scsi_logical_unit_communication_timeout                 = 0x0801,
-       scsi_logical_unit_communication_parity_error            = 0x0802,
-       scsi_error_log_overflow                                 = 0x0a00,
-       scsi_warning                                            = 0x0b00,
-       scsi_warning_specified_temperature_exceeded             = 0x0b01,
-       scsi_warning_enclosure_degraded                         = 0x0b02,
-       scsi_write_error_unexpected_unsolicited_data            = 0x0c0c,
-       scsi_write_error_not_enough_unsolicited_data            = 0x0c0d,
-       scsi_invalid_information_unit                           = 0x0e00,
-       scsi_invalid_field_in_command_information_unit          = 0x0e03,
-       scsi_read_error_failed_retransmission_request           = 0x1113,
-       scsi_parameter_list_length_error                        = 0x1a00,
-       scsi_invalid_command_operation_code                     = 0x2000,
-       scsi_invalid_field_in_cdb                               = 0x2400,
-       osd_security_audit_value_frozen                         = 0x2404,
-       osd_security_working_key_frozen                         = 0x2405,
-       osd_nonce_not_unique                                    = 0x2406,
-       osd_nonce_timestamp_out_of_range                        = 0x2407,
-       scsi_logical_unit_not_supported                         = 0x2500,
-       scsi_invalid_field_in_parameter_list                    = 0x2600,
-       scsi_parameter_not_supported                            = 0x2601,
-       scsi_parameter_value_invalid                            = 0x2602,
-       scsi_invalid_release_of_persistent_reservation          = 0x2604,
-       osd_invalid_dataout_buffer_integrity_check_value        = 0x260f,
-       scsi_not_ready_to_ready_change_medium_may_have_changed  = 0x2800,
-       scsi_power_on_reset_or_bus_device_reset_occurred        = 0x2900,
-       scsi_power_on_occurred                                  = 0x2901,
-       scsi_scsi_bus_reset_occurred                            = 0x2902,
-       scsi_bus_device_reset_function_occurred                 = 0x2903,
-       scsi_device_internal_reset                              = 0x2904,
-       scsi_transceiver_mode_changed_to_single_ended           = 0x2905,
-       scsi_transceiver_mode_changed_to_lvd                    = 0x2906,
-       scsi_i_t_nexus_loss_occurred                            = 0x2907,
-       scsi_parameters_changed                                 = 0x2a00,
-       scsi_mode_parameters_changed                            = 0x2a01,
-       scsi_asymmetric_access_state_changed                    = 0x2a06,
-       scsi_priority_changed                                   = 0x2a08,
-       scsi_command_sequence_error                             = 0x2c00,
-       scsi_previous_busy_status                               = 0x2c07,
-       scsi_previous_task_set_full_status                      = 0x2c08,
-       scsi_previous_reservation_conflict_status               = 0x2c09,
-       osd_partition_or_collection_contains_user_objects       = 0x2c0a,
-       scsi_commands_cleared_by_another_initiator              = 0x2f00,
-       scsi_cleaning_failure                                   = 0x3007,
-       scsi_enclosure_failure                                  = 0x3400,
-       scsi_enclosure_services_failure                         = 0x3500,
-       scsi_unsupported_enclosure_function                     = 0x3501,
-       scsi_enclosure_services_unavailable                     = 0x3502,
-       scsi_enclosure_services_transfer_failure                = 0x3503,
-       scsi_enclosure_services_transfer_refused                = 0x3504,
-       scsi_enclosure_services_checksum_error                  = 0x3505,
-       scsi_rounded_parameter                                  = 0x3700,
-       osd_read_past_end_of_user_object                        = 0x3b17,
-       scsi_logical_unit_has_not_self_configured_yet           = 0x3e00,
-       scsi_logical_unit_failure                               = 0x3e01,
-       scsi_timeout_on_logical_unit                            = 0x3e02,
-       scsi_logical_unit_failed_selftest                       = 0x3e03,
-       scsi_logical_unit_unable_to_update_selftest_log         = 0x3e04,
-       scsi_target_operating_conditions_have_changed           = 0x3f00,
-       scsi_microcode_has_been_changed                         = 0x3f01,
-       scsi_inquiry_data_has_changed                           = 0x3f03,
-       scsi_echo_buffer_overwritten                            = 0x3f0f,
-       scsi_diagnostic_failure_on_component_nn_first           = 0x4080,
-       scsi_diagnostic_failure_on_component_nn_last            = 0x40ff,
-       scsi_message_error                                      = 0x4300,
-       scsi_internal_target_failure                            = 0x4400,
-       scsi_select_or_reselect_failure                         = 0x4500,
-       scsi_scsi_parity_error                                  = 0x4700,
-       scsi_data_phase_crc_error_detected                      = 0x4701,
-       scsi_scsi_parity_error_detected_during_st_data_phase    = 0x4702,
-       scsi_asynchronous_information_protection_error_detected = 0x4704,
-       scsi_protocol_service_crc_error                         = 0x4705,
-       scsi_phy_test_function_in_progress                      = 0x4706,
-       scsi_invalid_message_error                              = 0x4900,
-       scsi_command_phase_error                                = 0x4a00,
-       scsi_data_phase_error                                   = 0x4b00,
-       scsi_logical_unit_failed_self_configuration             = 0x4c00,
-       scsi_overlapped_commands_attempted                      = 0x4e00,
-       osd_quota_error                                         = 0x5507,
-       scsi_failure_prediction_threshold_exceeded              = 0x5d00,
-       scsi_failure_prediction_threshold_exceeded_false        = 0x5dff,
-       scsi_voltage_fault                                      = 0x6500,
-};
-
-enum scsi_descriptor_types {
-       scsi_sense_information                  = 0x0,
-       scsi_sense_command_specific_information = 0x1,
-       scsi_sense_key_specific                 = 0x2,
-       scsi_sense_field_replaceable_unit       = 0x3,
-       scsi_sense_stream_commands              = 0x4,
-       scsi_sense_block_commands               = 0x5,
-       osd_sense_object_identification         = 0x6,
-       osd_sense_response_integrity_check      = 0x7,
-       osd_sense_attribute_identification      = 0x8,
-       scsi_sense_ata_return                   = 0x9,
-
-       scsi_sense_Reserved_first               = 0x0A,
-       scsi_sense_Reserved_last                = 0x7F,
-       scsi_sense_Vendor_specific_first        = 0x80,
-       scsi_sense_Vendor_specific_last         = 0xFF,
-};
-
-struct scsi_sense_descriptor { /* for picking into desc type */
-       u8      descriptor_type; /* one of enum scsi_descriptor_types */
-       u8      additional_length; /* n - 1 */
-       u8      data[];
-} __packed;
-
-/* OSD deploys only scsi descriptor_based sense buffers */
-struct scsi_sense_descriptor_based {
-/*0*/  u8      response_code; /* 0x72 or 0x73 */
-/*1*/  u8      sense_key; /* one of enum scsi_sense_keys (4 lower bits) */
-/*2*/  __be16  additional_sense_code; /* enum osd_additional_sense_codes */
-/*4*/  u8      Reserved[3];
-/*7*/  u8      additional_sense_length; /* n - 7 */
-/*8*/  struct  scsi_sense_descriptor ssd[0]; /* variable length, 1 or more */
-} __packed;
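As a rough illustration of walking such a descriptor-format sense buffer (a sketch only, assuming the SPC-3 convention that each descriptor occupies additional_length + 2 bytes):

/*
 * Illustrative only: visit each descriptor of a descriptor-format
 * sense buffer (response codes 0x72/0x73).
 */
static void ex_walk_sense(const struct scsi_sense_descriptor_based *sense)
{
        unsigned int len = sense->additional_sense_length;
        const u8 *p = (const u8 *)sense->ssd;

        while (len >= sizeof(struct scsi_sense_descriptor)) {
                const struct scsi_sense_descriptor *sd =
                                (const struct scsi_sense_descriptor *)p;
                unsigned int dlen = sd->additional_length + 2;

                if (dlen > len)
                        break;  /* malformed descriptor */

                /* sd->descriptor_type is one of enum scsi_descriptor_types */

                p += dlen;
                len -= dlen;
        }
}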
-
-/* some descriptors deployed by OSD */
-
-/* SPC3r23 4.5.2.3 Command-specific information sense data descriptor */
-/* Note: this is the same for descriptor_type=00 but with type=00 the
- *        Reserved[0] == 0x80 (ie. bit-7 set)
- */
-struct scsi_sense_command_specific_data_descriptor {
-/*0*/  u8      descriptor_type; /* (00h/01h) */
-/*1*/  u8      additional_length; /* (0Ah) */
-/*2*/  u8      Reserved[2];
-/*4*/  __be64  information;
-} __packed;
-/*12*/
-
-struct scsi_sense_key_specific_data_descriptor {
-/*0*/  u8      descriptor_type; /* (02h) */
-/*1*/  u8      additional_length; /* (06h) */
-/*2*/  u8      Reserved[2];
-/* SKSV, C/D, Reserved (2), BPV, BIT POINTER (3) */
-/*4*/  u8      sksv_cd_bpv_bp;
-/*5*/  __be16  value; /* field-pointer/progress-value/retry-count/... */
-/*7*/  u8      Reserved2;
-} __packed;
-/*8*/
-
-/* 4.16.2.1 OSD error identification sense data descriptor - table 52 */
-/* Note: these bits are defined in LE order for easy definition; this way the
- * BIT() number is the same as in the documentation. The members below in
- * osd_sense_identification_data_descriptor are therefore defined __le32.
- */
-enum osd_command_functions_bits {
-       OSD_CFB_COMMAND          = BIT(4),
-       OSD_CFB_CMD_CAP_VERIFIED = BIT(5),
-       OSD_CFB_VALIDATION       = BIT(7),
-       OSD_CFB_IMP_ST_ATT       = BIT(12),
-       OSD_CFB_SET_ATT          = BIT(20),
-       OSD_CFB_SA_CAP_VERIFIED  = BIT(21),
-       OSD_CFB_GET_ATT          = BIT(28),
-       OSD_CFB_GA_CAP_VERIFIED  = BIT(29),
-};
-
-struct osd_sense_identification_data_descriptor {
-/*0*/  u8      descriptor_type; /* (06h) */
-/*1*/  u8      additional_length; /* (1Eh) */
-/*2*/  u8      Reserved[6];
-/*8*/  __le32  not_initiated_functions; /*osd_command_functions_bits*/
-/*12*/ __le32  completed_functions; /*osd_command_functions_bits*/
-/*16*/         __be64  partition_id;
-/*24*/ __be64  object_id;
-} __packed;
-/*32*/
-
-struct osd_sense_response_integrity_check_descriptor {
-/*0*/  u8      descriptor_type; /* (07h) */
-/*1*/  u8      additional_length; /* (20h) */
-/*2*/  u8      integrity_check_value[32]; /*FIXME: OSDv2_CRYPTO_KEYID_SIZE*/
-} __packed;
-/*34*/
-
-struct osd_sense_attributes_data_descriptor {
-/*0*/  u8      descriptor_type; /* (08h) */
-/*1*/  u8      additional_length; /* (n-2) */
-/*2*/  u8      Reserved[6];
-       struct osd_sense_attr {
-/*8*/          __be32  attr_page;
-/*12*/         __be32  attr_id;
-/*16*/ } sense_attrs[0]; /* 1 or more */
-} __packed;
-/*variable*/
-
-/* Dig into scsi_sk_illegal_request/scsi_invalid_field_in_cdb errors */
-
-/*FIXME: Support also field in CAPS*/
-#define OSD_CDB_OFFSET(F) offsetof(struct osd_cdb_head, F)
-
-enum osdv2_cdb_field_offset {
-       OSDv1_CFO_STARTING_BYTE = OSD_CDB_OFFSET(v1.start_address),
-       OSD_CFO_STARTING_BYTE   = OSD_CDB_OFFSET(v2.start_address),
-       OSD_CFO_PARTITION_ID    = OSD_CDB_OFFSET(partition),
-       OSD_CFO_OBJECT_ID       = OSD_CDB_OFFSET(object),
-       OSD_CFO_PERMISSIONS     = sizeof(struct osd_cdb_head) +
-                                       offsetof(struct osd_capability_head,
-                                                permissions_bit_mask),
-};
-
-#endif /* ndef __OSD_SENSE_H__ */
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
deleted file mode 100644 (file)
index 48e8a16..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * osd_types.h - Types and constants which are not part of the protocol.
- *
- * Copyright (C) 2008 Panasas Inc.  All rights reserved.
- *
- * Authors:
- *   Boaz Harrosh <ooo@electrozaur.com>
- *   Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * Contains types and constants that are implementation specific and are
- * used by more than one part of the osd library.
- *     (Eg initiator/target/security_manager/...)
- */
-#ifndef __OSD_TYPES_H__
-#define __OSD_TYPES_H__
-
-struct osd_systemid {
-       u8 data[OSD_SYSTEMID_LEN];
-};
-
-typedef u64 __bitwise osd_id;
-
-struct osd_obj_id {
-       osd_id partition;
-       osd_id id;
-};
-
-static const struct __weak osd_obj_id osd_root_object = {0, 0};
-
-struct osd_attr {
-       u32 attr_page;
-       u32 attr_id;
-       u16 len;                /* byte count of operand */
-       void *val_ptr;          /* in network order */
-};
-
-struct osd_sg_entry {
-       u64 offset;
-       u64 len;
-};
-
-#endif /* ndef __OSD_TYPES_H__ */
index 2b539a1..a5fcdad 100644 (file)
@@ -628,6 +628,9 @@ struct Scsi_Host {
        /* Host responded with short (<36 bytes) INQUIRY result */
        unsigned short_inquiry:1;
 
+       /* The transport requires the LUN bits NOT to be stored in CDB[1] */
+       unsigned no_scsi2_lun_in_cdb:1;
+
        /*
         * Optional work queue to be utilized by the transport
         */
@@ -639,9 +642,6 @@ struct Scsi_Host {
         */
        struct workqueue_struct *tmf_work_q;
 
-       /* The transport requires the LUN bits NOT to be stored in CDB[1] */
-       unsigned no_scsi2_lun_in_cdb:1;
-
        /*
         * Value host_blocked counts down from
         */
index 15da45d..b375c33 100644 (file)
@@ -165,6 +165,9 @@ enum fc_tgtid_binding_type  {
 #define FC_PORT_ROLE_FCP_INITIATOR             0x02
 #define FC_PORT_ROLE_IP_PORT                   0x04
 #define FC_PORT_ROLE_FCP_DUMMY_INITIATOR       0x08
+#define FC_PORT_ROLE_NVME_INITIATOR            0x10
+#define FC_PORT_ROLE_NVME_TARGET               0x20
+#define FC_PORT_ROLE_NVME_DISCOVERY            0x40
 
 /* The following are for compatibility */
 #define FC_RPORT_ROLE_UNKNOWN                  FC_PORT_ROLE_UNKNOWN
@@ -473,6 +476,7 @@ enum fc_host_event_code  {
        FCH_EVT_PORT_ONLINE             = 0x202,
        FCH_EVT_PORT_FABRIC             = 0x204,
        FCH_EVT_LINK_UNKNOWN            = 0x500,
+       FCH_EVT_LINK_FPIN               = 0x501,
        FCH_EVT_VENDOR_UNIQUE           = 0xffff,
 };
 
@@ -755,7 +759,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
        return result;
 }
 
-static inline u64 wwn_to_u64(u8 *wwn)
+static inline u64 wwn_to_u64(const u8 *wwn)
 {
        return get_unaligned_be64(wwn);
 }
@@ -798,11 +802,17 @@ u32 fc_get_event_number(void);
 void fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
                enum fc_host_event_code event_code, u32 event_data);
 void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
-               u32 data_len, char * data_buf, u64 vendor_id);
+               u32 data_len, char *data_buf, u64 vendor_id);
+void fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
+               enum fc_host_event_code event_code,
+               u32 data_len, char *data_buf, u64 vendor_id);
        /* Note: when specifying vendor_id to fc_host_post_vendor_event()
-        *   be sure to read the Vendor Type and ID formatting requirements
-        *   specified in scsi_netlink.h
+        *   or fc_host_post_fc_event(), be sure to read the Vendor Type
+        *   and ID formatting requirements specified in scsi_netlink.h
+        * Note: when calling fc_host_post_fc_event(), vendor_id may be
+        *   specified as 0.
         */
+void fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf);
 struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
                struct fc_vport_identifiers *);
 int fc_vport_terminate(struct fc_vport *vport);
index 24c398f..a49d371 100644 (file)
@@ -473,6 +473,7 @@ struct iscsi_cmd {
        struct timer_list       dataout_timer;
        /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
        struct kvec             *iov_data;
+       void                    *overflow_buf;
        /* Iovecs for miscellaneous purposes */
 #define ISCSI_MISC_IOVECS                      5
        struct kvec             iov_misc[ISCSI_MISC_IOVECS];
index 19a5bf4..7c9716f 100644 (file)
@@ -795,8 +795,8 @@ struct se_device {
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
        struct semaphore        caw_sem;
-       /* Used for legacy SPC-2 reservationsa */
-       struct se_node_acl      *dev_reserved_node_acl;
+       /* Used for legacy SPC-2 reservations */
+       struct se_session       *reservation_holder;
        /* Used for ALUA Logical Unit Group membership */
        struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
        /* Used for SPC-3 Persistent Reservations */
index 8ed9040..063f133 100644 (file)
@@ -142,6 +142,7 @@ void        transport_register_session(struct se_portal_group *,
                struct se_node_acl *, struct se_session *, void *);
 ssize_t        target_show_dynamic_sessions(struct se_portal_group *, char *);
 void   transport_free_session(struct se_session *);
+void   target_spc2_release(struct se_node_acl *nacl);
 void   target_put_nacl(struct se_node_acl *);
 void   transport_deregister_session_configfs(struct se_session *);
 void   transport_deregister_session(struct se_session *);
index b7e0a5e..a81c535 100644 (file)
@@ -52,6 +52,7 @@ enum fc_els_cmd {
        ELS_RRQ =       0x12,   /* reinstate recovery qualifier */
        ELS_REC =       0x13,   /* read exchange concise */
        ELS_SRR =       0x14,   /* sequence retransmission request */
+       ELS_FPIN =      0x16,   /* Fabric Performance Impact Notification */
        ELS_PRLI =      0x20,   /* process login */
        ELS_PRLO =      0x21,   /* process logout */
        ELS_SCN =       0x22,   /* state change notification */
@@ -119,6 +120,7 @@ enum fc_els_cmd {
        [ELS_RRQ] =     "RRQ",                  \
        [ELS_REC] =     "REC",                  \
        [ELS_SRR] =     "SRR",                  \
+       [ELS_FPIN] =    "FPIN",                 \
        [ELS_PRLI] =    "PRLI",                 \
        [ELS_PRLO] =    "PRLO",                 \
        [ELS_SCN] =     "SCN",                  \
@@ -829,4 +831,35 @@ enum fc_els_clid_ic {
        ELS_CLID_IC_LIP =       8,      /* receiving LIP */
 };
 
+
+/*
+ * Fabric Notification Descriptor Tag values
+ */
+enum fc_fn_dtag {
+       ELS_FN_DTAG_LNK_INTEGRITY =     0x00020001,     /* Link Integrity */
+       ELS_FN_DTAG_PEER_CONGEST =      0x00020003,     /* Peer Congestion */
+       ELS_FN_DTAG_CONGESTION =        0x00020004,     /* Congestion */
+};
+
+/*
+ * Fabric Notification Descriptor
+ */
+struct fc_fn_desc {
+       __be32          fn_desc_tag;    /* Notification Descriptor Tag */
+       __be32          fn_desc_value_len; /* Length of Descriptor Value field
+                                           * (in bytes)
+                                           */
+       __u8            fn_desc_value[0];  /* Descriptor Value */
+};
+
+/*
+ * ELS_FPIN - Fabric Performance Impact Notification
+ */
+struct fc_els_fpin {
+       __u8            fpin_cmd;       /* command (0x16) */
+       __u8            fpin_zero[3];   /* specified as zero - part of cmd */
+       __be32          fpin_desc_cnt;  /* count of descriptors */
+       struct fc_fn_desc       fpin_desc[0];   /* Descriptor list */
+};
+
 #endif /* _FC_ELS_H_ */
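To make the FPIN layout above concrete, a hedged sketch of how a receiver might step through the descriptor list (hypothetical helper; the bounds checks and the stepping by header plus value length are illustrative, not taken from this patch):

/*
 * Illustrative only: walk the descriptors of a received FPIN ELS.
 * @fpin_len is the total payload length handed up by the LLDD,
 * e.g. the length passed to fc_host_fpin_rcv().
 */
static void ex_walk_fpin(const struct fc_els_fpin *fpin, u32 fpin_len)
{
        u32 cnt = be32_to_cpu(fpin->fpin_desc_cnt);
        const u8 *p = (const u8 *)fpin->fpin_desc;
        const u8 *end = (const u8 *)fpin + fpin_len;

        while (cnt-- && p + sizeof(struct fc_fn_desc) <= end) {
                const struct fc_fn_desc *desc = (const struct fc_fn_desc *)p;
                u32 vlen = be32_to_cpu(desc->fn_desc_value_len);

                switch (be32_to_cpu(desc->fn_desc_tag)) {
                case ELS_FN_DTAG_LNK_INTEGRITY:
                        /* Link Integrity notification */
                        break;
                case ELS_FN_DTAG_PEER_CONGEST:
                case ELS_FN_DTAG_CONGESTION:
                        /* congestion notifications */
                        break;
                default:
                        break;
                }

                /* fn_desc_value_len covers only the value bytes */
                p += sizeof(struct fc_fn_desc) + vlen;
        }
}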