Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
author	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 2 Sep 2021 22:09:46 +0000 (15:09 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 2 Sep 2021 22:09:46 +0000 (15:09 -0700)
Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (ufs, qla2xxx,
  target, smartpqi, lpfc, mpt3sas).

  The core change causing the most churn was replacing the command's
  'request' field with a macro, allowing us to offset map to it and
  remove the redundant field; the same was also done for the tag
  field.

  The most impactful change is the final removal of scsi_ioctl, which
  has been deprecated for over a decade"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (293 commits)
  scsi: ufs: Fix ufshcd_request_sense_async() for Samsung KLUFG8RHDA-B2D1
  scsi: ufs: ufs-exynos: Fix static checker warning
  scsi: mpt3sas: Use the proper SCSI midlayer interfaces for PI
  scsi: lpfc: Use the proper SCSI midlayer interfaces for PI
  scsi: lpfc: Copyright updates for 14.0.0.1 patches
  scsi: lpfc: Update lpfc version to 14.0.0.1
  scsi: lpfc: Add bsg support for retrieving adapter cmf data
  scsi: lpfc: Add cmf_info sysfs entry
  scsi: lpfc: Add debugfs support for cm framework buffers
  scsi: lpfc: Add support for maintaining the cm statistics buffer
  scsi: lpfc: Add rx monitoring statistics
  scsi: lpfc: Add support for the CM framework
  scsi: lpfc: Add cmfsync WQE support
  scsi: lpfc: Add support for cm enablement buffer
  scsi: lpfc: Add cm statistics buffer support
  scsi: lpfc: Add EDC ELS support
  scsi: lpfc: Expand FPIN and RDF receive logging
  scsi: lpfc: Add MIB feature enablement support
  scsi: lpfc: Add SET_HOST_DATA mbox cmd to pass date/time info to firmware
  scsi: fc: Add EDC ELS definition
  ...

217 files changed:
Documentation/ABI/testing/sysfs-driver-ufs
block/Kconfig
block/Makefile
block/blk-mq.c
block/bsg-lib.c
block/bsg.c
block/scsi_ioctl.c [deleted file]
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/pata_falcon.c
drivers/base/core.c
drivers/block/Kconfig
drivers/block/paride/Kconfig
drivers/cdrom/cdrom.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/53c700.c
drivers/scsi/BusLogic.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/aacraid/commsup.c
drivers/scsi/advansys.c
drivers/scsi/aha1542.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/ch.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/cxlflash/main.c
drivers/scsi/dpt_i2o.c
drivers/scsi/elx/efct/efct_lio.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ips.c
drivers/scsi/isci/request.c
drivers/scsi/libsas/Kconfig
drivers/scsi/libsas/Makefile
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_host_smp.c
drivers/scsi/libsas/sas_init.c
drivers/scsi/libsas/sas_phy.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_bsg.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_debugfs.h
drivers/scsi/lpfc/lpfc_disc.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_ids.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvme.h
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpi3mr/mpi3mr.h
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mvumi.c
drivers/scsi/myrb.c
drivers/scsi/myrs.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/pcmcia/fdomain_cs.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_iscsi.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/Makefile
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_edif.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_edif.h [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_edif_bsg.h [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qlogicpti.c
drivers/scsi/scsi.c
drivers/scsi/scsi_bsg.c [new file with mode: 0644]
drivers/scsi/scsi_common.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_ioctl.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_logging.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/sg.c
drivers/scsi/smartpqi/Kconfig
drivers/scsi/smartpqi/smartpqi.h
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/smartpqi/smartpqi_sas_transport.c
drivers/scsi/smartpqi/smartpqi_sis.c
drivers/scsi/smartpqi/smartpqi_sis.h
drivers/scsi/snic/snic_scsi.c
drivers/scsi/sr.c
drivers/scsi/st.c
drivers/scsi/stex.c
drivers/scsi/storvsc_drv.c
drivers/scsi/sun3_scsi.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/cdns-pltfrm.c
drivers/scsi/ufs/tc-dwc-g210-pci.c
drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
drivers/scsi/ufs/ufs-exynos.c
drivers/scsi/ufs/ufs-exynos.h
drivers/scsi/ufs/ufs-fault-injection.c [new file with mode: 0644]
drivers/scsi/ufs/ufs-fault-injection.h [new file with mode: 0644]
drivers/scsi/ufs/ufs-hisi.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-sysfs.c
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd-pltfrm.c
drivers/scsi/ufs/ufshcd-pltfrm.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/scsi/ufs/ufshpb.c [new file with mode: 0644]
drivers/scsi/ufs/ufshpb.h [new file with mode: 0644]
drivers/scsi/virtio_scsi.c
drivers/scsi/wd719x.c
drivers/scsi/xen-scsifront.c
drivers/target/Kconfig
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/loopback/tcm_loop.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/usb/storage/transport.c
fs/nfsd/Kconfig
include/linux/blkdev.h
include/linux/bsg-lib.h
include/linux/bsg.h
include/linux/cdrom.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/scsi/scsi_devinfo.h
include/scsi/scsi_ioctl.h
include/scsi/scsi_request.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/uapi/linux/target_core_user.h
include/uapi/scsi/fc/fc_els.h

index b4a5d55..ec3a714 100644 (file)
@@ -1298,3 +1298,239 @@ Description:    This node is used to set or display whether UFS WriteBooster is
                (if the platform supports UFSHCD_CAP_CLK_SCALING). For a
                platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can
                disable/enable WriteBooster through this sysfs node.
+
+What:          /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the HPB specification version.
+               The full information about the descriptor can be found in the UFS
+               HPB (Host Performance Booster) Extension specifications.
+               Example: version 1.2.3 = 0123h
+
+               The file is read only.
+
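The version value above is binary-coded decimal, which is why 0123h reads
as 1.2.3. A minimal decode sketch in C (the helper name is illustrative,
not from the patch):

    #include <stdio.h>

    /* Decode a BCD HPB version word, e.g. 0x0123 -> "1.2.3". */
    static void hpb_decode_version(unsigned int v, char *buf, size_t len)
    {
            snprintf(buf, len, "%x.%x.%x",
                     (v >> 8) & 0xff,   /* major */
                     (v >> 4) & 0x0f,   /* minor */
                     v & 0x0f);         /* patch */
    }
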
+What:          /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_control
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows an indication of the HPB control mode.
+               00h: Host control mode
+               01h: Device control mode
+
+               The file is read only.
+
+What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_region_size
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows bHPBRegionSize, from which the HPB Region
+               size can be calculated as follows (in bytes):
+               HPB Region size = 512B * 2^bHPBRegionSize
+
+               The file is read only.
+
+What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_number_lu
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the maximum number of HPB LUs supported by
+               the device.
+               00h: HPB is not supported by the device.
+               01h ~ 20h: Maximum number of HPB LUs supported by the device
+
+               The file is read only.
+
+What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_subregion_size
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows bHPBSubRegionSize, from which the HPB
+               Sub-Region size can be calculated as follows (in bytes); it
+               shall be a multiple of the logical block size:
+               HPB Sub-Region size = 512B * 2^bHPBSubRegionSize
+               bHPBSubRegionSize shall not exceed bHPBRegionSize.
+
+               The file is read only.
+
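Both bHPBRegionSize and bHPBSubRegionSize encode a power-of-two multiplier
on a 512-byte unit, so a value of 0Fh, for example, yields 512B << 15 =
16 MiB. A sketch of the arithmetic in C (the helper name is illustrative,
not from the patch):

    /* HPB (Sub-)Region size in bytes = 512B * 2^shift. */
    static inline unsigned long long hpb_unit_bytes(unsigned char shift)
    {
            return 512ULL << shift;
    }
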
+What:          /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_max_active_regions
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the maximum number of active HPB regions
+               supported by the device.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/unit_descriptor/hpb_lu_max_active_regions
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the maximum number of HPB regions assigned to
+               the HPB logical unit.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/unit_descriptor/hpb_pinned_region_start_offset
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the start offset of the HPB pinned region.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/unit_descriptor/hpb_number_pinned_regions
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of HPB pinned regions assigned to
+               the HPB logical unit.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_stats/hit_cnt
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of reads that were changed to
+               HPB reads.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_stats/miss_cnt
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of reads that could not be
+               changed to HPB reads.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_stats/rb_noti_cnt
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of response UPIUs that carry
+               recommendations for activating sub-regions and/or inactivating
+               regions.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_stats/rb_active_cnt
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of active sub-regions recommended by
+               response UPIUs.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_stats/rb_inactive_cnt
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of inactive regions recommended by
+               response UPIUs.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_stats/map_req_cnt
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the number of read buffer commands for
+               activating sub-regions recommended by response UPIUs.
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_params/requeue_timeout_ms
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the requeue timeout threshold, in ms, for
+               the HPB write buffer command. The value can be changed by
+               writing an integer to this entry.
+
+What:          /sys/bus/platform/drivers/ufshcd/*/attributes/max_data_size_hpb_single_cmd
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the maximum HPB data size that can be
+               transferred with a single HPB command.
+
+               ===  ========
+               00h  4KB
+               01h  8KB
+               02h  12KB
+               ...
+               FFh  1024KB
+               ===  ========
+
+               The file is read only.
+
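The table above is linear in the raw value: each step adds 4KB, i.e.
size = (value + 1) * 4KB, giving 4KB at 00h and 1024KB at FFh. A sketch
(illustrative helper name, not from the patch):

    /* bMaxDataSizeForHPBSingleCmd -> bytes: (val + 1) * 4 KiB. */
    static inline unsigned int hpb_single_cmd_max_bytes(unsigned char val)
    {
            return ((unsigned int)val + 1) * 4096;
    }
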
+What:          /sys/bus/platform/drivers/ufshcd/*/flags/hpb_enable
+Date:          June 2021
+Contact:       Daejun Park <daejun7.park@samsung.com>
+Description:   This entry shows the status of HPB.
+
+               == ============================
+               0  HPB is not enabled.
+               1  HPB is enabled.
+               == ============================
+
+               The file is read only.
+
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/activation_thld
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   In host control mode, reads are the major source of activation
+               trials.  Once this threshold is met, the region is added to the
+               "to-be-activated" list.  Since we reset the read counter upon
+               write, this includes sending an HPB READ BUFFER (rb) command
+               that updates the region's ppn as well.
+
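A rough sketch of the bookkeeping described above, with illustrative field
and function names (the real logic lives in the new drivers/scsi/ufs/ufshpb.c):

    /* Reads fill a region's counter; crossing activation_thld marks it
     * for activation.  Writes reset the counter (sketch only). */
    struct hpb_rgn_sketch {
            unsigned int reads;         /* reads since the last write */
            int to_be_activated;        /* queued for activation? */
    };

    static void hpb_rgn_read(struct hpb_rgn_sketch *rgn, unsigned int thld)
    {
            if (++rgn->reads >= thld)
                    rgn->to_be_activated = 1;
    }

    static void hpb_rgn_write(struct hpb_rgn_sketch *rgn)
    {
            rgn->reads = 0;             /* reset on write */
    }
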
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/normalization_factor
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   In host control mode, we think of the regions as "buckets".
+               Those buckets are being filled with reads, and emptied on write.
+               We use entries_per_srgn - the number of blocks in a sub-region -
+               as our bucket size.  This applies because HPB1.0 only handles
+               single-block reads.  Once the bucket size is crossed, we trigger
+               normalization work - not only to avoid overflow, but mainly
+               because we want to keep those counters normalized, as we are
+               using those reads as a comparative score to make various
+               decisions.  Normalization divides (shifts right) the read
+               counter by normalization_factor.  If, during consecutive
+               normalizations, an active region has exhausted its reads, it
+               is inactivated.
+
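Continuing the sketch above (same illustrative struct), normalization is a
right shift of the read counter, and an active region whose counter reaches
zero across consecutive normalizations becomes an inactivation candidate:

    static void hpb_rgn_normalize(struct hpb_rgn_sketch *rgn,
                                  unsigned int normalization_factor)
    {
            rgn->reads >>= normalization_factor;
            if (rgn->reads == 0)
                    rgn->to_be_activated = 0;   /* exhausted: inactivate */
    }
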
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_enter
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   Region deactivation is often due to the fact that eviction took
+               place: a region becomes active at the expense of another.  This
+               happens when the max-active-regions limit has been crossed.
+               In host mode, eviction is considered an extreme measure.  We
+               want to verify that the entering region has enough reads, and
+               the exiting region has far fewer reads.  eviction_thld_enter is
+               the minimum number of reads that a region must have in order to
+               be considered a candidate for evicting another region.
+
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_exit
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   Same as above for the exiting region. A region is considered to
+               be a candidate for eviction only if it has fewer reads than
+               eviction_thld_exit.
+
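The two thresholds combine into a single eviction check; a hedged sketch of
the comparison (parameter names are illustrative):

    /* Eviction is allowed only if the entering region is hot enough and
     * the exiting region is cold enough. */
    static int hpb_may_evict(unsigned int entering_reads,
                             unsigned int exiting_reads,
                             unsigned int thld_enter,
                             unsigned int thld_exit)
    {
            return entering_reads >= thld_enter && exiting_reads < thld_exit;
    }
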
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_ms
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   In order not to hang on to "cold" regions, we inactivate
+               a region that has had no READ access for a predefined amount
+               of time - read_timeout_ms.  If read_timeout_ms has expired and
+               the region is dirty, it is unlikely that we can make any use
+               of HPB-reading it, so we inactivate it.  Still, inactivation
+               has its overhead, and we may still benefit from HPB-reading
+               this region if it is clean - see read_timeout_expiries.
+
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_expiries
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   If the region read timeout has expired but the region is clean,
+               just rewind its timer for another spin.  This repeats as long
+               as the region stays clean and has not exhausted its
+               read_timeout_expiries threshold.
+
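The decision the timeout worker makes for an expired region, per the two
entries above, can be sketched as follows (illustrative, not the driver's
actual code):

    /* Dirty, or out of expiries: inactivate.  Otherwise rewind the timer
     * for another read_timeout_ms spin. */
    static int hpb_timeout_should_inactivate(int dirty,
                                             unsigned int expiries_left)
    {
            return dirty || expiries_left == 0;
    }
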
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/timeout_polling_interval_ms
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   How often the delayed worker that checks the read timeouts
+               is awakened.
+
+What:          /sys/class/scsi_device/*/device/hpb_param_sysfs/inflight_map_req
+Date:          February 2021
+Contact:       Avri Altman <avri.altman@wdc.com>
+Description:   In host control mode, the host is the originator of map
+               requests.  To avoid flooding the device, we use a simple
+               throttling mechanism that limits the number of inflight map
+               requests.
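The throttle reduces to a simple inflight-count gate; a hedged sketch with
illustrative names:

    /* Issue a new map request only while below the inflight limit. */
    static int hpb_may_issue_map_req(unsigned int inflight_map_req,
                                     unsigned int limit)
    {
            return inflight_map_req < limit;
    }
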
index bac87d7..8e28ae7 100644 (file)
@@ -29,35 +29,15 @@ if BLOCK
 config BLK_RQ_ALLOC_TIME
        bool
 
-config BLK_SCSI_REQUEST
-       bool
-
 config BLK_CGROUP_RWSTAT
        bool
 
-config BLK_DEV_BSG
-       bool "Block layer SG support v4"
-       default y
-       select BLK_SCSI_REQUEST
-       help
-         Saying Y here will enable generic SG (SCSI generic) v4 support
-         for any block device.
-
-         Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
-         can handle complicated SCSI commands: tagged variable length cdbs
-         with bidirectional data transfers and generic request/response
-         protocols (e.g. Task Management Functions and SMP in Serial
-         Attached SCSI).
-
-         This option is required by recent UDEV versions to properly
-         access device serial numbers, etc.
-
-         If unsure, say Y.
+config BLK_DEV_BSG_COMMON
+       tristate
 
 config BLK_DEV_BSGLIB
        bool "Block layer SG support v4 helper lib"
-       select BLK_DEV_BSG
-       select BLK_SCSI_REQUEST
+       select BLK_DEV_BSG_COMMON
        help
          Subsystems will normally enable this if needed. Users will not
          normally need to manually enable this.
index 1d0d466..6cf4027 100644 (file)
@@ -12,8 +12,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
                        disk-events.o
 
 obj-$(CONFIG_BOUNCE)           += bounce.o
-obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_ioctl.o
-obj-$(CONFIG_BLK_DEV_BSG)      += bsg.o
+obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)   += bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP)       += blk-cgroup.o
 obj-$(CONFIG_BLK_CGROUP_RWSTAT)        += blk-cgroup-rwstat.o
index 9440499..65d3a63 100644 (file)
@@ -3280,8 +3280,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
            set->map[HCTX_TYPE_POLL].nr_queues)
                blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 
-       q->sg_reserved_size = INT_MAX;
-
        INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
        INIT_LIST_HEAD(&q->requeue_list);
        spin_lock_init(&q->requeue_lock);
index a89d801..ccb9827 100644 (file)
@@ -6,6 +6,7 @@
  *  Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
  *  Copyright (C) 2011   Mike Christie
  */
+#include <linux/bsg.h>
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/delay.h>
 
 struct bsg_set {
        struct blk_mq_tag_set   tag_set;
+       struct bsg_device       *bd;
        bsg_job_fn              *job_fn;
        bsg_timeout_fn          *timeout_fn;
 };
 
-static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+               fmode_t mode, unsigned int timeout)
 {
+       struct bsg_job *job;
+       struct request *rq;
+       struct bio *bio;
+       int ret;
+
        if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
            hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
-       return 0;
-}
 
-static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-               fmode_t mode)
-{
-       struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-       int ret;
+       rq = blk_get_request(q, hdr->dout_xfer_len ?
+                            REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+       rq->timeout = timeout;
 
+       job = blk_mq_rq_to_pdu(rq);
        job->request_len = hdr->request_len;
        job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
-       if (IS_ERR(job->request))
-               return PTR_ERR(job->request);
+       if (IS_ERR(job->request)) {
+               ret = PTR_ERR(job->request);
+               goto out_put_request;
+       }
 
        if (hdr->dout_xfer_len && hdr->din_xfer_len) {
                job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
                if (IS_ERR(job->bidi_rq)) {
                        ret = PTR_ERR(job->bidi_rq);
-                       goto out;
+                       goto out_free_job_request;
                }
 
                ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
@@ -63,20 +72,20 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
                job->bidi_bio = NULL;
        }
 
-       return 0;
+       ret = 0;
+       if (hdr->dout_xfer_len) {
+               ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+                               hdr->dout_xfer_len, GFP_KERNEL);
+       } else if (hdr->din_xfer_len) {
+               ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+                               hdr->din_xfer_len, GFP_KERNEL);
+       }
 
-out_free_bidi_rq:
-       if (job->bidi_rq)
-               blk_put_request(job->bidi_rq);
-out:
-       kfree(job->request);
-       return ret;
-}
+       if (ret)
+               goto out_unmap_bidi_rq;
 
-static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-       struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-       int ret = 0;
+       bio = rq->bio;
+       blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
 
        /*
         * The assignments below don't make much sense, but are kept for
@@ -119,28 +128,20 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
                hdr->din_resid = 0;
        }
 
-       return ret;
-}
-
-static void bsg_transport_free_rq(struct request *rq)
-{
-       struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-
-       if (job->bidi_rq) {
+       blk_rq_unmap_user(bio);
+out_unmap_bidi_rq:
+       if (job->bidi_rq)
                blk_rq_unmap_user(job->bidi_bio);
+out_free_bidi_rq:
+       if (job->bidi_rq)
                blk_put_request(job->bidi_rq);
-       }
-
+out_free_job_request:
        kfree(job->request);
+out_put_request:
+       blk_put_request(rq);
+       return ret;
 }
 
-static const struct bsg_ops bsg_transport_ops = {
-       .check_proto            = bsg_transport_check_proto,
-       .fill_hdr               = bsg_transport_fill_hdr,
-       .complete_rq            = bsg_transport_complete_rq,
-       .free_rq                = bsg_transport_free_rq,
-};
-
 /**
  * bsg_teardown_job - routine to teardown a bsg job
  * @kref: kref inside bsg_job that is to be torn down
@@ -327,7 +328,7 @@ void bsg_remove_queue(struct request_queue *q)
                struct bsg_set *bset =
                        container_of(q->tag_set, struct bsg_set, tag_set);
 
-               bsg_unregister_queue(q);
+               bsg_unregister_queue(bset->bd);
                blk_cleanup_queue(q);
                blk_mq_free_tag_set(&bset->tag_set);
                kfree(bset);
@@ -396,10 +397,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
        q->queuedata = dev;
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-       ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
-       if (ret) {
-               printk(KERN_ERR "%s: bsg interface failed to "
-                      "initialize - register queue\n", dev->kobj.name);
+       bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
+       if (IS_ERR(bset->bd)) {
+               ret = PTR_ERR(bset->bd);
                goto out_cleanup_queue;
        }
 
index 1f19656..3510951 100644 (file)
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_driver.h>
 #include <scsi/sg.h>
 
 #define BSG_DESCRIPTION        "Block layer SCSI generic (bsg) driver"
 #define BSG_VERSION    "0.4"
 
-#define bsg_dbg(bd, fmt, ...) \
-       pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
-
 struct bsg_device {
        struct request_queue *queue;
-       spinlock_t lock;
-       struct hlist_node dev_list;
-       refcount_t ref_count;
-       char name[20];
+       struct device device;
+       struct cdev cdev;
        int max_queue;
+       unsigned int timeout;
+       unsigned int reserved_size;
+       bsg_sg_io_fn *sg_io_fn;
 };
 
+static inline struct bsg_device *to_bsg_device(struct inode *inode)
+{
+       return container_of(inode->i_cdev, struct bsg_device, cdev);
+}
+
 #define BSG_DEFAULT_CMDS       64
 #define BSG_MAX_DEVS           32768
 
-static DEFINE_MUTEX(bsg_mutex);
-static DEFINE_IDR(bsg_minor_idr);
-
-#define BSG_LIST_ARRAY_SIZE    8
-static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
-
+static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-static inline struct hlist_head *bsg_dev_idx_hash(int index)
-{
-       return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
-}
-
-#define uptr64(val) ((void __user *)(uintptr_t)(val))
-
-static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
-{
-       if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
-           hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
-               return -EINVAL;
-       return 0;
-}
-
-static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-               fmode_t mode)
-{
-       struct scsi_request *sreq = scsi_req(rq);
-
-       if (hdr->dout_xfer_len && hdr->din_xfer_len) {
-               pr_warn_once("BIDI support in bsg has been removed.\n");
-               return -EOPNOTSUPP;
-       }
-
-       sreq->cmd_len = hdr->request_len;
-       if (sreq->cmd_len > BLK_MAX_CDB) {
-               sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
-               if (!sreq->cmd)
-                       return -ENOMEM;
-       }
-
-       if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
-               return -EFAULT;
-       if (blk_verify_command(sreq->cmd, mode))
-               return -EPERM;
-       return 0;
-}
-
-static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
+static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
-       struct scsi_request *sreq = scsi_req(rq);
-       int ret = 0;
-
-       /*
-        * fill in all the output members
-        */
-       hdr->device_status = sreq->result & 0xff;
-       hdr->transport_status = host_byte(sreq->result);
-       hdr->driver_status = 0;
-       if (scsi_status_is_check_condition(sreq->result))
-               hdr->driver_status = DRIVER_SENSE;
-       hdr->info = 0;
-       if (hdr->device_status || hdr->transport_status || hdr->driver_status)
-               hdr->info |= SG_INFO_CHECK;
-       hdr->response_len = 0;
-
-       if (sreq->sense_len && hdr->response) {
-               int len = min_t(unsigned int, hdr->max_response_len,
-                                       sreq->sense_len);
-
-               if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
-                       ret = -EFAULT;
-               else
-                       hdr->response_len = len;
-       }
+       unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;
 
-       if (rq_data_dir(rq) == READ)
-               hdr->din_resid = sreq->resid_len;
-       else
-               hdr->dout_resid = sreq->resid_len;
+       if (hdr->timeout)
+               timeout = msecs_to_jiffies(hdr->timeout);
+       else if (bd->timeout)
+               timeout = bd->timeout;
 
-       return ret;
+       return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
 }
 
-static void bsg_scsi_free_rq(struct request *rq)
+static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
-       scsi_req_free_cmd(scsi_req(rq));
-}
-
-static const struct bsg_ops bsg_scsi_ops = {
-       .check_proto            = bsg_scsi_check_proto,
-       .fill_hdr               = bsg_scsi_fill_hdr,
-       .complete_rq            = bsg_scsi_complete_rq,
-       .free_rq                = bsg_scsi_free_rq,
-};
-
-static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
-{
-       struct request *rq;
-       struct bio *bio;
        struct sg_io_v4 hdr;
        int ret;
 
        if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                return -EFAULT;
-
-       if (!q->bsg_dev.class_dev)
-               return -ENXIO;
-
        if (hdr.guard != 'Q')
                return -EINVAL;
-       ret = q->bsg_dev.ops->check_proto(&hdr);
-       if (ret)
-               return ret;
-
-       rq = blk_get_request(q, hdr.dout_xfer_len ?
-                       REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
-       if (ret) {
-               blk_put_request(rq);
-               return ret;
-       }
-
-       rq->timeout = msecs_to_jiffies(hdr.timeout);
-       if (!rq->timeout)
-               rq->timeout = q->sg_timeout;
-       if (!rq->timeout)
-               rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-       if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-               rq->timeout = BLK_MIN_SG_TIMEOUT;
-
-       if (hdr.dout_xfer_len) {
-               ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
-                               hdr.dout_xfer_len, GFP_KERNEL);
-       } else if (hdr.din_xfer_len) {
-               ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
-                               hdr.din_xfer_len, GFP_KERNEL);
-       }
-
-       if (ret)
-               goto out_free_rq;
-
-       bio = rq->bio;
-
-       blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-       ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
-       blk_rq_unmap_user(bio);
-
-out_free_rq:
-       rq->q->bsg_dev.ops->free_rq(rq);
-       blk_put_request(rq);
+       ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
        if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
                return -EFAULT;
        return ret;
 }
 
-static struct bsg_device *bsg_alloc_device(void)
-{
-       struct bsg_device *bd;
-
-       bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
-       if (unlikely(!bd))
-               return NULL;
-
-       spin_lock_init(&bd->lock);
-       bd->max_queue = BSG_DEFAULT_CMDS;
-       INIT_HLIST_NODE(&bd->dev_list);
-       return bd;
-}
-
-static int bsg_put_device(struct bsg_device *bd)
-{
-       struct request_queue *q = bd->queue;
-
-       mutex_lock(&bsg_mutex);
-
-       if (!refcount_dec_and_test(&bd->ref_count)) {
-               mutex_unlock(&bsg_mutex);
-               return 0;
-       }
-
-       hlist_del(&bd->dev_list);
-       mutex_unlock(&bsg_mutex);
-
-       bsg_dbg(bd, "tearing down\n");
-
-       /*
-        * close can always block
-        */
-       kfree(bd);
-       blk_put_queue(q);
-       return 0;
-}
-
-static struct bsg_device *bsg_add_device(struct inode *inode,
-                                        struct request_queue *rq,
-                                        struct file *file)
-{
-       struct bsg_device *bd;
-       unsigned char buf[32];
-
-       lockdep_assert_held(&bsg_mutex);
-
-       if (!blk_get_queue(rq))
-               return ERR_PTR(-ENXIO);
-
-       bd = bsg_alloc_device();
-       if (!bd) {
-               blk_put_queue(rq);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       bd->queue = rq;
-
-       refcount_set(&bd->ref_count, 1);
-       hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
-
-       strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
-       bsg_dbg(bd, "bound to <%s>, max queue %d\n",
-               format_dev_t(buf, inode->i_rdev), bd->max_queue);
-
-       return bd;
-}
-
-static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
-{
-       struct bsg_device *bd;
-
-       lockdep_assert_held(&bsg_mutex);
-
-       hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
-               if (bd->queue == q) {
-                       refcount_inc(&bd->ref_count);
-                       goto found;
-               }
-       }
-       bd = NULL;
-found:
-       return bd;
-}
-
-static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
-{
-       struct bsg_device *bd;
-       struct bsg_class_device *bcd;
-
-       /*
-        * find the class device
-        */
-       mutex_lock(&bsg_mutex);
-       bcd = idr_find(&bsg_minor_idr, iminor(inode));
-
-       if (!bcd) {
-               bd = ERR_PTR(-ENODEV);
-               goto out_unlock;
-       }
-
-       bd = __bsg_get_device(iminor(inode), bcd->queue);
-       if (!bd)
-               bd = bsg_add_device(inode, bcd->queue, file);
-
-out_unlock:
-       mutex_unlock(&bsg_mutex);
-       return bd;
-}
-
 static int bsg_open(struct inode *inode, struct file *file)
 {
-       struct bsg_device *bd;
-
-       bd = bsg_get_device(inode, file);
-
-       if (IS_ERR(bd))
-               return PTR_ERR(bd);
-
-       file->private_data = bd;
+       if (!blk_get_queue(to_bsg_device(inode)->queue))
+               return -ENXIO;
        return 0;
 }
 
 static int bsg_release(struct inode *inode, struct file *file)
 {
-       struct bsg_device *bd = file->private_data;
-
-       file->private_data = NULL;
-       return bsg_put_device(bd);
+       blk_put_queue(to_bsg_device(inode)->queue);
+       return 0;
 }
 
 static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
 {
-       return put_user(bd->max_queue, uarg);
+       return put_user(READ_ONCE(bd->max_queue), uarg);
 }
 
 static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
 {
-       int queue;
+       int max_queue;
 
-       if (get_user(queue, uarg))
+       if (get_user(max_queue, uarg))
                return -EFAULT;
-       if (queue < 1)
+       if (max_queue < 1)
                return -EINVAL;
-
-       spin_lock_irq(&bd->lock);
-       bd->max_queue = queue;
-       spin_unlock_irq(&bd->lock);
+       WRITE_ONCE(bd->max_queue, max_queue);
        return 0;
 }
 
 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct bsg_device *bd = file->private_data;
+       struct bsg_device *bd = to_bsg_device(file_inode(file));
+       struct request_queue *q = bd->queue;
        void __user *uarg = (void __user *) arg;
+       int __user *intp = uarg;
+       int val;
 
        switch (cmd) {
        /*
@@ -366,17 +120,37 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         * SCSI/sg ioctls
         */
        case SG_GET_VERSION_NUM:
+               return put_user(30527, intp);
        case SCSI_IOCTL_GET_IDLUN:
+               return put_user(0, intp);
        case SCSI_IOCTL_GET_BUS_NUMBER:
+               return put_user(0, intp);
        case SG_SET_TIMEOUT:
+               if (get_user(val, intp))
+                       return -EFAULT;
+               bd->timeout = clock_t_to_jiffies(val);
+               return 0;
        case SG_GET_TIMEOUT:
+               return jiffies_to_clock_t(bd->timeout);
        case SG_GET_RESERVED_SIZE:
+               return put_user(min(bd->reserved_size, queue_max_bytes(q)),
+                               intp);
        case SG_SET_RESERVED_SIZE:
+               if (get_user(val, intp))
+                       return -EFAULT;
+               if (val < 0)
+                       return -EINVAL;
+               bd->reserved_size =
+                       min_t(unsigned int, val, queue_max_bytes(q));
+               return 0;
        case SG_EMULATED_HOST:
-       case SCSI_IOCTL_SEND_COMMAND:
-               return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
+               return put_user(1, intp);
        case SG_IO:
-               return bsg_sg_io(bd->queue, file->f_mode, uarg);
+               return bsg_sg_io(bd, file->f_mode, uarg);
+       case SCSI_IOCTL_SEND_COMMAND:
+               pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
+                               current->comm);
+               return -EINVAL;
        default:
                return -ENOTTY;
        }
@@ -391,92 +165,65 @@ static const struct file_operations bsg_fops = {
        .llseek         =       default_llseek,
 };
 
-void bsg_unregister_queue(struct request_queue *q)
+void bsg_unregister_queue(struct bsg_device *bd)
 {
-       struct bsg_class_device *bcd = &q->bsg_dev;
-
-       if (!bcd->class_dev)
-               return;
-
-       mutex_lock(&bsg_mutex);
-       idr_remove(&bsg_minor_idr, bcd->minor);
-       if (q->kobj.sd)
-               sysfs_remove_link(&q->kobj, "bsg");
-       device_unregister(bcd->class_dev);
-       bcd->class_dev = NULL;
-       mutex_unlock(&bsg_mutex);
+       if (bd->queue->kobj.sd)
+               sysfs_remove_link(&bd->queue->kobj, "bsg");
+       cdev_device_del(&bd->cdev, &bd->device);
+       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+       kfree(bd);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
-int bsg_register_queue(struct request_queue *q, struct device *parent,
-               const char *name, const struct bsg_ops *ops)
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+               struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
 {
-       struct bsg_class_device *bcd;
-       dev_t dev;
+       struct bsg_device *bd;
        int ret;
-       struct device *class_dev = NULL;
 
-       /*
-        * we need a proper transport to send commands, not a stacked device
-        */
-       if (!queue_is_mq(q))
-               return 0;
-
-       bcd = &q->bsg_dev;
-       memset(bcd, 0, sizeof(*bcd));
-
-       mutex_lock(&bsg_mutex);
+       bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+       if (!bd)
+               return ERR_PTR(-ENOMEM);
+       bd->max_queue = BSG_DEFAULT_CMDS;
+       bd->reserved_size = INT_MAX;
+       bd->queue = q;
+       bd->sg_io_fn = sg_io_fn;
 
-       ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+       ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
        if (ret < 0) {
-               if (ret == -ENOSPC) {
-                       printk(KERN_ERR "bsg: too many bsg devices\n");
-                       ret = -EINVAL;
-               }
-               goto unlock;
-       }
-
-       bcd->minor = ret;
-       bcd->queue = q;
-       bcd->ops = ops;
-       dev = MKDEV(bsg_major, bcd->minor);
-       class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
-       if (IS_ERR(class_dev)) {
-               ret = PTR_ERR(class_dev);
-               goto idr_remove;
+               if (ret == -ENOSPC)
+                       dev_err(parent, "bsg: too many bsg devices\n");
+               goto out_kfree;
        }
-       bcd->class_dev = class_dev;
+       bd->device.devt = MKDEV(bsg_major, ret);
+       bd->device.class = bsg_class;
+       bd->device.parent = parent;
+       dev_set_name(&bd->device, "%s", name);
+       device_initialize(&bd->device);
+
+       cdev_init(&bd->cdev, &bsg_fops);
+       bd->cdev.owner = THIS_MODULE;
+       ret = cdev_device_add(&bd->cdev, &bd->device);
+       if (ret)
+               goto out_ida_remove;
 
        if (q->kobj.sd) {
-               ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+               ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
                if (ret)
-                       goto unregister_class_dev;
+                       goto out_device_del;
        }
 
-       mutex_unlock(&bsg_mutex);
-       return 0;
-
-unregister_class_dev:
-       device_unregister(class_dev);
-idr_remove:
-       idr_remove(&bsg_minor_idr, bcd->minor);
-unlock:
-       mutex_unlock(&bsg_mutex);
-       return ret;
-}
-
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
-{
-       if (!blk_queue_scsi_passthrough(q)) {
-               WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
-               return -EINVAL;
-       }
+       return bd;
 
-       return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
+out_device_del:
+       cdev_device_del(&bd->cdev, &bd->device);
+out_ida_remove:
+       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+out_kfree:
+       kfree(bd);
+       return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);
-
-static struct cdev bsg_cdev;
+EXPORT_SYMBOL_GPL(bsg_register_queue);
 
 static char *bsg_devnode(struct device *dev, umode_t *mode)
 {
@@ -485,11 +232,8 @@ static char *bsg_devnode(struct device *dev, umode_t *mode)
 
 static int __init bsg_init(void)
 {
-       int ret, i;
        dev_t devid;
-
-       for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
-               INIT_HLIST_HEAD(&bsg_device_list[i]);
+       int ret;
 
        bsg_class = class_create(THIS_MODULE, "bsg");
        if (IS_ERR(bsg_class))
@@ -499,19 +243,12 @@ static int __init bsg_init(void)
        ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
        if (ret)
                goto destroy_bsg_class;
-
        bsg_major = MAJOR(devid);
 
-       cdev_init(&bsg_cdev, &bsg_fops);
-       ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
-       if (ret)
-               goto unregister_chrdev;
-
        printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
               " loaded (major %d)\n", bsg_major);
        return 0;
-unregister_chrdev:
-       unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+
 destroy_bsg_class:
        class_destroy(bsg_class);
        return ret;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
deleted file mode 100644 (file)
index d247431..0000000
+++ /dev/null
@@ -1,890 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- */
-#include <linux/compat.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/capability.h>
-#include <linux/completion.h>
-#include <linux/cdrom.h>
-#include <linux/ratelimit.h>
-#include <linux/slab.h>
-#include <linux/times.h>
-#include <linux/uio.h>
-#include <linux/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/sg.h>
-
-struct blk_cmd_filter {
-       unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-       unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-};
-
-static struct blk_cmd_filter blk_default_cmd_filter;
-
-/* Command group 3 is reserved and should never be used.  */
-const unsigned char scsi_command_size_tbl[8] =
-{
-       6, 10, 10, 12,
-       16, 12, 10, 10
-};
-EXPORT_SYMBOL(scsi_command_size_tbl);
-
-static int sg_get_version(int __user *p)
-{
-       static const int sg_version_num = 30527;
-       return put_user(sg_version_num, p);
-}
-
-static int scsi_get_idlun(struct request_queue *q, int __user *p)
-{
-       return put_user(0, p);
-}
-
-static int scsi_get_bus(struct request_queue *q, int __user *p)
-{
-       return put_user(0, p);
-}
-
-static int sg_get_timeout(struct request_queue *q)
-{
-       return jiffies_to_clock_t(q->sg_timeout);
-}
-
-static int sg_set_timeout(struct request_queue *q, int __user *p)
-{
-       int timeout, err = get_user(timeout, p);
-
-       if (!err)
-               q->sg_timeout = clock_t_to_jiffies(timeout);
-
-       return err;
-}
-
-static int max_sectors_bytes(struct request_queue *q)
-{
-       unsigned int max_sectors = queue_max_sectors(q);
-
-       max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
-
-       return max_sectors << 9;
-}
-
-static int sg_get_reserved_size(struct request_queue *q, int __user *p)
-{
-       int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));
-
-       return put_user(val, p);
-}
-
-static int sg_set_reserved_size(struct request_queue *q, int __user *p)
-{
-       int size, err = get_user(size, p);
-
-       if (err)
-               return err;
-
-       if (size < 0)
-               return -EINVAL;
-
-       q->sg_reserved_size = min(size, max_sectors_bytes(q));
-       return 0;
-}
-
-/*
- * will always return that we are ATAPI even for a real SCSI drive, I'm not
- * so sure this is worth doing anything about (why would you care??)
- */
-static int sg_emulated_host(struct request_queue *q, int __user *p)
-{
-       return put_user(1, p);
-}
-
-static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
-{
-       /* Basic read-only commands */
-       __set_bit(TEST_UNIT_READY, filter->read_ok);
-       __set_bit(REQUEST_SENSE, filter->read_ok);
-       __set_bit(READ_6, filter->read_ok);
-       __set_bit(READ_10, filter->read_ok);
-       __set_bit(READ_12, filter->read_ok);
-       __set_bit(READ_16, filter->read_ok);
-       __set_bit(READ_BUFFER, filter->read_ok);
-       __set_bit(READ_DEFECT_DATA, filter->read_ok);
-       __set_bit(READ_CAPACITY, filter->read_ok);
-       __set_bit(READ_LONG, filter->read_ok);
-       __set_bit(INQUIRY, filter->read_ok);
-       __set_bit(MODE_SENSE, filter->read_ok);
-       __set_bit(MODE_SENSE_10, filter->read_ok);
-       __set_bit(LOG_SENSE, filter->read_ok);
-       __set_bit(START_STOP, filter->read_ok);
-       __set_bit(GPCMD_VERIFY_10, filter->read_ok);
-       __set_bit(VERIFY_16, filter->read_ok);
-       __set_bit(REPORT_LUNS, filter->read_ok);
-       __set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
-       __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
-       __set_bit(MAINTENANCE_IN, filter->read_ok);
-       __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
-
-       /* Audio CD commands */
-       __set_bit(GPCMD_PLAY_CD, filter->read_ok);
-       __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
-       __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
-       __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
-       __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
-
-       /* CD/DVD data reading */
-       __set_bit(GPCMD_READ_CD, filter->read_ok);
-       __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
-       __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
-       __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
-       __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
-       __set_bit(GPCMD_READ_HEADER, filter->read_ok);
-       __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
-       __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
-       __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
-       __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
-       __set_bit(GPCMD_SCAN, filter->read_ok);
-       __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
-       __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
-       __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
-       __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
-       __set_bit(GPCMD_SEEK, filter->read_ok);
-       __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
-
-       /* Basic writing commands */
-       __set_bit(WRITE_6, filter->write_ok);
-       __set_bit(WRITE_10, filter->write_ok);
-       __set_bit(WRITE_VERIFY, filter->write_ok);
-       __set_bit(WRITE_12, filter->write_ok);
-       __set_bit(WRITE_VERIFY_12, filter->write_ok);
-       __set_bit(WRITE_16, filter->write_ok);
-       __set_bit(WRITE_LONG, filter->write_ok);
-       __set_bit(WRITE_LONG_2, filter->write_ok);
-       __set_bit(WRITE_SAME, filter->write_ok);
-       __set_bit(WRITE_SAME_16, filter->write_ok);
-       __set_bit(WRITE_SAME_32, filter->write_ok);
-       __set_bit(ERASE, filter->write_ok);
-       __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
-       __set_bit(MODE_SELECT, filter->write_ok);
-       __set_bit(LOG_SELECT, filter->write_ok);
-       __set_bit(GPCMD_BLANK, filter->write_ok);
-       __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
-       __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
-       __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
-       __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
-       __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
-       __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
-       __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
-       __set_bit(GPCMD_SEND_KEY, filter->write_ok);
-       __set_bit(GPCMD_SEND_OPC, filter->write_ok);
-       __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
-       __set_bit(GPCMD_SET_SPEED, filter->write_ok);
-       __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
-       __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
-       __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
-       __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
-
-       /* ZBC Commands */
-       __set_bit(ZBC_OUT, filter->write_ok);
-       __set_bit(ZBC_IN, filter->read_ok);
-}
-
-int blk_verify_command(unsigned char *cmd, fmode_t mode)
-{
-       struct blk_cmd_filter *filter = &blk_default_cmd_filter;
-
-       /* root can do any command. */
-       if (capable(CAP_SYS_RAWIO))
-               return 0;
-
-       /* Anybody who can open the device can do a read-safe command */
-       if (test_bit(cmd[0], filter->read_ok))
-               return 0;
-
-       /* Write-safe commands require a writable open */
-       if (test_bit(cmd[0], filter->write_ok) && (mode & FMODE_WRITE))
-               return 0;
-
-       return -EPERM;
-}
-EXPORT_SYMBOL(blk_verify_command);
-
-static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
-                            struct sg_io_hdr *hdr, fmode_t mode)
-{
-       struct scsi_request *req = scsi_req(rq);
-
-       if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
-               return -EFAULT;
-       if (blk_verify_command(req->cmd, mode))
-               return -EPERM;
-
-       /*
-        * fill in request structure
-        */
-       req->cmd_len = hdr->cmd_len;
-
-       rq->timeout = msecs_to_jiffies(hdr->timeout);
-       if (!rq->timeout)
-               rq->timeout = q->sg_timeout;
-       if (!rq->timeout)
-               rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-       if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-               rq->timeout = BLK_MIN_SG_TIMEOUT;
-
-       return 0;
-}
-
-static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
-                                struct bio *bio)
-{
-       struct scsi_request *req = scsi_req(rq);
-       int r, ret = 0;
-
-       /*
-        * fill in all the output members
-        */
-       hdr->status = req->result & 0xff;
-       hdr->masked_status = status_byte(req->result);
-       hdr->msg_status = COMMAND_COMPLETE;
-       hdr->host_status = host_byte(req->result);
-       hdr->driver_status = 0;
-       if (scsi_status_is_check_condition(hdr->status))
-               hdr->driver_status = DRIVER_SENSE;
-       hdr->info = 0;
-       if (hdr->masked_status || hdr->host_status || hdr->driver_status)
-               hdr->info |= SG_INFO_CHECK;
-       hdr->resid = req->resid_len;
-       hdr->sb_len_wr = 0;
-
-       if (req->sense_len && hdr->sbp) {
-               int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
-
-               if (!copy_to_user(hdr->sbp, req->sense, len))
-                       hdr->sb_len_wr = len;
-               else
-                       ret = -EFAULT;
-       }
-
-       r = blk_rq_unmap_user(bio);
-       if (!ret)
-               ret = r;
-
-       return ret;
-}
-
-static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
-               struct sg_io_hdr *hdr, fmode_t mode)
-{
-       unsigned long start_time;
-       ssize_t ret = 0;
-       int writing = 0;
-       int at_head = 0;
-       struct request *rq;
-       struct scsi_request *req;
-       struct bio *bio;
-
-       if (hdr->interface_id != 'S')
-               return -EINVAL;
-
-       if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
-               return -EIO;
-
-       if (hdr->dxfer_len)
-               switch (hdr->dxfer_direction) {
-               default:
-                       return -EINVAL;
-               case SG_DXFER_TO_DEV:
-                       writing = 1;
-                       break;
-               case SG_DXFER_TO_FROM_DEV:
-               case SG_DXFER_FROM_DEV:
-                       break;
-               }
-       if (hdr->flags & SG_FLAG_Q_AT_HEAD)
-               at_head = 1;
-
-       ret = -ENOMEM;
-       rq = blk_get_request(q, writing ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-       req = scsi_req(rq);
-
-       if (hdr->cmd_len > BLK_MAX_CDB) {
-               req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
-               if (!req->cmd)
-                       goto out_put_request;
-       }
-
-       ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
-       if (ret < 0)
-               goto out_free_cdb;
-
-       ret = 0;
-       if (hdr->iovec_count) {
-               struct iov_iter i;
-               struct iovec *iov = NULL;
-
-               ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
-                                  hdr->iovec_count, 0, &iov, &i);
-               if (ret < 0)
-                       goto out_free_cdb;
-
-               /* SG_IO howto says that the shorter of the two wins */
-               iov_iter_truncate(&i, hdr->dxfer_len);
-
-               ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
-               kfree(iov);
-       } else if (hdr->dxfer_len)
-               ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
-                                     GFP_KERNEL);
-
-       if (ret)
-               goto out_free_cdb;
-
-       bio = rq->bio;
-       req->retries = 0;
-
-       start_time = jiffies;
-
-       blk_execute_rq(bd_disk, rq, at_head);
-
-       hdr->duration = jiffies_to_msecs(jiffies - start_time);
-
-       ret = blk_complete_sghdr_rq(rq, hdr, bio);
-
-out_free_cdb:
-       scsi_req_free_cmd(req);
-out_put_request:
-       blk_put_request(rq);
-       return ret;
-}
-
-/**
- * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
- * @q:         request queue to send scsi commands down
- * @disk:      gendisk to operate on (optional)
- * @mode:      mode used to open the file through which the ioctl has been
- *             submitted
- * @sic:       userspace structure describing the command to perform
- *
- * Send down the scsi command described by @sic to the device below
- * the request queue @q.  @mode is used to perform fine-grained
- * permission checks that allow users to send down non-destructive
- * SCSI commands.  If the caller has a struct gendisk available it
- * should be passed in as @disk to allow the low level driver to use
- * the information contained in it.  A NULL @disk is only allowed if
- * the caller knows that the low level driver doesn't need it (e.g. in
- * the scsi subsystem).
- *
- * Notes:
- *   -  This interface is deprecated - users should use the SG_IO
- *      interface instead, as this is a more flexible approach to
- *      performing SCSI commands on a device.
- *   -  The SCSI command length is determined by examining the 1st byte
- *      of the given command. There is no way to override this.
- *   -  Data transfers are limited to PAGE_SIZE
- *   -  The length (@sic->inlen + @sic->outlen) must be at least
- *      OMAX_SB_LEN bytes long to accommodate the sense buffer when
- *      an error occurs.
- *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
- *      old code will not be surprised.
- *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
- *      a negative return and the Unix error code in 'errno'.
- *      If the SCSI command succeeds then 0 is returned.
- *      Positive numbers returned are the compacted SCSI error codes (4
- *      bytes in one int) where the lowest byte is the SCSI status.
- */
-int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
-               struct scsi_ioctl_command __user *sic)
-{
-       enum { OMAX_SB_LEN = 16 };      /* For backward compatibility */
-       struct request *rq;
-       struct scsi_request *req;
-       int err;
-       unsigned int in_len, out_len, bytes, opcode, cmdlen;
-       char *buffer = NULL;
-
-       if (!sic)
-               return -EINVAL;
-
-       /*
-        * get in and out lengths; verify they don't exceed a page worth of data
-        */
-       if (get_user(in_len, &sic->inlen))
-               return -EFAULT;
-       if (get_user(out_len, &sic->outlen))
-               return -EFAULT;
-       if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
-               return -EINVAL;
-       if (get_user(opcode, sic->data))
-               return -EFAULT;
-
-       bytes = max(in_len, out_len);
-       if (bytes) {
-               buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
-               if (!buffer)
-                       return -ENOMEM;
-
-       }
-
-       rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-       if (IS_ERR(rq)) {
-               err = PTR_ERR(rq);
-               goto error_free_buffer;
-       }
-       req = scsi_req(rq);
-
-       cmdlen = COMMAND_SIZE(opcode);
-
-       /*
-        * get command and data to send to device, if any
-        */
-       err = -EFAULT;
-       req->cmd_len = cmdlen;
-       if (copy_from_user(req->cmd, sic->data, cmdlen))
-               goto error;
-
-       if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
-               goto error;
-
-       err = blk_verify_command(req->cmd, mode);
-       if (err)
-               goto error;
-
-       /* default; possibly overridden later */
-       req->retries = 5;
-
-       switch (opcode) {
-       case SEND_DIAGNOSTIC:
-       case FORMAT_UNIT:
-               rq->timeout = FORMAT_UNIT_TIMEOUT;
-               req->retries = 1;
-               break;
-       case START_STOP:
-               rq->timeout = START_STOP_TIMEOUT;
-               break;
-       case MOVE_MEDIUM:
-               rq->timeout = MOVE_MEDIUM_TIMEOUT;
-               break;
-       case READ_ELEMENT_STATUS:
-               rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
-               break;
-       case READ_DEFECT_DATA:
-               rq->timeout = READ_DEFECT_DATA_TIMEOUT;
-               req->retries = 1;
-               break;
-       default:
-               rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-               break;
-       }
-
-       if (bytes) {
-               err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
-               if (err)
-                       goto error;
-       }
-
-       blk_execute_rq(disk, rq, 0);
-
-       err = req->result & 0xff;       /* only 8 bit SCSI status */
-       if (err) {
-               if (req->sense_len && req->sense) {
-                       bytes = (OMAX_SB_LEN > req->sense_len) ?
-                               req->sense_len : OMAX_SB_LEN;
-                       if (copy_to_user(sic->data, req->sense, bytes))
-                               err = -EFAULT;
-               }
-       } else {
-               if (copy_to_user(sic->data, buffer, out_len))
-                       err = -EFAULT;
-       }
-
-error:
-       blk_put_request(rq);
-
-error_free_buffer:
-       kfree(buffer);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
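As the notes above recommend, SG_IO is the supported replacement for this ioctl. A minimal userspace sketch, assuming a TEST UNIT READY CDB and an illustrative device path; error handling is trimmed:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	/* Issue TEST UNIT READY via SG_IO; returns the SCSI status byte. */
	int test_unit_ready(const char *dev)
	{
		unsigned char cdb[6] = { 0 };	/* TEST UNIT READY, opcode 0x00 */
		unsigned char sense[32];
		struct sg_io_hdr hdr = { 0 };
		int fd = open(dev, O_RDONLY), ret;

		if (fd < 0)
			return -1;
		hdr.interface_id = 'S';
		hdr.dxfer_direction = SG_DXFER_NONE;
		hdr.cmdp = cdb;
		hdr.cmd_len = sizeof(cdb);
		hdr.sbp = sense;
		hdr.mx_sb_len = sizeof(sense);
		hdr.timeout = 10000;		/* milliseconds */
		ret = ioctl(fd, SG_IO, &hdr) < 0 ? -1 : hdr.status;
		close(fd);
		return ret;
	}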
-
-/* Send basic block requests */
-static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
-                             int cmd, int data)
-{
-       struct request *rq;
-       int err;
-
-       rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-       rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-       scsi_req(rq)->cmd[0] = cmd;
-       scsi_req(rq)->cmd[4] = data;
-       scsi_req(rq)->cmd_len = 6;
-       blk_execute_rq(bd_disk, rq, 0);
-       err = scsi_req(rq)->result ? -EIO : 0;
-       blk_put_request(rq);
-
-       return err;
-}
-
-static inline int blk_send_start_stop(struct request_queue *q,
-                                     struct gendisk *bd_disk, int data)
-{
-       return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
-}
-
-int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
-{
-#ifdef CONFIG_COMPAT
-       if (in_compat_syscall()) {
-               struct compat_sg_io_hdr hdr32 =  {
-                       .interface_id    = hdr->interface_id,
-                       .dxfer_direction = hdr->dxfer_direction,
-                       .cmd_len         = hdr->cmd_len,
-                       .mx_sb_len       = hdr->mx_sb_len,
-                       .iovec_count     = hdr->iovec_count,
-                       .dxfer_len       = hdr->dxfer_len,
-                       .dxferp          = (uintptr_t)hdr->dxferp,
-                       .cmdp            = (uintptr_t)hdr->cmdp,
-                       .sbp             = (uintptr_t)hdr->sbp,
-                       .timeout         = hdr->timeout,
-                       .flags           = hdr->flags,
-                       .pack_id         = hdr->pack_id,
-                       .usr_ptr         = (uintptr_t)hdr->usr_ptr,
-                       .status          = hdr->status,
-                       .masked_status   = hdr->masked_status,
-                       .msg_status      = hdr->msg_status,
-                       .sb_len_wr       = hdr->sb_len_wr,
-                       .host_status     = hdr->host_status,
-                       .driver_status   = hdr->driver_status,
-                       .resid           = hdr->resid,
-                       .duration        = hdr->duration,
-                       .info            = hdr->info,
-               };
-
-               if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
-                       return -EFAULT;
-
-               return 0;
-       }
-#endif
-
-       if (copy_to_user(argp, hdr, sizeof(*hdr)))
-               return -EFAULT;
-
-       return 0;
-}
-EXPORT_SYMBOL(put_sg_io_hdr);
-
-int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
-{
-#ifdef CONFIG_COMPAT
-       struct compat_sg_io_hdr hdr32;
-
-       if (in_compat_syscall()) {
-               if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
-                       return -EFAULT;
-
-               *hdr = (struct sg_io_hdr) {
-                       .interface_id    = hdr32.interface_id,
-                       .dxfer_direction = hdr32.dxfer_direction,
-                       .cmd_len         = hdr32.cmd_len,
-                       .mx_sb_len       = hdr32.mx_sb_len,
-                       .iovec_count     = hdr32.iovec_count,
-                       .dxfer_len       = hdr32.dxfer_len,
-                       .dxferp          = compat_ptr(hdr32.dxferp),
-                       .cmdp            = compat_ptr(hdr32.cmdp),
-                       .sbp             = compat_ptr(hdr32.sbp),
-                       .timeout         = hdr32.timeout,
-                       .flags           = hdr32.flags,
-                       .pack_id         = hdr32.pack_id,
-                       .usr_ptr         = compat_ptr(hdr32.usr_ptr),
-                       .status          = hdr32.status,
-                       .masked_status   = hdr32.masked_status,
-                       .msg_status      = hdr32.msg_status,
-                       .sb_len_wr       = hdr32.sb_len_wr,
-                       .host_status     = hdr32.host_status,
-                       .driver_status   = hdr32.driver_status,
-                       .resid           = hdr32.resid,
-                       .duration        = hdr32.duration,
-                       .info            = hdr32.info,
-               };
-
-               return 0;
-       }
-#endif
-
-       if (copy_from_user(hdr, argp, sizeof(*hdr)))
-               return -EFAULT;
-
-       return 0;
-}
-EXPORT_SYMBOL(get_sg_io_hdr);
-
-#ifdef CONFIG_COMPAT
-struct compat_cdrom_generic_command {
-       unsigned char   cmd[CDROM_PACKET_SIZE];
-       compat_caddr_t  buffer;
-       compat_uint_t   buflen;
-       compat_int_t    stat;
-       compat_caddr_t  sense;
-       unsigned char   data_direction;
-       unsigned char   pad[3];
-       compat_int_t    quiet;
-       compat_int_t    timeout;
-       compat_caddr_t  unused;
-};
-#endif
-
-static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
-                                     const void __user *arg)
-{
-#ifdef CONFIG_COMPAT
-       if (in_compat_syscall()) {
-               struct compat_cdrom_generic_command cgc32;
-
-               if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
-                       return -EFAULT;
-
-               *cgc = (struct cdrom_generic_command) {
-                       .buffer         = compat_ptr(cgc32.buffer),
-                       .buflen         = cgc32.buflen,
-                       .stat           = cgc32.stat,
-                       .sense          = compat_ptr(cgc32.sense),
-                       .data_direction = cgc32.data_direction,
-                       .quiet          = cgc32.quiet,
-                       .timeout        = cgc32.timeout,
-                       .unused         = compat_ptr(cgc32.unused),
-               };
-               memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
-               return 0;
-       }
-#endif
-       if (copy_from_user(cgc, arg, sizeof(*cgc)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
-                                     void __user *arg)
-{
-#ifdef CONFIG_COMPAT
-       if (in_compat_syscall()) {
-               struct compat_cdrom_generic_command cgc32 = {
-                       .buffer         = (uintptr_t)(cgc->buffer),
-                       .buflen         = cgc->buflen,
-                       .stat           = cgc->stat,
-                       .sense          = (uintptr_t)(cgc->sense),
-                       .data_direction = cgc->data_direction,
-                       .quiet          = cgc->quiet,
-                       .timeout        = cgc->timeout,
-                       .unused         = (uintptr_t)(cgc->unused),
-               };
-               memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
-
-               if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
-                       return -EFAULT;
-
-               return 0;
-       }
-#endif
-       if (copy_to_user(arg, cgc, sizeof(*cgc)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int scsi_cdrom_send_packet(struct request_queue *q,
-                                 struct gendisk *bd_disk,
-                                 fmode_t mode, void __user *arg)
-{
-       struct cdrom_generic_command cgc;
-       struct sg_io_hdr hdr;
-       int err;
-
-       err = scsi_get_cdrom_generic_arg(&cgc, arg);
-       if (err)
-               return err;
-
-       cgc.timeout = clock_t_to_jiffies(cgc.timeout);
-       memset(&hdr, 0, sizeof(hdr));
-       hdr.interface_id = 'S';
-       hdr.cmd_len = sizeof(cgc.cmd);
-       hdr.dxfer_len = cgc.buflen;
-       switch (cgc.data_direction) {
-               case CGC_DATA_UNKNOWN:
-                       hdr.dxfer_direction = SG_DXFER_UNKNOWN;
-                       break;
-               case CGC_DATA_WRITE:
-                       hdr.dxfer_direction = SG_DXFER_TO_DEV;
-                       break;
-               case CGC_DATA_READ:
-                       hdr.dxfer_direction = SG_DXFER_FROM_DEV;
-                       break;
-               case CGC_DATA_NONE:
-                       hdr.dxfer_direction = SG_DXFER_NONE;
-                       break;
-               default:
-                       return -EINVAL;
-       }
-
-       hdr.dxferp = cgc.buffer;
-       hdr.sbp = cgc.sense;
-       if (hdr.sbp)
-               hdr.mx_sb_len = sizeof(struct request_sense);
-       hdr.timeout = jiffies_to_msecs(cgc.timeout);
-       hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
-       hdr.cmd_len = sizeof(cgc.cmd);
-
-       err = sg_io(q, bd_disk, &hdr, mode);
-       if (err == -EFAULT)
-               return -EFAULT;
-
-       if (hdr.status)
-               return -EIO;
-
-       cgc.stat = err;
-       cgc.buflen = hdr.resid;
-       if (scsi_put_cdrom_generic_arg(&cgc, arg))
-               return -EFAULT;
-
-       return err;
-}
-
-int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
-                  unsigned int cmd, void __user *arg)
-{
-       int err;
-
-       if (!q)
-               return -ENXIO;
-
-       switch (cmd) {
-               /*
-                * new sgv3 interface
-                */
-               case SG_GET_VERSION_NUM:
-                       err = sg_get_version(arg);
-                       break;
-               case SCSI_IOCTL_GET_IDLUN:
-                       err = scsi_get_idlun(q, arg);
-                       break;
-               case SCSI_IOCTL_GET_BUS_NUMBER:
-                       err = scsi_get_bus(q, arg);
-                       break;
-               case SG_SET_TIMEOUT:
-                       err = sg_set_timeout(q, arg);
-                       break;
-               case SG_GET_TIMEOUT:
-                       err = sg_get_timeout(q);
-                       break;
-               case SG_GET_RESERVED_SIZE:
-                       err = sg_get_reserved_size(q, arg);
-                       break;
-               case SG_SET_RESERVED_SIZE:
-                       err = sg_set_reserved_size(q, arg);
-                       break;
-               case SG_EMULATED_HOST:
-                       err = sg_emulated_host(q, arg);
-                       break;
-               case SG_IO: {
-                       struct sg_io_hdr hdr;
-
-                       err = get_sg_io_hdr(&hdr, arg);
-                       if (err)
-                               break;
-                       err = sg_io(q, bd_disk, &hdr, mode);
-                       if (err == -EFAULT)
-                               break;
-
-                       if (put_sg_io_hdr(&hdr, arg))
-                               err = -EFAULT;
-                       break;
-               }
-               case CDROM_SEND_PACKET:
-                       err = scsi_cdrom_send_packet(q, bd_disk, mode, arg);
-                       break;
-
-               /*
-                * old junk scsi send command ioctl
-                */
-               case SCSI_IOCTL_SEND_COMMAND:
-                       printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
-                       err = -EINVAL;
-                       if (!arg)
-                               break;
-
-                       err = sg_scsi_ioctl(q, bd_disk, mode, arg);
-                       break;
-               case CDROMCLOSETRAY:
-                       err = blk_send_start_stop(q, bd_disk, 0x03);
-                       break;
-               case CDROMEJECT:
-                       err = blk_send_start_stop(q, bd_disk, 0x02);
-                       break;
-               default:
-                       err = -ENOTTY;
-       }
-
-       return err;
-}
-EXPORT_SYMBOL(scsi_cmd_ioctl);
-
-int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
-{
-       if (bd && !bdev_is_partition(bd))
-               return 0;
-
-       if (capable(CAP_SYS_RAWIO))
-               return 0;
-
-       return -ENOIOCTLCMD;
-}
-EXPORT_SYMBOL(scsi_verify_blk_ioctl);
-
-int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
-                      unsigned int cmd, void __user *arg)
-{
-       int ret;
-
-       ret = scsi_verify_blk_ioctl(bd, cmd);
-       if (ret < 0)
-               return ret;
-
-       return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
-}
-EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
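For context, scsi_cmd_blk_ioctl() is the hook block-level callers such as cdrom_ioctl() (see its removal later in this series) used to try the generic SCSI ioctls before their own handling. A sketch of that calling pattern, with hypothetical driver names:

	/* Hypothetical block_device_operations ->ioctl method (sketch). */
	static int foo_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
	{
		void __user *argp = (void __user *)arg;
		int ret;

		/* Try the generic SCSI command ioctls first ... */
		ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
		if (ret != -ENOTTY)
			return ret;

		/* ... then fall back to driver-specific commands. */
		return foo_private_ioctl(bdev, cmd, argp);
	}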
-
-/**
- * scsi_req_init - initialize certain fields of a scsi_request structure
- * @req: Pointer to a scsi_request structure.
- *
- * Initializes .__cmd[], .cmd, .cmd_len and .sense_len but no other members
- * of struct scsi_request.
- */
-void scsi_req_init(struct scsi_request *req)
-{
-       memset(req->__cmd, 0, sizeof(req->__cmd));
-       req->cmd = req->__cmd;
-       req->cmd_len = BLK_MAX_CDB;
-       req->sense_len = 0;
-}
-EXPORT_SYMBOL(scsi_req_init);
-
-static int __init blk_scsi_ioctl_init(void)
-{
-       blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
-       return 0;
-}
-fs_initcall(blk_scsi_ioctl_init);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index bb36377..bf9c4b6 100644
@@ -912,7 +912,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
-       blk_abort_request(qc->scsicmd->request);
+       blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
 }
 
 /**
@@ -1893,8 +1893,7 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
  */
 static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
 {
-       if (qc->scsicmd &&
-           qc->scsicmd->request->rq_flags & RQF_QUIET)
+       if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
                qc->flags |= ATA_QCFLAG_QUIET;
        return qc->flags & ATA_QCFLAG_QUIET;
 }
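The call-site conversions in this and the following hunks rely on the new scsi_cmd_to_rq() helper. Since the request is now allocated as the blk-mq PDU container immediately in front of the SCSI command, the helper reduces to a constant-offset mapping; roughly:

	/* Rough shape of the helper these call sites switch to (sketch). */
	static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
	{
		return blk_mq_rq_from_pdu(scmd);
	}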
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0b7b462..1fb4611 100644
@@ -631,7 +631,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 {
        struct ata_queued_cmd *qc;
 
-       qc = ata_qc_new_init(dev, cmd->request->tag);
+       qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
        if (qc) {
                qc->scsicmd = cmd;
                qc->scsidone = cmd->scsi_done;
@@ -639,7 +639,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
                qc->sg = scsi_sglist(cmd);
                qc->n_elem = scsi_sg_count(cmd);
 
-               if (cmd->request->rq_flags & RQF_QUIET)
+               if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
                        qc->flags |= ATA_QCFLAG_QUIET;
        } else {
                cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
@@ -1496,7 +1496,7 @@ nothing_to_do:
 
 static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
 {
-       struct request *rq = scmd->request;
+       struct request *rq = scsi_cmd_to_rq(scmd);
        u32 req_blocks;
 
        if (!blk_rq_is_passthrough(rq))
@@ -1531,7 +1531,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 {
        struct scsi_cmnd *scmd = qc->scsicmd;
        const u8 *cdb = scmd->cmnd;
-       struct request *rq = scmd->request;
+       struct request *rq = scsi_cmd_to_rq(scmd);
        int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
        unsigned int tf_flags = 0;
        u64 block;
@@ -3139,7 +3139,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
         * as it modifies the DATA OUT buffer, which would corrupt user
         * memory for SG_IO commands.
         */
-       if (unlikely(blk_rq_is_passthrough(scmd->request)))
+       if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
                goto invalid_opcode;
 
        if (unlikely(scmd->cmd_len < 16)) {
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 9d0dd8f..121635a 100644
@@ -48,8 +48,8 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
        struct scsi_cmnd *cmd = qc->scsicmd;
        bool swap = 1;
 
-       if (dev->class == ATA_DEV_ATA && cmd && cmd->request &&
-           !blk_rq_is_passthrough(cmd->request))
+       if (dev->class == ATA_DEV_ATA && cmd &&
+           !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
                swap = 0;
 
        /* Transfer multiple of 2 bytes */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2804254..688a592 100644
@@ -886,6 +886,8 @@ static void device_link_put_kref(struct device_link *link)
 {
        if (link->flags & DL_FLAG_STATELESS)
                kref_put(&link->kref, __device_link_del);
+       else if (!device_is_registered(link->consumer))
+               __device_link_del(&link->kref);
        else
                WARN(1, "Unable to drop a managed device link reference\n");
 }
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index fbb3a55..ab3e37a 100644
@@ -74,7 +74,6 @@ config N64CART
 
 config CDROM
        tristate
-       select BLK_SCSI_REQUEST
 
 config GDROM
        tristate "SEGA Dreamcast GD-ROM drive"
@@ -306,7 +305,7 @@ config CDROM_PKTCDVD
        tristate "Packet writing on CD/DVD media (DEPRECATED)"
        depends on !UML
        select CDROM
-       select BLK_SCSI_REQUEST
+       select SCSI_COMMON
        help
          Note: This driver is deprecated and will be removed from the
          kernel in the near future!
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 7c6ae10..a295634 100644
@@ -27,7 +27,6 @@ config PARIDE_PCD
        tristate "Parallel port ATAPI CD-ROMs"
        depends on PARIDE
        select CDROM
-       select BLK_SCSI_REQUEST # only for the generic cdrom code
        help
          This option enables the high-level driver for ATAPI CD-ROM devices
          connected through a parallel port. If you chose to build PARIDE
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index feb827e..bd2e5b1 100644
@@ -629,7 +629,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
        if (CDROM_CAN(CDC_MRW_W))
                cdi->exit = cdrom_mrw_exit;
 
-       if (cdi->disk)
+       if (cdi->ops->read_cdda_bpc)
                cdi->cdda_method = CDDA_BPC_FULL;
        else
                cdi->cdda_method = CDDA_OLD;
@@ -2159,81 +2159,26 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                               int lba, int nframes)
 {
-       struct request_queue *q = cdi->disk->queue;
-       struct request *rq;
-       struct scsi_request *req;
-       struct bio *bio;
-       unsigned int len;
+       int max_frames = (queue_max_sectors(cdi->disk->queue) << 9) /
+                         CD_FRAMESIZE_RAW;
        int nr, ret = 0;
 
-       if (!q)
-               return -ENXIO;
-
-       if (!blk_queue_scsi_passthrough(q)) {
-               WARN_ONCE(true,
-                         "Attempt read CDDA info through a non-SCSI queue\n");
-               return -EINVAL;
-       }
-
        cdi->last_sense = 0;
 
        while (nframes) {
-               nr = nframes;
                if (cdi->cdda_method == CDDA_BPC_SINGLE)
                        nr = 1;
-               if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
-                       nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
-
-               len = nr * CD_FRAMESIZE_RAW;
-
-               rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
-               if (IS_ERR(rq)) {
-                       ret = PTR_ERR(rq);
-                       break;
-               }
-               req = scsi_req(rq);
-
-               ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
-               if (ret) {
-                       blk_put_request(rq);
-                       break;
-               }
-
-               req->cmd[0] = GPCMD_READ_CD;
-               req->cmd[1] = 1 << 2;
-               req->cmd[2] = (lba >> 24) & 0xff;
-               req->cmd[3] = (lba >> 16) & 0xff;
-               req->cmd[4] = (lba >>  8) & 0xff;
-               req->cmd[5] = lba & 0xff;
-               req->cmd[6] = (nr >> 16) & 0xff;
-               req->cmd[7] = (nr >>  8) & 0xff;
-               req->cmd[8] = nr & 0xff;
-               req->cmd[9] = 0xf8;
-
-               req->cmd_len = 12;
-               rq->timeout = 60 * HZ;
-               bio = rq->bio;
-
-               blk_execute_rq(cdi->disk, rq, 0);
-               if (scsi_req(rq)->result) {
-                       struct scsi_sense_hdr sshdr;
-
-                       ret = -EIO;
-                       scsi_normalize_sense(req->sense, req->sense_len,
-                                            &sshdr);
-                       cdi->last_sense = sshdr.sense_key;
-               }
-
-               if (blk_rq_unmap_user(bio))
-                       ret = -EFAULT;
-               blk_put_request(rq);
+               else
+                       nr = min(nframes, max_frames);
 
+               ret = cdi->ops->read_cdda_bpc(cdi, ubuf, lba, nr,
+                                             &cdi->last_sense);
                if (ret)
                        break;
 
                nframes -= nr;
                lba += nr;
-               ubuf += len;
+               ubuf += (nr * CD_FRAMESIZE_RAW);
        }
 
        return ret;
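The loop above now leaves the actual READ CD to the driver through a new cdrom_device_ops hook; judging from the call site, its shape is:

	/* Shape of the hook as called above (sketch). Drivers that can do
	 * burst reads implement it and report sense via *last_sense. */
	int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
			     u32 lba, u32 nframes, u8 *last_sense);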
@@ -3357,13 +3302,6 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
        void __user *argp = (void __user *)arg;
        int ret;
 
-       /*
-        * Try the generic SCSI command ioctl's first.
-        */
-       ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-       if (ret != -ENOTTY)
-               return ret;
-
        switch (cmd) {
        case CDROMMULTISESSION:
                return cdrom_ioctl_multisession(cdi, argp);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index afec40d..9776b75 100644
@@ -159,7 +159,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
 {
        domain->sig_type = IB_SIG_TYPE_T10_DIF;
        domain->sig.dif.pi_interval = scsi_prot_interval(sc);
-       domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
+       domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
        /*
         * At the moment we hard code those, but in the future
         * we will take them from sc.
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b44cbb8..b566f7c 100644
@@ -949,7 +949,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
                        sector_t sector_off = mr_status.sig_err.sig_err_offset;
 
                        sector_div(sector_off, sector_size + 8);
-                       *sector = scsi_get_lba(iser_task->sc) + sector_off;
+                       *sector = scsi_get_sector(iser_task->sc) + sector_off;
 
                        iser_err("PI error found type %d at sector %llx "
                               "expected %x vs actual %x\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8d5cf5e..71eda91 100644
@@ -1280,7 +1280,7 @@ static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
 {
        struct srp_terminate_context *context = context_ptr;
        struct srp_target_port *target = context->srp_target;
-       u32 tag = blk_mq_unique_tag(scmnd->request);
+       u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
        struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
        struct srp_request *req = scsi_cmd_priv(scmnd);
 
@@ -2152,6 +2152,7 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
 
 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
+       struct request *rq = scsi_cmd_to_rq(scmnd);
        struct srp_target_port *target = host_to_target(shost);
        struct srp_rdma_ch *ch;
        struct srp_request *req = scsi_cmd_priv(scmnd);
@@ -2166,8 +2167,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
        if (unlikely(scmnd->result))
                goto err;
 
-       WARN_ON_ONCE(scmnd->request->tag < 0);
-       tag = blk_mq_unique_tag(scmnd->request);
+       WARN_ON_ONCE(rq->tag < 0);
+       tag = blk_mq_unique_tag(rq);
        ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
 
        spin_lock_irqsave(&ch->lock, flags);
@@ -2791,7 +2792,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 
        if (!req)
                return SUCCESS;
-       tag = blk_mq_unique_tag(scmnd->request);
+       tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
        ch_idx = blk_mq_unique_tag_to_hwq(tag);
        if (WARN_ON_ONCE(ch_idx >= target->ch_count))
                return SUCCESS;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 2e4804e..6da8f6d 100644
@@ -2377,7 +2377,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
                }
        }
 
-       blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
+       blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
 }
 
 /**
@@ -2599,8 +2599,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
        io->fcp_cmnd_length = FCP_CMND_LEN;
 
        if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
-               io->data_block_length = scsi_cmnd->device->sector_size;
-               io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
+               io->data_block_length = scsi_prot_interval(scsi_cmnd);
+               io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
        }
 
        if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
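Like the iser hunk above, this replaces open-coded sector-size and LBA math with the protection-information helpers. Their approximate shape, as assumed here:

	/* Approximate shape of the PI helpers used above (sketch). */
	static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
	{
		return scmd->device->sector_size;
	}

	static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
	{
		return t10_pi_ref_tag(scsi_cmd_to_rq(scmd));
	}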
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 1c6b4e6..a12e352 100644
@@ -1823,7 +1823,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
 
        if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
            SCp->device->simple_tags) {
-               slot->tag = SCp->request->tag;
+               slot->tag = scsi_cmd_to_rq(SCp)->tag;
                CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
                       slot->tag, slot);
        } else {
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index adddcd5..40088dc 100644
@@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
        if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
                blogic_info("  DMA Channel: None, ", adapter);
                if (adapter->bios_addr > 0)
-                       blogic_info("BIOS Address: 0x%lX, ", adapter,
+                       blogic_info("BIOS Address: 0x%X, ", adapter,
                                        adapter->bios_addr);
                else
                        blogic_info("BIOS Address: None, ", adapter);
@@ -3436,7 +3436,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
        int len = 0;
 
        va_start(args, adapter);
-       len = vsprintf(buf, fmt, args);
+       len = vscnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        if (msglevel == BLOGIC_ANNOUNCE_LEVEL) {
                static int msglines = 0;
@@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
                        if (buf[0] != '\n' || len > 1)
                                printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
                } else
-                       printk("%s", buf);
+                       pr_cont("%s", buf);
        } else {
                if (begin) {
                        if (adapter != NULL && adapter->adapter_initd)
@@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
                        else
                                printk("%s%s", blogic_msglevelmap[msglevel], buf);
                } else
-                       printk("%s", buf);
+                       pr_cont("%s", buf);
        }
        begin = (buf[len - 1] == '\n');
 }
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8f44d43..6e3a041 100644
@@ -14,12 +14,16 @@ config RAID_ATTRS
        help
          Provides RAID
 
+config SCSI_COMMON
+       tristate
+
 config SCSI
        tristate "SCSI device support"
        depends on BLOCK
        select SCSI_DMA if HAS_DMA
        select SG_POOL
-       select BLK_SCSI_REQUEST
+       select SCSI_COMMON
+       select BLK_DEV_BSG_COMMON if BLK_DEV_BSG
        help
          If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
          any other SCSI device under Linux, say Y and make sure that you know
@@ -140,6 +144,18 @@ config CHR_DEV_SG
 
          If unsure, say N.
 
+config BLK_DEV_BSG
+       bool "/dev/bsg support (SG v4)"
+       depends on SCSI
+       default y
+       help
+         Saying Y here will enable generic SG (SCSI generic) v4 support
+         for any SCSI device.
+
+         This option is required by UDEV to access device serial numbers, etc.
+
+         If unsure, say Y.
+
 config CHR_DEV_SCH
        tristate "SCSI media changer support"
        depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1748d1e..19814c2 100644
@@ -20,7 +20,7 @@ CFLAGS_aha152x.o =   -DAHA152X_STAT -DAUTOCONF
 obj-$(CONFIG_PCMCIA)           += pcmcia/
 
 obj-$(CONFIG_SCSI)             += scsi_mod.o
-obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
+obj-$(CONFIG_SCSI_COMMON)      += scsi_common.o
 
 obj-$(CONFIG_RAID_ATTRS)       += raid_class.o
 
@@ -168,6 +168,7 @@ scsi_mod-$(CONFIG_BLK_DEBUG_FS)     += scsi_debugfs.o
 scsi_mod-y                     += scsi_trace.o scsi_logging.o
 scsi_mod-$(CONFIG_PM)          += scsi_pm.o
 scsi_mod-$(CONFIG_SCSI_DH)     += scsi_dh.o
+scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o
 
 hv_storvsc-y                   := storvsc_drv.o
 
@@ -183,7 +184,7 @@ CFLAGS_ncr53c8xx.o  := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs  := zalon.o ncr53c8xx.o
 
 # Files generated that shall be removed upon make clean
-clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
+clean-files := 53c700_d.h 53c700_u.h
 
 $(obj)/53c700.o: $(obj)/53c700_d.h
 
@@ -192,9 +193,11 @@ $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
 quiet_cmd_bflags = GEN     $@
        cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
 
-$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h FORCE
        $(call if_changed,bflags)
 
+targets +=  scsi_devinfo_tbl.c
+
 # If you want to play with the firmware, uncomment
 # GENERATE_FIRMWARE := 1
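The FORCE prerequisite together with the targets += line is the usual kbuild idiom for if_changed rules: FORCE lets the rule always run so the saved command line can be compared, and listing the file in targets makes kbuild read the .cmd file that stores it. The generic pattern, with illustrative names:

	# Generic kbuild pattern for a generated file (illustrative names):
	targets += generated.c
	$(obj)/generated.c: $(src)/input.h FORCE
		$(call if_changed,gen)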
 
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 3baadd0..a85589a 100644
@@ -778,7 +778,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
        }
 
 #ifdef CONFIG_SUN3
-       if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+       if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) {
                pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
                       instance->host_no);
                BUG();
@@ -1710,7 +1710,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                count = sun3scsi_dma_xfer_len(hostdata, cmd);
 
                                if (count > 0) {
-                                       if (rq_data_dir(cmd->request))
+                                       if (cmd->sc_data_direction == DMA_TO_DEVICE)
                                                sun3scsi_dma_send_setup(hostdata,
                                                                        cmd->SCp.ptr, count);
                                        else
@@ -2158,7 +2158,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
                count = sun3scsi_dma_xfer_len(hostdata, tmp);
 
                if (count > 0) {
-                       if (rq_data_dir(tmp->request))
+                       if (tmp->sc_data_direction == DMA_TO_DEVICE)
                                sun3scsi_dma_send_setup(hostdata,
                                                        tmp->SCp.ptr, count);
                        else
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 46b8dff..c2d6f0a 100644
@@ -25,7 +25,6 @@
 #include <linux/completion.h>
 #include <linux/blkdev.h>
 #include <linux/uaccess.h>
-#include <linux/highmem.h> /* For flush_kernel_dcache_page */
 #include <linux/module.h>
 
 #include <asm/unaligned.h>
@@ -1505,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
        srbcmd->id       = cpu_to_le32(scmd_id(cmd));
        srbcmd->lun      = cpu_to_le32(cmd->device->lun);
        srbcmd->flags    = cpu_to_le32(flag);
-       timeout = cmd->request->timeout/HZ;
+       timeout = scsi_cmd_to_rq(cmd)->timeout / HZ;
        if (timeout == 0)
                timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
        srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 54eb4d4..deb32c9 100644
@@ -224,7 +224,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
 {
        struct fib *fibptr;
 
-       fibptr = &dev->fibs[scmd->request->tag];
+       fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
        /*
         *      Null out fields that depend on being zero at the start of
         *      each I/O
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index f3377e2..ffb3919 100644
@@ -7423,7 +7423,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
         * Set the srb_tag to the command tag + 1, as
         * srb_tag '0' is used internally by the chip.
         */
-       srb_tag = scp->request->tag + 1;
+       srb_tag = scsi_cmd_to_rq(scp)->tag + 1;
        asc_scsi_q->q2.srb_tag = srb_tag;
 
        /*
@@ -7637,7 +7637,7 @@ static int
 adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
              adv_req_t **adv_reqpp)
 {
-       u32 srb_tag = scp->request->tag;
+       u32 srb_tag = scsi_cmd_to_rq(scp)->tag;
        adv_req_t *reqp;
        ADV_SCSI_REQ_Q *scsiqp;
        int ret;
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 1210e61..584a595 100644
@@ -262,11 +262,12 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd)
        struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
 
        if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+               struct request *rq = scsi_cmd_to_rq(cmd);
                void *buf = acmd->data_buffer;
                struct req_iterator iter;
                struct bio_vec bv;
 
-               rq_for_each_segment(bv, cmd->request, iter) {
+               rq_for_each_segment(bv, rq, iter) {
                        memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
                                       bv.bv_len);
                        buf += bv.bv_len;
@@ -447,11 +448,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 #endif
 
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+               struct request *rq = scsi_cmd_to_rq(cmd);
                void *buf = acmd->data_buffer;
                struct req_iterator iter;
                struct bio_vec bv;
 
-               rq_for_each_segment(bv, cmd->request, iter) {
+               rq_for_each_segment(bv, rq, iter) {
                        memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
                                         bv.bv_len);
                        buf += bv.bv_len;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 462717b..4e899ec 100644
@@ -235,8 +235,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
        wrb = alloc_mcc_wrb(phba, &tag);
        if (!wrb) {
                mutex_unlock(&ctrl->mbox_lock);
-               rc = -ENOMEM;
-               goto free_cmd;
+               return -ENOMEM;
        }
 
        sge = nonembedded_sgl(wrb);
@@ -269,24 +268,6 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
        /* copy the response, if any */
        if (resp_buf)
                memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-       /**
-        * This is special case of NTWK_GET_IF_INFO where the size of
-        * response is not known. beiscsi_if_get_info checks the return
-        * value to free DMA buffer.
-        */
-       if (rc == -EAGAIN)
-               return rc;
-
-       /**
-        * If FW is busy that is driver timed out, DMA buffer is saved with
-        * the tag, only when the cmd completes this buffer is freed.
-        */
-       if (rc == -EBUSY)
-               return rc;
-
-free_cmd:
-       dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
-                           nonemb_cmd->va, nonemb_cmd->dma);
        return rc;
 }
 
@@ -309,6 +290,19 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
        return 0;
 }
 
+static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba,
+                                 struct be_dma_mem *cmd, int rc)
+{
+       /*
+        * If FW is busy the DMA buffer is saved with the tag. When the cmd
+        * completes this buffer is freed.
+        */
+       if (rc == -EBUSY)
+               return;
+
+       dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma);
+}
+
 static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
 {
        struct be_dma_mem *tag_mem;
@@ -344,8 +338,16 @@ int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
                                cpu_to_le32(set_eqd[i].delay_multiplier);
        }
 
-       return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
-                                    __beiscsi_eq_delay_compl, NULL, 0);
+       rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl,
+                                  NULL, 0);
+       if (rc) {
+               /*
+                * Only free on failure. Async cmds are handled like -EBUSY
+                * where it's handled for us.
+                */
+               beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+       }
+       return rc;
 }
 
 /**
@@ -372,6 +374,7 @@ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
                req->hdr.version = 1;
        rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
                                   &resp, sizeof(resp));
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
        if (rc) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -449,7 +452,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
        req->ip_addr.ip_type = ip_type;
        memcpy(req->ip_addr.addr, gw,
               (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
-       return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+       rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val);
+       return rt_val;
 }
 
 int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -499,8 +504,10 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
        req = nonemb_cmd.va;
        req->ip_type = ip_type;
 
-       return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
-                                    resp, sizeof(*resp));
+       rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp,
+                                  sizeof(*resp));
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+       return rc;
 }
 
 static int
@@ -537,6 +544,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
                            "BG_%d : failed to clear IP: rc %d status %d\n",
                            rc, req->ip_params.ip_record.status);
        }
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
        return rc;
 }
 
@@ -581,6 +589,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
                if (req->ip_params.ip_record.status)
                        rc = -EINVAL;
        }
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
        return rc;
 }
 
@@ -608,6 +617,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
                reldhcp->interface_hndl = phba->interface_handle;
                reldhcp->ip_type = ip_type;
                rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
+               beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
                if (rc < 0) {
                        beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                                    "BG_%d : failed to release existing DHCP: %d\n",
@@ -689,7 +699,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
        dhcpreq->interface_hndl = phba->interface_handle;
        dhcpreq->ip_type = ip_type;
        rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
-
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
 exit:
        kfree(if_info);
        return rc;
@@ -762,11 +772,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
                                    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
                                    "BG_%d : Memory Allocation Failure\n");
 
-                               /* Free the DMA memory for the IOCTL issuing */
-                               dma_free_coherent(&phba->ctrl.pdev->dev,
-                                                   nonemb_cmd.size,
-                                                   nonemb_cmd.va,
-                                                   nonemb_cmd.dma);
+                               beiscsi_free_nemb_cmd(phba, &nonemb_cmd,
+                                                     -ENOMEM);
                                return -ENOMEM;
                }
 
@@ -781,15 +788,13 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
                                      nonemb_cmd.va)->actual_resp_len;
                        ioctl_size += sizeof(struct be_cmd_req_hdr);
 
-                       /* Free the previous allocated DMA memory */
-                       dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
-                                           nonemb_cmd.va,
-                                           nonemb_cmd.dma);
-
+                       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
                        /* Free the virtual memory */
                        kfree(*if_info);
-               } else
+               } else {
+                       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
                        break;
+               }
        } while (true);
        return rc;
 }
@@ -806,8 +811,9 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
        if (rc)
                return rc;
 
-       return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
-                                    nic, sizeof(*nic));
+       rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic));
+       beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
+       return rc;
 }
 
 static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 43e8a1d..5521469 100644
@@ -1918,7 +1918,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
 
        spin_unlock(&session->back_lock);
 
-       p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
+       p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc)));
        spin_lock(&p->p_work_lock);
        if (unlikely(!p->iothread)) {
                rc = -EINVAL;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index fc7197a..2701290 100644
@@ -618,6 +618,12 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
        return 0;
 }
 
+struct changer_element_status32 {
+       int             ces_type;
+       compat_uptr_t   ces_data;
+};
+#define CHIOGSTATUS32  _IOW('c', 8, struct changer_element_status32)
+
 static long ch_ioctl(struct file *file,
                    unsigned int cmd, unsigned long arg)
 {
@@ -748,7 +754,20 @@ static long ch_ioctl(struct file *file,
 
                return ch_gstatus(ch, ces.ces_type, ces.ces_data);
        }
+#ifdef CONFIG_COMPAT
+       case CHIOGSTATUS32:
+       {
+               struct changer_element_status32 ces32;
 
+               if (copy_from_user(&ces32, argp, sizeof(ces32)))
+                       return -EFAULT;
+               if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
+                       return -EINVAL;
+
+               return ch_gstatus(ch, ces32.ces_type,
+                                 compat_ptr(ces32.ces_data));
+       }
+#endif
        case CHIOGELEM:
        {
                struct changer_get_element cge;
@@ -858,59 +877,11 @@ static long ch_ioctl(struct file *file,
        }
 
        default:
-               return scsi_ioctl(ch->device, cmd, argp);
+               return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);
 
        }
 }
 
-#ifdef CONFIG_COMPAT
-
-struct changer_element_status32 {
-       int             ces_type;
-       compat_uptr_t   ces_data;
-};
-#define CHIOGSTATUS32  _IOW('c', 8,struct changer_element_status32)
-
-static long ch_ioctl_compat(struct file * file,
-                           unsigned int cmd, unsigned long arg)
-{
-       scsi_changer *ch = file->private_data;
-       int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
-                                                       file->f_flags & O_NDELAY);
-       if (retval)
-               return retval;
-
-       switch (cmd) {
-       case CHIOGPARAMS:
-       case CHIOGVPARAMS:
-       case CHIOPOSITION:
-       case CHIOMOVE:
-       case CHIOEXCHANGE:
-       case CHIOGELEM:
-       case CHIOINITELEM:
-       case CHIOSVOLTAG:
-               /* compatible */
-               return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-       case CHIOGSTATUS32:
-       {
-               struct changer_element_status32 ces32;
-               unsigned char __user *data;
-
-               if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
-                       return -EFAULT;
-               if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
-                       return -EINVAL;
-
-               data = compat_ptr(ces32.ces_data);
-               return ch_gstatus(ch, ces32.ces_type, data);
-       }
-       default:
-               return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
-
-       }
-}
-#endif
-
 /* ------------------------------------------------------------------------ */
 
 static int ch_probe(struct device *dev)
@@ -1015,9 +986,7 @@ static const struct file_operations changer_fops = {
        .open           = ch_open,
        .release        = ch_release,
        .unlocked_ioctl = ch_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl   = ch_ioctl_compat,
-#endif
+       .compat_ioctl   = compat_ptr_ioctl,
        .llseek         = noop_llseek,
 };
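The open-coded compat handler can go away because, once CHIOGSTATUS32 is handled in the native ioctl, every ch command is compat-clean apart from the pointer argument; compat_ptr_ioctl() supplies exactly that conversion, roughly:

	/* Rough shape of the generic helper wired up above. */
	long compat_ptr_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
	{
		if (!file->f_op->unlocked_ioctl)
			return -ENOIOCTLCMD;

		return file->f_op->unlocked_ioctl(file, cmd,
						  (unsigned long)compat_ptr(arg));
	}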
 
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 56b9ad0..3b2eb6c 100644
@@ -1786,7 +1786,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
        struct csio_scsi_qset *sqset;
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
 
-       sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
+       sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
 
        nr = fc_remote_port_chkready(rport);
        if (nr) {
@@ -1989,13 +1989,13 @@ inval_scmnd:
                csio_info(hw,
                        "Aborted SCSI command to (%d:%llu) tag %u\n",
                        cmnd->device->id, cmnd->device->lun,
-                       cmnd->request->tag);
+                       scsi_cmd_to_rq(cmnd)->tag);
                return SUCCESS;
        } else {
                csio_info(hw,
                        "Failed to abort SCSI command, (%d:%llu) tag %u\n",
                        cmnd->device->id, cmnd->device->lun,
-                       cmnd->request->tag);
+                       scsi_cmd_to_rq(cmnd)->tag);
                return FAILED;
        }
 }
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 222593b..2f18945 100644
@@ -433,7 +433,7 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
                hwq = afu->hwq_rr_count++ % afu->num_hwqs;
                break;
        case HWQ_MODE_TAG:
-               tag = blk_mq_unique_tag(scp->request);
+               tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
                hwq = blk_mq_unique_tag_to_hwq(tag);
                break;
        case HWQ_MODE_CPU:
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index a18a4a0..7af96d1 100644
@@ -652,7 +652,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
        msg[2] = 0;
        msg[3]= 0;
        /* Add 1 to avoid firmware treating it as invalid command */
-       msg[4] = cmd->request->tag + 1;
+       msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
        if (pHba->host)
                spin_lock_irq(pHba->host->host_lock);
        rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
@@ -2236,7 +2236,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
        msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
        msg[2] = 0;
        /* Add 1 to avoid firmware treating it as invalid command */
-       msg[3] = cmd->request->tag + 1;
+       msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
        // Our cards use the transaction context as the tag for queueing
        // Adaptec/DPT Private stuff 
        msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index e0d798d..bb3b460 100644
@@ -780,7 +780,7 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
 {
        struct efct_lio_vport *lio_vport;
        struct efct *efct;
-       int ret = -1;
+       int ret;
        u64 p_wwpn, npiv_wwpn, npiv_wwnn;
        char *p, *pbuf, tmp[128];
        struct efct_lio_vport_list_t *vport_list;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 762cc8b..f8afbfb 100644
@@ -107,7 +107,7 @@ static void fnic_cleanup_io(struct fnic *fnic);
 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
                                            struct scsi_cmnd *sc)
 {
-       u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
+       u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
 
        return &fnic->io_req_lock[hash];
 }
@@ -390,7 +390,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
            (rp->flags & FC_RP_FLAGS_RETRY))
                exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
 
-       fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
+       fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
                                         0, exch_flags, io_req->sgl_cnt,
                                         SCSI_SENSE_BUFFERSIZE,
                                         io_req->sgl_list_pa,
@@ -422,6 +422,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
  */
 static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
+       const int tag = scsi_cmd_to_rq(sc)->tag;
        struct fc_lport *lp = shost_priv(sc->device->host);
        struct fc_rport *rport;
        struct fnic_io_req *io_req = NULL;
@@ -511,8 +512,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        sg_count = scsi_dma_map(sc);
        if (sg_count < 0) {
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
-                         sc->request->tag, sc, 0, sc->cmnd[0],
-                         sg_count, CMD_STATE(sc));
+                         tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
                mempool_free(io_req, fnic->io_req_pool);
                goto out;
        }
@@ -571,7 +571,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                 * refetch the pointer under the lock.
                 */
                FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
-                         sc->request->tag, sc, 0, 0, 0,
+                         tag, sc, 0, 0, 0,
                          (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                CMD_SP(sc) = NULL;
@@ -603,8 +603,7 @@ out:
                        sc->cmnd[5]);
 
        FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
-                 sc->request->tag, sc, io_req,
-                 sg_count, cmd_trace,
+                 tag, sc, io_req, sg_count, cmd_trace,
                  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
 
        /* Only if we issued the IO will we hold the io lock */
@@ -1364,6 +1363,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
 static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
                                 bool reserved)
 {
+       const int tag = scsi_cmd_to_rq(sc)->tag;
        struct fnic *fnic = data;
        struct fnic_io_req *io_req;
        unsigned long flags = 0;
@@ -1371,7 +1371,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
        unsigned long start_time = 0;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 
-       io_lock = fnic_io_lock_tag(fnic, sc->request->tag);
+       io_lock = fnic_io_lock_tag(fnic, tag);
        spin_lock_irqsave(io_lock, flags);
 
        io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1413,7 +1413,7 @@ cleanup_scsi_cmd:
        sc->result = DID_TRANSPORT_DISRUPTED << 16;
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
-                     sc->request->tag, sc, (jiffies - start_time));
+                     tag, sc, jiffies - start_time);
 
        if (atomic64_read(&fnic->io_cmpl_skip))
                atomic64_dec(&fnic->io_cmpl_skip);
@@ -1425,10 +1425,10 @@ cleanup_scsi_cmd:
                if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
                        shost_printk(KERN_ERR, fnic->lport->host,
                                     "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
-                                    sc->request->tag, sc);
+                                    tag, sc);
 
                FNIC_TRACE(fnic_cleanup_io,
-                          sc->device->host->host_no, sc->request->tag, sc,
+                          sc->device->host->host_no, tag, sc,
                           jiffies_to_msecs(jiffies - start_time),
                           0, ((u64)sc->cmnd[0] << 32 |
                               (u64)sc->cmnd[2] << 24 |
@@ -1566,7 +1566,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
 {
        struct fnic_rport_abort_io_iter_data *iter_data = data;
        struct fnic *fnic = iter_data->fnic;
-       int abt_tag = sc->request->tag;
+       int abt_tag = scsi_cmd_to_rq(sc)->tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1727,6 +1727,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
  */
 int fnic_abort_cmd(struct scsi_cmnd *sc)
 {
+       struct request *const rq = scsi_cmd_to_rq(sc);
        struct fc_lport *lp;
        struct fnic *fnic;
        struct fnic_io_req *io_req = NULL;
@@ -1741,7 +1742,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        struct abort_stats *abts_stats;
        struct terminate_stats *term_stats;
        enum fnic_ioreq_state old_ioreq_state;
-       int tag;
+       const int tag = rq->tag;
        unsigned long abt_issued_time;
        DECLARE_COMPLETION_ONSTACK(tm_done);
 
@@ -1757,7 +1758,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        term_stats = &fnic->fnic_stats.term_stats;
 
        rport = starget_to_rport(scsi_target(sc->device));
-       tag = sc->request->tag;
        FNIC_SCSI_DBG(KERN_DEBUG,
                fnic->lport->host,
                "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
@@ -1842,8 +1842,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        /* Now queue the abort command to firmware */
        int_to_scsilun(sc->device->lun, &fc_lun);
 
-       if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
-                                   fc_lun.scsi_lun, io_req)) {
+       if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
+                                   io_req)) {
                spin_lock_irqsave(io_lock, flags);
                if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                        CMD_STATE(sc) = old_ioreq_state;
@@ -1943,8 +1943,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        }
 
 fnic_abort_cmd_end:
-       FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
-                 sc->request->tag, sc,
+       FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
                  jiffies_to_msecs(jiffies - start_time),
                  0, ((u64)sc->cmnd[0] << 32 |
                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
@@ -1994,7 +1993,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
        /* fill in the lun info */
        int_to_scsilun(sc->device->lun, &fc_lun);
 
-       fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
+       fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
                                     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
                                     fc_lun.scsi_lun, io_req->port_id,
                                     fnic->config.ra_tov, fnic->config.ed_tov);
@@ -2025,7 +2024,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
        struct fnic_pending_aborts_iter_data *iter_data = data;
        struct fnic *fnic = iter_data->fnic;
        struct scsi_device *lun_dev = iter_data->lun_dev;
-       int abt_tag = sc->request->tag;
+       int abt_tag = scsi_cmd_to_rq(sc)->tag;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -2206,14 +2205,15 @@ clean_pending_aborts_end:
 static inline int
 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-       struct request_queue *q = sc->request->q;
+       struct request *rq = scsi_cmd_to_rq(sc);
+       struct request_queue *q = rq->q;
        struct request *dummy;
 
        dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(dummy))
                return SCSI_NO_TAG;
 
-       sc->tag = sc->request->tag = dummy->tag;
+       rq->tag = dummy->tag;
        sc->host_scribble = (unsigned char *)dummy;
 
        return dummy->tag;
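
The dummy request allocated here exists only to reserve a block layer tag for a device reset issued outside the normal command path. The matching teardown is not in this hunk; sketched from the surrounding driver, it frees the request through the pointer stashed in host_scribble:

	static inline void
	fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
	{
		struct request *dummy = (struct request *)sc->host_scribble;

		blk_mq_free_request(dummy);
	}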
@@ -2238,6 +2238,7 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
  */
 int fnic_device_reset(struct scsi_cmnd *sc)
 {
+       struct request *rq = scsi_cmd_to_rq(sc);
        struct fc_lport *lp;
        struct fnic *fnic;
        struct fnic_io_req *io_req = NULL;
@@ -2250,7 +2251,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        struct scsi_lun fc_lun;
        struct fnic_stats *fnic_stats;
        struct reset_stats *reset_stats;
-       int tag = 0;
+       int tag = rq->tag;
        DECLARE_COMPLETION_ONSTACK(tm_done);
        int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
        bool new_sc = 0;
@@ -2284,7 +2285,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
        /* Allocate tag if not present */
 
-       tag = sc->request->tag;
        if (unlikely(tag < 0)) {
                /*
                 * Really should fix the midlayer to pass in a proper
@@ -2458,8 +2458,7 @@ fnic_device_reset_clean:
        }
 
 fnic_device_reset_end:
-       FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
-                 sc->request->tag, sc,
+       FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
                  jiffies_to_msecs(jiffies - start_time),
                  0, ((u64)sc->cmnd[0] << 32 |
                  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
index 3a903e8..9515c45 100644
@@ -185,7 +185,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
        void *bitmap = hisi_hba->slot_index_tags;
 
        if (scsi_cmnd)
-               return scsi_cmnd->request->tag;
+               return scsi_cmd_to_rq(scsi_cmnd)->tag;
 
        spin_lock(&hisi_hba->lock);
        index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
@@ -449,7 +449,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
                unsigned int dq_index;
                u32 blk_tag;
 
-               blk_tag = blk_mq_unique_tag(scmd->request);
+               blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
                dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
                *dq_pointer = dq = &hisi_hba->dq[dq_index];
        } else {
index a4885d0..3ab669d 100644
@@ -1153,7 +1153,7 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
 {
        unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
        unsigned int interval = scsi_prot_interval(scsi_cmnd);
-       u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
+       u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd));
 
        switch (prot_op) {
        case SCSI_PROT_READ_INSERT:
index f135a10..3faa87f 100644
@@ -5686,7 +5686,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
        /* Get the ptr to our adapter structure out of cmd->host. */
        h = sdev_to_hba(cmd->device);
 
-       BUG_ON(cmd->request->tag < 0);
+       BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
 
        dev = cmd->device->hostdata;
        if (!dev) {
@@ -5729,7 +5729,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
         *       and is therefore a brand-new command.
         */
        if (likely(cmd->retries == 0 &&
-                       !blk_rq_is_passthrough(cmd->request) &&
+                       !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
                        h->acciopath_status)) {
                /* Submit with the retry_pending flag unset. */
                rc = hpsa_ioaccel_submit(h, c, cmd, false);
@@ -5894,7 +5894,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h)
  */
 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
 {
-       int idx = scmd->request->tag;
+       int idx = scsi_cmd_to_rq(scmd)->tag;
 
        if (idx < 0)
                return idx;
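
hpsa uses the block layer tag directly as its command-slot index. The tail of this helper falls outside the hunk window; for non-negative tags it offsets past the slots reserved for driver-internal commands, roughly:

	/* Offset to leave room for driver-internal commands. */
	return idx += HPSA_NRESERVED_CMDS;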
index 935b01e..1f1586a 100644
@@ -1926,7 +1926,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        struct ibmvfc_cmd *vfc_cmd;
        struct ibmvfc_fcp_cmd_iu *iu;
        struct ibmvfc_event *evt;
-       u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
+       u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
        u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
        u16 scsi_channel;
        int rc;
@@ -1956,7 +1956,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
 
        if (cmnd->flags & SCMD_TAGGED) {
-               vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
+               vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
                iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
        }
 
@@ -3292,14 +3292,18 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
        int done = 0;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       if (time >= (init_timeout * HZ)) {
+       if (!vhost->scan_timeout)
+               done = 1;
+       else if (time >= (vhost->scan_timeout * HZ)) {
                dev_info(vhost->dev, "Scan taking longer than %d seconds, "
-                        "continuing initialization\n", init_timeout);
+                        "continuing initialization\n", vhost->scan_timeout);
                done = 1;
        }
 
-       if (vhost->scan_complete)
+       if (vhost->scan_complete) {
+               vhost->scan_timeout = init_timeout;
                done = 1;
+       }
        spin_unlock_irqrestore(shost->host_lock, flags);
        return done;
 }
@@ -6084,6 +6088,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
        vhost->using_channels = 0;
        vhost->do_enquiry = 1;
+       vhost->scan_timeout = 0;
 
        strcpy(vhost->partition_name, "UNKNOWN");
        init_waitqueue_head(&vhost->work_wait_q);
index 92fb889..3718406 100644
@@ -876,6 +876,7 @@ struct ibmvfc_host {
        int reinit;
        int delay_init;
        int scan_complete;
+       int scan_timeout;
        int logged_in;
        int mq_enabled;
        int using_channels;
index e6a3eaa..50df7dd 100644
@@ -1072,7 +1072,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
        init_event_struct(evt_struct,
                          handle_cmd_rsp,
                          VIOSRP_SRP_FORMAT,
-                         cmnd->request->timeout/HZ);
+                         scsi_cmd_to_rq(cmnd)->timeout / HZ);
 
        evt_struct->cmnd = cmnd;
        evt_struct->cmnd_done = done;
index 8b33c98..cdd94fb 100644
@@ -3735,7 +3735,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
                scb->cmd.dcdb.segment_4G = 0;
                scb->cmd.dcdb.enhanced_sg = 0;
 
-               TimeOut = scb->scsi_cmd->request->timeout;
+               TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout;
 
                if (ha->subsys->param[4] & 0x00100000) {        /* If NEW Tape DCDB is Supported */
                        if (!scb->sg_len) {
index e1ff794..fcaa84a 100644
@@ -341,7 +341,7 @@ static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
        tc->reserved_E8_0 = 0;
 
        if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
-               tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
+               tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
        else if (type & SCSI_PROT_DIF_TYPE3)
                tc->ref_tag_seed_gen = 0;
 }
@@ -369,7 +369,7 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
        tc->app_tag_gen = 0;
 
        if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
-               tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
+               tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
        else if (type & SCSI_PROT_DIF_TYPE3)
                tc->ref_tag_seed_verify = 0;
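
scsi_prot_ref_tag() replaces the open-coded "LBA & 0xffffffff", which conflated the 512-byte sector position with the protection-interval reference tag. The new midlayer helper derives the tag from the request position scaled by the integrity interval; its shape is roughly:

	static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
	{
		struct request *rq = blk_mq_rq_from_pdu(scmd);

		return t10_pi_ref_tag(rq);
	}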
 
index 052ee3a..c640535 100644
@@ -10,7 +10,6 @@ config SCSI_SAS_LIBSAS
        tristate "SAS Domain Transport Attributes"
        depends on SCSI
        select SCSI_SAS_ATTRS
-       select BLK_DEV_BSGLIB
        help
          This provides transport specific helpers for SAS drivers which
          use the domain device construct (like the aic94xxx).
index e63a54f..9dc3273 100644
@@ -18,4 +18,4 @@ libsas-y +=  sas_init.o     \
 libsas-$(CONFIG_SCSI_SAS_ATA) +=       sas_ata.o
 libsas-$(CONFIG_SCSI_SAS_HOST_SMP) +=  sas_host_smp.o
 
-ccflags-y := -DDEBUG
+ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi
index 4aa1fda..a315715 100644
@@ -20,8 +20,8 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
 #include <scsi/scsi_eh.h>
 
 static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
@@ -596,7 +596,7 @@ void sas_ata_task_abort(struct sas_task *task)
 
        /* Bounce SCSI-initiated commands to the SCSI EH */
        if (qc->scsicmd) {
-               blk_abort_request(qc->scsicmd->request);
+               blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
                return;
        }
 
index dd20541..12e1e36 100644
@@ -16,7 +16,7 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
 
 /* ---------- Basic task processing for discovery purposes ---------- */
 
index e006885..c2150a8 100644
@@ -18,7 +18,7 @@
 #include <scsi/sas_ata.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
 
 static int sas_discover_expander(struct domain_device *dev);
 static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
index eca2a6b..32cdc96 100644
@@ -14,7 +14,7 @@
 
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
 
 static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
                                  u8 phy_id)
index 2b0f98c..80592f5 100644
@@ -19,7 +19,7 @@
 
 #include "sas_internal.h"
 
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
 
 static struct kmem_cache *sas_task_cache;
 static struct kmem_cache *sas_event_cache;
index 4ca4b1f..a0d592d 100644
@@ -10,7 +10,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
 
 /* ---------- Phy events ---------- */
 
index e3d03d7..67b429d 100644
@@ -10,7 +10,7 @@
 
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"
 
 static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
 {
index ee44a0d..08ffb87 100644
@@ -22,9 +22,9 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
-#include "../scsi_priv.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
+#include "scsi_priv.h"
 
 #include <linux/err.h>
 #include <linux/blkdev.h>
@@ -908,7 +908,7 @@ void sas_task_abort(struct sas_task *task)
        if (dev_is_sata(task->dev))
                sas_ata_task_abort(task);
        else
-               blk_abort_request(sc->request);
+               blk_abort_request(scsi_cmd_to_rq(sc));
 }
 
 int sas_slave_alloc(struct scsi_device *sdev)
index 1702886..befeb7c 100644
@@ -114,6 +114,12 @@ struct lpfc_sli2_slim;
 #define LPFC_MBX_NO_WAIT       0
 #define LPFC_MBX_WAIT          1
 
+#define LPFC_CFG_PARAM_MAGIC_NUM 0xFEAA0005
+#define LPFC_PORT_CFG_NAME "/cfg/port.cfg"
+
+#define lpfc_rangecheck(val, min, max) \
+       ((uint)(val) >= (uint)(min) && (val) <= (max))
+
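
lpfc_rangecheck moves out of lpfc_attr.c (the static inline it replaces is deleted further down) and into lpfc.h as a macro so any lpfc source file can use it. Note that the macro evaluates val twice, so callers should avoid arguments with side effects; a typical (illustrative) call site:

	if (!lpfc_rangecheck(val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO))
		return -EINVAL;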
 enum lpfc_polling_flags {
        ENABLE_FCP_RING_POLLING = 0x1,
        DISABLE_FCP_RING_INT    = 0x2
@@ -403,6 +409,160 @@ struct lpfc_trunk_link  {
                                     link3;
 };
 
+/* Format of congestion module parameters */
+struct lpfc_cgn_param {
+       uint32_t cgn_param_magic;
+       uint8_t  cgn_param_version;     /* version 1 */
+       uint8_t  cgn_param_mode;        /* 0=off 1=managed 2=monitor only */
+#define LPFC_CFG_OFF           0
+#define LPFC_CFG_MANAGED       1
+#define LPFC_CFG_MONITOR       2
+       uint8_t  cgn_rsvd1;
+       uint8_t  cgn_rsvd2;
+       uint8_t  cgn_param_level0;
+       uint8_t  cgn_param_level1;
+       uint8_t  cgn_param_level2;
+       uint8_t  byte11;
+       uint8_t  byte12;
+       uint8_t  byte13;
+       uint8_t  byte14;
+       uint8_t  byte15;
+};
+
+/* Max number of days of congestion data */
+#define LPFC_MAX_CGN_DAYS 10
+
+/* Format of congestion buffer info
+ * This structure defines memory that's allocated and registered with
+ * the HBA firmware. When adding or removing fields from this structure,
+ * the alignment must match the HBA firmware.
+ */
+
+struct lpfc_cgn_info {
+       /* Header */
+       __le16   cgn_info_size;         /* is sizeof(struct lpfc_cgn_info) */
+       uint8_t  cgn_info_version;      /* represents format of structure */
+#define LPFC_CGN_INFO_V1       1
+#define LPFC_CGN_INFO_V2       2
+#define LPFC_CGN_INFO_V3       3
+       uint8_t  cgn_info_mode;         /* 0=off 1=managed 2=monitor only */
+       uint8_t  cgn_info_detect;
+       uint8_t  cgn_info_action;
+       uint8_t  cgn_info_level0;
+       uint8_t  cgn_info_level1;
+       uint8_t  cgn_info_level2;
+
+       /* Start Time */
+       uint8_t  cgn_info_month;
+       uint8_t  cgn_info_day;
+       uint8_t  cgn_info_year;
+       uint8_t  cgn_info_hour;
+       uint8_t  cgn_info_minute;
+       uint8_t  cgn_info_second;
+
+       /* minute / hours / daily indices */
+       uint8_t  cgn_index_minute;
+       uint8_t  cgn_index_hour;
+       uint8_t  cgn_index_day;
+
+       __le16   cgn_warn_freq;
+       __le16   cgn_alarm_freq;
+       __le16   cgn_lunq;
+       uint8_t  cgn_pad1[8];
+
+       /* Driver Information */
+       __le16   cgn_drvr_min[60];
+       __le32   cgn_drvr_hr[24];
+       __le32   cgn_drvr_day[LPFC_MAX_CGN_DAYS];
+
+       /* Congestion Warnings */
+       __le16   cgn_warn_min[60];
+       __le32   cgn_warn_hr[24];
+       __le32   cgn_warn_day[LPFC_MAX_CGN_DAYS];
+
+       /* Latency Information */
+       __le32   cgn_latency_min[60];
+       __le32   cgn_latency_hr[24];
+       __le32   cgn_latency_day[LPFC_MAX_CGN_DAYS];
+
+       /* Bandwidth Information */
+       __le16   cgn_bw_min[60];
+       __le16   cgn_bw_hr[24];
+       __le16   cgn_bw_day[LPFC_MAX_CGN_DAYS];
+
+       /* Congestion Alarms */
+       __le16   cgn_alarm_min[60];
+       __le32   cgn_alarm_hr[24];
+       __le32   cgn_alarm_day[LPFC_MAX_CGN_DAYS];
+
+       /* Start of congestion statistics */
+       uint8_t  cgn_stat_npm;          /* Notifications per minute */
+
+       /* Start Time */
+       uint8_t  cgn_stat_month;
+       uint8_t  cgn_stat_day;
+       uint8_t  cgn_stat_year;
+       uint8_t  cgn_stat_hour;
+       uint8_t  cgn_stat_minute;
+       uint8_t  cgn_pad2[2];
+
+       __le32   cgn_notification;
+       __le32   cgn_peer_notification;
+       __le32   link_integ_notification;
+       __le32   delivery_notification;
+
+       uint8_t  cgn_stat_cgn_month; /* Last congestion notification FPIN */
+       uint8_t  cgn_stat_cgn_day;
+       uint8_t  cgn_stat_cgn_year;
+       uint8_t  cgn_stat_cgn_hour;
+       uint8_t  cgn_stat_cgn_min;
+       uint8_t  cgn_stat_cgn_sec;
+
+       uint8_t  cgn_stat_peer_month; /* Last peer congestion FPIN */
+       uint8_t  cgn_stat_peer_day;
+       uint8_t  cgn_stat_peer_year;
+       uint8_t  cgn_stat_peer_hour;
+       uint8_t  cgn_stat_peer_min;
+       uint8_t  cgn_stat_peer_sec;
+
+       uint8_t  cgn_stat_lnk_month; /* Last link integrity FPIN */
+       uint8_t  cgn_stat_lnk_day;
+       uint8_t  cgn_stat_lnk_year;
+       uint8_t  cgn_stat_lnk_hour;
+       uint8_t  cgn_stat_lnk_min;
+       uint8_t  cgn_stat_lnk_sec;
+
+       uint8_t  cgn_stat_del_month; /* Last delivery notification FPIN */
+       uint8_t  cgn_stat_del_day;
+       uint8_t  cgn_stat_del_year;
+       uint8_t  cgn_stat_del_hour;
+       uint8_t  cgn_stat_del_min;
+       uint8_t  cgn_stat_del_sec;
+#define LPFC_CGN_STAT_SIZE     48
+#define LPFC_CGN_DATA_SIZE     (sizeof(struct lpfc_cgn_info) -  \
+                               LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+
+       __le32   cgn_info_crc;
+#define LPFC_CGN_CRC32_MAGIC_NUMBER    0x1EDC6F41
+#define LPFC_CGN_CRC32_SEED            0xFFFFFFFF
+};
+
+#define LPFC_CGN_INFO_SZ       (sizeof(struct lpfc_cgn_info) -  \
+                               sizeof(uint32_t))
+
+struct lpfc_cgn_stat {
+       atomic64_t total_bytes;
+       atomic64_t rcv_bytes;
+       atomic64_t rx_latency;
+#define LPFC_CGN_NOT_SENT      0xFFFFFFFFFFFFFFFFLL
+       atomic_t rx_io_cnt;
+};
+
+struct lpfc_cgn_acqe_stat {
+       atomic64_t alarm;
+       atomic64_t warn;
+};
+
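
Because lpfc_cgn_info mirrors a firmware-defined layout byte for byte, the derived size macros only hold if the structure is laid out exactly as written, with cgn_info_crc in the last four bytes. An illustrative compile-time check, not part of the patch, assuming <linux/build_bug.h> and <linux/stddef.h>:

	static_assert(sizeof(struct lpfc_cgn_info) ==
		      LPFC_CGN_INFO_SZ + sizeof(uint32_t));
	static_assert(offsetof(struct lpfc_cgn_info, cgn_info_crc) ==
		      LPFC_CGN_INFO_SZ);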
 struct lpfc_vport {
        struct lpfc_hba *phba;
        struct list_head listentry;
@@ -869,7 +1029,10 @@ struct lpfc_hba {
                                         * capability
                                         */
 #define HBA_FLOGI_ISSUED       0x100000 /* FLOGI was issued */
+#define HBA_CGN_RSVD1          0x200000 /* Reserved CGN flag */
+#define HBA_CGN_DAY_WRAP       0x400000 /* HBA Congestion info day wraps */
 #define HBA_DEFER_FLOGI                0x800000 /* Defer FLOGI till read_sparm cmpl */
+#define HBA_SETUP              0x1000000 /* Signifies HBA setup is completed */
 #define HBA_NEEDS_CFG_PORT     0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
 #define HBA_HBEAT_INP          0x4000000 /* mbox HBEAT is in progress */
 #define HBA_HBEAT_TMO          0x8000000 /* HBEAT initiated after timeout */
@@ -922,7 +1085,6 @@ struct lpfc_hba {
        uint8_t  wwpn[8];
        uint32_t RandomData[7];
        uint8_t  fcp_embed_io;
-       uint8_t  nvme_support;  /* Firmware supports NVME */
        uint8_t  nvmet_support; /* driver supports NVMET */
 #define LPFC_NVMET_MAX_PORTS   32
        uint8_t  mds_diags_support;
@@ -1121,6 +1283,7 @@ struct lpfc_hba {
        uint32_t total_iocbq_bufs;
        struct list_head active_rrq_list;
        spinlock_t hbalock;
+       struct work_struct  unblock_request_work; /* SCSI layer unblock IOs */
 
        /* dma_mem_pools */
        struct dma_pool *lpfc_sg_dma_buf_pool;
@@ -1194,6 +1357,8 @@ struct lpfc_hba {
 #ifdef LPFC_HDWQ_LOCK_STAT
        struct dentry *debug_lockstat;
 #endif
+       struct dentry *debug_cgn_buffer;
+       struct dentry *debug_rx_monitor;
        struct dentry *debug_ras_log;
        atomic_t nvmeio_trc_cnt;
        uint32_t nvmeio_trc_size;
@@ -1344,6 +1509,76 @@ struct lpfc_hba {
        uint64_t ktime_seg10_min;
        uint64_t ktime_seg10_max;
 #endif
+       /* CMF objects */
+       struct lpfc_cgn_stat __percpu *cmf_stat;
+       uint32_t cmf_interval_rate;  /* timer interval limit in ms */
+       uint32_t cmf_timer_cnt;
+#define LPFC_CMF_INTERVAL 90
+       uint64_t cmf_link_byte_count;
+       uint64_t cmf_max_line_rate;
+       uint64_t cmf_max_bytes_per_interval;
+       uint64_t cmf_last_sync_bw;
+#define  LPFC_CMF_BLK_SIZE 512
+       struct hrtimer cmf_timer;
+       atomic_t cmf_bw_wait;
+       atomic_t cmf_busy;
+       atomic_t cmf_stop_io;      /* To block request and stop IO's */
+       uint32_t cmf_active_mode;
+       uint32_t cmf_info_per_interval;
+#define LPFC_MAX_CMF_INFO 32
+       struct timespec64 cmf_latency;  /* Interval congestion timestamp */
+       uint32_t cmf_last_ts;   /* Interval congestion time (ms) */
+       uint32_t cmf_active_info;
+
+       /* Signal / FPIN handling for Congestion Mgmt */
+       u8 cgn_reg_fpin;           /* Negotiated value from RDF */
+       u8 cgn_init_reg_fpin;      /* Initial value from READ_CONFIG */
+#define LPFC_CGN_FPIN_NONE     0x0
+#define LPFC_CGN_FPIN_WARN     0x1
+#define LPFC_CGN_FPIN_ALARM    0x2
+#define LPFC_CGN_FPIN_BOTH     (LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
+
+       u8 cgn_reg_signal;          /* Negotiated value from EDC */
+       u8 cgn_init_reg_signal;     /* Initial value from READ_CONFIG */
+               /* cgn_reg_signal and cgn_init_reg_signal use
+                * enum fc_edc_cg_signal_cap_types
+                */
+       u16 cgn_fpin_frequency;
+#define LPFC_FPIN_INIT_FREQ    0xffff
+       u32 cgn_sig_freq;
+       u32 cgn_acqe_cnt;
+
+       /* RX monitor handling for CMF */
+       struct rxtable_entry *rxtable;  /* RX_monitor information */
+       atomic_t rxtable_idx_head;
+#define LPFC_RXMONITOR_TABLE_IN_USE     (LPFC_MAX_RXMONITOR_ENTRY + 73)
+       atomic_t rxtable_idx_tail;
+       atomic_t rx_max_read_cnt;       /* Maximum read bytes */
+       uint64_t rx_block_cnt;
+
+       /* Congestion parameters from flash */
+       struct lpfc_cgn_param cgn_p;
+
+       /* Statistics counter for ACQE cgn alarms and warnings */
+       struct lpfc_cgn_acqe_stat cgn_acqe_stat;
+
+       /* Congestion buffer information */
+       struct lpfc_dmabuf *cgn_i;      /* Congestion Info buffer */
+       atomic_t cgn_fabric_warn_cnt;   /* Total warning cgn events for info */
+       atomic_t cgn_fabric_alarm_cnt;  /* Total alarm cgn events for info */
+       atomic_t cgn_sync_warn_cnt;     /* Total warning events for SYNC wqe */
+       atomic_t cgn_sync_alarm_cnt;    /* Total alarm events for SYNC wqe */
+       atomic_t cgn_driver_evt_cnt;    /* Total driver cgn events for fmw */
+       atomic_t cgn_latency_evt_cnt;
+       struct timespec64 cgn_daily_ts;
+       atomic64_t cgn_latency_evt;     /* Avg latency per minute */
+       unsigned long cgn_evt_timestamp;
+#define LPFC_CGN_TIMER_TO_MIN   60000 /* ms in a minute */
+       uint32_t cgn_evt_minute;
+#define LPFC_SEC_MIN           60
+#define LPFC_MIN_HOUR          60
+#define LPFC_HOUR_DAY          24
+#define LPFC_MIN_DAY           (LPFC_MIN_HOUR * LPFC_HOUR_DAY)
 
        struct hlist_node cpuhp;        /* used for cpuhp per hba callback */
        struct timer_list cpuhp_poll_timer;
@@ -1364,6 +1599,22 @@ struct lpfc_hba {
        struct dbg_log_ent dbg_log[DBG_LOG_SZ];
 };
 
+#define LPFC_MAX_RXMONITOR_ENTRY       800
+#define LPFC_MAX_RXMONITOR_DUMP                32
+struct rxtable_entry {
+       uint64_t total_bytes;   /* Total no of read bytes requested */
+       uint64_t rcv_bytes;     /* Total no of read bytes completed */
+       uint64_t avg_io_size;
+       uint64_t avg_io_latency; /* Average io latency in microseconds */
+       uint64_t max_read_cnt;  /* Maximum read bytes */
+       uint64_t max_bytes_per_interval;
+       uint32_t cmf_busy;
+       uint32_t cmf_info;      /* CMF_SYNC_WQE info */
+       uint32_t io_cnt;
+       uint32_t timer_utilization;
+       uint32_t timer_interval;
+};
+
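
The rxtable ring is indexed by the rxtable_idx_head/rxtable_idx_tail atomics declared earlier, with LPFC_RXMONITOR_TABLE_IN_USE deliberately out of range so a reader can park the index while dumping. A hypothetical producer, for illustration only (the real logic lives in the CMF timer and debugfs paths):

	static struct rxtable_entry *lpfc_rx_monitor_claim(struct lpfc_hba *phba)
	{
		u32 head = atomic_read(&phba->rxtable_idx_head);

		if (head >= LPFC_MAX_RXMONITOR_ENTRY)
			return NULL;	/* LPFC_RXMONITOR_TABLE_IN_USE: dump active */

		atomic_set(&phba->rxtable_idx_head,
			   (head + 1) % LPFC_MAX_RXMONITOR_ENTRY);
		return &phba->rxtable[head];
	}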
 static inline struct Scsi_Host *
 lpfc_shost_from_vport(struct lpfc_vport *vport)
 {
index eb88aaa..b35bf70 100644
@@ -57,6 +57,8 @@
 #define LPFC_MIN_DEVLOSS_TMO   1
 #define LPFC_MAX_DEVLOSS_TMO   255
 
+#define LPFC_MAX_INFO_TMP_LEN  100
+#define LPFC_INFO_MORE_STR     "\nCould be more info...\n"
 /*
 * Write key size should be a multiple of 4. If write key is changed
  * make sure that library write key is also changed.
@@ -112,6 +114,186 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
        return;
 }
 
+static ssize_t
+lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
+                  char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_cgn_info *cp = NULL;
+       struct lpfc_cgn_stat *cgs;
+       int  len = 0;
+       int cpu;
+       u64 rcv, total;
+       char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
+
+       if (phba->cgn_i)
+               cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+       scnprintf(tmp, sizeof(tmp),
+                 "Congestion Mgmt Info: E2Eattr %d Ver %d "
+                 "CMF %d cnt %d\n",
+                 phba->sli4_hba.pc_sli4_params.mi_ver,
+                 cp ? cp->cgn_info_version : 0,
+                 phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
+
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       if (!phba->sli4_hba.pc_sli4_params.cmf)
+               goto buffer_done;
+
+       switch (phba->cgn_init_reg_signal) {
+       case EDC_CG_SIG_WARN_ONLY:
+               scnprintf(tmp, sizeof(tmp),
+                         "Register: Init:  Signal:WARN  ");
+               break;
+       case EDC_CG_SIG_WARN_ALARM:
+               scnprintf(tmp, sizeof(tmp),
+                         "Register: Init:  Signal:WARN|ALARM  ");
+               break;
+       default:
+               scnprintf(tmp, sizeof(tmp),
+                         "Register: Init:  Signal:NONE  ");
+               break;
+       }
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       switch (phba->cgn_init_reg_fpin) {
+       case LPFC_CGN_FPIN_WARN:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:WARN\n");
+               break;
+       case LPFC_CGN_FPIN_ALARM:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:ALARM\n");
+               break;
+       case LPFC_CGN_FPIN_BOTH:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:WARN|ALARM\n");
+               break;
+       default:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:NONE\n");
+               break;
+       }
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       switch (phba->cgn_reg_signal) {
+       case EDC_CG_SIG_WARN_ONLY:
+               scnprintf(tmp, sizeof(tmp),
+                         "       Current:  Signal:WARN  ");
+               break;
+       case EDC_CG_SIG_WARN_ALARM:
+               scnprintf(tmp, sizeof(tmp),
+                         "       Current:  Signal:WARN|ALARM  ");
+               break;
+       default:
+               scnprintf(tmp, sizeof(tmp),
+                         "       Current:  Signal:NONE  ");
+               break;
+       }
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       switch (phba->cgn_reg_fpin) {
+       case LPFC_CGN_FPIN_WARN:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:WARN  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+               break;
+       case LPFC_CGN_FPIN_ALARM:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:ALARM  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+               break;
+       case LPFC_CGN_FPIN_BOTH:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:WARN|ALARM  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+               break;
+       default:
+               scnprintf(tmp, sizeof(tmp),
+                         "FPIN:NONE  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+               break;
+       }
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
+               switch (phba->cmf_active_mode) {
+               case LPFC_CFG_OFF:
+                       scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
+                       break;
+               case LPFC_CFG_MANAGED:
+                       scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
+                       break;
+               case LPFC_CFG_MONITOR:
+                       scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
+                       break;
+               default:
+                       scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
+               }
+               if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+                       goto buffer_done;
+       }
+
+       switch (phba->cgn_p.cgn_param_mode) {
+       case LPFC_CFG_OFF:
+               scnprintf(tmp, sizeof(tmp), "Config: Mode:Off  ");
+               break;
+       case LPFC_CFG_MANAGED:
+               scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
+               break;
+       case LPFC_CFG_MONITOR:
+               scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
+               break;
+       default:
+               scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
+       }
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       total = 0;
+       rcv = 0;
+       for_each_present_cpu(cpu) {
+               cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+               total += atomic64_read(&cgs->total_bytes);
+               rcv += atomic64_read(&cgs->rcv_bytes);
+       }
+
+       scnprintf(tmp, sizeof(tmp),
+                 "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
+                 atomic_read(&phba->cmf_busy),
+                 phba->cmf_active_info, rcv, total);
+       if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+               goto buffer_done;
+
+       scnprintf(tmp, sizeof(tmp),
+                 "Port_speed:%d  Link_byte_cnt:%ld  "
+                 "Max_byte_per_interval:%ld\n",
+                 lpfc_sli_port_speed_get(phba),
+                 (unsigned long)phba->cmf_link_byte_count,
+                 (unsigned long)phba->cmf_max_bytes_per_interval);
+       strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+       len = strnlen(buf, PAGE_SIZE);
+
+       if (unlikely(len >= (PAGE_SIZE - 1))) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6312 Catching potential buffer "
+                               "overflow > PAGE_SIZE = %lu bytes\n",
+                               PAGE_SIZE);
+               strscpy(buf + PAGE_SIZE - 1 -
+                       strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
+                       LPFC_INFO_MORE_STR,
+                       strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
+                       + 1);
+       }
+       return len;
+}
+
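
The buffer_done tail stamps a visible marker over the end of the page when output is clipped. As a worked example, assuming PAGE_SIZE is 4096: LPFC_INFO_MORE_STR is 23 characters ("\n" plus "Could be more info..." plus "\n"), so the strscpy() destination is buf + 4096 - 1 - 23 = buf + 4072, and the 23 characters plus the terminating NUL occupy bytes 4072..4095, leaving the final byte of the page a NUL.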
 /**
  * lpfc_drvr_version_show - Return the Emulex driver string with version number
  * @dev: class unused variable.
@@ -168,7 +350,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
        char *statep;
        int i;
        int len = 0;
-       char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+       char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
 
        if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
                len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -512,9 +694,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                "6314 Catching potential buffer "
                                "overflow > PAGE_SIZE = %lu bytes\n",
                                PAGE_SIZE);
-               strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
-                       LPFC_NVME_INFO_MORE_STR,
-                       sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
+               strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+                       LPFC_INFO_MORE_STR,
+                       sizeof(LPFC_INFO_MORE_STR) + 1);
        }
 
        return len;
@@ -2248,11 +2430,6 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
        return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
 }
 
-static inline bool lpfc_rangecheck(uint val, uint min, uint max)
-{
-       return val >= min && val <= max;
-}
-
 /**
  * lpfc_enable_bbcr_set: Sets an attribute value.
 * @phba: pointer to the adapter structure.
@@ -2641,6 +2818,7 @@ static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
                   NULL);
+static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
 
 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 #define WWN_SZ 8
@@ -4038,6 +4216,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
        const char *val_buf = buf;
        int err;
        uint32_t prev_val;
+       u8 sli_family, if_type;
 
        if (!strncmp(buf, "nolip ", strlen("nolip "))) {
                nolip = 1;
@@ -4061,13 +4240,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                /*
                 * The 'topology' is not a configurable parameter if :
                 *   - persistent topology enabled
-                *   - G7/G6 with no private loop support
+                *   - ASIC_GEN_NUM >= 0xC, with no private loop support
                 */
-
+               sli_family = bf_get(lpfc_sli_intf_sli_family,
+                                   &phba->sli4_hba.sli_intf);
+               if_type = bf_get(lpfc_sli_intf_if_type,
+                                &phba->sli4_hba.sli_intf);
                if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
-                    (!phba->sli4_hba.pc_sli4_params.pls &&
-                    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
-                    phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
+                   (!phba->sli4_hba.pc_sli4_params.pls &&
+                    (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+                     if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
                    val == 4) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                "3114 Loop mode not supported\n");
@@ -5412,9 +5594,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
 
 /*
 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-# is [0,1]. Default value is 0.
+# is [0,1]. Default value is 1.
 */
-LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
                   "Use ADISC on rediscovery to authenticate FCP devices");
 
 /*
@@ -6146,6 +6328,19 @@ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
  */
 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
 
+/* Signaling module parameters */
+int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+module_param(lpfc_fabric_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
+
+int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
+
+int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail  */
+module_param(lpfc_use_cgn_signal, int, 0444);
+MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
+
 /*
  * lpfc_enable_dpp: Enable DPP on G7
  *       0  = DPP on G7 disabled
@@ -6320,6 +6515,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_enable_bbcr,
        &dev_attr_lpfc_enable_dpp,
        &dev_attr_lpfc_enable_mi,
+       &dev_attr_cmf_info,
        &dev_attr_lpfc_max_vmid,
        &dev_attr_lpfc_vmid_inactivity_timeout,
        &dev_attr_lpfc_vmid_app_header,
@@ -6350,6 +6546,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
        &dev_attr_lpfc_max_scsicmpl_time,
        &dev_attr_lpfc_stat_data_ctrl,
        &dev_attr_lpfc_static_vport,
+       &dev_attr_cmf_info,
        NULL,
 };
 
@@ -6741,6 +6938,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
                case LPFC_LINK_SPEED_128GHZ:
                        fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
                        break;
+               case LPFC_LINK_SPEED_256GHZ:
+                       fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
+                       break;
                default:
                        fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
                        break;
@@ -6908,6 +7108,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
        hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
        hs->error_frames = pmb->un.varRdLnk.crcCnt;
 
+       hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
+       hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
+
        hs->link_failure_count -= lso->link_failure_count;
        hs->loss_of_sync_count -= lso->loss_of_sync_count;
        hs->loss_of_signal_count -= lso->loss_of_signal_count;
@@ -7019,6 +7222,12 @@ lpfc_reset_stats(struct Scsi_Host *shost)
        else
                lso->link_events = (phba->fc_eventTag >> 1);
 
+       atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+       atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+       memset(&shost_to_fc_host(shost)->fpin_stats, 0,
+              sizeof(shost_to_fc_host(shost)->fpin_stats));
+
        psli->stats_start = ktime_get_seconds();
 
        mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -7452,6 +7661,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
        lpfc_enable_mi_init(phba, lpfc_enable_mi);
 
+       phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
+       phba->cmf_active_mode = LPFC_CFG_OFF;
+       if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
+          lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
+               lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+
        if (phba->sli_rev != LPFC_SLI_REV4) {
                /* NVME only supported on SLI4 */
                phba->nvmet_support = 0;
index 38cfe1b..fdf08cb 100644
@@ -5751,6 +5751,92 @@ job_error:
 
 }
 
+static int
+lpfc_get_cgnbuf_info(struct bsg_job *job)
+{
+       struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+       struct lpfc_hba *phba = vport->phba;
+       struct fc_bsg_request *bsg_request = job->request;
+       struct fc_bsg_reply *bsg_reply = job->reply;
+       struct get_cgnbuf_info_req *cgnbuf_req;
+       struct lpfc_cgn_info *cp;
+       uint8_t *cgn_buff;
+       int size, cinfosz;
+       int  rc = 0;
+
+       if (job->request_len < sizeof(struct fc_bsg_request) +
+           sizeof(struct get_cgnbuf_info_req)) {
+               rc = -ENOMEM;
+               goto job_exit;
+       }
+
+       if (!phba->sli4_hba.pc_sli4_params.cmf) {
+               rc = -ENOENT;
+               goto job_exit;
+       }
+
+       if (!phba->cgn_i || !phba->cgn_i->virt) {
+               rc = -ENOENT;
+               goto job_exit;
+       }
+
+       cp = phba->cgn_i->virt;
+       if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
+               rc = -EPERM;
+               goto job_exit;
+       }
+
+       cgnbuf_req = (struct get_cgnbuf_info_req *)
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+       /* For reset or size == 0 */
+       bsg_reply->reply_payload_rcv_len = 0;
+
+       if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
+               lpfc_init_congestion_stat(phba);
+               goto job_exit;
+       }
+
+       /* We don't want to include the CRC at the end */
+       cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
+
+       size = cgnbuf_req->read_size;
+       if (!size)
+               goto job_exit;
+
+       if (size < cinfosz) {
+               /* Just copy back what we can */
+               cinfosz = size;
+               rc = -E2BIG;
+       }
+
+       /* Allocate memory to read congestion info */
+       cgn_buff = vmalloc(cinfosz);
+       if (!cgn_buff) {
+               rc = -ENOMEM;
+               goto job_exit;
+       }
+
+       memcpy(cgn_buff, cp, cinfosz);
+
+       bsg_reply->reply_payload_rcv_len =
+               sg_copy_from_buffer(job->reply_payload.sg_list,
+                                   job->reply_payload.sg_cnt,
+                                   cgn_buff, cinfosz);
+
+       vfree(cgn_buff);
+
+job_exit:
+       bsg_reply->result = rc;
+       if (!rc)
+               bsg_job_done(job, bsg_reply->result,
+                            bsg_reply->reply_payload_rcv_len);
+       else
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "2724 GET CGNBUF error: %d\n", rc);
+       return rc;
+}
+
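
From userspace the new opcode travels over the fc_bsg vendor path. A hypothetical request setup (only the structure and defines below come from lpfc_bsg.h; the values are illustration):

	struct get_cgnbuf_info_req req = {
		.command   = LPFC_BSG_VENDOR_GET_CGNBUF_INFO,
		.read_size = 0x1000,	/* bytes of lpfc_cgn_info to copy back */
		.reset     = 0,		/* LPFC_BSG_CGN_RESET_STAT clears stats */
	};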
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
@@ -5813,6 +5899,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
        case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
                rc = lpfc_get_trunk_info(job);
                break;
+       case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
+               rc = lpfc_get_cgnbuf_info(job);
+               break;
        default:
                rc = -EINVAL;
                bsg_reply->reply_payload_rcv_len = 0;
index 2dc7124..749d6c4 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2010-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -43,6 +43,7 @@
 #define LPFC_BSG_VENDOR_RAS_GET_CONFIG         18
 #define LPFC_BSG_VENDOR_RAS_SET_CONFIG         19
 #define LPFC_BSG_VENDOR_GET_TRUNK_INFO         20
+#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO                21
 
 struct set_ct_event {
        uint32_t command;
@@ -386,6 +387,13 @@ struct get_trunk_info_req {
        uint32_t command;
 };
 
+struct get_cgnbuf_info_req {
+       uint32_t command;
+       uint32_t read_size;
+       uint32_t reset;
+#define LPFC_BSG_CGN_RESET_STAT                1
+};
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED         0
 #define SLI_CONFIG_HANDLED             1
index 737483c..c512f41 100644
@@ -58,6 +58,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
                           uint16_t, uint16_t, bool);
 int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_congestion_buf(struct lpfc_hba *phba);
+int lpfc_unreg_congestion_buf(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
 void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -74,6 +76,20 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
 void lpfc_free_iocb_list(struct lpfc_hba *phba);
 int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                        struct lpfc_queue *drq, int count, int idx);
+uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
+void lpfc_cmf_signal_init(struct lpfc_hba *phba);
+void lpfc_cmf_start(struct lpfc_hba *phba);
+void lpfc_cmf_stop(struct lpfc_hba *phba);
+void lpfc_init_congestion_stat(struct lpfc_hba *phba);
+void lpfc_init_congestion_buf(struct lpfc_hba *phba);
+int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
+uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
+int lpfc_config_cgn_signal(struct lpfc_hba *phba);
+int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
+void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+void lpfc_unblock_requests(struct lpfc_hba *phba);
+void lpfc_block_requests(struct lpfc_hba *phba);
 
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -87,6 +103,8 @@ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
+void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
 void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_set_disctmo(struct lpfc_vport *);
 int  lpfc_can_disctmo(struct lpfc_vport *);
@@ -141,6 +159,8 @@ int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
+int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry);
+void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -213,6 +233,9 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
 
+int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
+                    uint32_t len);
+
 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
 int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
 void lpfc_sli4_poll_hbtimer(struct timer_list *t);
@@ -459,6 +482,9 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
 void lpfc_create_static_vport(struct lpfc_hba *);
 void lpfc_stop_hba_timers(struct lpfc_hba *);
 void lpfc_stop_port(struct lpfc_hba *);
+int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz);
+int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz,
+                        struct Scsi_Host *shost);
 void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
@@ -605,6 +631,10 @@ extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;
 extern unsigned long lpfc_no_hba_reset[];
+extern int lpfc_acqe_cgn_frequency;
+extern int lpfc_fabric_cgn_frequency;
+extern int lpfc_use_cgn_signal;
+
 extern union lpfc_wqe128 lpfc_iread_cmd_template;
 extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
 extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
index 610b6da..dfcb7d4 100644
@@ -2288,6 +2288,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        /* No retry on Vendor, RPA only done on physical port */
                        if (phba->link_flag & LS_CT_VEN_RPA) {
                                phba->link_flag &= ~LS_CT_VEN_RPA;
+                               if (phba->cmf_active_mode == LPFC_CFG_OFF)
+                                       return;
                                lpfc_printf_log(phba, KERN_ERR,
                                                LOG_DISCOVERY | LOG_ELS,
                                                "6460 VEN FDMI RPA failure\n");
@@ -2332,24 +2334,29 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                break;
        case SLI_MGMT_RPA:
                if (vport->port_type == LPFC_PHYSICAL_PORT &&
-                   phba->cfg_enable_mi &&
-                   phba->sli4_hba.pc_sli4_params.mi_ver > LPFC_MIB1_SUPPORT) {
+                   phba->sli4_hba.pc_sli4_params.mi_ver) {
                        /* mi is only for the physical port, no vports */
                        if (phba->link_flag & LS_CT_VEN_RPA) {
                                lpfc_printf_vlog(vport, KERN_INFO,
-                                                LOG_DISCOVERY | LOG_ELS,
+                                                LOG_DISCOVERY | LOG_ELS |
+                                                LOG_CGN_MGMT,
                                                 "6449 VEN RPA FDMI Success\n");
                                phba->link_flag &= ~LS_CT_VEN_RPA;
                                break;
                        }
 
+                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                       "6210 Issue Vendor MI FDMI %x\n",
+                                       phba->sli4_hba.pc_sli4_params.mi_ver);
+
+                       /* CGN is only for the physical port, no vports */
                        if (lpfc_fdmi_cmd(vport, ndlp, cmd,
                                          LPFC_FDMI_VENDOR_ATTR_mi) == 0)
                                phba->link_flag |= LS_CT_VEN_RPA;
                        lpfc_printf_log(phba, KERN_INFO,
                                        LOG_DISCOVERY | LOG_ELS,
                                        "6458 Send MI FDMI:%x Flag x%x\n",
-                                       phba->sli4_hba.pc_sli4_params.mi_value,
+                                       phba->sli4_hba.pc_sli4_params.mi_ver,
                                        phba->link_flag);
                } else {
                        lpfc_printf_log(phba, KERN_INFO,
@@ -2846,6 +2853,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
 
        ae->un.AttrInt = 0;
        if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+               if (phba->lmt & LMT_256Gb)
+                       ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
                if (phba->lmt & LMT_128Gb)
                        ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
                if (phba->lmt & LMT_64Gb)
@@ -2927,6 +2936,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
                case LPFC_LINK_SPEED_128GHZ:
                        ae->un.AttrInt = HBA_PORTSPEED_128GFC;
                        break;
+               case LPFC_LINK_SPEED_256GHZ:
+                       ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+                       break;
                default:
                        ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
                        break;
@@ -3343,7 +3355,7 @@ lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
        ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
        memset(ae, 0, 256);
        sprintf(mibrevision, "ELXE2EM:%04d",
-               phba->sli4_hba.pc_sli4_params.mi_value);
+               phba->sli4_hba.pc_sli4_params.mi_ver);
        strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
        len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
        len += (len & 3) ? (4 - (len & 3)) : 4;
@@ -3884,9 +3896,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 /**
  * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort
  * @vport: pointer to a host virtual N_Port data structure.
- * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
- * cmdcode: FDMI command to send
- * mask: Mask of HBA or PORT Attributes to send
+ * @cmdcode: application server command code to send
+ * @vmid: pointer to vmid info structure
  *
  * Builds and sends a FDMI command using the CT subsystem.
  */
index 6ff85ae..bd6d459 100644 (file)
@@ -5429,6 +5429,180 @@ lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
        return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
+static int
+lpfc_cgn_buffer_open(struct inode *inode, struct file *file)
+{
+       struct lpfc_debug *debug;
+       int rc = -ENOMEM;
+
+       debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+       if (!debug)
+               goto out;
+
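+       /* Text snapshot is built at read time; vmalloc is used since the
+        * buffer does not need physically contiguous pages.
+        */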
+       debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE);
+       if (!debug->buffer) {
+               kfree(debug);
+               goto out;
+       }
+
+       debug->i_private = inode->i_private;
+       file->private_data = debug;
+
+       rc = 0;
+out:
+       return rc;
+}
+
+static ssize_t
+lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
+                    loff_t *ppos)
+{
+       struct lpfc_debug *debug = file->private_data;
+       struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+       char *buffer = debug->buffer;
+       uint32_t *ptr;
+       int cnt, len = 0;
+
+       if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) {
+               len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+                                "Congestion Mgmt is not supported\n");
+               goto out;
+       }
+       ptr = (uint32_t *)phba->cgn_i->virt;
+       len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+                        "Congestion Buffer Header\n");
+       /* Dump the first 32 bytes */
+       cnt = 32;
+       len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+                        "000: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+                        *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+                        *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7));
+       ptr += 8;
+       len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+                        "Congestion Buffer Data\n");
+       while (cnt < sizeof(struct lpfc_cgn_info)) {
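+               /* Stop once another full output line could overflow */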
+               if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+                       len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+                                        "Truncated . . .\n");
+                       break;
+               }
+               len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+                                "%03x: %08x %08x %08x %08x "
+                                "%08x %08x %08x %08x\n",
+                                cnt, *ptr, *(ptr + 1), *(ptr + 2),
+                                *(ptr + 3), *(ptr + 4), *(ptr + 5),
+                                *(ptr + 6), *(ptr + 7));
+               cnt += 32;
+               ptr += 8;
+       }
+out:
+       return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
+
+static int
+lpfc_cgn_buffer_release(struct inode *inode, struct file *file)
+{
+       struct lpfc_debug *debug = file->private_data;
+
+       vfree(debug->buffer);
+       kfree(debug);
+
+       return 0;
+}
+
+static int
+lpfc_rx_monitor_open(struct inode *inode, struct file *file)
+{
+       struct lpfc_rx_monitor_debug *debug;
+       int rc = -ENOMEM;
+
+       debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+       if (!debug)
+               goto out;
+
+       debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+       if (!debug->buffer) {
+               kfree(debug);
+               goto out;
+       }
+
+       debug->i_private = inode->i_private;
+       file->private_data = debug;
+
+       rc = 0;
+out:
+       return rc;
+}
+
+static ssize_t
+lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+                    loff_t *ppos)
+{
+       struct lpfc_rx_monitor_debug *debug = file->private_data;
+       struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+       char *buffer = debug->buffer;
+       struct rxtable_entry *entry;
+       int i, len = 0, head, tail, last, start;
+
+       head = atomic_read(&phba->rxtable_idx_head);
+       while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
+               /* Table is getting updated */
+               msleep(20);
+               head = atomic_read(&phba->rxtable_idx_head);
+       }
+
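+       /* Atomically claim all entries up to head; the next reader
+        * picks up where this snapshot ends.
+        */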
+       tail = atomic_xchg(&phba->rxtable_idx_tail, head);
+       if (!phba->rxtable || head == tail) {
+               len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+                               "Rxtable is empty\n");
+               goto out;
+       }
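+       /* If the ring has wrapped (head behind tail), dump tail..end now
+        * and 0..head in a second pass via the get_table label below.
+        */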
+       last = (head > tail) ?  head : LPFC_MAX_RXMONITOR_ENTRY;
+       start = tail;
+
+       len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+                       "        MaxBPI\t Total Data Cmd  Total Data Cmpl "
+                       "  Latency(us)    Avg IO Size\tMax IO Size   IO cnt "
+                       "Info BWutil(ms)\n");
+get_table:
+       for (i = start; i < last; i++) {
+               entry = &phba->rxtable[i];
+               len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+                               "%3d:%12lld  %12lld\t%12lld\t"
+                               "%8lldus\t%8lld\t%10lld "
+                               "%8d   %2d %2d(%2d)\n",
+                               i, entry->max_bytes_per_interval,
+                               entry->total_bytes,
+                               entry->rcv_bytes,
+                               entry->avg_io_latency,
+                               entry->avg_io_size,
+                               entry->max_read_cnt,
+                               entry->io_cnt,
+                               entry->cmf_info,
+                               entry->timer_utilization,
+                               entry->timer_interval);
+       }
+
+       if (head != last) {
+               start = 0;
+               last = head;
+               goto get_table;
+       }
+out:
+       return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
+
+static int
+lpfc_rx_monitor_release(struct inode *inode, struct file *file)
+{
+       struct lpfc_rx_monitor_debug *debug = file->private_data;
+
+       vfree(debug->buffer);
+       kfree(debug);
+
+       return 0;
+}
+
 #undef lpfc_debugfs_op_disc_trc
 static const struct file_operations lpfc_debugfs_op_disc_trc = {
        .owner =        THIS_MODULE,
@@ -5657,6 +5831,23 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
        .write =        lpfc_idiag_extacc_write,
        .release =      lpfc_idiag_cmd_release,
 };
+#undef lpfc_cgn_buffer_op
+static const struct file_operations lpfc_cgn_buffer_op = {
+       .owner =        THIS_MODULE,
+       .open =         lpfc_cgn_buffer_open,
+       .llseek =       lpfc_debugfs_lseek,
+       .read =         lpfc_cgn_buffer_read,
+       .release =      lpfc_cgn_buffer_release,
+};
+
+#undef lpfc_rx_monitor_op
+static const struct file_operations lpfc_rx_monitor_op = {
+       .owner =        THIS_MODULE,
+       .open =         lpfc_rx_monitor_open,
+       .llseek =       lpfc_debugfs_lseek,
+       .read =         lpfc_rx_monitor_read,
+       .release =      lpfc_rx_monitor_release,
+};
 #endif
 
 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
@@ -5907,6 +6098,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                        goto debug_failed;
                }
 
+               /* Congestion Info Buffer */
+               scnprintf(name, sizeof(name), "cgn_buffer");
+               phba->debug_cgn_buffer =
+                       debugfs_create_file(name, S_IFREG | 0644,
+                                           phba->hba_debugfs_root,
+                                           phba, &lpfc_cgn_buffer_op);
+               if (!phba->debug_cgn_buffer) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                                        "6527 Cannot create debugfs "
+                                        "cgn_buffer\n");
+                       goto debug_failed;
+               }
+
+               /* RX Monitor */
+               scnprintf(name, sizeof(name), "rx_monitor");
+               phba->debug_rx_monitor =
+                       debugfs_create_file(name, S_IFREG | 0644,
+                                           phba->hba_debugfs_root,
+                                           phba, &lpfc_rx_monitor_op);
+               if (!phba->debug_rx_monitor) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                                        "6528 Cannot create debugfs "
+                                        "rx_monitor\n");
+                       goto debug_failed;
+               }
+
                /* RAS log */
                snprintf(name, sizeof(name), "ras_log");
                phba->debug_ras_log =
@@ -6335,6 +6552,12 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
                debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
                phba->debug_hbqinfo = NULL;
 
+               debugfs_remove(phba->debug_cgn_buffer);
+               phba->debug_cgn_buffer = NULL;
+
+               debugfs_remove(phba->debug_rx_monitor);
+               phba->debug_rx_monitor = NULL;
+
                debugfs_remove(phba->debug_ras_log);
                phba->debug_ras_log = NULL;
 
index 7ab6d3b..a5bf71b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -52,6 +52,9 @@
 /* scsistat output buffer size */
 #define LPFC_SCSISTAT_SIZE 8192
 
+/* Congestion Info Buffer size */
+#define LPFC_CGN_BUF_SIZE 8192
+
 #define LPFC_DEBUG_OUT_LINE_SZ 80
 
 /*
@@ -279,6 +282,12 @@ struct lpfc_idiag {
        void *ptr_private;
 };
 
+#define MAX_DEBUGFS_RX_TABLE_SIZE      (100 * LPFC_MAX_RXMONITOR_ENTRY)
+struct lpfc_rx_monitor_debug {
+       char *i_private;
+       char *buffer;
+};
+
 #else
 
 #define lpfc_nvmeio_data(phba, fmt, arg...) \
index 131374a..871b665 100644 (file)
@@ -78,10 +78,11 @@ struct lpfc_node_rrqs {
 };
 
 enum lpfc_fc4_xpt_flags {
-       NLP_WAIT_FOR_UNREG = 0x1,
-       SCSI_XPT_REGD      = 0x2,
-       NVME_XPT_REGD      = 0x4,
-       NLP_XPT_HAS_HH     = 0x8,
+       NLP_XPT_REGD            = 0x1,
+       SCSI_XPT_REGD           = 0x2,
+       NVME_XPT_REGD           = 0x4,
+       NVME_XPT_UNREG_WAIT     = 0x8,
+       NLP_XPT_HAS_HH          = 0x10
 };
 
 struct lpfc_nodelist {
index e481f5f..1254a57 100644 (file)
@@ -56,6 +56,9 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
                                struct lpfc_nodelist *ndlp, uint8_t retry);
 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *iocb);
+static void lpfc_cmpl_els_edc(struct lpfc_hba *phba,
+                             struct lpfc_iocbq *cmdiocb,
+                             struct lpfc_iocbq *rspiocb);
 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
                               struct lpfc_iocbq *);
 
@@ -1664,6 +1667,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
        if (!new_ndlp || (new_ndlp == ndlp))
                return ndlp;
 
+       /*
+        * Unregister from backend if not done yet. Could have been skipped
+        * due to ADISC
+        */
+       lpfc_nlp_unreg_node(vport, new_ndlp);
+
        if (phba->sli_rev == LPFC_SLI_REV4) {
                active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
                                                       GFP_KERNEL);
@@ -2025,9 +2034,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 irsp->un.ulpWord[4]);
 
                /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-               if (lpfc_error_lost_link(irsp))
-                       goto check_plogi;
-               else
+               if (!lpfc_error_lost_link(irsp))
                        lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                                NLP_EVT_CMPL_PLOGI);
 
@@ -2080,7 +2087,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                        NLP_EVT_CMPL_PLOGI);
        }
 
- check_plogi:
        if (disc && vport->num_disc_nodes) {
                /* Check to see if there are more PLOGIs to be sent */
                lpfc_more_plogi(vport);
@@ -2607,6 +2613,14 @@ lpfc_adisc_done(struct lpfc_vport *vport)
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
+
+               /*
+                * If link is down, clear_la and reg_vpi will be done after
+                * flogi following a link up event
+                */
+               if (!lpfc_is_link_up(phba))
+                       return;
+
                /* The ADISCs are complete.  Doesn't matter if they
                 * succeeded or failed because the ADISC completion
                 * routine guarantees to call the state machine and
@@ -2749,12 +2763,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
                                 ndlp->nlp_DID, irsp->ulpStatus,
                                 irsp->un.ulpWord[4]);
-               /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-               if (lpfc_error_lost_link(irsp))
-                       goto check_adisc;
-               else
-                       lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-                                               NLP_EVT_CMPL_ADISC);
+
+               lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+                               NLP_EVT_CMPL_ADISC);
 
                /* As long as this node is not registered with the SCSI or NVMe
                 * transport, it is no longer an active node. Otherwise
@@ -2772,7 +2783,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                lpfc_disc_state_machine(vport, ndlp, cmdiocb,
                                        NLP_EVT_CMPL_ADISC);
 
- check_adisc:
        /* Check to see if there are more ADISCs to be sent */
        if (disc && vport->num_disc_nodes)
                lpfc_more_adisc(vport);
@@ -3253,7 +3263,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                irsp->ulpStatus, irsp->un.ulpWord[4],
                irsp->un.elsreq64.remoteID);
        /* ELS cmd tag <ulpIoTag> completes */
-       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
                         "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
                         "x%x\n",
                         irsp->ulpIoTag, irsp->ulpStatus,
@@ -3279,6 +3289,9 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        case ELS_CMD_SCR:
                                lpfc_issue_els_scr(vport, cmdiocb->retry);
                                break;
+                       case ELS_CMD_EDC:
+                               lpfc_issue_els_edc(vport, cmdiocb->retry);
+                               break;
                        case ELS_CMD_RDF:
                                cmdiocb->context1 = NULL; /* save ndlp refcnt */
                                lpfc_issue_els_rdf(vport, cmdiocb->retry);
@@ -3288,6 +3301,11 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                }
                phba->fc_stat.elsRetryExceeded++;
        }
+       if (cmd == ELS_CMD_EDC) {
+               /* Must be called before checking ulpStatus and returning */
+               lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
+               return;
+       }
        if (irsp->ulpStatus) {
                /* ELS discovery cmd completes with error */
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
@@ -3312,11 +3330,14 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
                for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
                            i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                                "4677 Fabric RDF Notification Grant Data: "
-                                "0x%08x\n",
-                                be32_to_cpu(
-                                       prdf->reg_d1.desc_tags[i]));
+                       lpfc_printf_vlog(vport, KERN_INFO,
+                                        LOG_ELS | LOG_CGN_MGMT,
+                                        "4677 Fabric RDF Notification Grant "
+                                        "Data: 0x%08x Reg: %x %x\n",
+                                        be32_to_cpu(
+                                               prdf->reg_d1.desc_tags[i]),
+                                        phba->cgn_reg_signal,
+                                        phba->cgn_reg_fpin);
        }
 
 out:
@@ -3375,6 +3396,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
        if (phba->sli_rev == LPFC_SLI_REV4) {
                rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
                if (rc) {
+                       lpfc_els_free_iocb(phba, elsiocb);
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
                                         "0937 %s: Failed to reg fc node, rc %d\n",
                                         __func__, rc);
@@ -3413,7 +3435,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
                return 1;
        }
 
-       /* Keep the ndlp just in case RDF is being sent */
        return 0;
 }
 
@@ -3657,28 +3678,15 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
                lpfc_enqueue_node(vport, ndlp);
        }
 
-       /* RDF ELS is not required on an NPIV VN_Port.  */
-       if (vport->port_type == LPFC_NPIV_PORT) {
-               lpfc_nlp_put(ndlp);
+       /* RDF ELS is not required on an NPIV VN_Port. */
+       if (vport->port_type == LPFC_NPIV_PORT)
                return -EACCES;
-       }
 
        elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
                                     ndlp->nlp_DID, ELS_CMD_RDF);
        if (!elsiocb)
                return -ENOMEM;
 
-       if (phba->sli_rev == LPFC_SLI_REV4 &&
-           !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
-                                "0939 %s: FC_NODE x%x RPI x%x flag x%x "
-                                "ste x%x type x%x Not registered\n",
-                                __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
-                                ndlp->nlp_flag, ndlp->nlp_state,
-                                ndlp->nlp_type);
-               return -ENODEV;
-       }
-
        /* Configure the payload for the supported FPIN events. */
        prdf = (struct lpfc_els_rdf_req *)
                (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
@@ -3695,10 +3703,12 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
        prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
        prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
 
-       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "6444 Xmit RDF to remote NPORT x%x\n",
-                        ndlp->nlp_DID);
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+                        "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n",
+                        ndlp->nlp_DID, phba->cgn_reg_signal,
+                        phba->cgn_reg_fpin);
 
+       phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
        elsiocb->context1 = lpfc_nlp_get(ndlp);
        if (!elsiocb->context1) {
@@ -3739,7 +3749,7 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
        /* Send LS_ACC */
        if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
-               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
                                 "1623 Failed to RDF_ACC from x%x for x%x\n",
                                 ndlp->nlp_DID, vport->fc_myDID);
                return -EIO;
@@ -3747,7 +3757,7 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 
        /* Issue new RDF for reregistering */
        if (lpfc_issue_els_rdf(vport, 0)) {
-               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
                                 "2623 Failed to re register RDF for x%x\n",
                                 vport->fc_myDID);
                return -EIO;
@@ -3756,6 +3766,448 @@ lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        return 0;
 }
 
+/**
+ * lpfc_least_capable_settings - helper function for EDC rsp processing
+ * @phba: pointer to lpfc hba data structure.
+ * @pcgd: pointer to congestion detection descriptor in EDC rsp.
+ *
+ * This helper routine determines the least capable settings for
+ * congestion signals and signal frequency, including scale, from the
+ * congestion detection descriptor in the EDC rsp.  The routine
+ * sets @phba values in preparation for a set_features mailbox command.
+ **/
+static void
+lpfc_least_capable_settings(struct lpfc_hba *phba,
+                           struct fc_diag_cg_sig_desc *pcgd)
+{
+       u32 rsp_sig_cap = 0, drv_sig_cap = 0;
+       u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
+       struct lpfc_cgn_info *cp;
+       u32 crc;
+       u16 sig_freq;
+
+       /* Get rsp signal and frequency capabilities.  */
+       rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
+       rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
+       rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);
+
+       /* If the Fport does not support signals, set FPIN only */
+       if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
+               goto out_no_support;
+
+       /* Apply the xmt scale to the xmt cycle to get the correct frequency.
+        * Adapter default is 100 milliseconds.  Convert all xmt cycle values
+        * to milliseconds.
+        */
+       switch (rsp_sig_freq_scale) {
+       case EDC_CG_SIGFREQ_SEC:
+               rsp_sig_freq_cyc *= MSEC_PER_SEC;
+               break;
+       case EDC_CG_SIGFREQ_MSEC:
+               rsp_sig_freq_cyc = 1;
+               break;
+       default:
+               goto out_no_support;
+       }
+
+       /* Convenient shorthand. */
+       drv_sig_cap = phba->cgn_reg_signal;
+
+       /* Choose the least capable frequency. */
+       if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
+               phba->cgn_sig_freq = rsp_sig_freq_cyc;
+
+       /* There should be some common signal support. Settle on the least
+        * capable signal and adjust the FPIN values. Initialize defaults to
+        * ease the decision.
+        */
+       phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+       phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+       if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY &&
+           (drv_sig_cap == EDC_CG_SIG_WARN_ONLY ||
+            drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) {
+               phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+               phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+       }
+       if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+               if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) {
+                       phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM;
+                       phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE;
+               }
+               if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) {
+                       phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+                       phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+               }
+       }
+
+       if (!phba->cgn_i)
+               return;
+
+       /* Update signal frequency in congestion info buffer */
+       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+       /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
+        * are received by the HBA
+        */
+       sig_freq = phba->cgn_sig_freq;
+
+       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
+               cp->cgn_warn_freq = cpu_to_le16(sig_freq);
+       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+               cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
+               cp->cgn_warn_freq = cpu_to_le16(sig_freq);
+       }
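+       /* Reseal the congestion info buffer after the frequency update */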
+       crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+       cp->cgn_info_crc = cpu_to_le32(crc);
+       return;
+
+out_no_support:
+       phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+       phba->cgn_sig_freq = 0;
+       phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+}
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
+                       FC_LS_TLV_DTAG_INIT);
+
+/**
+ * lpfc_cmpl_els_edc - Completion callback function for EDC
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Exchange
+ * Diagnostic Capabilities (EDC) command. The driver issues an EDC to
+ * notify the FPort of its Congestion and Link Fault capabilities.  This
+ * routine parses the FPort's response and decides on the least common
+ * values applicable to both FPort and NPort for Warnings and Alarms that
+ * are communicated via hardware signals.
+ **/
+static void
+lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+                 struct lpfc_iocbq *rspiocb)
+{
+       IOCB_t *irsp;
+       struct fc_els_edc_resp *edc_rsp;
+       struct fc_tlv_desc *tlv;
+       struct fc_diag_cg_sig_desc *pcgd;
+       struct fc_diag_lnkflt_desc *plnkflt;
+       struct lpfc_dmabuf *pcmd, *prsp;
+       const char *dtag_nm;
+       u32 *pdata, dtag;
+       int desc_cnt = 0, bytes_remain;
+       bool rcv_cap_desc = false;
+       struct lpfc_nodelist *ndlp;
+
+       irsp = &rspiocb->iocb;
+       ndlp = cmdiocb->context1;
+
+       lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+                             "EDC cmpl:    status:x%x/x%x did:x%x",
+                             irsp->ulpStatus, irsp->un.ulpWord[4],
+                             irsp->un.elsreq64.remoteID);
+
+       /* ELS cmd tag <ulpIoTag> completes */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+                       "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
+                       irsp->ulpIoTag, irsp->ulpStatus,
+                       irsp->un.ulpWord[4], irsp->ulpTimeout);
+
+       pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+       if (!pcmd)
+               goto out;
+
+       pdata = (u32 *)pcmd->virt;
+       if (!pdata)
+               goto out;
+
+       /* Need to clear signal values, send features MB and RDF with FPIN. */
+       if (irsp->ulpStatus)
+               goto out;
+
+       prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+       if (!prsp)
+               goto out;
+
+       edc_rsp = prsp->virt;
+       if (!edc_rsp)
+               goto out;
+
+       /* ELS cmd tag <ulpIoTag> completes */
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+                       "4676 Fabric EDC Rsp: "
+                       "0x%02x, 0x%08x\n",
+                       edc_rsp->acc_hdr.la_cmd,
+                       be32_to_cpu(edc_rsp->desc_list_len));
+
+       /*
+        * Payload length in bytes is the response descriptor list
+        * length minus the 12 bytes of Link Service Request
+        * Information descriptor in the reply.
+        */
+       bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) -
+                                  sizeof(struct fc_els_lsri_desc);
+       if (bytes_remain <= 0)
+               goto out;
+
+       tlv = edc_rsp->desc;
+
+       /*
+        * cycle through EDC diagnostic descriptors to find the
+        * congestion signaling capability descriptor
+        */
+       while (bytes_remain) {
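+               /* Validate each descriptor against both the remaining
+                * byte count and its expected fixed size before use.
+                */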
+               if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6461 Truncated TLV hdr on "
+                                       "Diagnostic descriptor[%d]\n",
+                                       desc_cnt);
+                       goto out;
+               }
+
+               dtag = be32_to_cpu(tlv->desc_tag);
+               switch (dtag) {
+               case ELS_DTAG_LNK_FAULT_CAP:
+                       if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+                           FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+                                       sizeof(struct fc_diag_lnkflt_desc)) {
+                               lpfc_printf_log(
+                                       phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6462 Truncated Link Fault Diagnostic "
+                                       "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+                                       desc_cnt, bytes_remain,
+                                       FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+                                       sizeof(struct fc_diag_lnkflt_desc));
+                               goto out;
+                       }
+                       plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+                       lpfc_printf_log(
+                               phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+                               "4617 Link Fault Desc Data: 0x%08x 0x%08x "
+                               "0x%08x 0x%08x 0x%08x\n",
+                               be32_to_cpu(plnkflt->desc_tag),
+                               be32_to_cpu(plnkflt->desc_len),
+                               be32_to_cpu(
+                                       plnkflt->degrade_activate_threshold),
+                               be32_to_cpu(
+                                       plnkflt->degrade_deactivate_threshold),
+                               be32_to_cpu(plnkflt->fec_degrade_interval));
+                       break;
+               case ELS_DTAG_CG_SIGNAL_CAP:
+                       if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+                           FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+                                       sizeof(struct fc_diag_cg_sig_desc)) {
+                               lpfc_printf_log(
+                                       phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6463 Truncated Cgn Signal Diagnostic "
+                                       "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+                                       desc_cnt, bytes_remain,
+                                       FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+                                       sizeof(struct fc_diag_cg_sig_desc));
+                               goto out;
+                       }
+
+                       pcgd = (struct fc_diag_cg_sig_desc *)tlv;
+                       lpfc_printf_log(
+                               phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+                               "4616 CGN Desc Data: 0x%08x 0x%08x "
+                               "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n",
+                               be32_to_cpu(pcgd->desc_tag),
+                               be32_to_cpu(pcgd->desc_len),
+                               be32_to_cpu(pcgd->xmt_signal_capability),
+                               be32_to_cpu(pcgd->xmt_signal_frequency.count),
+                               be32_to_cpu(pcgd->xmt_signal_frequency.units),
+                               be32_to_cpu(pcgd->rcv_signal_capability),
+                               be32_to_cpu(pcgd->rcv_signal_frequency.count),
+                               be32_to_cpu(pcgd->rcv_signal_frequency.units));
+
+                       /* Compare driver and Fport capabilities and choose
+                        * least common.
+                        */
+                       lpfc_least_capable_settings(phba, pcgd);
+                       rcv_cap_desc = true;
+                       break;
+               default:
+                       dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "4919 unknown Diagnostic "
+                                       "Descriptor[%d]: tag x%x (%s)\n",
+                                       desc_cnt, dtag, dtag_nm);
+               }
+
+               bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+               tlv = fc_tlv_next_desc(tlv);
+               desc_cnt++;
+       }
+
+out:
+       if (!rcv_cap_desc) {
+               phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+               phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+               phba->cgn_sig_freq = 0;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
+                               "4202 EDC rsp error - sending RDF "
+                               "for FPIN only.\n");
+       }
+
+       lpfc_config_cgn_signal(phba);
+
+       /* Check to see if link went down during discovery */
+       lpfc_els_chk_latt(phba->pport);
+       lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
+                             "EDC Cmpl:     did:x%x refcnt %d",
+                             ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+       lpfc_els_free_iocb(phba, cmdiocb);
+       lpfc_nlp_put(ndlp);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
+{
+       /* We assume cgd was zeroed before calling this routine */
+
+       /* Configure the congestion detection capability */
+       cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP);
+
+       /* Descriptor len doesn't include the tag or len fields. */
+       cgd->desc_len = cpu_to_be32(
+               FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc));
+
+       /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+        * xmt_signal_frequency.count already set to 0.
+        * xmt_signal_frequency.units already set to 0.
+        */
+
+       if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+               /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED.
+                * rcv_signal_frequency.count already set to 0.
+                * rcv_signal_frequency.units already set to 0.
+                */
+               phba->cgn_sig_freq = 0;
+               return;
+       }
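+       /* Advertise the receive capability the driver has registered */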
+       switch (phba->cgn_reg_signal) {
+       case EDC_CG_SIG_WARN_ONLY:
+               cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY);
+               break;
+       case EDC_CG_SIG_WARN_ALARM:
+               cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM);
+               break;
+       default:
+               /* rcv_signal_capability left at 0, thus no support */
+               break;
+       }
+
+       /* We start negotiation with lpfc_fabric_cgn_frequency; after
+        * the completion we settle on the higher frequency.
+        */
+       cgd->rcv_signal_frequency.count =
+               cpu_to_be16(lpfc_fabric_cgn_frequency);
+       cgd->rcv_signal_frequency.units =
+               cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
+}
+
+ /**
+  * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
+  * @vport: pointer to a host virtual N_Port data structure.
+  * @retry: retry counter for the command iocb.
+  *
+  * This routine issues an ELS EDC to the F-Port Controller to communicate
+  * this N_Port's support of hardware signals in its Congestion
+  * Capabilities Descriptor.
+  *
+  * Note: This routine does not check if one or more signals are
+  * set in the cgn_reg_signal parameter.  The caller makes the
+  * decision to enforce cgn_reg_signal as nonzero or zero depending
+  * on the conditions.  During Fabric requests, the driver
+  * requires cgn_reg_signal to be nonzero.  But a dynamic request
+  * to set the congestion mode to OFF from Monitor or Manage
+  * would correctly issue an EDC with no signals enabled to
+  * turn off switch functionality and then update the FW.
+  *
+  * Return code
+  *   0 - Successfully issued edc command
+  *   1 - Failed to issue edc command
+  **/
+int
+lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_els_edc_req *edc_req;
+       struct fc_diag_cg_sig_desc *cgn_desc;
+       u16 cmdsize;
+       struct lpfc_nodelist *ndlp;
+       u8 *pcmd = NULL;
+       u32 edc_req_size, cgn_desc_size;
+       int rc;
+
+       if (vport->port_type == LPFC_NPIV_PORT)
+               return -EACCES;
+
+       ndlp = lpfc_findnode_did(vport, Fabric_DID);
+       if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+               return -ENODEV;
+
+       /* If HBA doesn't support signals, drop into RDF */
+       if (!phba->cgn_init_reg_signal)
+               goto try_rdf;
+
+       edc_req_size = sizeof(struct fc_els_edc);
+       cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+       cmdsize = edc_req_size + cgn_desc_size;
+       elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_EDC);
+       if (!elsiocb)
+               goto try_rdf;
+
+       /* Configure the payload for the supported Diagnostics capabilities. */
+       pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       memset(pcmd, 0, cmdsize);
+       edc_req = (struct lpfc_els_edc_req *)pcmd;
+       edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
+       edc_req->edc.edc_cmd = ELS_EDC;
+
+       cgn_desc = &edc_req->cgn_desc;
+
+       lpfc_format_edc_cgn_desc(phba, cgn_desc);
+
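+       /* Negotiation starts at the module-default frequency; the EDC
+        * completion settles on the least capable value.
+        */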
+       phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT,
+                        "4623 Xmit EDC to remote "
+                        "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
+                        ndlp->nlp_DID, phba->cgn_reg_signal,
+                        phba->cgn_reg_fpin);
+
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
+       elsiocb->context1 = lpfc_nlp_get(ndlp);
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return -EIO;
+       }
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+                             "Issue EDC:     did:x%x refcnt %d",
+                             ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+       if (rc == IOCB_ERROR) {
+               /* The additional lpfc_nlp_put will cause the following
+                * lpfc_els_free_iocb routine to trigger the release of
+                * the node.
+                */
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               goto try_rdf;
+       }
+       return 0;
+try_rdf:
+       phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+       phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+       rc = lpfc_issue_els_rdf(vport, 0);
+       return rc;
+}
+
 /**
  * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
  * @vport: pointer to a host virtual N_Port data structure.
@@ -4378,7 +4830,7 @@ out_retry:
                            (cmd == ELS_CMD_NVMEPRLI))
                                lpfc_nlp_set_state(vport, ndlp,
                                        NLP_STE_PRLI_ISSUE);
-                       else
+                       else if (cmd != ELS_CMD_ADISC)
                                lpfc_nlp_set_state(vport, ndlp,
                                        NLP_STE_NPR_NODE);
                        ndlp->nlp_last_elscmd = cmd;
@@ -4520,7 +4972,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
 {
        struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
 
-       /* The I/O job is complete.  Clear the context1 data. */
+       /* The I/O iocb is complete.  Clear the context1 data. */
        elsiocb->context1 = NULL;
 
        /* context2  = cmd,  context2->next = rsp, context3 = bpl */
@@ -4612,6 +5064,15 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                goto out;
 
        if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+
+               /* If PLOGI is being retried, PLOGI completion will cleanup the
+                * node. The NLP_NPR_2B_DISC flag needs to be retained to make
+                * progress on nodes discovered from last RSCN.
+                */
+               if ((ndlp->nlp_flag & NLP_DELAY_TMO) &&
+                   (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
+                       goto out;
+
                /* NPort Recovery mode or node is just allocated */
                if (!lpfc_nlp_not_used(ndlp)) {
                        /* A LOGO is completing and the node is in NPR state.
@@ -5158,6 +5619,86 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
        return 0;
 }
 
+ /**
+  * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric.
+  * @vport: pointer to a host virtual N_Port data structure.
+  * @cmdiocb: pointer to the original lpfc command iocb data structure.
+  * @ndlp: NPort to where rsp is directed
+  *
+  * This routine issues an EDC ACC RSP to the F-Port Controller to communicate
+  * this N_Port's support of hardware signals in its Congestion
+  * Capabilities Descriptor.
+  *
+  * Return code
+  *   0 - Successfully issued edc rsp command
+  *   1 - Failed to issue edc rsp command
+  **/
+static int
+lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                      struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       struct lpfc_els_edc_rsp *edc_rsp;
+       struct lpfc_iocbq *elsiocb;
+       IOCB_t *icmd, *cmd;
+       uint8_t *pcmd;
+       int cmdsize, rc;
+
+       cmdsize = sizeof(struct lpfc_els_edc_rsp);
+       elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
+                                    ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+       if (!elsiocb)
+               return 1;
+
+       icmd = &elsiocb->iocb;
+       cmd = &cmdiocb->iocb;
+       icmd->ulpContext = cmd->ulpContext;     /* Xri / rx_id */
+       icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
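+       /* Echo the exchange IDs so this ACC completes the unsolicited EDC */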
+       pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+       memset(pcmd, 0, cmdsize);
+
+       edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
+       edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
+       edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
+               FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
+       edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+       edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
+               FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
+       edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
+       lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+                             "Issue EDC ACC:      did:x%x flg:x%x refcnt %d",
+                             ndlp->nlp_DID, ndlp->nlp_flag,
+                             kref_read(&ndlp->kref));
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+       phba->fc_stat.elsXmitACC++;
+       elsiocb->context1 = lpfc_nlp_get(ndlp);
+       if (!elsiocb->context1) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
+
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               lpfc_nlp_put(ndlp);
+               return 1;
+       }
+
+       /* Xmit ELS ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, "
+                        "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
+                        "RPI: x%x, fc_flag x%x\n",
+                        rc, elsiocb->iotag, elsiocb->sli4_xritag,
+                        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+                        ndlp->nlp_rpi, vport->fc_flag);
+
+       return 0;
+}
+
 /**
  * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
  * @vport: pointer to a virtual N_Port data structure.
@@ -5657,25 +6198,40 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
 
        /* go thru NPR nodes and issue any remaining ELS ADISCs */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-               if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
-                   (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
-                   (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
-                       spin_lock_irq(&ndlp->lock);
-                       ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-                       spin_unlock_irq(&ndlp->lock);
-                       ndlp->nlp_prev_state = ndlp->nlp_state;
-                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
-                       lpfc_issue_els_adisc(vport, ndlp, 0);
-                       sentadisc++;
-                       vport->num_disc_nodes++;
-                       if (vport->num_disc_nodes >=
-                           vport->cfg_discovery_threads) {
-                               spin_lock_irq(shost->host_lock);
-                               vport->fc_flag |= FC_NLP_MORE;
-                               spin_unlock_irq(shost->host_lock);
-                               break;
-                       }
+
+               if (ndlp->nlp_state != NLP_STE_NPR_NODE ||
+                   !(ndlp->nlp_flag & NLP_NPR_ADISC))
+                       continue;
+
+               spin_lock_irq(&ndlp->lock);
+               ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+               spin_unlock_irq(&ndlp->lock);
+
+               if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+                       /* This node was marked for ADISC but was not picked
+                        * for discovery. This is possible if the node was
+                        * missing from the GID_FT response.
+                        *
+                        * At the time the node was marked for ADISC, the
+                        * unregister from the backend was skipped.
+                        */
+                       lpfc_nlp_unreg_node(vport, ndlp);
+                       continue;
+               }
+
+               ndlp->nlp_prev_state = ndlp->nlp_state;
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+               lpfc_issue_els_adisc(vport, ndlp, 0);
+               sentadisc++;
+               vport->num_disc_nodes++;
+               if (vport->num_disc_nodes >=
+                               vport->cfg_discovery_threads) {
+                       spin_lock_irq(shost->host_lock);
+                       vport->fc_flag |= FC_NLP_MORE;
+                       spin_unlock_irq(shost->host_lock);
+                       break;
                }
+
        }
        if (sentadisc == 0) {
                spin_lock_irq(shost->host_lock);
@@ -6087,6 +6643,12 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
        case LPFC_LINK_SPEED_64GHZ:
                rdp_speed = RDP_PS_64GB;
                break;
+       case LPFC_LINK_SPEED_128GHZ:
+               rdp_speed = RDP_PS_128GB;
+               break;
+       case LPFC_LINK_SPEED_256GHZ:
+               rdp_speed = RDP_PS_256GB;
+               break;
        default:
                rdp_speed = RDP_PS_UNKNOWN;
                break;
@@ -6094,6 +6656,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 
        desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
 
+       if (phba->lmt & LMT_256Gb)
+               rdp_cap |= RDP_PS_256GB;
        if (phba->lmt & LMT_128Gb)
                rdp_cap |= RDP_PS_128GB;
        if (phba->lmt & LMT_64Gb)
@@ -6886,13 +7450,6 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
                        continue;
                }
 
-               /* Check to see if we need to NVME rescan this target
-                * remoteport.
-                */
-               if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
-                   ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
-                       lpfc_nvme_rescan_port(vport, ndlp);
-
                lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -8211,6 +8768,125 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        return 0;
 }
 
+/**
+ * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed EDC iocb (currently always returns 0)
+ **/
+static int
+lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       struct fc_els_edc *edc_req;
+       struct fc_tlv_desc *tlv;
+       uint8_t *payload;
+       uint32_t *ptr, dtag;
+       const char *dtag_nm;
+       int desc_cnt = 0, bytes_remain;
+       bool rcv_cap_desc = false;
+
+       payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+
+       edc_req = (struct fc_els_edc *)payload;
+       bytes_remain = be32_to_cpu(edc_req->desc_len);
+
+       ptr = (uint32_t *)payload;
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+                        "3319 Rcv EDC payload len %d: x%x x%x x%x\n",
+                        bytes_remain, be32_to_cpu(*ptr),
+                        be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
+
+       /* No signal support unless there is a congestion descriptor */
+       phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+       phba->cgn_sig_freq = 0;
+       phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN;
+
+       if (bytes_remain <= 0)
+               goto out;
+
+       tlv = edc_req->desc;
+
+       /*
+        * cycle through EDC diagnostic descriptors to find the
+        * congestion signaling capability descriptor
+        */
+       while (bytes_remain && !rcv_cap_desc) {
+               if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6464 Truncated TLV hdr on "
+                                       "Diagnostic descriptor[%d]\n",
+                                       desc_cnt);
+                       goto out;
+               }
+
+               dtag = be32_to_cpu(tlv->desc_tag);
+               switch (dtag) {
+               case ELS_DTAG_LNK_FAULT_CAP:
+                       if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+                           FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+                               sizeof(struct fc_diag_lnkflt_desc)) {
+                               lpfc_printf_log(
+                                       phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6465 Truncated Link Fault Diagnostic "
+                                       "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+                                       desc_cnt, bytes_remain,
+                                       FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+                                       sizeof(struct fc_diag_lnkflt_desc));
+                               goto out;
+                       }
+                       /* No action for Link Fault descriptor for now */
+                       break;
+               case ELS_DTAG_CG_SIGNAL_CAP:
+                       if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
+                           FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
+                               sizeof(struct fc_diag_cg_sig_desc)) {
+                               lpfc_printf_log(
+                                       phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6466 Truncated cgn signal Diagnostic "
+                                       "descriptor[%d]: %d vs 0x%zx 0x%zx\n",
+                                       desc_cnt, bytes_remain,
+                                       FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
+                                       sizeof(struct fc_diag_cg_sig_desc));
+                               goto out;
+                       }
+
+                       phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+                       phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+
+                       /* We start negotiation with lpfc_fabric_cgn_frequency.
+                        * When we process the EDC, we will settle on the
+                        * higher frequency.
+                        */
+                       phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+                       lpfc_least_capable_settings(
+                               phba, (struct fc_diag_cg_sig_desc *)tlv);
+                       rcv_cap_desc = true;
+                       break;
+               default:
+                       dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6467 unknown Diagnostic "
+                                       "Descriptor[%d]: tag x%x (%s)\n",
+                                       desc_cnt, dtag, dtag_nm);
+               }
+               bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
+               tlv = fc_tlv_next_desc(tlv);
+               desc_cnt++;
+       }
+out:
+       /* Need to send back an ACC */
+       lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);
+
+       lpfc_config_cgn_signal(phba);
+       return 0;
+}
+
 /**
  * lpfc_els_timeout - Handler function for the ELS timer
  * @t: timer context used to obtain the vport.
@@ -8668,50 +9344,304 @@ lpfc_send_els_event(struct lpfc_vport *vport,
 }
 
 
-DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
-                       FC_LS_TLV_DTAG_INIT);
-
 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
                        FC_FPIN_LI_EVT_TYPES_INIT);
 
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types,
+                       FC_FPIN_DELI_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types,
+                       FC_FPIN_CONGN_EVT_TYPES_INIT);
+
+DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm,
+                       fc_fpin_congn_severity_types,
+                       FC_FPIN_CONGN_SEVERITY_INIT);
+
+
+/**
+ * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port
+ * @phba: Pointer to phba object.
+ * @wwnlist: Pointer to list of WWPNs in FPIN payload
+ * @cnt: count of WWPNs in FPIN payload
+ *
+ * This routine is called for LI and PC descriptors.
+ * Display is limited to 6 log messages, with up to 6 WWPNs per message.
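+ * For example, a 40-WWPN payload yields six log lines of six WWPNs
+ * each, then a "4687 4 WWPNs Truncated" message for the remainder.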
+ */
+static void
+lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
+{
+       char buf[LPFC_FPIN_WWPN_LINE_SZ];
+       __be64 wwn;
+       u64 wwpn;
+       int i, len;
+       int line = 0;
+       int wcnt = 0;
+       bool endit = false;
+
+       len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:");
+       for (i = 0; i < cnt; i++) {
+               /* Are we on the last WWPN */
+               if (i == (cnt - 1))
+                       endit = true;
+
+               /* Extract the next WWPN from the payload */
+               wwn = *wwnlist++;
+               wwpn = be64_to_cpu(wwn);
+               len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+                                " %016llx", wwpn);
+
+               /* Log a message if we are on the last WWPN
+                * or if we hit the max allowed per message.
+                */
+               wcnt++;
+               if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) {
+                       buf[len] = 0;
+                       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                                       "4686 %s\n", buf);
+
+                       /* Check if we reached the last WWPN */
+                       if (endit)
+                               return;
+
+                       /* Limit the number of log message displayed per FPIN */
+                       line++;
+                       if (line == LPFC_FPIN_WWPN_NUM_LINE) {
+                               lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                                               "4687 %d WWPNs Truncated\n",
+                                               cnt - i - 1);
+                               return;
+                       }
+
+                       /* Start over with next log message */
+                       wcnt = 0;
+                       len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ,
+                                       "Additional WWPNs:");
+               }
+       }
+}
+
 /**
  * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
- * @vport: Pointer to vport object.
+ * @phba: Pointer to phba object.
  * @tlv:  Pointer to the Link Integrity Notification Descriptor.
  *
- * This function processes a link integrity FPIN event by
- * logging a message
+ * This function processes a Link Integrity FPIN event by logging a message.
  **/
 static void
-lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv)
+lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
 {
        struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
        const char *li_evt_str;
-       u32 li_evt;
+       u32 li_evt, cnt;
 
        li_evt = be16_to_cpu(li->event_type);
        li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
+       cnt = be32_to_cpu(li->pname_count);
 
-       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "4680 FPIN Link Integrity %s (x%x) "
-                        "Detecting PN x%016llx Attached PN x%016llx "
-                        "Duration %d mSecs Count %d Port Cnt %d\n",
-                        li_evt_str, li_evt,
-                        be64_to_cpu(li->detecting_wwpn),
-                        be64_to_cpu(li->attached_wwpn),
-                        be32_to_cpu(li->event_threshold),
-                        be32_to_cpu(li->event_count),
-                        be32_to_cpu(li->pname_count));
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "4680 FPIN Link Integrity %s (x%x) "
+                       "Detecting PN x%016llx Attached PN x%016llx "
+                       "Duration %d mSecs Count %d Port Cnt %d\n",
+                       li_evt_str, li_evt,
+                       be64_to_cpu(li->detecting_wwpn),
+                       be64_to_cpu(li->attached_wwpn),
+                       be32_to_cpu(li->event_threshold),
+                       be32_to_cpu(li->event_count), cnt);
+
+       lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt);
+}
+
+/**
+ * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event.
+ * @phba: Pointer to hba object.
+ * @tlv:  Pointer to the Delivery Notification Descriptor TLV
+ *
+ * This function processes a Delivery FPIN event by logging a message.
+ **/
+static void
+lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+       struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv;
+       const char *del_rsn_str;
+       u32 del_rsn;
+       __be32 *frame;
+
+       del_rsn = be16_to_cpu(del->deli_reason_code);
+       del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn);
+
+       /* Skip over desc_tag/desc_len header to payload */
+       frame = (__be32 *)(del + 1);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "4681 FPIN Delivery %s (x%x) "
+                       "Detecting PN x%016llx Attached PN x%016llx "
+                       "DiscHdr0 x%08x "
+                       "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x "
+                       "DiscHdr4 x%08x DiscHdr5 x%08x\n",
+                       del_rsn_str, del_rsn,
+                       be64_to_cpu(del->detecting_wwpn),
+                       be64_to_cpu(del->attached_wwpn),
+                       be32_to_cpu(frame[0]),
+                       be32_to_cpu(frame[1]),
+                       be32_to_cpu(frame[2]),
+                       be32_to_cpu(frame[3]),
+                       be32_to_cpu(frame[4]),
+                       be32_to_cpu(frame[5]));
 }
 
+/**
+ * lpfc_els_rcv_fpin_peer_cgn - Process an FPIN Peer Congestion Event.
+ * @phba: Pointer to hba object.
+ * @tlv:  Pointer to the Peer Congestion Notification Descriptor TLV
+ *
+ * This function processes a Peer Congestion FPIN event by logging a message.
+ **/
 static void
-lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
-                 u32 fpin_length)
+lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
 {
-       struct fc_tlv_desc *tlv;
+       struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv;
+       const char *pc_evt_str;
+       u32 pc_evt, cnt;
+
+       pc_evt = be16_to_cpu(pc->event_type);
+       pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt);
+       cnt = be32_to_cpu(pc->pname_count);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS,
+                       "4684 FPIN Peer Congestion %s (x%x) "
+                       "Duration %d mSecs "
+                       "Detecting PN x%016llx Attached PN x%016llx "
+                       "Impacted Port Cnt %d\n",
+                       pc_evt_str, pc_evt,
+                       be32_to_cpu(pc->event_period),
+                       be64_to_cpu(pc->detecting_wwpn),
+                       be64_to_cpu(pc->attached_wwpn),
+                       cnt);
+
+       lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt);
+}
+
+/**
+ * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification
+ * @phba: Pointer to hba object.
+ * @tlv:  Pointer to the Congestion Notification Descriptor TLV
+ *
+ * This function processes an FPIN Congestion Notification.  The
+ * notification could be an Alarm or a Warning.  This routine feeds that
+ * data into the driver's running congestion algorithm and also logs a
+ * message for the FPIN.  It returns 1 if the message should be delivered
+ * to the upper layer, or 0 if it should not be.
+ **/
+static int
+lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+       struct lpfc_cgn_info *cp;
+       struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv;
+       const char *cgn_evt_str;
+       u32 cgn_evt;
+       const char *cgn_sev_str;
+       u32 cgn_sev;
+       uint16_t value;
+       u32 crc;
+       bool nm_log = false;
+       int rc = 1;
+
+       cgn_evt = be16_to_cpu(cgn->event_type);
+       cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt);
+       cgn_sev = cgn->severity;
+       cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev);
+
+       /* The driver only takes action on a Credit Stall or Oversubscription
+        * event type to engage the IO algorithm.  The driver prints an
+        * unmaskable message only for Lost Credit and Credit Stall.
+        * TODO: Still need to have definition of host action on clear,
+        *       lost credit and device specific event types.
+        */
+       switch (cgn_evt) {
+       case FPIN_CONGN_LOST_CREDIT:
+               nm_log = true;
+               break;
+       case FPIN_CONGN_CREDIT_STALL:
+               nm_log = true;
+               fallthrough;
+       case FPIN_CONGN_OVERSUBSCRIPTION:
+               if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION)
+                       nm_log = false;
+               switch (cgn_sev) {
+               case FPIN_CONGN_SEVERITY_ERROR:
+                       /* Take action here for an Alarm event */
+                       if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+                               if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
+                                       /* Track alarm cnt for cgn_info */
+                                       atomic_inc(&phba->cgn_fabric_alarm_cnt);
+                                       /* Track alarm cnt for SYNC_WQE */
+                                       atomic_inc(&phba->cgn_sync_alarm_cnt);
+                               }
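+                               /* Jump into the warning case below; alarms
+                                * share the frequency update and CRC
+                                * refresh bookkeeping.
+                                */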
+                               goto cleanup;
+                       }
+                       break;
+               case FPIN_CONGN_SEVERITY_WARNING:
+                       /* Take action here for a Warning event */
+                       if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+                               if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
+                                       /* Track warning cnt for cgn_info */
+                                       atomic_inc(&phba->cgn_fabric_warn_cnt);
+                                       /* Track warning cnt for SYNC_WQE */
+                                       atomic_inc(&phba->cgn_sync_warn_cnt);
+                               }
+cleanup:
+                               /* Save frequency in ms */
+                               phba->cgn_fpin_frequency =
+                                       be32_to_cpu(cgn->event_period);
+                               value = phba->cgn_fpin_frequency;
+                               if (phba->cgn_i) {
+                                       cp = (struct lpfc_cgn_info *)
+                                               phba->cgn_i->virt;
+                                       if (phba->cgn_reg_fpin &
+                                               LPFC_CGN_FPIN_ALARM)
+                                               cp->cgn_alarm_freq =
+                                                       cpu_to_le16(value);
+                                       if (phba->cgn_reg_fpin &
+                                               LPFC_CGN_FPIN_WARN)
+                                               cp->cgn_warn_freq =
+                                                       cpu_to_le16(value);
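+                                       /* Refresh the buffer CRC so
+                                        * consumers of the shared buffer
+                                        * can validate the update.
+                                        */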
+                                       crc = lpfc_cgn_calc_crc32
+                                               (cp,
+                                               LPFC_CGN_INFO_SZ,
+                                               LPFC_CGN_CRC32_SEED);
+                                       cp->cgn_info_crc = cpu_to_le32(crc);
+                               }
+
+                               /* Don't deliver to upper layer since
+                                * driver took action on this tlv.
+                                */
+                               rc = 0;
+                       }
+                       break;
+               }
+               break;
+       }
+
+       /* Change the log level to unmaskable for the following event types. */
+       lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO),
+                       LOG_CGN_MGMT | LOG_ELS,
+                       "4683 FPIN CONGESTION %s type %s (x%x) Event "
+                       "Duration %d mSecs\n",
+                       cgn_sev_str, cgn_evt_str, cgn_evt,
+                       be32_to_cpu(cgn->event_period));
+       return rc;
+}
+
+void
+lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length)
+{
+       struct lpfc_hba *phba = vport->phba;
+       struct fc_els_fpin *fpin = (struct fc_els_fpin *)p;
+       struct fc_tlv_desc *tlv, *first_tlv, *current_tlv;
        const char *dtag_nm;
-       uint32_t desc_cnt = 0, bytes_remain;
-       u32 dtag;
+       int desc_cnt = 0, bytes_remain, cnt;
+       u32 dtag, deliver = 0;
+       int len;
 
        /* FPINs handled only if we are in the right discovery state */
        if (vport->port_state < LPFC_DISC_AUTH)
@@ -8721,35 +9651,92 @@ lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
        if (fpin_length < sizeof(struct fc_els_fpin))
                return;
 
+       /* Sanity check descriptor length. The desc_len value does not
+        * include space for the ELS command and the desc_len fields.
+        */
+       len = be32_to_cpu(fpin->desc_len);
+       if (fpin_length < len + sizeof(struct fc_els_fpin)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                               "4671 Bad ELS FPIN length %d: %d\n",
+                               len, fpin_length);
+               return;
+       }
+
        tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
+       first_tlv = tlv;
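+       /* Walk no further than the smaller of the received frame length
+        * and the advertised descriptor list length, so a malformed FPIN
+        * cannot push the TLV walk past the payload.
+        */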
        bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
        bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
 
-       /* process each descriptor */
+       /* process each descriptor separately */
        while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
               bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
-
                dtag = be32_to_cpu(tlv->desc_tag);
                switch (dtag) {
                case ELS_DTAG_LNK_INTEGRITY:
-                       lpfc_els_rcv_fpin_li(vport, tlv);
+                       lpfc_els_rcv_fpin_li(phba, tlv);
+                       deliver = 1;
+                       break;
+               case ELS_DTAG_DELIVERY:
+                       lpfc_els_rcv_fpin_del(phba, tlv);
+                       deliver = 1;
+                       break;
+               case ELS_DTAG_PEER_CONGEST:
+                       lpfc_els_rcv_fpin_peer_cgn(phba, tlv);
+                       deliver = 1;
+                       break;
+               case ELS_DTAG_CONGESTION:
+                       deliver = lpfc_els_rcv_fpin_cgn(phba, tlv);
                        break;
                default:
                        dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
-                       lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-                                        "4678  skipped FPIN descriptor[%d]: "
-                                        "tag x%x (%s)\n",
-                                        desc_cnt, dtag, dtag_nm);
-                       break;
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "4678 unknown FPIN descriptor[%d]: "
+                                       "tag x%x (%s)\n",
+                                       desc_cnt, dtag, dtag_nm);
+
+                       /* If descriptor is bad, drop the rest of the data */
+                       return;
                }
+               lpfc_cgn_update_stat(phba, dtag);
+               cnt = be32_to_cpu(tlv->desc_len);
 
-               desc_cnt++;
+               /* Sanity check descriptor length. The desc_len value does not
+                * include space for the desc_tag and the desc_len fields.
+                */
+               len -= (cnt + sizeof(struct fc_tlv_desc));
+               if (len < 0) {
+                       dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "4672 Bad FPIN descriptor TLV length "
+                                       "%d: %d %d %s\n",
+                                       cnt, len, fpin_length, dtag_nm);
+                       return;
+               }
+
+               current_tlv = tlv;
                bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
                tlv = fc_tlv_next_desc(tlv);
-       }
 
-       fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length,
-                        (char *)fpin);
+               /* Format payload such that the FPIN delivered to the
+                * upper layer is a single descriptor FPIN.
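+                * For example, with a 3-descriptor FPIN the second and
+                * third descriptors are each copied down over the first
+                * descriptor slot before delivery.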
+                */
+               if (desc_cnt)
+                       memcpy(first_tlv, current_tlv,
+                              (cnt + sizeof(struct fc_els_fpin)));
+
+               /* Adjust the length so that it only reflects a
+                * single descriptor FPIN.
+                */
+               fpin_length = cnt + sizeof(struct fc_els_fpin);
+               fpin->desc_len = cpu_to_be32(fpin_length);
+               fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */
+
+               /* Send every descriptor individually to the upper layer */
+               if (deliver)
+                       fc_host_fpin_rcv(lpfc_shost_from_vport(vport),
+                                        fpin_length, (char *)fpin);
+               desc_cnt++;
+       }
 }
 
 /**
@@ -8948,6 +9935,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        break;
                }
                lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+               if (newnode)
+                       lpfc_disc_state_machine(vport, ndlp, NULL,
+                                       NLP_EVT_DEVICE_RM);
                break;
        case ELS_CMD_PRLO:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -9137,6 +10127,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                /* There are no replies, so no rjt codes */
                break;
+       case ELS_CMD_EDC:
+               lpfc_els_rcv_edc(vport, elsiocb, ndlp);
+               break;
        case ELS_CMD_RDF:
                phba->fc_stat.elsRcvRDF++;
                /* Accept RDF only from fabric controller */
index 7cc5920..7195ca0 100644 (file)
@@ -3331,6 +3331,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                case LPFC_LINK_SPEED_32GHZ:
                case LPFC_LINK_SPEED_64GHZ:
                case LPFC_LINK_SPEED_128GHZ:
+               case LPFC_LINK_SPEED_256GHZ:
                        break;
                default:
                        phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -3646,6 +3647,10 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                                        phba->wait_4_mlo_maint_flg);
                }
                lpfc_mbx_process_link_up(phba, la);
+
+               if (phba->cmf_active_mode != LPFC_CFG_OFF)
+                       lpfc_cmf_signal_init(phba);
+
        } else if (attn_type == LPFC_ATT_LINK_DOWN ||
                   attn_type == LPFC_ATT_UNEXP_WWPN) {
                phba->fc_stat.LinkDown++;
@@ -4208,6 +4213,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
        struct lpfc_vport *vport = pmb->vport;
+       int rc;
 
        pmb->ctx_buf = NULL;
        pmb->ctx_ndlp = NULL;
@@ -4283,9 +4289,23 @@ out:
                /* Issue SCR just before NameServer GID_FT Query */
                lpfc_issue_els_scr(vport, 0);
 
-               if (!phba->cfg_enable_mi ||
-                   phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT)
+               /* Link was bounced or a Fabric LOGO occurred.  Start EDC
+                * with initial FW values provided the congestion mode is
+                * not off.  Note that congestion signals may or may not
+                * be supported by the adapter, but FPIN notification is
+                * enabled by default for any signal type the adapter
+                * does not support.
+                */
+               if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+                       phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+                       phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+                       rc = lpfc_issue_els_edc(vport, 0);
+                       lpfc_printf_log(phba, KERN_INFO,
+                                       LOG_INIT | LOG_ELS | LOG_DISCOVERY,
+                                       "4220 EDC issue error x%x, Data: x%x\n",
+                                       rc, phba->cgn_init_reg_signal);
+               } else {
                        lpfc_issue_els_rdf(vport, 0);
+               }
        }
 
        vport->fc_ns_retry = 0;
@@ -4501,10 +4521,152 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
        spin_unlock_irqrestore(shost->host_lock, iflags);
 }
 
+/* Register a node with the backend if not already done */
+void
+lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       unsigned long iflags;
+
+       spin_lock_irqsave(&ndlp->lock, iflags);
+       if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+               /* Already registered with backend, trigger rescan */
+               spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+               if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
+                   ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
+                       lpfc_nvme_rescan_port(vport, ndlp);
+               }
+               return;
+       }
+
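+       /* Mark the node registered under the lock before calling into the
+        * backend so a concurrent caller sees NLP_XPT_REGD and takes the
+        * rescan path instead of registering twice.
+        */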
+       ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
+       spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+       if (lpfc_valid_xpt_node(ndlp)) {
+               vport->phba->nport_event_cnt++;
+               /*
+                * Tell the fc transport about the port, if we haven't
+                * already. If we have, and it's a scsi entity, be sure
+                * to unblock any attached scsi devices.
+                */
+               lpfc_register_remote_port(vport, ndlp);
+       }
+
+       /* We are done if we do not have any NVME remote node */
+       if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
+               return;
+
+       /* Notify the NVME transport of this new rport. */
+       if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+                       ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+               if (vport->phba->nvmet_support == 0) {
+                       /* Register this rport with the transport.
+                        * Only NVME Target Rports are registered with
+                        * the transport.
+                        */
+                       if (ndlp->nlp_type & NLP_NVME_TARGET) {
+                               vport->phba->nport_event_cnt++;
+                               lpfc_nvme_register_port(vport, ndlp);
+                       }
+               } else {
+                       /* Just take an NDLP ref count since the
+                        * target does not register rports.
+                        */
+                       lpfc_nlp_get(ndlp);
+               }
+       }
+}
+
+/* Unregister a node from the backend if not already done */
+void
+lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       unsigned long iflags;
+
+       spin_lock_irqsave(&ndlp->lock, iflags);
+       if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+               spin_unlock_irqrestore(&ndlp->lock, iflags);
+               return;
+       }
+
+       ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+       spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+       if (ndlp->rport &&
+           ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+               vport->phba->nport_event_cnt++;
+               lpfc_unregister_remote_port(ndlp);
+       }
+
+       if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+               vport->phba->nport_event_cnt++;
+               if (vport->phba->nvmet_support == 0) {
+                       /* Start devloss if target. */
+                       if (ndlp->nlp_type & NLP_NVME_TARGET)
+                               lpfc_nvme_unregister_port(vport, ndlp);
+               } else {
+                       /* NVMET has no upcall. */
+                       lpfc_nlp_put(ndlp);
+               }
+       }
+}
+
+/*
+ * Adisc state change handling
+ */
+static void
+lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+               int new_state)
+{
+       switch (new_state) {
+       /*
+        * Any state to ADISC_ISSUE
+        * Do nothing, adisc cmpl handling will trigger state changes
+        */
+       case NLP_STE_ADISC_ISSUE:
+               break;
+
+       /*
+        * ADISC_ISSUE to mapped states
+        * Trigger a registration with the backend; it will be a nop
+        * if already registered
+        */
+       case NLP_STE_UNMAPPED_NODE:
+               ndlp->nlp_type |= NLP_FC_NODE;
+               fallthrough;
+       case NLP_STE_MAPPED_NODE:
+               ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+               lpfc_nlp_reg_node(vport, ndlp);
+               break;
+
+       /*
+        * ADISC_ISSUE to non-mapped states
+        * We are moving from ADISC_ISSUE to a non-mapped state because
+        * ADISC failed.  We would have skipped unregistering with the
+        * backend, so attempt it now
+        */
+       case NLP_STE_NPR_NODE:
+               ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+               fallthrough;
+       default:
+               lpfc_nlp_unreg_node(vport, ndlp);
+               break;
+       }
+}
+
 static void
 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
 {
+       /* Trap ADISC changes here */
+       if (new_state == NLP_STE_ADISC_ISSUE ||
+           old_state == NLP_STE_ADISC_ISSUE) {
+               lpfc_handle_adisc_state(vport, ndlp, new_state);
+               return;
+       }
+
        if (new_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
                ndlp->nlp_type |= NLP_FC_NODE;
@@ -4514,60 +4676,17 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (new_state == NLP_STE_NPR_NODE)
                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
 
-       /* FCP and NVME Transport interface */
+       /* Reg/Unreg for FCP and NVME Transport interface */
        if ((old_state == NLP_STE_MAPPED_NODE ||
             old_state == NLP_STE_UNMAPPED_NODE)) {
-               if (ndlp->rport &&
-                   lpfc_valid_xpt_node(ndlp)) {
-                       vport->phba->nport_event_cnt++;
-                       lpfc_unregister_remote_port(ndlp);
-               }
-
-               if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
-                       vport->phba->nport_event_cnt++;
-                       if (vport->phba->nvmet_support == 0) {
-                               /* Start devloss if target. */
-                               if (ndlp->nlp_type & NLP_NVME_TARGET)
-                                       lpfc_nvme_unregister_port(vport, ndlp);
-                       } else {
-                               /* NVMET has no upcall. */
-                               lpfc_nlp_put(ndlp);
-                       }
-               }
+               /* For nodes marked for ADISC, handle unreg in the ADISC cmpl */
+               if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+                       lpfc_nlp_unreg_node(vport, ndlp);
        }
 
-       /* FCP and NVME Transport interfaces */
-
        if (new_state ==  NLP_STE_MAPPED_NODE ||
-           new_state == NLP_STE_UNMAPPED_NODE) {
-               if (lpfc_valid_xpt_node(ndlp)) {
-                       vport->phba->nport_event_cnt++;
-                       /*
-                        * Tell the fc transport about the port, if we haven't
-                        * already. If we have, and it's a scsi entity, be
-                        */
-                       lpfc_register_remote_port(vport, ndlp);
-               }
-               /* Notify the NVME transport of this new rport. */
-               if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
-                   ndlp->nlp_fc4_type & NLP_FC4_NVME) {
-                       if (vport->phba->nvmet_support == 0) {
-                               /* Register this rport with the transport.
-                                * Only NVME Target Rports are registered with
-                                * the transport.
-                                */
-                               if (ndlp->nlp_type & NLP_NVME_TARGET) {
-                                       vport->phba->nport_event_cnt++;
-                                       lpfc_nvme_register_port(vport, ndlp);
-                               }
-                       } else {
-                               /* Just take an NDLP ref count since the
-                                * target does not register rports.
-                                */
-                               lpfc_nlp_get(ndlp);
-                       }
-               }
-       }
+           new_state == NLP_STE_UNMAPPED_NODE)
+               lpfc_nlp_reg_node(vport, ndlp);
 
        if ((new_state ==  NLP_STE_MAPPED_NODE) &&
                (vport->stat_data_enabled)) {
index 4a5a85e..634f8ff 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -608,6 +608,7 @@ struct fc_vft_header {
 #define ELS_CMD_LIRR      0x7A000000
 #define ELS_CMD_LCB      0x81000000
 #define ELS_CMD_FPIN     0x16000000
+#define ELS_CMD_EDC      0x17000000
 #define ELS_CMD_QFPA      0xB0000000
 #define ELS_CMD_UVEM      0xB1000000
 #else  /*  __LITTLE_ENDIAN_BITFIELD */
@@ -652,6 +653,7 @@ struct fc_vft_header {
 #define ELS_CMD_LIRR      0x7A
 #define ELS_CMD_LCB      0x81
 #define ELS_CMD_FPIN     ELS_FPIN
+#define ELS_CMD_EDC      ELS_EDC
 #define ELS_CMD_QFPA      0xB0
 #define ELS_CMD_UVEM      0xB1
 #endif
@@ -1694,6 +1696,7 @@ struct lpfc_fdmi_reg_portattr {
 #define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
 #define PCI_DEVICE_ID_LANCER_G6_FC  0xe300
 #define PCI_DEVICE_ID_LANCER_G7_FC  0xf400
+#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500
 #define PCI_DEVICE_ID_SAT_SMB       0xf011
 #define PCI_DEVICE_ID_SAT_MID       0xf015
 #define PCI_DEVICE_ID_RFLY          0xf095
index eb8c735..79a4872 100644 (file)
@@ -20,6 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
+#include <uapi/scsi/fc/fc_fs.h>
 #include <uapi/scsi/fc/fc_els.h>
 
 /* Macros to deal with bit fields. Each bit field must have 3 #defines
@@ -94,6 +95,9 @@ struct lpfc_sli_intf {
 #define LPFC_SLI_INTF_FAMILY_BE3       0x1
 #define LPFC_SLI_INTF_FAMILY_LNCR_A0   0xa
 #define LPFC_SLI_INTF_FAMILY_LNCR_B0   0xb
+#define LPFC_SLI_INTF_FAMILY_G6                0xc
+#define LPFC_SLI_INTF_FAMILY_G7                0xd
+#define LPFC_SLI_INTF_FAMILY_G7P       0xe
 #define lpfc_sli_intf_slirev_SHIFT             4
 #define lpfc_sli_intf_slirev_MASK              0x0000000F
 #define lpfc_sli_intf_slirev_WORD              word0
@@ -393,6 +397,12 @@ struct lpfc_wcqe_complete {
 #define lpfc_wcqe_c_ersp0_MASK         0x0000FFFF
 #define lpfc_wcqe_c_ersp0_WORD         word0
        uint32_t total_data_placed;
+#define lpfc_wcqe_c_cmf_cg_SHIFT       31
+#define lpfc_wcqe_c_cmf_cg_MASK                0x00000001
+#define lpfc_wcqe_c_cmf_cg_WORD                total_data_placed
+#define lpfc_wcqe_c_cmf_bw_SHIFT       0
+#define lpfc_wcqe_c_cmf_bw_MASK                0x0FFFFFFF
+#define lpfc_wcqe_c_cmf_bw_WORD                total_data_placed
        uint32_t parameter;
 #define lpfc_wcqe_c_bg_edir_SHIFT      5
 #define lpfc_wcqe_c_bg_edir_MASK       0x00000001
@@ -687,6 +697,7 @@ struct lpfc_register {
 #define lpfc_sliport_eqdelay_id_MASK   0xfff
 #define lpfc_sliport_eqdelay_id_WORD   word0
 #define LPFC_SEC_TO_USEC               1000000
+#define LPFC_SEC_TO_MSEC               1000
 
 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
  * reside in BAR 2.
@@ -959,6 +970,12 @@ union lpfc_sli4_cfg_shdr {
 #define lpfc_mbox_hdr_add_status_SHIFT         8
 #define lpfc_mbox_hdr_add_status_MASK          0x000000FF
 #define lpfc_mbox_hdr_add_status_WORD          word7
+#define LPFC_ADD_STATUS_INCOMPAT_OBJ           0xA2
+#define lpfc_mbox_hdr_add_status_2_SHIFT       16
+#define lpfc_mbox_hdr_add_status_2_MASK                0x000000FF
+#define lpfc_mbox_hdr_add_status_2_WORD                word7
+#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH       0x01
+#define LPFC_ADD_STATUS_2_INCORRECT_ASIC       0x02
                uint32_t response_length;
                uint32_t actual_response_length;
        } response;
@@ -1015,6 +1032,7 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_SET_HOST_DATA                 0x5D
 #define LPFC_MBOX_OPCODE_SEND_ACTIVATION               0x73
 #define LPFC_MBOX_OPCODE_RESET_LICENSES                        0x74
+#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF            0x8E
 #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO          0x9A
 #define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT         0x9B
 #define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT             0x9C
@@ -1123,6 +1141,12 @@ struct lpfc_mbx_sge {
        uint32_t length;
 };
 
+struct lpfc_mbx_host_buf {
+       uint32_t length;
+       uint32_t pa_lo;
+       uint32_t pa_hi;
+};
+
 struct lpfc_mbx_nembed_cmd {
        struct lpfc_sli4_cfg_mhdr cfg_mhdr;
 #define LPFC_SLI4_MBX_SGE_MAX_PAGES    19
@@ -1133,6 +1157,31 @@ struct lpfc_mbx_nembed_sge_virt {
        void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
 };
 
+#define LPFC_MBX_OBJECT_NAME_LEN_DW    26
+struct lpfc_mbx_read_object {  /* Version 0 */
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_rd_object_rlen_SHIFT  0
+#define lpfc_mbx_rd_object_rlen_MASK   0x00FFFFFF
+#define lpfc_mbx_rd_object_rlen_WORD   word0
+                       uint32_t rd_object_offset;
+                       uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+#define LPFC_OBJ_NAME_SZ 104   /* 26 x sizeof(uint32_t) is 104. */
+                       uint32_t rd_object_cnt;
+                       struct lpfc_mbx_host_buf rd_object_hbuf[4];
+               } request;
+               struct {
+                       uint32_t rd_object_actual_rlen;
+                       uint32_t word1;
+#define lpfc_mbx_rd_object_eof_SHIFT   31
+#define lpfc_mbx_rd_object_eof_MASK    0x1
+#define lpfc_mbx_rd_object_eof_WORD    word1
+               } response;
+       } u;
+};
+
 struct lpfc_mbx_eq_create {
        struct mbox_header header;
        union {
@@ -1555,7 +1604,7 @@ struct rq_context {
 #define lpfc_rq_context_hdr_size_WORD  word1
        uint32_t word2;
 #define lpfc_rq_context_cq_id_SHIFT    16
-#define lpfc_rq_context_cq_id_MASK     0x000003FF
+#define lpfc_rq_context_cq_id_MASK     0x0000FFFF
 #define lpfc_rq_context_cq_id_WORD     word2
 #define lpfc_rq_context_buf_size_SHIFT 0
 #define lpfc_rq_context_buf_size_MASK  0x0000FFFF
@@ -2328,6 +2377,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
 #define ADD_STATUS_OPERATION_ALREADY_ACTIVE            0x67
 #define ADD_STATUS_FW_NOT_SUPPORTED                    0xEB
 #define ADD_STATUS_INVALID_REQUEST                     0x4B
+#define ADD_STATUS_INVALID_OBJECT_NAME                 0xA0
 #define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED              0x58
 
 struct lpfc_mbx_sli4_config {
@@ -2803,6 +2853,12 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_extnts_inuse_SHIFT    31
 #define lpfc_mbx_rd_conf_extnts_inuse_MASK     0x00000001
 #define lpfc_mbx_rd_conf_extnts_inuse_WORD     word1
+#define lpfc_mbx_rd_conf_wcs_SHIFT             28      /* warning signaling */
+#define lpfc_mbx_rd_conf_wcs_MASK              0x00000001
+#define lpfc_mbx_rd_conf_wcs_WORD              word1
+#define lpfc_mbx_rd_conf_acs_SHIFT             27      /* alarm signaling */
+#define lpfc_mbx_rd_conf_acs_MASK              0x00000001
+#define lpfc_mbx_rd_conf_acs_WORD              word1
        uint32_t word2;
 #define lpfc_mbx_rd_conf_lnk_numb_SHIFT                0
 #define lpfc_mbx_rd_conf_lnk_numb_MASK         0x0000003F
@@ -3328,17 +3384,20 @@ struct lpfc_sli4_parameters {
 #define cfg_nosr_SHIFT                         9
 #define cfg_nosr_MASK                          0x00000001
 #define cfg_nosr_WORD                          word19
-
 #define cfg_bv1s_SHIFT                          10
 #define cfg_bv1s_MASK                           0x00000001
 #define cfg_bv1s_WORD                           word19
-#define cfg_pvl_SHIFT                          13
-#define cfg_pvl_MASK                           0x00000001
-#define cfg_pvl_WORD                           word19
 
 #define cfg_nsler_SHIFT                         12
 #define cfg_nsler_MASK                          0x00000001
 #define cfg_nsler_WORD                          word19
+#define cfg_pvl_SHIFT                          13
+#define cfg_pvl_MASK                           0x00000001
+#define cfg_pvl_WORD                           word19
+
+#define cfg_pbde_SHIFT                         20
+#define cfg_pbde_MASK                          0x00000001
+#define cfg_pbde_WORD                          word19
 
        uint32_t word20;
 #define cfg_max_tow_xri_SHIFT                  0
@@ -3346,12 +3405,13 @@ struct lpfc_sli4_parameters {
 #define cfg_max_tow_xri_WORD                   word20
 
        uint32_t word21;
-#define cfg_mib_bde_cnt_SHIFT                  16
-#define cfg_mib_bde_cnt_MASK                   0x000000ff
-#define cfg_mib_bde_cnt_WORD                   word21
 #define cfg_mi_ver_SHIFT                       0
 #define cfg_mi_ver_MASK                                0x0000ffff
 #define cfg_mi_ver_WORD                                word21
+#define cfg_cmf_SHIFT                          24
+#define cfg_cmf_MASK                           0x000000ff
+#define cfg_cmf_WORD                           word21
+
        uint32_t mib_size;
        uint32_t word23;                        /* RESERVED */
 
@@ -3380,7 +3440,10 @@ struct lpfc_sli4_parameters {
 
 #define LPFC_SET_UE_RECOVERY           0x10
 #define LPFC_SET_MDS_DIAGS             0x12
+#define LPFC_SET_CGN_SIGNAL            0x1f
 #define LPFC_SET_DUAL_DUMP             0x1e
+#define LPFC_SET_ENABLE_MI             0x21
+#define LPFC_SET_ENABLE_CMF            0x24
 struct lpfc_mbx_set_feature {
        struct mbox_header header;
        uint32_t feature;
@@ -3395,6 +3458,9 @@ struct lpfc_mbx_set_feature {
 #define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT  1
 #define lpfc_mbx_set_feature_mds_deep_loopbk_MASK   0x00000001
 #define lpfc_mbx_set_feature_mds_deep_loopbk_WORD   word6
+#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_warn_freq_MASK  0x0000ffff
+#define lpfc_mbx_set_feature_CGN_warn_freq_WORD  word6
 #define lpfc_mbx_set_feature_dd_SHIFT          0
 #define lpfc_mbx_set_feature_dd_MASK           0x00000001
 #define lpfc_mbx_set_feature_dd_WORD           word6
@@ -3404,6 +3470,15 @@ struct lpfc_mbx_set_feature {
 #define LPFC_DISABLE_DUAL_DUMP         0
 #define LPFC_ENABLE_DUAL_DUMP          1
 #define LPFC_QUERY_OP_DUAL_DUMP                2
+#define lpfc_mbx_set_feature_cmf_SHIFT         0
+#define lpfc_mbx_set_feature_cmf_MASK          0x00000001
+#define lpfc_mbx_set_feature_cmf_WORD          word6
+#define lpfc_mbx_set_feature_mi_SHIFT          0
+#define lpfc_mbx_set_feature_mi_MASK           0x0000ffff
+#define lpfc_mbx_set_feature_mi_WORD           word6
+#define lpfc_mbx_set_feature_milunq_SHIFT      16
+#define lpfc_mbx_set_feature_milunq_MASK       0x0000ffff
+#define lpfc_mbx_set_feature_milunq_WORD       word6
        uint32_t word7;
 #define lpfc_mbx_set_feature_UERP_SHIFT 0
 #define lpfc_mbx_set_feature_UERP_MASK  0x0000ffff
@@ -3411,16 +3486,51 @@ struct lpfc_mbx_set_feature {
 #define lpfc_mbx_set_feature_UESR_SHIFT 16
 #define lpfc_mbx_set_feature_UESR_MASK  0x0000ffff
 #define lpfc_mbx_set_feature_UESR_WORD  word7
+#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK  0x0000ffff
+#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD  word7
+       u32 word8;
+#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
+#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK  0x000000ff
+#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD  word8
 };
 
 
 #define LPFC_SET_HOST_OS_DRIVER_VERSION    0x2
+#define LPFC_SET_HOST_DATE_TIME                   0x4
+
+struct lpfc_mbx_set_host_date_time {
+       uint32_t word6;
+#define lpfc_mbx_set_host_month_WORD   word6
+#define lpfc_mbx_set_host_month_SHIFT  16
+#define lpfc_mbx_set_host_month_MASK   0xFF
+#define lpfc_mbx_set_host_day_WORD     word6
+#define lpfc_mbx_set_host_day_SHIFT    8
+#define lpfc_mbx_set_host_day_MASK     0xFF
+#define lpfc_mbx_set_host_year_WORD    word6
+#define lpfc_mbx_set_host_year_SHIFT   0
+#define lpfc_mbx_set_host_year_MASK    0xFF
+       uint32_t word7;
+#define lpfc_mbx_set_host_hour_WORD    word7
+#define lpfc_mbx_set_host_hour_SHIFT   16
+#define lpfc_mbx_set_host_hour_MASK    0xFF
+#define lpfc_mbx_set_host_min_WORD     word7
+#define lpfc_mbx_set_host_min_SHIFT    8
+#define lpfc_mbx_set_host_min_MASK     0xFF
+#define lpfc_mbx_set_host_sec_WORD     word7
+#define lpfc_mbx_set_host_sec_SHIFT     0
+#define lpfc_mbx_set_host_sec_MASK      0xFF
+};
+
 struct lpfc_mbx_set_host_data {
 #define LPFC_HOST_OS_DRIVER_VERSION_SIZE   48
        struct mbox_header header;
        uint32_t param_id;
        uint32_t param_len;
-       uint8_t  data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+       union {
+               uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+               struct  lpfc_mbx_set_host_date_time tm;
+       } un;
 };
 
 struct lpfc_mbx_set_trunk_mode {
@@ -3438,6 +3548,21 @@ struct lpfc_mbx_get_sli4_parameters {
        struct lpfc_sli4_parameters sli4_parameters;
 };
 
+struct lpfc_mbx_reg_congestion_buf {
+       struct mbox_header header;
+       uint32_t word0;
+#define lpfc_mbx_reg_cgn_buf_type_WORD         word0
+#define lpfc_mbx_reg_cgn_buf_type_SHIFT                0
+#define lpfc_mbx_reg_cgn_buf_type_MASK         0xFF
+#define lpfc_mbx_reg_cgn_buf_cnt_WORD          word0
+#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT         16
+#define lpfc_mbx_reg_cgn_buf_cnt_MASK          0xFF
+       uint32_t word1;
+       uint32_t length;
+       uint32_t addr_lo;
+       uint32_t addr_hi;
+};
+
 struct lpfc_rscr_desc_generic {
 #define LPFC_RSRC_DESC_WSIZE                   22
        uint32_t desc[LPFC_RSRC_DESC_WSIZE];
@@ -3603,6 +3728,9 @@ struct lpfc_controller_attribute {
 #define lpfc_cntl_attr_eprom_ver_hi_SHIFT      8
 #define lpfc_cntl_attr_eprom_ver_hi_MASK       0x000000ff
 #define lpfc_cntl_attr_eprom_ver_hi_WORD       word17
+#define lpfc_cntl_attr_flash_id_SHIFT          16
+#define lpfc_cntl_attr_flash_id_MASK           0x000000ff
+#define lpfc_cntl_attr_flash_id_WORD           word17
        uint32_t mbx_da_struct_ver;
        uint32_t ep_fw_da_struct_ver;
        uint32_t ncsi_ver_str[3];
@@ -3744,6 +3872,7 @@ struct lpfc_mbx_get_port_name {
 #define MB_CEQ_STATUS_QUEUE_FLUSHING           0x4
 #define MB_CQE_STATUS_DMA_FAILED               0x5
 
+
 #define LPFC_MBX_WR_CONFIG_MAX_BDE             1
 struct lpfc_mbx_wr_object {
        struct mbox_header header;
@@ -3760,7 +3889,7 @@ struct lpfc_mbx_wr_object {
 #define lpfc_wr_object_write_length_MASK       0x00FFFFFF
 #define lpfc_wr_object_write_length_WORD       word4
                        uint32_t write_offset;
-                       uint32_t object_name[26];
+                       uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
                        uint32_t bde_count;
                        struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
                } request;
@@ -3809,6 +3938,7 @@ struct lpfc_mqe {
                struct lpfc_mbx_unreg_fcfi unreg_fcfi;
                struct lpfc_mbx_mq_create mq_create;
                struct lpfc_mbx_mq_create_ext mq_create_ext;
+               struct lpfc_mbx_read_object read_object;
                struct lpfc_mbx_eq_create eq_create;
                struct lpfc_mbx_modify_eq_delay eq_delay;
                struct lpfc_mbx_cq_create cq_create;
@@ -3834,6 +3964,7 @@ struct lpfc_mqe {
                struct lpfc_mbx_query_fw_config query_fw_cfg;
                struct lpfc_mbx_set_beacon_config beacon_config;
                struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+               struct lpfc_mbx_reg_congestion_buf reg_congestion_buf;
                struct lpfc_mbx_set_link_diag_state link_diag_state;
                struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
                struct lpfc_mbx_run_link_diag_test link_diag_test;
@@ -3888,6 +4019,7 @@ struct lpfc_mcqe {
 #define LPFC_TRAILER_CODE_GRP5 0x5
 #define LPFC_TRAILER_CODE_FC   0x10
 #define LPFC_TRAILER_CODE_SLI  0x11
+#define LPFC_TRAILER_CODE_CMSTAT        0x13
 };
 
 struct lpfc_acqe_link {
@@ -4122,6 +4254,19 @@ struct lpfc_acqe_misconfigured_event {
 #define LPFC_SLI_EVENT_STATUS_UNCERTIFIED      0x05
 };
 
+struct lpfc_acqe_cgn_signal {
+       u32 word0;
+#define lpfc_warn_acqe_SHIFT           0
+#define lpfc_warn_acqe_MASK            0x7FFFFFFF
+#define lpfc_warn_acqe_WORD            word0
+#define lpfc_imm_acqe_SHIFT            31
+#define lpfc_imm_acqe_MASK             0x1
+#define lpfc_imm_acqe_WORD             word0
+       u32 alarm_cnt;
+       u32 word2;
+       u32 trailer;
+};
+
 struct lpfc_acqe_sli {
        uint32_t event_data1;
        uint32_t event_data2;
@@ -4134,8 +4279,10 @@ struct lpfc_acqe_sli {
 #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP          0x5
 #define LPFC_SLI_EVENT_TYPE_MISCONFIGURED      0x9
 #define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT       0xA
+#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG    0xE
 #define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN      0xF
 #define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE     0x10
+#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL         0x11
 };
 
 /*
@@ -4543,6 +4690,69 @@ struct create_xri_wqe {
 #define T_REQUEST_TAG 3
 #define T_XRI_TAG 1
 
+struct cmf_sync_wqe {
+       uint32_t rsrvd[3];
+       uint32_t word3;
+#define        cmf_sync_interval_SHIFT 0
+#define        cmf_sync_interval_MASK  0x00000ffff
+#define        cmf_sync_interval_WORD  word3
+#define        cmf_sync_afpin_SHIFT    16
+#define        cmf_sync_afpin_MASK     0x000000001
+#define        cmf_sync_afpin_WORD     word3
+#define        cmf_sync_asig_SHIFT     17
+#define        cmf_sync_asig_MASK      0x000000001
+#define        cmf_sync_asig_WORD      word3
+#define        cmf_sync_op_SHIFT       20
+#define        cmf_sync_op_MASK        0x00000000f
+#define        cmf_sync_op_WORD        word3
+#define        cmf_sync_ver_SHIFT      24
+#define        cmf_sync_ver_MASK       0x0000000ff
+#define        cmf_sync_ver_WORD       word3
+#define LPFC_CMF_SYNC_VER      1
+       uint32_t event_tag;
+       uint32_t word5;
+#define        cmf_sync_wsigmax_SHIFT  0
+#define        cmf_sync_wsigmax_MASK   0x00000ffff
+#define        cmf_sync_wsigmax_WORD   word5
+#define        cmf_sync_wsigcnt_SHIFT  16
+#define        cmf_sync_wsigcnt_MASK   0x00000ffff
+#define        cmf_sync_wsigcnt_WORD   word5
+       uint32_t word6;
+       uint32_t word7;
+#define        cmf_sync_cmnd_SHIFT     8
+#define        cmf_sync_cmnd_MASK      0x0000000ff
+#define        cmf_sync_cmnd_WORD      word7
+       uint32_t word8;
+       uint32_t word9;
+#define        cmf_sync_reqtag_SHIFT   0
+#define        cmf_sync_reqtag_MASK    0x00000ffff
+#define        cmf_sync_reqtag_WORD    word9
+#define        cmf_sync_wfpinmax_SHIFT 16
+#define        cmf_sync_wfpinmax_MASK  0x0000000ff
+#define        cmf_sync_wfpinmax_WORD  word9
+#define        cmf_sync_wfpincnt_SHIFT 24
+#define        cmf_sync_wfpincnt_MASK  0x0000000ff
+#define        cmf_sync_wfpincnt_WORD  word9
+       uint32_t word10;
+#define cmf_sync_qosd_SHIFT    9
+#define cmf_sync_qosd_MASK     0x00000001
+#define cmf_sync_qosd_WORD     word10
+       uint32_t word11;
+#define cmf_sync_cmd_type_SHIFT        0
+#define cmf_sync_cmd_type_MASK 0x0000000f
+#define cmf_sync_cmd_type_WORD word11
+#define cmf_sync_wqec_SHIFT    7
+#define cmf_sync_wqec_MASK     0x00000001
+#define cmf_sync_wqec_WORD     word11
+#define cmf_sync_cqid_SHIFT    16
+#define cmf_sync_cqid_MASK     0x0000ffff
+#define cmf_sync_cqid_WORD     word11
+       uint32_t read_bytes;
+       uint32_t word13;
+       uint32_t word14;
+       uint32_t word15;
+};
+
 struct abort_cmd_wqe {
        uint32_t rsrvd[3];
        uint32_t word3;
@@ -4672,6 +4882,7 @@ union lpfc_wqe {
        struct fcp_iread64_wqe fcp_iread;
        struct fcp_iwrite64_wqe fcp_iwrite;
        struct abort_cmd_wqe abort_cmd;
+       struct cmf_sync_wqe cmf_sync;
        struct create_xri_wqe create_xri;
        struct xmit_bcast64_wqe xmit_bcast64;
        struct xmit_seq64_wqe xmit_sequence;
@@ -4692,6 +4903,7 @@ union lpfc_wqe128 {
        struct fcp_iread64_wqe fcp_iread;
        struct fcp_iwrite64_wqe fcp_iwrite;
        struct abort_cmd_wqe abort_cmd;
+       struct cmf_sync_wqe cmf_sync;
        struct create_xri_wqe create_xri;
        struct xmit_bcast64_wqe xmit_bcast64;
        struct xmit_seq64_wqe xmit_sequence;
@@ -4707,6 +4919,7 @@ union lpfc_wqe128 {
 
 #define MAGIC_NUMBER_G6 0xFEAA0003
 #define MAGIC_NUMBER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G7P 0xFEAA0020
 
 struct lpfc_grp_hdr {
        uint32_t size;
@@ -4734,6 +4947,7 @@ struct lpfc_grp_hdr {
 #define FCP_COMMAND_TRSP       0x3
 #define FCP_COMMAND_TSEND      0x7
 #define OTHER_COMMAND          0x8
+#define CMF_SYNC_COMMAND       0xA
 #define ELS_COMMAND_NON_FIP    0xC
 #define ELS_COMMAND_FIP                0xD
 
@@ -4755,6 +4969,7 @@ struct lpfc_grp_hdr {
 #define CMD_FCP_TRECEIVE64_WQE  0xA1
 #define CMD_FCP_TRSP64_WQE      0xA3
 #define CMD_GEN_REQUEST64_WQE   0xC2
+#define CMD_CMF_SYNC_WQE       0xE8
 
 #define CMD_WQE_MASK            0xff
 
@@ -4762,3 +4977,43 @@ struct lpfc_grp_hdr {
 #define LPFC_FW_DUMP   1
 #define LPFC_FW_RESET  2
 #define LPFC_DV_RESET  3
+
+/* On some kernels, enum fc_ls_tlv_dtag does not have
+ * these two values defined; on other kernels it does.
+ * To get around this we need to add these two defines here.
+ */
+#ifndef ELS_DTAG_LNK_FAULT_CAP
+#define ELS_DTAG_LNK_FAULT_CAP        0x0001000D
+#endif
+#ifndef ELS_DTAG_CG_SIGNAL_CAP
+#define ELS_DTAG_CG_SIGNAL_CAP        0x0001000F
+#endif
+
+/*
+ * Initializer useful for decoding FPIN string table.
+ */
+#define FC_FPIN_CONGN_SEVERITY_INIT {                          \
+       { FPIN_CONGN_SEVERITY_WARNING,          "Warning" },    \
+       { FPIN_CONGN_SEVERITY_ERROR,            "Alarm" },      \
+}
+
+/* EDC supports two descriptors.  When allocated, the request is the
+ * size of this structure plus room for each supported descriptor.
+ */
+struct lpfc_els_edc_req {
+       struct fc_els_edc               edc;       /* hdr up to descriptors */
+       struct fc_diag_cg_sig_desc      cgn_desc;  /* 1st descriptor */
+};
+
+/* Minimum structure definitions for the EDC response.
+ * The balance of the payload follows in the buffer.
+ */
+struct lpfc_els_edc_rsp {
+       struct fc_els_edc_resp          edc_rsp;   /* hdr up to descriptors */
+       struct fc_diag_cg_sig_desc      cgn_desc;  /* 1st descriptor */
+};
+
+/* Used for logging FPIN messages */
+#define LPFC_FPIN_WWPN_LINE_SZ  128
+#define LPFC_FPIN_WWPN_LINE_CNT 6
+#define LPFC_FPIN_WWPN_NUM_LINE 6
index d48414e..6a90e6e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = {
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC,
                PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC,
+               PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
index e29523a..0ec322f 100644 (file)
@@ -93,6 +93,7 @@ static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
+static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1243,7 +1244,8 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
                return;
 
        if (phba->link_state == LPFC_HBA_ERROR ||
-           phba->pport->fc_flag & FC_OFFLINE_MODE)
+           phba->pport->fc_flag & FC_OFFLINE_MODE ||
+           phba->cmf_active_mode != LPFC_CFG_OFF)
                goto requeue;
 
        for_each_present_cpu(i) {
@@ -1852,6 +1854,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
 {
        int rc;
        uint32_t intr_mode;
+       LPFC_MBOXQ_t *mboxq;
 
        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
            LPFC_SLI_INTF_IF_TYPE_2) {
@@ -1871,11 +1874,19 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
                                "Recovery...\n");
 
        /* If we are no wait, the HBA has been reset and is not
-        * functional, thus we should clear LPFC_SLI_ACTIVE flag.
+        * functional, thus we should clear
+        * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
         */
        if (mbx_action == LPFC_MBX_NO_WAIT) {
                spin_lock_irq(&phba->hbalock);
                phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
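+               /* Fail any in-flight mailbox command; the reset port
+                * will never complete it.
+                */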
+               if (phba->sli.mbox_active) {
+                       mboxq = phba->sli.mbox_active;
+                       mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+                       __lpfc_mbox_cmpl_put(phba, mboxq);
+                       phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+                       phba->sli.mbox_active = NULL;
+               }
                spin_unlock_irq(&phba->hbalock);
        }
 
@@ -2590,6 +2601,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
        case PCI_DEVICE_ID_LANCER_G7_FC:
                m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
                break;
+       case PCI_DEVICE_ID_LANCER_G7P_FC:
+               m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
+               break;
        case PCI_DEVICE_ID_SKYHAWK:
        case PCI_DEVICE_ID_SKYHAWK_VF:
                oneConnect = 1;
@@ -3007,6 +3021,123 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
 }
 
+/**
+ * lpfc_cmf_stop - Stop CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link goes down or if CMF mode is turned OFF.
+ * It is also called when going offline or unloading, just before the
+ * congestion info buffer is unregistered.
+ **/
+void
+lpfc_cmf_stop(struct lpfc_hba *phba)
+{
+       int cpu;
+       struct lpfc_cgn_stat *cgs;
+
+       /* We only do something if CMF is enabled */
+       if (!phba->sli4_hba.pc_sli4_params.cmf)
+               return;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "6221 Stop CMF / Cancel Timer\n");
+
+       /* Cancel the CMF timer */
+       hrtimer_cancel(&phba->cmf_timer);
+
+       /* Zero CMF counters */
+       atomic_set(&phba->cmf_busy, 0);
+       for_each_present_cpu(cpu) {
+               cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+               atomic64_set(&cgs->total_bytes, 0);
+               atomic64_set(&cgs->rcv_bytes, 0);
+               atomic_set(&cgs->rx_io_cnt, 0);
+               atomic64_set(&cgs->rx_latency, 0);
+       }
+       atomic_set(&phba->cmf_bw_wait, 0);
+
+       /* Resume any blocked IO - Queue unblock on workqueue */
+       queue_work(phba->wq, &phba->unblock_request_work);
+}
+
+static inline uint64_t
+lpfc_get_max_line_rate(struct lpfc_hba *phba)
+{
+       uint64_t rate = lpfc_sli_port_speed_get(phba);
+
+       return ((((unsigned long)rate) * 1024 * 1024) / 10);
+}
+
+void
+lpfc_cmf_signal_init(struct lpfc_hba *phba)
+{
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "6223 Signal CMF init\n");
+
+       /* Use the new fc_linkspeed to recalculate */
+       phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+       phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
+       phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
+                                           phba->cmf_interval_rate, 1000);
+       phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
+
+       /* This is a signal to firmware to sync up CMF BW with link speed */
+       lpfc_issue_cmf_sync_wqe(phba, 0, 0);
+}
+
+/**
+ * lpfc_cmf_start - Start CMF processing
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is called when the link comes up or if CMF mode is changed
+ * from OFF to Monitor or Managed.
+ **/
+void
+lpfc_cmf_start(struct lpfc_hba *phba)
+{
+       struct lpfc_cgn_stat *cgs;
+       int cpu;
+
+       /* We only do something if CMF is enabled */
+       if (!phba->sli4_hba.pc_sli4_params.cmf ||
+           phba->cmf_active_mode == LPFC_CFG_OFF)
+               return;
+
+       /* Reinitialize congestion buffer info */
+       lpfc_init_congestion_buf(phba);
+
+       atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+       atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+       atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+       atomic_set(&phba->cgn_sync_warn_cnt, 0);
+
+       atomic_set(&phba->cmf_busy, 0);
+       for_each_present_cpu(cpu) {
+               cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+               atomic64_set(&cgs->total_bytes, 0);
+               atomic64_set(&cgs->rcv_bytes, 0);
+               atomic_set(&cgs->rx_io_cnt, 0);
+               atomic64_set(&cgs->rx_latency, 0);
+       }
+       phba->cmf_latency.tv_sec = 0;
+       phba->cmf_latency.tv_nsec = 0;
+
+       lpfc_cmf_signal_init(phba);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "6222 Start CMF / Timer\n");
+
+       phba->cmf_timer_cnt = 0;
+       hrtimer_start(&phba->cmf_timer,
+                     ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
+                     HRTIMER_MODE_REL);
+       /* Setup for latency check in IO cmpl routines */
+       ktime_get_real_ts64(&phba->cmf_latency);
+
+       atomic_set(&phba->cmf_bw_wait, 0);
+       atomic_set(&phba->cmf_stop_io, 0);
+}
+
 /**
  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  * @phba: pointer to lpfc hba data structure.
@@ -3541,6 +3672,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
                                spin_lock_irq(&ndlp->lock);
                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                                spin_unlock_irq(&ndlp->lock);
+
+                               lpfc_unreg_rpi(vports[i], ndlp);
                                /*
                                 * Whenever an SLI4 port goes offline, free the
                                 * RPI. Get a new RPI when the adapter port
@@ -3556,7 +3689,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
                                        ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
                                }
-                               lpfc_unreg_rpi(vports[i], ndlp);
 
                                if (ndlp->nlp_type & NLP_FABRIC) {
                                        lpfc_disc_state_machine(vports[i], ndlp,
@@ -4666,6 +4798,8 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
        if (phba->hba_flag & HBA_FCOE_MODE)
                return;
 
+       if (phba->lmt & LMT_256Gb)
+               fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
        if (phba->lmt & LMT_128Gb)
                fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
        if (phba->lmt & LMT_64Gb)
@@ -4845,7 +4979,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
 
 /**
  * lpfc_vmid_poll - VMID timeout detection
- * @ptr: Map to lpfc_hba data structure pointer.
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
  *
  * This routine is invoked when there is no I/O by a VM for the specified
  * amount of time. When this situation is detected, the VMID has to be
@@ -5074,6 +5208,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
                case LPFC_FC_LA_SPEED_128G:
                        port_speed = 128000;
                        break;
+               case LPFC_FC_LA_SPEED_256G:
+                       port_speed = 256000;
+                       break;
                default:
                        port_speed = 0;
                }
@@ -5267,6 +5404,645 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
        return port_speed;
 }
 
+void
+lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
+{
+       struct rxtable_entry *entry;
+       int cnt = 0, head, tail, last, start;
+
+       head = atomic_read(&phba->rxtable_idx_head);
+       tail = atomic_read(&phba->rxtable_idx_tail);
+       if (!phba->rxtable || head == tail) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+                               "4411 Rxtable is empty\n");
+               return;
+       }
+       last = tail;
+       start = head;
+
+       /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
+       while (start != last) {
+               if (start)
+                       start--;
+               else
+                       start = LPFC_MAX_RXMONITOR_ENTRY - 1;
+               entry = &phba->rxtable[start];
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
+                               "Lat %lld ASz %lld Info %02d BWUtil %d "
+                               "Int %d slot %d\n",
+                               cnt, entry->max_bytes_per_interval,
+                               entry->total_bytes, entry->rcv_bytes,
+                               entry->avg_io_latency, entry->avg_io_size,
+                               entry->cmf_info, entry->timer_utilization,
+                               entry->timer_interval, start);
+               cnt++;
+               if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
+                       return;
+       }
+}
+
+/**
+ * lpfc_cgn_update_stat - Save data into congestion stats buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @dtag: FPIN descriptor received
+ *
+ * Increment the FPIN received counter/time when it happens.
+ */
+void
+lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
+{
+       struct lpfc_cgn_info *cp;
+       struct tm broken;
+       struct timespec64 cur_time;
+       u32 cnt;
+       u16 value;
+
+       /* Make sure we have a congestion info buffer */
+       if (!phba->cgn_i)
+               return;
+       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+       ktime_get_real_ts64(&cur_time);
+       time64_to_tm(cur_time.tv_sec, 0, &broken);
+
+       /* Update congestion statistics */
+       switch (dtag) {
+       case ELS_DTAG_LNK_INTEGRITY:
+               cnt = le32_to_cpu(cp->link_integ_notification);
+               cnt++;
+               cp->link_integ_notification = cpu_to_le32(cnt);
+
+               cp->cgn_stat_lnk_month = broken.tm_mon + 1;
+               cp->cgn_stat_lnk_day = broken.tm_mday;
+               cp->cgn_stat_lnk_year = broken.tm_year - 100;
+               cp->cgn_stat_lnk_hour = broken.tm_hour;
+               cp->cgn_stat_lnk_min = broken.tm_min;
+               cp->cgn_stat_lnk_sec = broken.tm_sec;
+               break;
+       case ELS_DTAG_DELIVERY:
+               cnt = le32_to_cpu(cp->delivery_notification);
+               cnt++;
+               cp->delivery_notification = cpu_to_le32(cnt);
+       /* if ASIC_GEN_NUM >= 0xC */
+               cp->cgn_stat_del_month = broken.tm_mon + 1;
+               cp->cgn_stat_del_day = broken.tm_mday;
+               cp->cgn_stat_del_year = broken.tm_year - 100;
+               cp->cgn_stat_del_hour = broken.tm_hour;
+               cp->cgn_stat_del_min = broken.tm_min;
+               cp->cgn_stat_del_sec = broken.tm_sec;
+               break;
+       case ELS_DTAG_PEER_CONGEST:
+               cnt = le32_to_cpu(cp->cgn_peer_notification);
+               cnt++;
+               cp->cgn_peer_notification = cpu_to_le32(cnt);
+
+               cp->cgn_stat_peer_month = broken.tm_mon + 1;
+               cp->cgn_stat_peer_day = broken.tm_mday;
+               cp->cgn_stat_peer_year = broken.tm_year - 100;
+               cp->cgn_stat_peer_hour = broken.tm_hour;
+               cp->cgn_stat_peer_min = broken.tm_min;
+               cp->cgn_stat_peer_sec = broken.tm_sec;
+               break;
+       case ELS_DTAG_CONGESTION:
+               cnt = le32_to_cpu(cp->cgn_notification);
+               cnt++;
+               cp->cgn_notification = cpu_to_le32(cnt);
+
+               cp->cgn_stat_cgn_month = broken.tm_mon + 1;
+               cp->cgn_stat_cgn_day = broken.tm_mday;
+               cp->cgn_stat_cgn_year = broken.tm_year - 100;
+               cp->cgn_stat_cgn_hour = broken.tm_hour;
+               cp->cgn_stat_cgn_min = broken.tm_min;
+               cp->cgn_stat_cgn_sec = broken.tm_sec;
+       }
+       if (phba->cgn_fpin_frequency &&
+           phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
+               value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
+               cp->cgn_stat_npm = cpu_to_le32(value);
+       }
+       value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+                                   LPFC_CGN_CRC32_SEED);
+       cp->cgn_info_crc = cpu_to_le32(value);
+}
+
+/**
+ * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Save the congestion event data every minute.
+ * On the hour collapse all the minute data into hour data. Every day
+ * collapse all the hour data into daily data. Separate driver
+ * and fabric congestion event counters are saved out
+ * to the registered congestion buffer every minute.
+ */
+static void
+lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
+{
+       struct lpfc_cgn_info *cp;
+       struct tm broken;
+       struct timespec64 cur_time;
+       uint32_t i, index;
+       uint16_t value, mvalue;
+       uint64_t bps;
+       uint32_t mbps;
+       uint32_t dvalue, wvalue, lvalue, avalue;
+       uint64_t latsum;
+       uint16_t *ptr;
+       uint32_t *lptr;
+       uint16_t *mptr;
+
+       /* Make sure we have a congestion info buffer */
+       if (!phba->cgn_i)
+               return;
+       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+       if (time_before(jiffies, phba->cgn_evt_timestamp))
+               return;
+       phba->cgn_evt_timestamp = jiffies +
+                       msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
+       phba->cgn_evt_minute++;
+
+       /* We should get to this point in the routine on 1 minute intervals */
+
+       ktime_get_real_ts64(&cur_time);
+       time64_to_tm(cur_time.tv_sec, 0, &broken);
+
+       if (phba->cgn_fpin_frequency &&
+           phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
+               value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
+               cp->cgn_stat_npm = cpu_to_le32(value);
+       }
+
+       /* Read and clear the latency counters for this minute */
+       lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
+       latsum = atomic64_read(&phba->cgn_latency_evt);
+       atomic_set(&phba->cgn_latency_evt_cnt, 0);
+       atomic64_set(&phba->cgn_latency_evt, 0);
+
+       /* We need to store MB/sec bandwidth in the congestion information.
+        * block_cnt is count of 512 byte blocks for the entire minute,
+        * bps will get bytes per sec before finally converting to MB/sec.
+        */
+       bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
+       phba->rx_block_cnt = 0;
+       mvalue = bps / (1024 * 1024); /* convert to MB/sec */
+
+       /* Every minute */
+       /* cgn parameters */
+       cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+       cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+       cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+       cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+
+       /* Fill in default LUN qdepth */
+       value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
+       cp->cgn_lunq = cpu_to_le16(value);
+
+       /* Record congestion buffer info - every minute
+        * cgn_driver_evt_cnt (Driver events)
+        * cgn_fabric_warn_cnt (Congestion Warnings)
+        * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
+        * cgn_fabric_alarm_cnt (Congestion Alarms)
+        */
+       index = ++cp->cgn_index_minute;
+       if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
+               cp->cgn_index_minute = 0;
+               index = 0;
+       }
+
+       /* Get the number of driver events in this sample and reset counter */
+       dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
+       atomic_set(&phba->cgn_driver_evt_cnt, 0);
+
+       /* Get the number of warning events - FPIN and Signal for this minute */
+       wvalue = 0;
+       if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
+           phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+           phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+               wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
+       atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+
+       /* Get the number of alarm events - FPIN and Signal for this minute */
+       avalue = 0;
+       if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
+           phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+               avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
+       atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+
+       /* Collect the driver, warning, alarm and latency counts for this
+        * minute into the driver congestion buffer.
+        */
+       ptr = &cp->cgn_drvr_min[index];
+       value = (uint16_t)dvalue;
+       *ptr = cpu_to_le16(value);
+
+       ptr = &cp->cgn_warn_min[index];
+       value = (uint16_t)wvalue;
+       *ptr = cpu_to_le16(value);
+
+       ptr = &cp->cgn_alarm_min[index];
+       value = (uint16_t)avalue;
+       *ptr = cpu_to_le16(value);
+
+       lptr = &cp->cgn_latency_min[index];
+       if (lvalue) {
+               lvalue = (uint32_t)div_u64(latsum, lvalue);
+               *lptr = cpu_to_le32(lvalue);
+       } else {
+               *lptr = 0;
+       }
+
+       /* Collect the bandwidth value into the driver's congestion buffer. */
+       mptr = &cp->cgn_bw_min[index];
+       *mptr = cpu_to_le16(mvalue);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
+                       index, dvalue, wvalue, *lptr, mvalue, avalue);
+
+       /* Every hour */
+       if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
+               /* Record congestion buffer info - every hour
+                * Collapse all minutes into an hour
+                */
+               index = ++cp->cgn_index_hour;
+               if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
+                       cp->cgn_index_hour = 0;
+                       index = 0;
+               }
+
+               dvalue = 0;
+               wvalue = 0;
+               lvalue = 0;
+               avalue = 0;
+               mvalue = 0;
+               mbps = 0;
+               for (i = 0; i < LPFC_MIN_HOUR; i++) {
+                       dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
+                       wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
+                       lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
+                       mbps += le16_to_cpu(cp->cgn_bw_min[i]);
+                       avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
+               }
+               if (lvalue)             /* Avg of latency averages */
+                       lvalue /= LPFC_MIN_HOUR;
+               if (mbps)               /* Avg of Bandwidth averages */
+                       mvalue = mbps / LPFC_MIN_HOUR;
+
+               lptr = &cp->cgn_drvr_hr[index];
+               *lptr = cpu_to_le32(dvalue);
+               lptr = &cp->cgn_warn_hr[index];
+               *lptr = cpu_to_le32(wvalue);
+               lptr = &cp->cgn_latency_hr[index];
+               *lptr = cpu_to_le32(lvalue);
+               mptr = &cp->cgn_bw_hr[index];
+               *mptr = cpu_to_le16(mvalue);
+               lptr = &cp->cgn_alarm_hr[index];
+               *lptr = cpu_to_le32(avalue);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "2419 Congestion Info - hour "
+                               "(%d): %d %d %d %d %d\n",
+                               index, dvalue, wvalue, lvalue, mvalue, avalue);
+       }
+
+       /* Every day */
+       if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
+               /* Record congestion buffer info - every day
+                * Collapse all hours into a day. Rotate days
+                * after LPFC_MAX_CGN_DAYS.
+                */
+               index = ++cp->cgn_index_day;
+               if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
+                       cp->cgn_index_day = 0;
+                       index = 0;
+               }
+
+               /* Anytime we overwrite daily index 0, after we wrap,
+                * we will be overwriting the oldest day, so we must
+                * update the congestion data start time for that day.
+                * That start time should have previously been saved after
+                * we wrote the last day's worth of data.
+                */
+               if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
+                       time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
+
+                       cp->cgn_info_month = broken.tm_mon + 1;
+                       cp->cgn_info_day = broken.tm_mday;
+                       cp->cgn_info_year = broken.tm_year - 100;
+                       cp->cgn_info_hour = broken.tm_hour;
+                       cp->cgn_info_minute = broken.tm_min;
+                       cp->cgn_info_second = broken.tm_sec;
+
+                       lpfc_printf_log
+                               (phba, KERN_INFO, LOG_CGN_MGMT,
+                               "2646 CGNInfo idx0 Start Time: "
+                               "%d/%d/%d %d:%d:%d\n",
+                               cp->cgn_info_day, cp->cgn_info_month,
+                               cp->cgn_info_year, cp->cgn_info_hour,
+                               cp->cgn_info_minute, cp->cgn_info_second);
+               }
+
+               dvalue = 0;
+               wvalue = 0;
+               lvalue = 0;
+               mvalue = 0;
+               mbps = 0;
+               avalue = 0;
+               for (i = 0; i < LPFC_HOUR_DAY; i++) {
+                       dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
+                       wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
+                       lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
+                       mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+                       avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
+               }
+               if (lvalue)             /* Avg of latency averages */
+                       lvalue /= LPFC_HOUR_DAY;
+               if (mbps)               /* Avg of Bandwidth averages */
+                       mvalue = mbps / LPFC_HOUR_DAY;
+
+               lptr = &cp->cgn_drvr_day[index];
+               *lptr = cpu_to_le32(dvalue);
+               lptr = &cp->cgn_warn_day[index];
+               *lptr = cpu_to_le32(wvalue);
+               lptr = &cp->cgn_latency_day[index];
+               *lptr = cpu_to_le32(lvalue);
+               mptr = &cp->cgn_bw_day[index];
+               *mptr = cpu_to_le16(mvalue);
+               lptr = &cp->cgn_alarm_day[index];
+               *lptr = cpu_to_le32(avalue);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "2420 Congestion Info - daily (%d): "
+                               "%d %d %d %d %d\n",
+                               index, dvalue, wvalue, lvalue, mvalue, avalue);
+
+               /* We just wrote LPFC_MAX_CGN_DAYS of data,
+                * so we are wrapped on any data after this.
+                * Save this as the start time for the next day.
+                */
+               if (index == (LPFC_MAX_CGN_DAYS - 1)) {
+                       phba->hba_flag |= HBA_CGN_DAY_WRAP;
+                       ktime_get_real_ts64(&phba->cgn_daily_ts);
+               }
+       }
+
+       /* Use the frequency found in the last rcv'ed FPIN */
+       value = phba->cgn_fpin_frequency;
+       if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
+               cp->cgn_warn_freq = cpu_to_le16(value);
+       if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
+               cp->cgn_alarm_freq = cpu_to_le16(value);
+
+       /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
+        * are received by the HBA
+        */
+       value = phba->cgn_sig_freq;
+
+       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+           phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+               cp->cgn_warn_freq = cpu_to_le16(value);
+       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
+               cp->cgn_alarm_freq = cpu_to_le16(value);
+
+       lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+                                    LPFC_CGN_CRC32_SEED);
+       cp->cgn_info_crc = cpu_to_le32(lvalue);
+}
+
+/**
+ * lpfc_calc_cmf_latency - latency from start of the CMF timer interval
+ * @phba: The Hba for which this call is being executed.
+ *
+ * The routine calculates the latency from the beginning of the CMF timer
+ * interval to the current point in time. It is called from IO completion
+ * when we exceed our Bandwidth limitation for the time interval.
+ */
+uint32_t
+lpfc_calc_cmf_latency(struct lpfc_hba *phba)
+{
+       struct timespec64 cmpl_time;
+       uint32_t msec = 0;
+
+       ktime_get_real_ts64(&cmpl_time);
+
+       /* This routine works on a ms granularity so sec and nsec are
+        * converted accordingly.
+        */
+       if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
+               msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
+                       NSEC_PER_MSEC;
+       } else {
+               if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
+                       msec = (cmpl_time.tv_sec -
+                               phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
+                       msec += ((cmpl_time.tv_nsec -
+                                 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
+               } else {
+                       msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
+                               1) * MSEC_PER_SEC;
+                       msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
+                                cmpl_time.tv_nsec) / NSEC_PER_MSEC);
+               }
+       }
+       return msec;
+}
+
+/**
+ * lpfc_cmf_timer -  This is the timer function for one congestion
+ * rate interval.
+ * @timer: Pointer to the high resolution timer that expired
+ */
+static enum hrtimer_restart
+lpfc_cmf_timer(struct hrtimer *timer)
+{
+       struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
+                                            cmf_timer);
+       struct rxtable_entry *entry;
+       uint32_t io_cnt;
+       uint32_t head, tail;
+       uint32_t busy, max_read;
+       uint64_t total, rcv, lat, mbpi;
+       int timer_interval = LPFC_CMF_INTERVAL;
+       uint32_t ms;
+       struct lpfc_cgn_stat *cgs;
+       int cpu;
+
+       /* Only restart the timer if congestion mgmt is on */
+       if (phba->cmf_active_mode == LPFC_CFG_OFF ||
+           !phba->cmf_latency.tv_sec) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6224 CMF timer exit: %d %lld\n",
+                               phba->cmf_active_mode,
+                               (uint64_t)phba->cmf_latency.tv_sec);
+               return HRTIMER_NORESTART;
+       }
+
+       /* If pport is not ready yet, just exit and wait for
+        * the next timer cycle to hit.
+        */
+       if (!phba->pport)
+               goto skip;
+
+       /* Do not block SCSI IO while in the timer routine since
+        * total_bytes will be cleared
+        */
+       atomic_set(&phba->cmf_stop_io, 1);
+
+       /* First we need to calculate the actual ms between
+        * the last timer interrupt and this one. We ask for
+        * LPFC_CMF_INTERVAL, however the actual time may
+        * vary depending on system overhead.
+        */
+       ms = lpfc_calc_cmf_latency(phba);
+
+       /* Immediately after we calculate the time since the last
+        * timer interrupt, set the start time for the next
+        * interrupt
+        */
+       ktime_get_real_ts64(&phba->cmf_latency);
+
+       phba->cmf_link_byte_count =
+               div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
+
+       /* Collect all the stats from the prior timer interval */
+       total = 0;
+       io_cnt = 0;
+       lat = 0;
+       rcv = 0;
+       for_each_present_cpu(cpu) {
+               cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+               total += atomic64_xchg(&cgs->total_bytes, 0);
+               io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
+               lat += atomic64_xchg(&cgs->rx_latency, 0);
+               rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
+       }
+
+       /* Before we issue another CMF_SYNC_WQE, retrieve the BW
+        * returned from the last CMF_SYNC_WQE issued, from
+        * cmf_last_sync_bw. This will be the target BW for
+        * this next timer interval.
+        */
+       if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
+           phba->link_state != LPFC_LINK_DOWN &&
+           phba->hba_flag & HBA_SETUP) {
+               mbpi = phba->cmf_last_sync_bw;
+               phba->cmf_last_sync_bw = 0;
+               lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
+       } else {
+               /* For Monitor mode or link down we want mbpi
+                * to be the full link speed
+                */
+               mbpi = phba->cmf_link_byte_count;
+       }
+       phba->cmf_timer_cnt++;
+
+       if (io_cnt) {
+               /* Update congestion info buffer latency in us */
+               atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
+               atomic64_add(lat, &phba->cgn_latency_evt);
+       }
+       busy = atomic_xchg(&phba->cmf_busy, 0);
+       max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
+
+       /* Calculate MBPI for the next timer interval */
+       if (mbpi) {
+               if (mbpi > phba->cmf_link_byte_count ||
+                   phba->cmf_active_mode == LPFC_CFG_MONITOR)
+                       mbpi = phba->cmf_link_byte_count;
+
+               /* Change max_bytes_per_interval to what the prior
+                * CMF_SYNC_WQE cmpl indicated.
+                */
+               if (mbpi != phba->cmf_max_bytes_per_interval)
+                       phba->cmf_max_bytes_per_interval = mbpi;
+       }
+
+       /* Save rxmonitor information for debug */
+       if (phba->rxtable) {
+               head = atomic_xchg(&phba->rxtable_idx_head,
+                                  LPFC_RXMONITOR_TABLE_IN_USE);
+               entry = &phba->rxtable[head];
+               entry->total_bytes = total;
+               entry->rcv_bytes = rcv;
+               entry->cmf_busy = busy;
+               entry->cmf_info = phba->cmf_active_info;
+               if (io_cnt) {
+                       entry->avg_io_latency = div_u64(lat, io_cnt);
+                       entry->avg_io_size = div_u64(rcv, io_cnt);
+               } else {
+                       entry->avg_io_latency = 0;
+                       entry->avg_io_size = 0;
+               }
+               entry->max_read_cnt = max_read;
+               entry->io_cnt = io_cnt;
+               entry->max_bytes_per_interval = mbpi;
+               if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+                       entry->timer_utilization = phba->cmf_last_ts;
+               else
+                       entry->timer_utilization = ms;
+               entry->timer_interval = ms;
+               phba->cmf_last_ts = 0;
+
+               /* Increment rxtable index */
+               head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+               tail = atomic_read(&phba->rxtable_idx_tail);
+               if (head == tail) {
+                       tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+                       atomic_set(&phba->rxtable_idx_tail, tail);
+               }
+               atomic_set(&phba->rxtable_idx_head, head);
+       }
+
+       if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
+               /* If Monitor mode, check if we are oversubscribed
+                * against the full line rate.
+                */
+               if (mbpi && total > mbpi)
+                       atomic_inc(&phba->cgn_driver_evt_cnt);
+       }
+       phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
+
+       /* Each minute save Fabric and Driver congestion information */
+       lpfc_cgn_save_evt_cnt(phba);
+
+       /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
+        * minute, adjust our next timer interval, if needed, to ensure a
+        * 1 minute granularity when we get the next timer interrupt.
+        */
+       if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
+                      phba->cgn_evt_timestamp)) {
+               timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
+                                                 jiffies);
+               if (timer_interval <= 0)
+                       timer_interval = LPFC_CMF_INTERVAL;
+
+               /* If we adjust timer_interval, max_bytes_per_interval
+                * needs to be adjusted as well.
+                */
+               phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
+                                                   timer_interval, 1000);
+               if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
+                       phba->cmf_max_bytes_per_interval =
+                               phba->cmf_link_byte_count;
+       }
+
+       /* Since total_bytes has already been zeroed, it's okay to unblock
+        * after max_bytes_per_interval is setup.
+        */
+       if (atomic_xchg(&phba->cmf_bw_wait, 0))
+               queue_work(phba->wq, &phba->unblock_request_work);
+
+       /* SCSI IO is now unblocked */
+       atomic_set(&phba->cmf_stop_io, 0);
+
+skip:
+       hrtimer_forward_now(timer,
+                           ktime_set(0, timer_interval * NSEC_PER_MSEC));
+       return HRTIMER_RESTART;
+}
+
 #define trunk_link_status(__idx)\
        bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
               ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
@@ -5329,6 +6105,9 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
                        trunk_link_status(0), trunk_link_status(1),
                        trunk_link_status(2), trunk_link_status(3));
 
+       if (phba->cmf_active_mode != LPFC_CFG_OFF)
+               lpfc_cmf_signal_init(phba);
+
        if (port_fault)
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3202 trunk error:0x%x (%s) seen on port0:%s "
@@ -5510,9 +6289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
        uint8_t operational = 0;
        struct temp_event temp_event_data;
        struct lpfc_acqe_misconfigured_event *misconfigured;
+       struct lpfc_acqe_cgn_signal *cgn_signal;
        struct Scsi_Host  *shost;
        struct lpfc_vport **vports;
-       int rc, i;
+       int rc, i, cnt;
 
        evt_type = bf_get(lpfc_trailer_type, acqe_sli);
 
@@ -5668,6 +6448,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
                                "Event Data1:x%08x Event Data2: x%08x\n",
                                acqe_sli->event_data1, acqe_sli->event_data2);
                break;
+       case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
+               /* Call FW to obtain active parms */
+               lpfc_sli4_cgn_parm_chg_evt(phba);
+               break;
        case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
                /* Misconfigured WWN. Reports that the SLI Port is configured
                 * to use FA-WWN, but the attached device doesn't support it.
@@ -5685,6 +6469,40 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
                             "Event Data1: x%08x Event Data2: x%08x\n",
                             acqe_sli->event_data1, acqe_sli->event_data2);
                break;
+       case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
+               if (phba->cmf_active_mode == LPFC_CFG_OFF)
+                       break;
+               cgn_signal = (struct lpfc_acqe_cgn_signal *)
+                                       &acqe_sli->event_data1;
+               phba->cgn_acqe_cnt++;
+
+               cnt = bf_get(lpfc_warn_acqe, cgn_signal);
+               atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
+               atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
+
+               /* no threshold for CMF, even 1 signal will trigger an event */
+
+               /* Alarm overrides warning, so check that first */
+               if (cgn_signal->alarm_cnt) {
+                       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+                               /* Keep track of alarm cnt for cgn_info */
+                               atomic_add(cgn_signal->alarm_cnt,
+                                          &phba->cgn_fabric_alarm_cnt);
+                               /* Keep track of alarm cnt for CMF_SYNC_WQE */
+                               atomic_add(cgn_signal->alarm_cnt,
+                                          &phba->cgn_sync_alarm_cnt);
+                       }
+               } else if (cnt) {
+                       /* signal action needs to be taken */
+                       if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+                           phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+                               /* Keep track of warning cnt for cgn_info */
+                               atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
+                               /* Keep track of warning cnt for CMF_SYNC_WQE */
+                               atomic_add(cnt, &phba->cgn_sync_warn_cnt);
+                       }
+               }
+               break;
        default:
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3193 Unrecognized SLI event, type: 0x%x",
@@ -6059,6 +6877,276 @@ lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
                        phba->sli4_hba.link_state.logical_speed);
 }
 
+/**
+ * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
+ * is an asynchronous notification of a request to reset CM stats.
+ **/
+static void
+lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
+{
+       if (!phba->cgn_i)
+               return;
+       lpfc_init_congestion_stat(phba);
+}
+
+/**
+ * lpfc_cgn_params_val - Validate FW congestion parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @p_cfg_param: pointer to FW provided congestion parameters.
+ *
+ * This routine validates the congestion parameters passed
+ * by the FW to the driver via an ACQE event.
+ **/
+static void
+lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
+{
+       spin_lock_irq(&phba->hbalock);
+
+       if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
+                            LPFC_CFG_MONITOR)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+                               "6225 CMF mode param out of range: %d\n",
+                                p_cfg_param->cgn_param_mode);
+               p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
+       }
+
+       spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_cgn_params_parse - Process a FW cong parm change event
+ * @phba: pointer to lpfc hba data structure.
+ * @p_cgn_param: pointer to a data buffer with the FW cong params.
+ * @len: the size of pdata in bytes.
+ *
+ * This routine validates the congestion management buffer signature
+ * from the FW and corrects any out-of-range values to defaults.
+ * If the signature magic is correct, the validated contents are
+ * copied to the driver's @phba structure. If the magic is incorrect,
+ * an error message is logged.
+ **/
+static void
+lpfc_cgn_params_parse(struct lpfc_hba *phba,
+                     struct lpfc_cgn_param *p_cgn_param, uint32_t len)
+{
+       struct lpfc_cgn_info *cp;
+       uint32_t crc, oldmode;
+
+       /* Make sure the FW has encoded the correct magic number to
+        * validate the congestion parameter in FW memory.
+        */
+       if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+                               "4668 FW cgn parm buffer data: "
+                               "magic 0x%x version %d mode %d "
+                               "level0 %d level1 %d "
+                               "level2 %d byte13 %d "
+                               "byte14 %d byte15 %d "
+                               "byte11 %d byte12 %d activeMode %d\n",
+                               p_cgn_param->cgn_param_magic,
+                               p_cgn_param->cgn_param_version,
+                               p_cgn_param->cgn_param_mode,
+                               p_cgn_param->cgn_param_level0,
+                               p_cgn_param->cgn_param_level1,
+                               p_cgn_param->cgn_param_level2,
+                               p_cgn_param->byte13,
+                               p_cgn_param->byte14,
+                               p_cgn_param->byte15,
+                               p_cgn_param->byte11,
+                               p_cgn_param->byte12,
+                               phba->cmf_active_mode);
+
+               oldmode = phba->cmf_active_mode;
+
+               /* Any parameters out of range are corrected to defaults
+                * by this routine.  No need to fail.
+                */
+               lpfc_cgn_params_val(phba, p_cgn_param);
+
+               /* Parameters are verified, move them into driver storage */
+               spin_lock_irq(&phba->hbalock);
+               memcpy(&phba->cgn_p, p_cgn_param,
+                      sizeof(struct lpfc_cgn_param));
+
+               /* Update parameters in congestion info buffer now */
+               if (phba->cgn_i) {
+                       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+                       cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+                       cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+                       cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+                       cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+                       crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
+                                                 LPFC_CGN_CRC32_SEED);
+                       cp->cgn_info_crc = cpu_to_le32(crc);
+               }
+               spin_unlock_irq(&phba->hbalock);
+
+               phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
+
+               switch (oldmode) {
+               case LPFC_CFG_OFF:
+                       if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
+                               /* Turning CMF on */
+                               lpfc_cmf_start(phba);
+
+                               if (phba->link_state >= LPFC_LINK_UP) {
+                                       phba->cgn_reg_fpin =
+                                               phba->cgn_init_reg_fpin;
+                                       phba->cgn_reg_signal =
+                                               phba->cgn_init_reg_signal;
+                                       lpfc_issue_els_edc(phba->pport, 0);
+                               }
+                       }
+                       break;
+               case LPFC_CFG_MANAGED:
+                       switch (phba->cgn_p.cgn_param_mode) {
+                       case LPFC_CFG_OFF:
+                               /* Turning CMF off */
+                               lpfc_cmf_stop(phba);
+                               if (phba->link_state >= LPFC_LINK_UP)
+                                       lpfc_issue_els_edc(phba->pport, 0);
+                               break;
+                       case LPFC_CFG_MONITOR:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                               "4661 Switch from MANAGED to "
+                                               "MONITOR mode\n");
+                               phba->cmf_max_bytes_per_interval =
+                                       phba->cmf_link_byte_count;
+
+                               /* Resume blocked IO - unblock on workqueue */
+                               queue_work(phba->wq,
+                                          &phba->unblock_request_work);
+                               break;
+                       }
+                       break;
+               case LPFC_CFG_MONITOR:
+                       switch (phba->cgn_p.cgn_param_mode) {
+                       case LPFC_CFG_OFF:
+                               /* Turning CMF off */
+                               lpfc_cmf_stop(phba);
+                               if (phba->link_state >= LPFC_LINK_UP)
+                                       lpfc_issue_els_edc(phba->pport, 0);
+                               break;
+                       case LPFC_CFG_MANAGED:
+                               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                               "4662 Switch from MONITOR to "
+                                               "MANAGED mode\n");
+                               lpfc_cmf_signal_init(phba);
+                               break;
+                       }
+                       break;
+               }
+       } else {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                               "4669 FW cgn parm buf wrong magic 0x%x "
+                               "version %d\n", p_cgn_param->cgn_param_magic,
+                               p_cgn_param->cgn_param_version);
+       }
+}
+
+/**
+ * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a read_object mailbox command to
+ * get the congestion management parameters from the FW,
+ * parses them and updates the driver-maintained values.
+ *
+ * Returns
+ *  0     if the object was empty
+ *  -Eval if an error was encountered
+ *  Count if bytes were read from object
+ **/
+int
+lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
+{
+       int ret = 0;
+       struct lpfc_cgn_param *p_cgn_param = NULL;
+       u32 *pdata = NULL;
+       u32 len = 0;
+
+       /* Find out if the FW has a new set of congestion parameters. */
+       len = sizeof(struct lpfc_cgn_param);
+       pdata = kzalloc(len, GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
+       ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
+                              pdata, len);
+
+       /* 0 means no data.  A negative means error.  A positive means
+        * bytes were copied.
+        */
+       if (!ret) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                               "4670 CGN RD OBJ returns no data\n");
+               goto rd_obj_err;
+       } else if (ret < 0) {
+               /* Some error.  Just exit and return it to the caller.*/
+               goto rd_obj_err;
+       }
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+                       "6234 READ CGN PARAMS Successful %d\n", len);
+
+       /* Parse data pointer over len and update the phba congestion
+        * parameters with values passed back.  The receive rate values
+        * may have been altered in FW, but take no action here.
+        */
+       p_cgn_param = (struct lpfc_cgn_param *)pdata;
+       lpfc_cgn_params_parse(phba, p_cgn_param, len);
+
+ rd_obj_err:
+       kfree(pdata);
+       return ret;
+}
+
+/**
+ * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The FW generated Async ACQE SLI event calls this routine when
+ * the event type is an SLI Internal Port Event and the Event Code
+ * indicates a change to the FW maintained congestion parameters.
+ *
+ * This routine executes a Read_Object mailbox call to obtain the
+ * current congestion parameters maintained in FW and corrects
+ * the driver's active congestion parameters.
+ *
+ * The acqe event is not passed because there is no further data
+ * required.
+ *
+ * Returns a nonzero error code if event processing encountered an
+ * error, zero on success.
+ **/
+static int
+lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
+{
+       int ret = 0;
+
+       if (!phba->sli4_hba.pc_sli4_params.cmf) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                               "4664 Cgn Evt when E2E off. Drop event\n");
+               return -EACCES;
+       }
+
+       /* If the event is claiming an empty object, it's ok.  A write
+        * could have cleared it.  Only error is a negative return
+        * status.
+        */
+       ret = lpfc_sli4_cgn_params_read(phba);
+       if (ret < 0) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                               "4667 Error reading Cgn Params (%d)\n",
+                               ret);
+       } else if (!ret) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                               "4673 CGN Event empty object.\n");
+       }
+       return ret;
+}
+
 /**
  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
  * @phba: pointer to lpfc hba data structure.
@@ -6107,6 +7195,9 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
                case LPFC_TRAILER_CODE_SLI:
                        lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
                        break;
+               case LPFC_TRAILER_CODE_CMSTAT:
+                       lpfc_sli4_async_cmstat_evt(phba);
+                       break;
                default:
                        lpfc_printf_log(phba, KERN_ERR,
                                        LOG_TRACE_EVENT,
@@ -6391,6 +7482,15 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
        return rc;
 }
 
+static void
+lpfc_unblock_requests_work(struct work_struct *work)
+{
+       struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
+                                            unblock_request_work);
+
+       lpfc_unblock_requests(phba);
+}
+
 /**
  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
  * @phba: pointer to lpfc hba data structure.
@@ -6466,7 +7566,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 
        INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
                          lpfc_idle_stat_delay_work);
-
+       INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
        return 0;
 }
 
@@ -6697,6 +7797,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        /* FCF rediscover timer */
        timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
 
+       /* CMF congestion timer */
+       hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       phba->cmf_timer.function = lpfc_cmf_timer;
+
        /*
         * Control structure for handling external multi-buffer mailbox
         * command pass-through.
@@ -7145,6 +8249,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        }
 #endif
 
+       phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
+       if (!phba->cmf_stat) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+                               "3331 Failed allocating per cpu cgn stats\n");
+               rc = -ENOMEM;
+               goto out_free_hba_hdwq_info;
+       }
+
        /*
         * Enable sr-iov virtual functions if supported and configured
         * through the module parameter.
@@ -7164,6 +8276,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        return 0;
 
+out_free_hba_hdwq_info:
+       free_percpu(phba->sli4_hba.c_stat);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 out_free_hba_idle_stat:
        kfree(phba->sli4_hba.idle_stat);
@@ -7211,6 +8325,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        free_percpu(phba->sli4_hba.c_stat);
 #endif
+       free_percpu(phba->cmf_stat);
        kfree(phba->sli4_hba.idle_stat);
 
        /* Free memory allocated for msi-x interrupt vector to CPU mapping */
@@ -8537,9 +9652,12 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
        }
        /* FW supports persistent topology - override module parameter value */
        phba->hba_flag |= HBA_PERSISTENT_TOPO;
-       switch (phba->pcidev->device) {
-       case PCI_DEVICE_ID_LANCER_G7_FC:
-       case PCI_DEVICE_ID_LANCER_G6_FC:
+
+       /* if ASIC_GEN_NUM >= 0xC) */
+       if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+                   LPFC_SLI_INTF_IF_TYPE_6) ||
+           (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+                   LPFC_SLI_INTF_FAMILY_G6)) {
                if (!tf) {
                        phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
                                        ? FLAGS_TOPOLOGY_MODE_LOOP
@@ -8547,8 +9665,7 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
                } else {
                        phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
                }
-               break;
-       default:        /* G5 */
+       } else { /* G5 */
                if (tf) {
                        /* If topology failover set - pt is '0' or '1' */
                        phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
@@ -8558,7 +9675,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
                                        ? FLAGS_TOPOLOGY_MODE_PT_PT
                                        : FLAGS_TOPOLOGY_MODE_LOOP);
                }
-               break;
        }
        if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8683,6 +9799,52 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
                phba->max_vports = phba->max_vpi;
+
+               /* Next decide on FPIN or Signal E2E CGN support
+                * For congestion alarms and warnings valid combination are:
+                * 1. FPIN alarms / FPIN warnings
+                * 2. Signal alarms / Signal warnings
+                * 3. FPIN alarms / Signal warnings
+                * 4. Signal alarms / FPIN warnings
+                *
+                * Initialize the adapter frequency to 100 mSecs
+                */
+               phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
+               phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+               phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+
+               if (lpfc_use_cgn_signal) {
+                       if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
+                               phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
+                               phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
+                       }
+                       if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
+                               /* MUST support both alarm and warning
+                                * because EDC does not support alarm alone.
+                                */
+                               if (phba->cgn_reg_signal !=
+                                   EDC_CG_SIG_WARN_ONLY) {
+                                       /* Must support both or none */
+                                       phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
+                                       phba->cgn_reg_signal =
+                                               EDC_CG_SIG_NOTSUPPORTED;
+                               } else {
+                                       phba->cgn_reg_signal =
+                                               EDC_CG_SIG_WARN_ALARM;
+                                       phba->cgn_reg_fpin =
+                                               LPFC_CGN_FPIN_NONE;
+                               }
+                       }
+               }
+
+               /* Set the congestion initial signal and fpin values. */
+               phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
+               phba->cgn_init_reg_signal = phba->cgn_reg_signal;
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
+                               phba->cgn_reg_signal, phba->cgn_reg_fpin);
+
                lpfc_map_topology(phba, rd_config);
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "2003 cfg params Extents? %d "
@@ -12063,6 +13225,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        struct pci_dev *pdev = phba->pcidev;
 
        lpfc_stop_hba_timers(phba);
+       hrtimer_cancel(&phba->cmf_timer);
+
        if (phba->pport)
                phba->sli4_hba.intr_enable = 0;
 
@@ -12133,6 +13297,240 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
                phba->pport->work_port_events = 0;
 }
 
+static uint32_t
+lpfc_cgn_crc32(uint32_t crc, u8 byte)
+{
+       uint32_t msb = 0;
+       uint32_t bit;
+
+       for (bit = 0; bit < 8; bit++) {
+               msb = (crc >> 31) & 1;
+               crc <<= 1;
+
+               if (msb ^ (byte & 1)) {
+                       crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
+                       crc |= 1;
+               }
+               byte >>= 1;
+       }
+       return crc;
+}
+
+static uint32_t
+lpfc_cgn_reverse_bits(uint32_t wd)
+{
+       uint32_t result = 0;
+       uint32_t i;
+
+       for (i = 0; i < 32; i++) {
+               result <<= 1;
+               result |= (1 & (wd >> i));
+       }
+       return result;
+}
+
+/*
+ * This routine matches the algorithm the HBA firmware
+ * uses to validate data integrity.
+ */
+uint32_t
+lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
+{
+       uint32_t  i;
+       uint32_t result;
+       uint8_t  *data = (uint8_t *)ptr;
+
+       for (i = 0; i < byteLen; ++i)
+               crc = lpfc_cgn_crc32(crc, data[i]);
+
+       result = ~lpfc_cgn_reverse_bits(crc);
+       return result;
+}
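/* Illustration, not from the patch: a reader-side check built from the
 * helper above. It assumes cgn_info_crc lies outside the LPFC_CGN_INFO_SZ
 * bytes being summed (the init routines below compute the CRC the same
 * way before storing it), so recomputing over the buffer must reproduce
 * the stored value.
 */
static bool lpfc_cgn_crc_ok_example(struct lpfc_cgn_info *cp)
{
	uint32_t crc;

	/* Same length and seed as lpfc_init_congestion_buf() below */
	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
	return cp->cgn_info_crc == cpu_to_le32(crc);
}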
+
+void
+lpfc_init_congestion_buf(struct lpfc_hba *phba)
+{
+       struct lpfc_cgn_info *cp;
+       struct timespec64 cmpl_time;
+       struct tm broken;
+       uint16_t size;
+       uint32_t crc;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
+
+       if (!phba->cgn_i)
+               return;
+       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+       atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+       atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+       atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+       atomic_set(&phba->cgn_sync_warn_cnt, 0);
+
+       atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+       atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+       atomic_set(&phba->cgn_driver_evt_cnt, 0);
+       atomic_set(&phba->cgn_latency_evt_cnt, 0);
+       atomic64_set(&phba->cgn_latency_evt, 0);
+       phba->cgn_evt_minute = 0;
+       phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
+
+       memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+       cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
+       cp->cgn_info_version = LPFC_CGN_INFO_V3;
+
+       /* cgn parameters */
+       cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
+       cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
+       cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
+       cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
+
+       ktime_get_real_ts64(&cmpl_time);
+       time64_to_tm(cmpl_time.tv_sec, 0, &broken);
+
+       cp->cgn_info_month = broken.tm_mon + 1;
+       cp->cgn_info_day = broken.tm_mday;
+       cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
+       cp->cgn_info_hour = broken.tm_hour;
+       cp->cgn_info_minute = broken.tm_min;
+       cp->cgn_info_second = broken.tm_sec;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+                       "2643 CGNInfo Init: Start Time "
+                       "%d/%d/%d %d:%d:%d\n",
+                       cp->cgn_info_day, cp->cgn_info_month,
+                       cp->cgn_info_year, cp->cgn_info_hour,
+                       cp->cgn_info_minute, cp->cgn_info_second);
+
+       /* Fill in default LUN qdepth */
+       if (phba->pport) {
+               size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
+               cp->cgn_lunq = cpu_to_le16(size);
+       }
+
+       /* last used Index initialized to 0xff already */
+
+       cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
+       cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+       crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+       cp->cgn_info_crc = cpu_to_le32(crc);
+
+       phba->cgn_evt_timestamp = jiffies +
+               msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
+}
+
+void
+lpfc_init_congestion_stat(struct lpfc_hba *phba)
+{
+       struct lpfc_cgn_info *cp;
+       struct timespec64 cmpl_time;
+       struct tm broken;
+       uint32_t crc;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "6236 INIT Congestion Stat %p\n", phba->cgn_i);
+
+       if (!phba->cgn_i)
+               return;
+
+       cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+       memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+
+       ktime_get_real_ts64(&cmpl_time);
+       time64_to_tm(cmpl_time.tv_sec, 0, &broken);
+
+       cp->cgn_stat_month = broken.tm_mon + 1;
+       cp->cgn_stat_day = broken.tm_mday;
+       cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
+       cp->cgn_stat_hour = broken.tm_hour;
+       cp->cgn_stat_minute = broken.tm_min;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
+                       "2647 CGNstat Init: Start Time "
+                       "%d/%d/%d %d:%d\n",
+                       cp->cgn_stat_day, cp->cgn_stat_month,
+                       cp->cgn_stat_year, cp->cgn_stat_hour,
+                       cp->cgn_stat_minute);
+
+       crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
+       cp->cgn_info_crc = cpu_to_le32(crc);
+}
+
+/**
+ * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
+ * @phba: Pointer to hba context object.
+ * @reg: nonzero to register the congestion buffer, zero to unregister it.
+ */
+static int
+__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
+{
+       struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
+       union  lpfc_sli4_cfg_shdr *shdr;
+       uint32_t shdr_status, shdr_add_status;
+       LPFC_MBOXQ_t *mboxq;
+       int length, rc;
+
+       if (!phba->cgn_i)
+               return -ENXIO;
+
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "2641 REG_CONGESTION_BUF mbox allocation fail: "
+                               "HBA state x%x reg %d\n",
+                               phba->pport->port_state, reg);
+               return -ENOMEM;
+       }
+
+       length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
+               sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
+                        LPFC_SLI4_MBX_EMBED);
+       reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
+       bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
+       if (reg > 0)
+               bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
+       else
+               bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
+       reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
+       reg_congestion_buf->addr_lo =
+               putPaddrLow(phba->cgn_i->phys);
+       reg_congestion_buf->addr_hi =
+               putPaddrHigh(phba->cgn_i->phys);
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                &shdr->response);
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2642 REG_CONGESTION_BUF mailbox "
+                               "failed with status x%x add_status x%x,"
+                               " mbx status x%x reg %d\n",
+                               shdr_status, shdr_add_status, rc, reg);
+               return -ENXIO;
+       }
+       return 0;
+}
+
+int
+lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
+{
+       lpfc_cmf_stop(phba);
+       return __lpfc_reg_congestion_buf(phba, 0);
+}
+
+int
+lpfc_reg_congestion_buf(struct lpfc_hba *phba)
+{
+       return __lpfc_reg_congestion_buf(phba, 1);
+}
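/* Illustration, not from the patch: how the two wrappers are meant to
 * pair up over the life of the cgn_i DMA buffer. Registration only makes
 * sense once cgn_i exists (__lpfc_reg_congestion_buf() returns -ENXIO
 * without it), and lpfc_unreg_congestion_buf() also stops CMF, so it must
 * run before the buffer is freed in lpfc_mem_free_all(). Hypothetical
 * caller:
 */
static void lpfc_cgn_buf_lifecycle_example(struct lpfc_hba *phba)
{
	if (!phba->cgn_i)
		return;				/* no buffer allocated */

	if (lpfc_reg_congestion_buf(phba))
		return;				/* run without FW reporting */

	/* ... normal operation ... */

	lpfc_unreg_congestion_buf(phba);	/* before dma_free_coherent() */
}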
+
 /**
  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
  * @phba: Pointer to HBA context object.
@@ -12241,7 +13639,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                                        bf_get(cfg_xib, mbx_sli4_parameters),
                                        phba->cfg_enable_fc4_type);
 fcponly:
-                       phba->nvme_support = 0;
                        phba->nvmet_support = 0;
                        phba->cfg_nvmet_mrq = 0;
                        phba->cfg_nvme_seg_cnt = 0;
@@ -12259,9 +13656,10 @@ fcponly:
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
 
-       /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
-       if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
-           LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
+       /* Enable embedded Payload BDE if support is indicated */
+       if (bf_get(cfg_pbde, mbx_sli4_parameters))
+               phba->cfg_enable_pbde = 1;
+       else
                phba->cfg_enable_pbde = 0;
 
        /*
@@ -12299,7 +13697,7 @@ fcponly:
                        "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
                        bf_get(cfg_xib, mbx_sli4_parameters),
                        phba->cfg_enable_pbde,
-                       phba->fcp_embed_io, phba->nvme_support,
+                       phba->fcp_embed_io, sli4_params->nvme,
                        phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
 
        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -12331,21 +13729,6 @@ fcponly:
        else
                phba->nsler = 0;
 
-       /* Save PB info for use during HBA setup */
-       sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
-       sli4_params->mib_bde_cnt = bf_get(cfg_mib_bde_cnt, mbx_sli4_parameters);
-       sli4_params->mib_size = mbx_sli4_parameters->mib_size;
-       sli4_params->mi_value = LPFC_DFLT_MIB_VAL;
-
-       /* Next we check for Vendor MIB support */
-       if (sli4_params->mi_ver && phba->cfg_enable_mi)
-               phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
-
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "6461 MIB attr %d  enable %d  FDMI %d buf %d:%d\n",
-                       sli4_params->mi_ver, phba->cfg_enable_mi,
-                       sli4_params->mi_value, sli4_params->mib_bde_cnt,
-                       sli4_params->mib_size);
        return 0;
 }
 
@@ -12978,7 +14361,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
        const struct firmware *fw)
 {
        int rc;
+       u8 sli_family;
 
+       sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
        /* Three cases:  (1) FW was not supported on the detected adapter.
         * (2) FW update has been locked out administratively.
         * (3) Some other error during FW update.
@@ -12986,10 +14371,12 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
         * for admin diagnosis.
         */
        if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
-           (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+           (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
             magic_number != MAGIC_NUMBER_G6) ||
-           (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
-            magic_number != MAGIC_NUMBER_G7)) {
+           (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
+            magic_number != MAGIC_NUMBER_G7) ||
+           (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
+            magic_number != MAGIC_NUMBER_G7P)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3030 This firmware version is not supported on"
                                " this HBA model. Device:%x Magic:%x Type:%x "
@@ -13377,6 +14764,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);
+       if (phba->cgn_i)
+               lpfc_unreg_congestion_buf(phba);
 
        lpfc_free_sysfs_attr(vport);
 
@@ -14041,17 +15430,18 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
 void
 lpfc_sli4_ras_init(struct lpfc_hba *phba)
 {
-       switch (phba->pcidev->device) {
-       case PCI_DEVICE_ID_LANCER_G6_FC:
-       case PCI_DEVICE_ID_LANCER_G7_FC:
+       /* ASIC_GEN_NUM >= 0xC */
+       if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+                   LPFC_SLI_INTF_IF_TYPE_6) ||
+           (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+                   LPFC_SLI_INTF_FAMILY_G6)) {
                phba->ras_fwlog.ras_hwsupport = true;
                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
                    phba->cfg_ras_fwlog_buffsize)
                        phba->ras_fwlog.ras_enabled = true;
                else
                        phba->ras_fwlog.ras_enabled = false;
-               break;
-       default:
+       } else {
                phba->ras_fwlog.ras_hwsupport = false;
        }
 }
@@ -14164,8 +15554,9 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
        unsigned int temp_idx;
        int i;
        int j = 0;
-       unsigned long rem_nsec;
-       struct lpfc_vport **vports;
+       unsigned long rem_nsec, iflags;
+       bool log_verbose = false;
+       struct lpfc_vport *port_iterator;
 
        /* Don't dump messages if we explicitly set log_verbose for the
         * physical port or any vport.
@@ -14173,16 +15564,24 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
        if (phba->cfg_log_verbose)
                return;
 
-       vports = lpfc_create_vport_work_array(phba);
-       if (vports != NULL) {
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
-                       if (vports[i]->cfg_log_verbose) {
-                               lpfc_destroy_vport_work_array(phba, vports);
+       spin_lock_irqsave(&phba->port_list_lock, iflags);
+       list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+               if (port_iterator->load_flag & FC_UNLOADING)
+                       continue;
+               if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
+                       if (port_iterator->cfg_log_verbose)
+                               log_verbose = true;
+
+                       scsi_host_put(lpfc_shost_from_vport(port_iterator));
+
+                       if (log_verbose) {
+                               spin_unlock_irqrestore(&phba->port_list_lock,
+                                                      iflags);
                                return;
                        }
                }
        }
-       lpfc_destroy_vport_work_array(phba, vports);
+       spin_unlock_irqrestore(&phba->port_list_lock, iflags);
 
        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
                return;
index 5660a87..7d480c7 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -44,6 +44,9 @@
 #define LOG_NVME_DISC   0x00200000      /* NVME Discovery/Connect events. */
 #define LOG_NVME_ABTS   0x00400000      /* NVME ABTS events. */
 #define LOG_NVME_IOERR  0x00800000      /* NVME IO Error events. */
+#define LOG_RSVD1      0x01000000      /* Reserved */
+#define LOG_RSVD2      0x02000000      /* Reserved */
+#define LOG_CGN_MGMT    0x04000000     /* Congestion Mgmt events */
 #define LOG_TRACE_EVENT 0x80000000     /* Dmp the DBG log on this err */
 #define LOG_ALL_MSG    0x7fffffff      /* LOG all messages */
 
index 84bc373..6c754ee 100644
@@ -513,8 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba,
                break;
        }
 
-       if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
-            phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+       /* Topology handling for ASIC_GEN_NUM 0xC and later */
+       if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+            phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
            !(phba->sli4_hba.pc_sli4_params.pls) &&
            mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
                mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
index be54fbf..870e53b 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -335,6 +335,19 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
        dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
        phba->lpfc_cmd_rsp_buf_pool = NULL;
 
+       /* Free Congestion Data buffer */
+       if (phba->cgn_i) {
+               dma_free_coherent(&phba->pcidev->dev,
+                                 sizeof(struct lpfc_cgn_info),
+                                 phba->cgn_i->virt, phba->cgn_i->phys);
+               kfree(phba->cgn_i);
+               phba->cgn_i = NULL;
+       }
+
+       /* Free RX table */
+       kfree(phba->rxtable);
+       phba->rxtable = NULL;
+
        /* Free the iocb lookup array */
        kfree(psli->iocbq_lookup);
        psli->iocbq_lookup = NULL;
index e12f83f..27263f0 100644
@@ -736,9 +736,13 @@ out:
                 * is already in MAPPED or UNMAPPED state.  Catch this
                 * condition and don't set the nlp_state again because
                 * it causes an unnecessary transport unregister/register.
+                *
+                * Nodes marked for ADISC will move to MAPPED or UNMAPPED state
+                * after issuing ADISC.
                 */
                if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
-                       if (ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+                       if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
+                           !(ndlp->nlp_flag & NLP_NPR_ADISC))
                                lpfc_nlp_set_state(vport, ndlp,
                                                   NLP_STE_MAPPED_NODE);
                }
@@ -863,6 +867,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
        }
 out:
+       /* Unregister from backend, could have been skipped due to ADISC */
+       lpfc_nlp_unreg_node(vport, ndlp);
+
        ndlp->nlp_prev_state = ndlp->nlp_state;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 
@@ -1677,9 +1684,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
                spin_unlock_irq(&ndlp->lock);
                ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
 
-               memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
-               memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
-
                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                lpfc_unreg_rpi(vport, ndlp);
@@ -2597,13 +2601,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
                              void *arg,
                              uint32_t evt)
 {
+       lpfc_disc_set_adisc(vport, ndlp);
+
        ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        spin_lock_irq(&ndlp->lock);
        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
        ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
        spin_unlock_irq(&ndlp->lock);
-       lpfc_disc_set_adisc(vport, ndlp);
        return ndlp->nlp_state;
 }
 
@@ -2645,14 +2650,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 
        if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-               if (ndlp->nlp_flag & NLP_NPR_ADISC) {
-                       spin_lock_irq(&ndlp->lock);
-                       ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-                       ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-                       spin_unlock_irq(&ndlp->lock);
-                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
-                       lpfc_issue_els_adisc(vport, ndlp, 0);
-               } else {
+               /*
+                * ADISC nodes will be handled in the regular discovery path
+                * after receiving a response from the NS.
+                *
+                * For other nodes, send PLOGI to trigger an implicit LOGO.
+                */
+               if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
                        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
@@ -2685,12 +2689,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         */
        if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
-               if (ndlp->nlp_flag & NLP_NPR_ADISC) {
-                       ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-                       ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
-                       lpfc_issue_els_adisc(vport, ndlp, 0);
-               } else {
+               /*
+                * ADISC nodes will be handled in the regular discovery path
+                * after receiving a response from the NS.
+                *
+                * For other nodes, send PLOGI to trigger an implicit LOGO.
+                */
+               if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
                        lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
index bcc804c..73a3568 100644
@@ -216,8 +216,8 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
        /* The register rebind might have occurred before the delete
         * downcall.  Guard against this race.
         */
-       if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
-               ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);
+       if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
+               ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);
 
        spin_unlock_irq(&ndlp->lock);
 
@@ -931,6 +931,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        uint32_t code, status, idx;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;
+       uint32_t lat;
+       bool call_done = false;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        int cpu;
 #endif
@@ -1135,10 +1137,21 @@ out_err:
                freqpriv = nCmd->private;
                freqpriv->nvme_buf = NULL;
                lpfc_ncmd->nvmeCmd = NULL;
-               spin_unlock(&lpfc_ncmd->buf_lock);
+               call_done = true;
+       }
+       spin_unlock(&lpfc_ncmd->buf_lock);
+
+       /* Check if IO qualified for CMF */
+       if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+           nCmd->io_dir == NVMEFC_FCP_READ &&
+           nCmd->payload_length) {
+               /* Used when calculating average latency */
+               lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
+               lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
+       }
+
+       if (call_done)
                nCmd->done(nCmd);
-       } else
-               spin_unlock(&lpfc_ncmd->buf_lock);
 
        /* Call release with XB=1 to queue the IO into the abort list. */
        lpfc_release_nvme_buf(phba, lpfc_ncmd);
@@ -1212,6 +1225,10 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                        /* Word 5 */
                        wqe->fcp_iread.rsrvd5 = 0;
 
+                       /* For a CMF Managed port, iod must be zeroed */
+                       if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+                               bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+                                      LPFC_WQE_IOD_NONE);
                        cstat->input_requests++;
                }
        } else {
@@ -1562,6 +1579,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                        expedite = 1;
        }
 
+       /* Check if IO qualifies for CMF */
+       if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+           pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
+           pnvme_fcreq->payload_length) {
+               ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
+               if (ret) {
+                       ret = -EBUSY;
+                       goto out_fail;
+               }
+               /* Get start time for IO latency */
+               start = ktime_get_ns();
+       }
+
        /* The node is shared with FCP IO, make sure the IO pending count does
         * not exceed the programmed depth.
         */
@@ -1576,7 +1606,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                                         ndlp->cmd_qdepth);
                        atomic_inc(&lport->xmt_fcp_qdepth);
                        ret = -EBUSY;
-                       goto out_fail;
+                       goto out_fail1;
                }
        }
 
@@ -1596,7 +1626,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                                 "idx %d DID %x\n",
                                 lpfc_queue_info->index, ndlp->nlp_DID);
                ret = -EBUSY;
-               goto out_fail;
+               goto out_fail1;
        }
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (start) {
@@ -1606,6 +1636,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
                lpfc_ncmd->ts_cmd_start = 0;
        }
 #endif
+       lpfc_ncmd->rx_cmd_start = start;
 
        /*
         * Store the data needed by the driver to issue, abort, and complete
@@ -1687,6 +1718,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        } else
                cstat->control_requests--;
        lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ out_fail1:
+       lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
+                            pnvme_fcreq->payload_length, NULL);
  out_fail:
        return ret;
 }
@@ -2324,7 +2358,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                 * race that leaves the WAIT flag set.
                 */
                spin_lock_irq(&ndlp->lock);
-               ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+               ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
                ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
                spin_unlock_irq(&ndlp->lock);
                rport = remote_port->private;
@@ -2336,7 +2370,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                         */
                        spin_lock_irq(&ndlp->lock);
                        ndlp->nrport = NULL;
-                       ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
+                       ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
                        spin_unlock_irq(&ndlp->lock);
                        rport->ndlp = NULL;
                        rport->remoteport = NULL;
@@ -2488,7 +2522,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                 * The transport will update it.
                 */
                spin_lock_irq(&vport->phba->hbalock);
-               ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
+               ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
                spin_unlock_irq(&vport->phba->hbalock);
 
                /* Don't let the host nvme transport keep sending keep-alives
index 69a5a84..cc54ffb 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
 #define LPFC_NVME_FB_SHIFT             9
 #define LPFC_NVME_MAX_FB               (1 << 20)       /* 1M */
 
-#define LPFC_MAX_NVME_INFO_TMP_LEN     100
-#define LPFC_NVME_INFO_MORE_STR                "\nCould be more info...\n"
-
-#define lpfc_ndlp_get_nrport(ndlp)                                     \
-       ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG))  \
+#define lpfc_ndlp_get_nrport(ndlp)                             \
+       ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\
        ? NULL : ndlp->nrport)
 
 struct lpfc_nvme_qhandle {
index f2d9a35..6e3dd0b 100644
@@ -1797,19 +1797,22 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;
 
-               spin_lock(&ctxp->ctxlock);
+               spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
+                                      iflag);
+
+               spin_lock_irqsave(&ctxp->ctxlock, iflag);
                /* Check if we already received a free context call
                 * and we have completed processing an abort situation.
                 */
                if (ctxp->flag & LPFC_NVME_CTX_RLS &&
                    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
+                       spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                        list_del_init(&ctxp->list);
+                       spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                        released = true;
                }
                ctxp->flag &= ~LPFC_NVME_XBUSY;
-               spin_unlock(&ctxp->ctxlock);
-               spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
-                                      iflag);
+               spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 
                rrq_empty = list_empty(&phba->active_rrq_list);
                ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
index 1b248c2..0fde1e8 100644
@@ -96,30 +96,6 @@ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
                                    struct lpfc_vmid *vmid);
 
-static inline unsigned
-lpfc_cmd_blksize(struct scsi_cmnd *sc)
-{
-       return sc->device->sector_size;
-}
-
-#define LPFC_CHECK_PROTECT_GUARD       1
-#define LPFC_CHECK_PROTECT_REF         2
-static inline unsigned
-lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
-{
-       return 1;
-}
-
-static inline unsigned
-lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
-{
-       if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
-               return 0;
-       if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
-               return 1;
-       return 0;
-}
-
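/* Illustration, not from the patch text itself: the private wrappers
 * deleted above correspond one-for-one to the midlayer helpers the hunks
 * below switch to:
 *
 *	lpfc_cmd_blksize(sc)              ->  scsi_prot_interval(sc)
 *	t10_pi_ref_tag(sc->request)       ->  scsi_prot_ref_tag(sc)
 *	lpfc_cmd_protect(sc, GUARD/REF)   ->  sc->prot_flags & SCSI_PROT_{GUARD,REF}_CHECK
 *	lpfc_cmd_guard_csum(sc)           ->  sc->prot_flags & SCSI_PROT_IP_CHECKSUM
 *	blk_rq_sectors(sc->request)       ->  scsi_logical_block_count(sc)
 *
 * The last pair is more than a rename: blk_rq_sectors() counts 512-byte
 * sectors, while scsi_logical_block_count() counts logical blocks.
 */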
 /**
  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
  * @phba: Pointer to HBA object.
@@ -683,7 +659,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 
        cpu = raw_smp_processor_id();
        if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
-               tag = blk_mq_unique_tag(cmnd->request);
+               tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
                idx = blk_mq_unique_tag_to_hwq(tag);
        } else {
                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
@@ -1046,13 +1022,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                return 0;
 
        sgpe = scsi_prot_sglist(sc);
-       lba = t10_pi_ref_tag(sc->request);
+       lba = scsi_prot_ref_tag(sc);
        if (lba == LPFC_INVALID_REFTAG)
                return 0;
 
        /* First check if we need to match the LBA */
        if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
-               blksize = lpfc_cmd_blksize(sc);
+               blksize = scsi_prot_interval(sc);
                numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
 
                /* Make sure we have the right LBA if one is specified */
@@ -1441,7 +1417,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 {
        uint8_t ret = 0;
 
-       if (lpfc_cmd_guard_csum(sc)) {
+       if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
@@ -1521,7 +1497,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 {
        uint8_t ret = 0;
 
-       if (lpfc_cmd_guard_csum(sc)) {
+       if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
@@ -1629,7 +1605,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                goto out;
 
        /* extract some info from the scsi command for pde*/
-       reftag = t10_pi_ref_tag(sc->request);
+       reftag = scsi_prot_ref_tag(sc);
        if (reftag == LPFC_INVALID_REFTAG)
                goto out;
 
@@ -1668,12 +1644,12 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
         * protection data is automatically generated, not checked.
         */
        if (datadir == DMA_FROM_DEVICE) {
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+               if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                        bf_set(pde6_ce, pde6, checking);
                else
                        bf_set(pde6_ce, pde6, 0);
 
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+               if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                        bf_set(pde6_re, pde6, checking);
                else
                        bf_set(pde6_re, pde6, 0);
@@ -1791,8 +1767,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                goto out;
 
        /* extract some info from the scsi command */
-       blksize = lpfc_cmd_blksize(sc);
-       reftag = t10_pi_ref_tag(sc->request);
+       blksize = scsi_prot_interval(sc);
+       reftag = scsi_prot_ref_tag(sc);
        if (reftag == LPFC_INVALID_REFTAG)
                goto out;
 
@@ -1832,12 +1808,12 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                bf_set(pde6_optx, pde6, txop);
                bf_set(pde6_oprx, pde6, rxop);
 
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+               if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                        bf_set(pde6_ce, pde6, checking);
                else
                        bf_set(pde6_ce, pde6, 0);
 
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+               if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                        bf_set(pde6_re, pde6, checking);
                else
                        bf_set(pde6_re, pde6, 0);
@@ -2023,7 +1999,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                goto out;
 
        /* extract some info from the scsi command for pde*/
-       reftag = t10_pi_ref_tag(sc->request);
+       reftag = scsi_prot_ref_tag(sc);
        if (reftag == LPFC_INVALID_REFTAG)
                goto out;
 
@@ -2051,12 +2027,12 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
         * protection data is automatically generated, not checked.
         */
        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+               if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                        bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
                else
                        bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
 
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+               if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                        bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
                else
                        bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2223,8 +2199,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                goto out;
 
        /* extract some info from the scsi command */
-       blksize = lpfc_cmd_blksize(sc);
-       reftag = t10_pi_ref_tag(sc->request);
+       blksize = scsi_prot_interval(sc);
+       reftag = scsi_prot_ref_tag(sc);
        if (reftag == LPFC_INVALID_REFTAG)
                goto out;
 
@@ -2281,9 +2257,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                diseed->ref_tag = cpu_to_le32(reftag);
                diseed->ref_tag_tran = diseed->ref_tag;
 
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
+               if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
                        bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-
                } else {
                        bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
                        /*
@@ -2300,7 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                }
 
 
-               if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+               if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                        bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
                else
                        bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2557,7 +2532,7 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
         * DIF (trailer) attached to it. Must adjust FCP data length
         * to account for the protection data.
         */
-       fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+       fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
 
        return fcpdl;
 }
@@ -2811,14 +2786,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
                 * data length is a multiple of the blksize.
                 */
                sgde = scsi_sglist(cmd);
-               blksize = lpfc_cmd_blksize(cmd);
+               blksize = scsi_prot_interval(cmd);
                data_src = (uint8_t *)sg_virt(sgde);
                data_len = sgde->length;
                if ((data_len & (blksize - 1)) == 0)
                        chk_guard = 1;
 
                src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-               start_ref_tag = t10_pi_ref_tag(cmd->request);
+               start_ref_tag = scsi_prot_ref_tag(cmd);
                if (start_ref_tag == LPFC_INVALID_REFTAG)
                        goto out;
                start_app_tag = src->app_tag;
@@ -2839,7 +2814,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
                                /* First Guard Tag checking */
                                if (chk_guard) {
                                        guard_tag = src->guard_tag;
-                                       if (lpfc_cmd_guard_csum(cmd))
+                                       if (cmd->prot_flags
+                                           & SCSI_PROT_IP_CHECKSUM)
                                                sum = lpfc_bg_csum(data_src,
                                                                   blksize);
                                        else
@@ -2910,7 +2886,7 @@ out:
                phba->bg_guard_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
                                "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
-                               t10_pi_ref_tag(cmd->request),
+                               scsi_prot_ref_tag(cmd),
                                sum, guard_tag);
 
        } else if (err_type == BGS_REFTAG_ERR_MASK) {
@@ -2920,7 +2896,7 @@ out:
                phba->bg_reftag_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
                                "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
-                               t10_pi_ref_tag(cmd->request),
+                               scsi_prot_ref_tag(cmd),
                                ref_tag, start_ref_tag);
 
        } else if (err_type == BGS_APPTAG_ERR_MASK) {
@@ -2930,7 +2906,7 @@ out:
                phba->bg_apptag_err_cnt++;
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
                                "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
-                               t10_pi_ref_tag(cmd->request),
+                               scsi_prot_ref_tag(cmd),
                                app_tag, start_app_tag);
        }
 }
@@ -2992,7 +2968,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                " 0x%x lba 0x%llx blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
                                (unsigned long long)scsi_get_lba(cmd),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_logical_block_count(cmd), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3007,7 +2983,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                " 0x%x lba 0x%llx blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
                                (unsigned long long)scsi_get_lba(cmd),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_logical_block_count(cmd), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3022,7 +2998,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                " 0x%x lba 0x%llx blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
                                (unsigned long long)scsi_get_lba(cmd),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_logical_block_count(cmd), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3066,9 +3042,9 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                " 0x%x lba 0x%llx blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
                                (unsigned long long)scsi_get_lba(cmd),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_logical_block_count(cmd), bgstat, bghm);
 
-               /* Calcuate what type of error it was */
+               /* Calculate what type of error it was */
                lpfc_calc_bg_err(phba, lpfc_cmd);
        }
        return ret;
@@ -3103,8 +3079,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                "9072 BLKGRD: Invalid BG Profile in cmd "
                                "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               t10_pi_ref_tag(cmd->request),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_prot_ref_tag(cmd),
+                               scsi_logical_block_count(cmd), bgstat, bghm);
                ret = (-1);
                goto out;
        }
@@ -3115,8 +3091,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                "9073 BLKGRD: Invalid BG PDIF Block in cmd "
                                "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               t10_pi_ref_tag(cmd->request),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_prot_ref_tag(cmd),
+                               scsi_logical_block_count(cmd), bgstat, bghm);
                ret = (-1);
                goto out;
        }
@@ -3131,8 +3107,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                "9055 BLKGRD: Guard Tag error in cmd "
                                "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               t10_pi_ref_tag(cmd->request),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_prot_ref_tag(cmd),
+                               scsi_logical_block_count(cmd), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3146,8 +3122,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                "9056 BLKGRD: Ref Tag error in cmd "
                                "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               t10_pi_ref_tag(cmd->request),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_prot_ref_tag(cmd),
+                               scsi_logical_block_count(cmd), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3161,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                "9061 BLKGRD: App Tag error in cmd "
                                "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               t10_pi_ref_tag(cmd->request),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_prot_ref_tag(cmd),
+                               scsi_logical_block_count(cmd), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3205,10 +3181,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
                                "9057 BLKGRD: Unknown error in cmd "
                                "0x%x reftag 0x%x blk cnt 0x%x "
                                "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
-                               t10_pi_ref_tag(cmd->request),
-                               blk_rq_sectors(cmd->request), bgstat, bghm);
+                               scsi_prot_ref_tag(cmd),
+                               scsi_logical_block_count(cmd), bgstat, bghm);
 
-               /* Calcuate what type of error it was */
+               /* Calculate what type of error it was */
                lpfc_calc_bg_err(phba, lpfc_cmd);
        }
 out:
@@ -3853,6 +3829,143 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
                                psb->pCmd->sc_data_direction);
 }
 
+/**
+ * lpfc_unblock_requests - allow further commands to be queued.
+ * @phba: pointer to phba object
+ *
+ * For a single vport, call scsi_unblock_requests on the physical port.
+ * For multiple vports, call scsi_unblock_requests for each vport.
+ */
+void
+lpfc_unblock_requests(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       struct Scsi_Host  *shost;
+       int i;
+
+       if (phba->sli_rev == LPFC_SLI_REV4 &&
+           !phba->sli4_hba.max_cfg_param.vpi_used) {
+               shost = lpfc_shost_from_vport(phba->pport);
+               scsi_unblock_requests(shost);
+               return;
+       }
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       scsi_unblock_requests(shost);
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_block_requests - prevent further commands from being queued.
+ * @phba: pointer to phba object
+ *
+ * For a single vport, call scsi_block_requests on the physical port.
+ * For multiple vports, call scsi_block_requests for each vport.
+ */
+void
+lpfc_block_requests(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       struct Scsi_Host  *shost;
+       int i;
+
+       if (atomic_read(&phba->cmf_stop_io))
+               return;
+
+       if (phba->sli_rev == LPFC_SLI_REV4 &&
+           !phba->sli4_hba.max_cfg_param.vpi_used) {
+               shost = lpfc_shost_from_vport(phba->pport);
+               scsi_block_requests(shost);
+               return;
+       }
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       shost = lpfc_shost_from_vport(vports[i]);
+                       scsi_block_requests(shost);
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
+ * @phba: The HBA for which this call is being executed.
+ * @time: The latency of the IO that completed (in ns)
+ * @size: The size of the IO that completed
+ * @shost: SCSI host the IO completed on (NULL for an NVME IO)
+ *
+ * The routine adjusts the various Burst and Bandwidth counters used in
+ * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
+ * that means the IO was never issued to the HBA, so this routine is
+ * just being called to clean up the counters from a previous
+ * lpfc_update_cmf_cmd call.
+ */
+int
+lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
+                    uint64_t time, uint32_t size, struct Scsi_Host *shost)
+{
+       struct lpfc_cgn_stat *cgs;
+
+       if (time != LPFC_CGN_NOT_SENT) {
+               /* lat is ns coming in, save latency in us */
+               if (time < 1000)
+                       time = 1;
+               else
+                       time = div_u64(time + 500, 1000); /* round it */
+
+               cgs = this_cpu_ptr(phba->cmf_stat);
+               atomic64_add(size, &cgs->rcv_bytes);
+               atomic64_add(time, &cgs->rx_latency);
+               atomic_inc(&cgs->rx_io_cnt);
+       }
+       return 0;
+}
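/* Worked example for the conversion above (sample values, arithmetic as
 * in the code): completed IOs are accounted in whole microseconds,
 * rounded to nearest, never zero:
 *
 *	  400 ns  ->  1 us   (clamped minimum for sub-microsecond IOs)
 *	1,499 ns  ->  1 us   ((1499 + 500) / 1000)
 *	1,500 ns  ->  2 us   ((1500 + 500) / 1000)
 */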
+
+/**
+ * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
+ * @phba: The HBA for which this call is being executed.
+ * @size: The size of the IO that will be issued
+ *
+ * The routine adjusts the various Burst and Bandwidth counters used in
+ * Congestion management and E2E.
+ */
+int
+lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
+{
+       uint64_t total;
+       struct lpfc_cgn_stat *cgs;
+       int cpu;
+
+       /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
+       if (phba->cmf_active_mode == LPFC_CFG_MANAGED) {
+               total = 0;
+               for_each_present_cpu(cpu) {
+                       cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+                       total += atomic64_read(&cgs->total_bytes);
+               }
+               if (total >= phba->cmf_max_bytes_per_interval) {
+                       if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
+                               lpfc_block_requests(phba);
+                               phba->cmf_last_ts =
+                                       lpfc_calc_cmf_latency(phba);
+                       }
+                       atomic_inc(&phba->cmf_busy);
+                       return -EBUSY;
+               }
+               if (size > atomic_read(&phba->rx_max_read_cnt))
+                       atomic_set(&phba->rx_max_read_cnt, size);
+       }
+
+       cgs = this_cpu_ptr(phba->cmf_stat);
+       atomic64_add(size, &cgs->total_bytes);
+       return 0;
+}
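/* Illustration, not from the patch: the pairing both the SCSI and NVMe
 * submit paths in this series use. lpfc_update_cmf_cmd() charges the read
 * size up front; if the IO never reaches the HBA the paths call
 * lpfc_update_cmf_cmpl() with LPFC_CGN_NOT_SENT (no latency sample is
 * recorded), otherwise they pass the measured completion latency.
 * Hypothetical caller:
 */
static int lpfc_cmf_charge_example(struct lpfc_hba *phba, uint32_t len)
{
	uint64_t start;
	int rc;

	if (lpfc_update_cmf_cmd(phba, len))
		return -EBUSY;		/* over budget: busy the IO */

	start = ktime_get_ns();
	rc = 0;				/* stand-in for the actual submit */
	if (rc) {
		/* never sent: completion hook skips the latency sample */
		lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, len, NULL);
		return rc;
	}

	/* on completion: credit bytes and the measured latency */
	return lpfc_update_cmf_cmpl(phba, ktime_get_ns() - start, len, NULL);
}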
+
 /**
  * lpfc_handle_fcp_err - FCP response handler
  * @vport: The virtual port for which this call is being executed.
@@ -4063,6 +4176,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        u32 logit = LOG_FCP;
        u32 status, idx;
        unsigned long iflags = 0;
+       u32 lat;
        u8 wait_xb_clr = 0;
 
        /* Sanity check on return of outstanding command */
@@ -4351,10 +4465,21 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                lpfc_io_ktime(phba, lpfc_cmd);
        }
 #endif
+       if (likely(!wait_xb_clr))
+               lpfc_cmd->pCmd = NULL;
+       spin_unlock(&lpfc_cmd->buf_lock);
+
+       /* Check if IO qualified for CMF */
+       if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+           cmd->sc_data_direction == DMA_FROM_DEVICE &&
+           (scsi_sg_count(cmd))) {
+               /* Used when calculating average latency */
+               lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
+               lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
+       }
+
        if (wait_xb_clr)
                goto out;
-       lpfc_cmd->pCmd = NULL;
-       spin_unlock(&lpfc_cmd->buf_lock);
 
        /* The sdev is not guaranteed to be valid post scsi_done upcall. */
        cmd->scsi_done(cmd);
@@ -4367,8 +4492,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
        if (lpfc_cmd->waitq)
                wake_up(lpfc_cmd->waitq);
-out:
        spin_unlock(&lpfc_cmd->buf_lock);
+out:
        lpfc_release_scsi_buf(phba, lpfc_cmd);
 }
 
@@ -4775,6 +4900,11 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        if (hdwq)
                                hdwq->scsi_cstat.input_requests++;
+
+                       /* For a CMF Managed port, iod must be zeroed */
+                       if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+                               bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+                                      LPFC_WQE_IOD_NONE);
                }
        } else {
                /* From the icmnd template, initialize words 4 - 11 */
@@ -5029,12 +5159,8 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
                }
 
                /* Check for valid Emulex Device ID */
-               switch (ptr->device) {
-               case PCI_DEVICE_ID_LANCER_FC:
-               case PCI_DEVICE_ID_LANCER_G6_FC:
-               case PCI_DEVICE_ID_LANCER_G7_FC:
-                       break;
-               default:
+               if (phba->sli_rev != LPFC_SLI_REV4 ||
+                   phba->hba_flag & HBA_FCOE_MODE) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "8347 Incapable PCI reset device: "
                                        "0x%04x\n", ptr->device);
@@ -5423,13 +5549,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
  */
 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
 {
-       char *uuid = NULL;
+       struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
 
-       if (cmd->request) {
-               if (cmd->request->bio)
-                       uuid = blkcg_get_fc_appid(cmd->request->bio);
-       }
-       return uuid;
+       return bio ? blkcg_get_fc_appid(bio) : NULL;
 }
 
 /**
@@ -5462,7 +5584,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        if (phba->ktime_on)
                start = ktime_get_ns();
 #endif
-
+       start = ktime_get_ns();
        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 
        /* sanity check on references */
@@ -5493,7 +5615,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
         * transport is still transitioning.
         */
        if (!ndlp)
-               goto out_tgt_busy;
+               goto out_tgt_busy1;
+
+       /* Check if IO qualifies for CMF */
+       if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+           cmnd->sc_data_direction == DMA_FROM_DEVICE &&
+           (scsi_sg_count(cmnd))) {
+           /* Latency start time saved in rx_cmd_start later in this routine */
+               err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
+               if (err)
+                       goto out_tgt_busy1;
+       }
+
        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
                if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
@@ -5521,7 +5654,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                                         ndlp->nlp_portname.u.wwn[5],
                                         ndlp->nlp_portname.u.wwn[6],
                                         ndlp->nlp_portname.u.wwn[7]);
-                       goto out_tgt_busy;
+                       goto out_tgt_busy2;
                }
        }
 
@@ -5534,6 +5667,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                                 "IO busied\n");
                goto out_host_busy;
        }
+       lpfc_cmd->rx_cmd_start = start;
 
        /*
         * Store the midlayer's command structure for the completion phase
@@ -5557,8 +5691,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                                         "reftag x%x cnt %u pt %x\n",
                                         dif_op_str[scsi_get_prot_op(cmnd)],
                                         cmnd->cmnd[0],
-                                        t10_pi_ref_tag(cmnd->request),
-                                        blk_rq_sectors(cmnd->request),
+                                        scsi_prot_ref_tag(cmnd),
+                                        scsi_logical_block_count(cmnd),
                                         (cmnd->cmnd[1]>>5));
                }
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5569,8 +5703,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
                                         "x%x reftag x%x cnt %u pt %x\n",
                                         cmnd->cmnd[0],
-                                        t10_pi_ref_tag(cmnd->request),
-                                        blk_rq_sectors(cmnd->request),
+                                        scsi_prot_ref_tag(cmnd),
+                                        scsi_logical_block_count(cmnd),
                                         (cmnd->cmnd[1]>>5));
                }
                err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5641,8 +5775,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                                   bf_get(wqe_tmo,
                                   &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
                                   lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
-                                  (uint32_t)
-                                  (cmnd->request->timeout / 1000));
+                                  (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
 
                goto out_host_busy_free_buf;
        }
@@ -5678,13 +5811,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
  out_host_busy_release_buf:
        lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
+       lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+                            shost);
        return SCSI_MLQUEUE_HOST_BUSY;
 
- out_tgt_busy:
+ out_tgt_busy2:
+       lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+                            shost);
+ out_tgt_busy1:
        return SCSI_MLQUEUE_TARGET_BUSY;
 
  out_fail_command_release_buf:
        lpfc_release_scsi_buf(phba, lpfc_cmd);
+       lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+                            shost);
 
  out_fail_command:
        cmnd->scsi_done(cmnd);
@@ -6273,6 +6413,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_scsi_event_header scsi_event;
        int status;
        u32 logit = LOG_FCP;
+       u32 dev_loss_tmo = vport->cfg_devloss_tmo;
        unsigned long flags;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
@@ -6314,39 +6455,44 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 
        status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
                                        FCP_TARGET_RESET);
-       if (status != SUCCESS)
-               logit =  LOG_TRACE_EVENT;
-       spin_lock_irqsave(&pnode->lock, flags);
-       if (status != SUCCESS &&
-           (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
-            !pnode->logo_waitq) {
-               pnode->logo_waitq = &waitq;
-               pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-               pnode->nlp_flag |= NLP_ISSUE_LOGO;
-               pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
-               spin_unlock_irqrestore(&pnode->lock, flags);
-               lpfc_unreg_rpi(vport, pnode);
-               wait_event_timeout(waitq,
-                                  (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
-                                   msecs_to_jiffies(vport->cfg_devloss_tmo *
-                                   1000));
-
-               if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
-                       lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-                               "0725 SCSI layer TGTRST failed & LOGO TMO "
-                               " (%d, %llu) return x%x\n", tgt_id,
-                                lun_id, status);
-                       spin_lock_irqsave(&pnode->lock, flags);
-                       pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+       if (status != SUCCESS) {
+               logit = LOG_TRACE_EVENT;
+
+               /* Issue LOGO, if no LOGO is outstanding */
+               spin_lock_irqsave(&pnode->lock, flags);
+               if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) &&
+                   !pnode->logo_waitq) {
+                       pnode->logo_waitq = &waitq;
+                       pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+                       pnode->nlp_flag |= NLP_ISSUE_LOGO;
+                       pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
+                       spin_unlock_irqrestore(&pnode->lock, flags);
+                       lpfc_unreg_rpi(vport, pnode);
+                       wait_event_timeout(waitq,
+                                          (!(pnode->upcall_flags &
+                                             NLP_WAIT_FOR_LOGO)),
+                                          msecs_to_jiffies(dev_loss_tmo *
+                                                           1000));
+
+                       if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
+                               lpfc_printf_vlog(vport, KERN_ERR, logit,
+                                                "0725 SCSI layer TGTRST "
+                                                "failed & LOGO TMO (%d, %llu) "
+                                                "return x%x\n",
+                                                tgt_id, lun_id, status);
+                               spin_lock_irqsave(&pnode->lock, flags);
+                               pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+                       } else {
+                               spin_lock_irqsave(&pnode->lock, flags);
+                       }
+                       pnode->logo_waitq = NULL;
+                       spin_unlock_irqrestore(&pnode->lock, flags);
+                       status = SUCCESS;
+
                } else {
-                       spin_lock_irqsave(&pnode->lock, flags);
+                       spin_unlock_irqrestore(&pnode->lock, flags);
+                       status = FAILED;
                }
-               pnode->logo_waitq = NULL;
-               spin_unlock_irqrestore(&pnode->lock, flags);
-               status = SUCCESS;
-       } else {
-               status = FAILED;
-               spin_unlock_irqrestore(&pnode->lock, flags);
        }
 
        lpfc_printf_vlog(vport, KERN_ERR, logit,
index f76667b..3836d7f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -142,6 +142,10 @@ struct lpfc_scsicmd_bkt {
 #define FC_PORTSPEED_128GBIT   0x2000
 #endif
 
+#ifndef FC_PORTSPEED_256GBIT
+#define FC_PORTSPEED_256GBIT   0x4000
+#endif
+
 #define TXRDY_PAYLOAD_LEN      12
 
 /* For sysfs/debugfs tmp string max len */
index f530d8f..ffd8a14 100644 (file)
@@ -1439,7 +1439,7 @@ out:
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
-       iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
+       iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
@@ -1768,6 +1768,254 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
        return cmd_iocb;
 }
 
+/**
+ * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @cmf_cmpl: Pointer to completed WCQE.
+ *
+ * This routine will inform the driver of any BW adjustments we need
+ * to make. These changes will be picked up during the next CMF
+ * timer interrupt. In addition, any BW changes will be logged
+ * with LOG_CGN_MGMT.
+ **/
+static void
+lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+                  struct lpfc_wcqe_complete *cmf_cmpl)
+{
+       union lpfc_wqe128 *wqe;
+       uint32_t status, info;
+       uint64_t bw, bwdif, slop;
+       uint64_t pcent, bwpcent;
+       int asig, afpin, sigcnt, fpincnt;
+       int wsigmax, wfpinmax, cg, tdp;
+       char *s;
+
+       /* First check for error */
+       status = bf_get(lpfc_wcqe_c_status, cmf_cmpl);
+       if (status) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6211 CMF_SYNC_WQE Error "
+                               "req_tag x%x status x%x hwstatus x%x "
+                               "tdatap x%x parm x%x\n",
+                               bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl),
+                               bf_get(lpfc_wcqe_c_status, cmf_cmpl),
+                               bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl),
+                               cmf_cmpl->total_data_placed,
+                               cmf_cmpl->parameter);
+               goto out;
+       }
+
+       /* Gather congestion information on a successful cmpl */
+       info = cmf_cmpl->parameter;
+       phba->cmf_active_info = info;
+
+       /* See if firmware info count is valid or has changed */
+       if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
+               info = 0;
+       else
+               phba->cmf_info_per_interval = info;
+
+       tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl);
+       cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl);
+
+       /* Get BW requirement from firmware */
+       bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
+       if (!bw) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6212 CMF_SYNC_WQE x%x: NULL bw\n",
+                               bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl));
+               goto out;
+       }
+
+       /* Gather information needed for logging if a BW change is required */
+       wqe = &cmdiocb->wqe;
+       asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
+       afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
+       fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
+       sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
+       if (phba->cmf_max_bytes_per_interval != bw ||
+           (asig || afpin || sigcnt || fpincnt)) {
+               /* Are we increasing or decreasing BW */
+               if (phba->cmf_max_bytes_per_interval <  bw) {
+                       bwdif = bw - phba->cmf_max_bytes_per_interval;
+                       s = "Increase";
+               } else {
+                       bwdif = phba->cmf_max_bytes_per_interval - bw;
+                       s = "Decrease";
+               }
+
+               /* What is the change percentage */
+               slop = div_u64(phba->cmf_link_byte_count, 200); /* For rounding */
+               pcent = div64_u64(bwdif * 100 + slop,
+                                 phba->cmf_link_byte_count);
+               bwpcent = div64_u64(bw * 100 + slop,
+                                   phba->cmf_link_byte_count);
+               if (asig) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                       "6237 BW Threshold %lld%% (%lld): "
+                                       "%lld%% %s: Signal Alarm: cg:%d "
+                                       "Info:%u\n",
+                                       bwpcent, bw, pcent, s, cg,
+                                       phba->cmf_active_info);
+               } else if (afpin) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                       "6238 BW Threshold %lld%% (%lld): "
+                                       "%lld%% %s: FPIN Alarm: cg:%d "
+                                       "Info:%u\n",
+                                       bwpcent, bw, pcent, s, cg,
+                                       phba->cmf_active_info);
+               } else if (sigcnt) {
+                       wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                       "6239 BW Threshold %lld%% (%lld): "
+                                       "%lld%% %s: Signal Warning: "
+                                       "Cnt %d Max %d: cg:%d Info:%u\n",
+                                       bwpcent, bw, pcent, s, sigcnt,
+                                       wsigmax, cg, phba->cmf_active_info);
+               } else if (fpincnt) {
+                       wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                       "6240 BW Threshold %lld%% (%lld): "
+                                       "%lld%% %s: FPIN Warning: "
+                                       "Cnt %d Max %d: cg:%d Info:%u\n",
+                                       bwpcent, bw, pcent, s, fpincnt,
+                                       wfpinmax, cg, phba->cmf_active_info);
+               } else {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                                       "6241 BW Threshold %lld%% (%lld): "
+                                       "CMF %lld%% %s: cg:%d Info:%u\n",
+                                       bwpcent, bw, pcent, s, cg,
+                                       phba->cmf_active_info);
+               }
+       } else if (info) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6246 Info Threshold %u\n", info);
+       }
+
+       /* Save BW change to be picked up during next timer interrupt */
+       phba->cmf_last_sync_bw = bw;
+out:
+       lpfc_sli_release_iocbq(phba, cmdiocb);
+}
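
A note on the slop arithmetic above: adding half of one percent of cmf_link_byte_count before the division makes the integer percentage round to nearest instead of truncating. The same idea in isolation — pct_of() is a hypothetical name for this sketch, not a driver symbol:

    /* Round part/total to the nearest whole percent with 64-bit math */
    static u64 pct_of(u64 part, u64 total)
    {
            u64 slop = div_u64(total, 200); /* half of one percent */

            return div64_u64(part * 100 + slop, total);
    }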
+
+/**
+ * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
+ * @phba: Pointer to HBA context object.
+ * @ms:   ms to set in WQE interval, 0 means use init op
+ * @total: Total rcv bytes for this interval
+ *
+ * This routine is called every CMF timer interrupt. Its purpose is
+ * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
+ * that may indicate we have congestion (FPINs or Signals). Upon
+ * completion, the firmware will indicate any BW restrictions the
+ * driver may need to take.
+ **/
+int
+lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
+{
+       union lpfc_wqe128 *wqe;
+       struct lpfc_iocbq *sync_buf;
+       unsigned long iflags;
+       u32 ret_val;
+       u32 atot, wtot, max;
+
+       /* First address any alarm / warning activity */
+       atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
+       wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
+
+       /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
+       if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
+           phba->link_state == LPFC_LINK_DOWN)
+               return 0;
+
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       sync_buf = __lpfc_sli_get_iocbq(phba);
+       if (!sync_buf) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+                               "6213 No available WQEs for CMF_SYNC_WQE\n");
+               ret_val = ENOMEM;
+               goto out_unlock;
+       }
+
+       wqe = &sync_buf->wqe;
+
+       /* WQEs are reused.  Clear stale data and set key fields to zero */
+       memset(wqe, 0, sizeof(*wqe));
+
+       /* If this is the very first CMF_SYNC_WQE, issue an init operation */
+       if (!ms) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6441 CMF Init %d - CMF_SYNC_WQE\n",
+                               phba->fc_eventTag);
+               bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
+               bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
+               goto initpath;
+       }
+
+       bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
+       bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
+
+       /* Check for alarms / warnings */
+       if (atot) {
+               if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+                       /* We hit a Signal alarm condition */
+                       bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
+               } else {
+                       /* We hit a FPIN alarm condition */
+                       bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
+               }
+       } else if (wtot) {
+               if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
+                   phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+                       /* We hit a Signal warning condition */
+                       max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
+                               lpfc_acqe_cgn_frequency;
+                       bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
+                       bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+               } else {
+                       /* We hit a FPIN warning condition */
+                       bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
+                       bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+               }
+       }
+
+       /* Update total read blocks during previous timer interval */
+       wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
+
+initpath:
+       bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
+       wqe->cmf_sync.event_tag = phba->fc_eventTag;
+       bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
+
+       /* Setup reqtag to match the wqe completion. */
+       bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
+
+       bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+
+       bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
+       bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
+       bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
+
+       sync_buf->vport = phba->pport;
+       sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl;
+       sync_buf->iocb_cmpl = NULL;
+       sync_buf->context1 = NULL;
+       sync_buf->context2 = NULL;
+       sync_buf->context3 = NULL;
+       sync_buf->sli4_xritag = NO_XRI;
+
+       sync_buf->iocb_flag |= LPFC_IO_CMF;
+       ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
+       if (ret_val)
+               lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                               "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
+                               ret_val);
+out_unlock:
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       return ret_val;
+}
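
Worth noting in the routine above is the counter-drain idiom at its top: atomic_xchg() reads and zeroes the per-interval alarm and warning counters in one step, so signals arriving while the WQE is built are credited to the next interval rather than lost. Reduced to its essentials:

    u32 atot, wtot;

    /* Atomically consume this interval's counts; concurrent increments
     * accumulate toward the next interval instead of being dropped.
     */
    atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
    wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);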
+
 /**
  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
  * @phba: Pointer to HBA context object.
@@ -4467,6 +4715,7 @@ lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
        } else
                phba->sli4_hba.intr_enable = 0;
 
+       phba->hba_flag &= ~HBA_SETUP;
        return retval;
 }
 
@@ -4787,6 +5036,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
        phba->link_events = 0;
        phba->pport->fc_myDID = 0;
        phba->pport->fc_prevDID = 0;
+       phba->hba_flag &= ~HBA_SETUP;
 
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~(LPFC_PROCESS_LA);
@@ -5674,16 +5924,20 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
                bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
        phba->sli4_hba.lnk_info.lnk_no =
                bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+       phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
+       phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
 
        memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
        strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
                sizeof(phba->BIOSVersion));
 
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                       "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
+                       "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
+                       "flash_id: x%02x, asic_rev: x%02x\n",
                        phba->sli4_hba.lnk_info.lnk_tp,
                        phba->sli4_hba.lnk_info.lnk_no,
-                       phba->BIOSVersion);
+                       phba->BIOSVersion, phba->sli4_hba.flash_id,
+                       phba->sli4_hba.asic_rev);
 out_free_mboxq:
        if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
                lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -6413,6 +6667,7 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                  uint32_t feature)
 {
        uint32_t len;
+       u32 sig_freq = 0;
 
        len = sizeof(struct lpfc_mbx_set_feature) -
                sizeof(struct lpfc_sli4_cfg_mhdr);
@@ -6435,6 +6690,35 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
                mbox->u.mqe.un.set_feature.param_len = 8;
                break;
+       case LPFC_SET_CGN_SIGNAL:
+               if (phba->cmf_active_mode == LPFC_CFG_OFF)
+                       sig_freq = 0;
+               else
+                       sig_freq = phba->cgn_sig_freq;
+
+               if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
+                       bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
+                              &mbox->u.mqe.un.set_feature, sig_freq);
+                       bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
+                              &mbox->u.mqe.un.set_feature, sig_freq);
+               }
+
+               if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
+                       bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
+                              &mbox->u.mqe.un.set_feature, sig_freq);
+
+               if (phba->cmf_active_mode == LPFC_CFG_OFF ||
+                   phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
+                       sig_freq = 0;
+               else
+                       sig_freq = lpfc_acqe_cgn_frequency;
+
+               bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
+                      &mbox->u.mqe.un.set_feature, sig_freq);
+
+               mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
+               mbox->u.mqe.un.set_feature.param_len = 12;
+               break;
        case LPFC_SET_DUAL_DUMP:
                bf_set(lpfc_mbx_set_feature_dd,
                       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
@@ -6443,8 +6727,22 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
                mbox->u.mqe.un.set_feature.param_len = 4;
                break;
+       case LPFC_SET_ENABLE_MI:
+               mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
+               mbox->u.mqe.un.set_feature.param_len = 4;
+               bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
+                      phba->pport->cfg_lun_queue_depth);
+               bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
+                      phba->sli4_hba.pc_sli4_params.mi_ver);
+               break;
+       case LPFC_SET_ENABLE_CMF:
+               bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
+               mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
+               mbox->u.mqe.un.set_feature.param_len = 4;
+               bf_set(lpfc_mbx_set_feature_cmf,
+                      &mbox->u.mqe.un.set_feature, 1);
+               break;
        }
-
        return;
 }
 
@@ -7365,7 +7663,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
        mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
        mbox->u.mqe.un.set_host_data.param_len =
                                        LPFC_HOST_OS_DRIVER_VERSION_SIZE;
-       snprintf(mbox->u.mqe.un.set_host_data.data,
+       snprintf(mbox->u.mqe.un.set_host_data.un.data,
                 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
                 "Linux %s v"LPFC_DRIVER_VERSION,
                 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
@@ -7433,6 +7731,91 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        return 1;
 }
 
+static void
+lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       struct lpfc_vport *vport = pmb->vport;
+       union lpfc_sli4_cfg_shdr *shdr;
+       u32 shdr_status, shdr_add_status;
+       u32 sig, acqe;
+
+       /* Two outcomes. (1) Set features was successful and EDC negotiation
+        * is done. (2) Mailbox failed, so fall back to FPIN support only.
+        */
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+                               "2516 CGN SET_FEATURE mbox failed with "
+                               "status x%x add_status x%x, mbx status x%x "
+                               "Reset Congestion to FPINs only\n",
+                               shdr_status, shdr_add_status,
+                               pmb->u.mb.mbxStatus);
+               /* If there is a mbox error, move on to RDF */
+               phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+               phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+               goto out;
+       }
+
+       /* Zero out Congestion Signal ACQE counter */
+       phba->cgn_acqe_cnt = 0;
+       atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+       atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+       acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
+                     &pmb->u.mqe.un.set_feature);
+       sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
+                    &pmb->u.mqe.un.set_feature);
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "4620 SET_FEATURES Success: Freq: %ds %dms "
+                       " Reg: x%x x%x\n", acqe, sig,
+                       phba->cgn_reg_signal, phba->cgn_reg_fpin);
+out:
+       mempool_free(pmb, phba->mbox_mem_pool);
+
+       /* Register for FPIN events from the fabric now that the
+        * EDC common_set_features has completed.
+        */
+       lpfc_issue_els_rdf(vport, 0);
+}
+
+int
+lpfc_config_cgn_signal(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mboxq;
+       u32 rc;
+
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               goto out_rdf;
+
+       lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
+       mboxq->vport = phba->pport;
+       mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+                       "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
+                       "Reg: x%x x%x\n",
+                       phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
+                       phba->cgn_reg_signal, phba->cgn_reg_fpin);
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED)
+               goto out;
+       return 0;
+
+out:
+       mempool_free(mboxq, phba->mbox_mem_pool);
+out_rdf:
+       /* If there is a mbox error, move on to RDF */
+       phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
+       phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
+       lpfc_issue_els_rdf(phba->pport, 0);
+       return -EIO;
+}
+
 /**
  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
  * @phba: pointer to lpfc hba data structure.
@@ -7464,7 +7847,8 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
                idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
                idle_stat->prev_wall = wall;
 
-               if (phba->nvmet_support)
+               if (phba->nvmet_support ||
+                   phba->cmf_active_mode != LPFC_CFG_OFF)
                        cq->poll_mode = LPFC_QUEUE_WORK;
                else
                        cq->poll_mode = LPFC_IRQ_POLL;
@@ -7495,6 +7879,258 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
        }
 }
 
+/**
+ * lpfc_cmf_setup - Initialize CMF and MI support
+ * @phba: Pointer to HBA context object.
+ *
+ * This is called from HBA setup during driver load or when the HBA
+ * comes online. It does all the initialization needed to support CMF and MI.
+ **/
+static int
+lpfc_cmf_setup(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mqe *mqe;
+       struct lpfc_dmabuf *mp;
+       struct lpfc_pc_sli4_params *sli4_params;
+       struct lpfc_sli4_parameters *mbx_sli4_parameters;
+       int length;
+       int rc, cmf, mi_ver;
+
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+       mqe = &mboxq->u.mqe;
+
+       /* Read the port's SLI4 Config Parameters */
+       length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+                        length, LPFC_SLI4_MBX_EMBED);
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (unlikely(rc)) {
+               mempool_free(mboxq, phba->mbox_mem_pool);
+               return rc;
+       }
+
+       /* Gather info on CMF and MI support */
+       sli4_params = &phba->sli4_hba.pc_sli4_params;
+       mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+       sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters);
+       sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters);
+
+       /* Are we forcing MI off via module parameter? */
+       if (!phba->cfg_enable_mi)
+               sli4_params->mi_ver = 0;
+
+       /* Always try to enable MI feature if we can */
+       if (sli4_params->mi_ver) {
+               lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
+               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+               mi_ver = bf_get(lpfc_mbx_set_feature_mi,
+                                &mboxq->u.mqe.un.set_feature);
+
+               if (rc == MBX_SUCCESS) {
+                       if (mi_ver) {
+                               lpfc_printf_log(phba,
+                                               KERN_WARNING, LOG_CGN_MGMT,
+                                               "6215 MI is enabled\n");
+                               sli4_params->mi_ver = mi_ver;
+                       } else {
+                               lpfc_printf_log(phba,
+                                               KERN_WARNING, LOG_CGN_MGMT,
+                                               "6338 MI is disabled\n");
+                               sli4_params->mi_ver = 0;
+                       }
+               } else {
+                       /* mi_ver is already set from GET_SLI4_PARAMETERS */
+                       lpfc_printf_log(phba, KERN_INFO,
+                                       LOG_CGN_MGMT | LOG_INIT,
+                                       "6245 Enable MI Mailbox x%x (x%x/x%x) "
+                                       "failed, rc:x%x mi:x%x\n",
+                                       bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+                                       lpfc_sli_config_mbox_subsys_get
+                                               (phba, mboxq),
+                                       lpfc_sli_config_mbox_opcode_get
+                                               (phba, mboxq),
+                                       rc, sli4_params->mi_ver);
+               }
+       } else {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                               "6217 MI is disabled\n");
+       }
+
+       /* Ensure FDMI is enabled for MI if enable_mi is set */
+       if (sli4_params->mi_ver)
+               phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
+
+       /* Always try to enable CMF feature if we can */
+       if (sli4_params->cmf) {
+               lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
+               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+               cmf = bf_get(lpfc_mbx_set_feature_cmf,
+                            &mboxq->u.mqe.un.set_feature);
+               if (rc == MBX_SUCCESS && cmf) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                                       "6218 CMF is enabled: mode %d\n",
+                                       phba->cmf_active_mode);
+               } else {
+                       lpfc_printf_log(phba, KERN_WARNING,
+                                       LOG_CGN_MGMT | LOG_INIT,
+                                       "6219 Enable CMF Mailbox x%x (x%x/x%x) "
+                                       "failed, rc:x%x dd:x%x\n",
+                                       bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+                                       lpfc_sli_config_mbox_subsys_get
+                                               (phba, mboxq),
+                                       lpfc_sli_config_mbox_opcode_get
+                                               (phba, mboxq),
+                                       rc, cmf);
+                       sli4_params->cmf = 0;
+                       phba->cmf_active_mode = LPFC_CFG_OFF;
+                       goto no_cmf;
+               }
+
+               /* Allocate Congestion Information Buffer */
+               if (!phba->cgn_i) {
+                       mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+                       if (mp)
+                               mp->virt = dma_alloc_coherent
+                                               (&phba->pcidev->dev,
+                                               sizeof(struct lpfc_cgn_info),
+                                               &mp->phys, GFP_KERNEL);
+                       if (!mp || !mp->virt) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "2640 Failed to alloc memory "
+                                               "for Congestion Info\n");
+                               kfree(mp);
+                               sli4_params->cmf = 0;
+                               phba->cmf_active_mode = LPFC_CFG_OFF;
+                               goto no_cmf;
+                       }
+                       phba->cgn_i = mp;
+
+                       /* initialize congestion buffer info */
+                       lpfc_init_congestion_buf(phba);
+                       lpfc_init_congestion_stat(phba);
+               }
+
+               rc = lpfc_sli4_cgn_params_read(phba);
+               if (rc < 0) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                                       "6242 Error reading Cgn Params (%d)\n",
+                                       rc);
+                       /* Ensure CGN Mode is off */
+                       sli4_params->cmf = 0;
+               } else if (!rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
+                                       "6243 CGN Event empty object.\n");
+                       /* Ensure CGN Mode is off */
+                       sli4_params->cmf = 0;
+               }
+       } else {
+no_cmf:
+               lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+                               "6220 CMF is disabled\n");
+       }
+
+       /* Only register congestion buffer with firmware if BOTH
+        * CMF and E2E are enabled.
+        */
+       if (sli4_params->cmf && sli4_params->mi_ver) {
+               rc = lpfc_reg_congestion_buf(phba);
+               if (rc) {
+                       dma_free_coherent(&phba->pcidev->dev,
+                                         sizeof(struct lpfc_cgn_info),
+                                         phba->cgn_i->virt, phba->cgn_i->phys);
+                       kfree(phba->cgn_i);
+                       phba->cgn_i = NULL;
+                       /* Ensure CGN Mode is off */
+                       phba->cmf_active_mode = LPFC_CFG_OFF;
+                       return 0;
+               }
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "6470 Setup MI version %d CMF %d mode %d\n",
+                       sli4_params->mi_ver, sli4_params->cmf,
+                       phba->cmf_active_mode);
+
+       mempool_free(mboxq, phba->mbox_mem_pool);
+
+       /* Initialize atomic counters */
+       atomic_set(&phba->cgn_fabric_warn_cnt, 0);
+       atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
+       atomic_set(&phba->cgn_sync_alarm_cnt, 0);
+       atomic_set(&phba->cgn_sync_warn_cnt, 0);
+       atomic_set(&phba->cgn_driver_evt_cnt, 0);
+       atomic_set(&phba->cgn_latency_evt_cnt, 0);
+       atomic64_set(&phba->cgn_latency_evt, 0);
+
+       phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+       /* Allocate RX Monitor Buffer */
+       if (!phba->rxtable) {
+               phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
+                                             sizeof(struct rxtable_entry),
+                                             GFP_KERNEL);
+               if (!phba->rxtable) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2644 Failed to alloc memory "
+                                       "for RX Monitor Buffer\n");
+                       return -ENOMEM;
+               }
+       }
+       atomic_set(&phba->rxtable_idx_head, 0);
+       atomic_set(&phba->rxtable_idx_tail, 0);
+       return 0;
+}
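
The congestion-buffer allocation above follows a compact alloc-and-unwind shape: because kfree(NULL) is a no-op, one error branch covers both a failed wrapper allocation and a failed coherent DMA allocation. The pattern in isolation, as a sketch:

    size_t len = sizeof(struct lpfc_cgn_info);
    struct lpfc_dmabuf *mp = kmalloc(sizeof(*mp), GFP_KERNEL);

    if (mp)
            mp->virt = dma_alloc_coherent(&phba->pcidev->dev, len,
                                          &mp->phys, GFP_KERNEL);
    if (!mp || !mp->virt) {
            kfree(mp);      /* safe even when mp is NULL */
            return -ENOMEM;
    }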
+
+static int
+lpfc_set_host_tm(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mboxq;
+       uint32_t len, rc;
+       struct timespec64 cur_time;
+       struct tm broken;
+       uint32_t month, day, year;
+       uint32_t hour, minute, second;
+       struct lpfc_mbx_set_host_date_time *tm;
+
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+
+       len = sizeof(struct lpfc_mbx_set_host_data) -
+               sizeof(struct lpfc_sli4_cfg_mhdr);
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+                        LPFC_SLI4_MBX_EMBED);
+
+       mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
+       mboxq->u.mqe.un.set_host_data.param_len =
+                       sizeof(struct lpfc_mbx_set_host_date_time);
+       tm = &mboxq->u.mqe.un.set_host_data.un.tm;
+       ktime_get_real_ts64(&cur_time);
+       time64_to_tm(cur_time.tv_sec, 0, &broken);
+       month = broken.tm_mon + 1;
+       day = broken.tm_mday;
+       year = broken.tm_year - 100;
+       hour = broken.tm_hour;
+       minute = broken.tm_min;
+       second = broken.tm_sec;
+       bf_set(lpfc_mbx_set_host_month, tm, month);
+       bf_set(lpfc_mbx_set_host_day, tm, day);
+       bf_set(lpfc_mbx_set_host_year, tm, year);
+       bf_set(lpfc_mbx_set_host_hour, tm, hour);
+       bf_set(lpfc_mbx_set_host_min, tm, minute);
+       bf_set(lpfc_mbx_set_host_sec, tm, second);
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return rc;
+}
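
The field adjustments in lpfc_set_host_tm() come from struct tm's offset encoding: time64_to_tm() yields tm_mon in 0..11 and tm_year as years since 1900, so the two-digit year the mailbox wants is tm_year - 100. A minimal demonstration of the conversion:

    struct tm broken;

    /* e.g. September 2021 decodes as tm_mon == 8, tm_year == 121 */
    time64_to_tm(ktime_get_real_seconds(), 0, &broken);
    pr_info("%02d/%02d/%02ld\n",
            broken.tm_mon + 1, broken.tm_mday, broken.tm_year - 100);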
+
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -7584,6 +8220,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                goto out_free_mbox;
        }
 
+       rc = lpfc_set_host_tm(phba);
+       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+                       "6468 Set host date / time: Status x%x:\n", rc);
+
        /*
         * Continue initialization with default values even if driver failed
         * to read FCoE param config regions, only read parameters if the
@@ -8111,6 +8751,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
        /* Indicate device interrupt mode */
        phba->sli4_hba.intr_enable = 1;
 
+       /* Setup CMF after HBA is initialized */
+       lpfc_cmf_setup(phba);
+
        if (!(phba->hba_flag & HBA_FCOE_MODE) &&
            (phba->hba_flag & LINK_DISABLED)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -8132,7 +8775,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                }
        }
        mempool_free(mboxq, phba->mbox_mem_pool);
+
+       phba->hba_flag |= HBA_SETUP;
        return rc;
+
 out_io_buff_free:
        /* Free allocated IO Buffers */
        lpfc_io_free(phba);
@@ -8790,8 +9436,11 @@ static int
 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
 {
        struct lpfc_sli *psli = &phba->sli;
+       LPFC_MBOXQ_t *mboxq;
        int rc = 0;
        unsigned long timeout = 0;
+       u32 sli_flag;
+       u8 cmd, subsys, opcode;
 
        /* Mark the asynchronous mailbox command posting as blocked */
        spin_lock_irq(&phba->hbalock);
@@ -8809,12 +9458,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
        if (timeout)
                lpfc_sli4_process_missed_mbox_completions(phba);
 
-       /* Wait for the outstnading mailbox command to complete */
+       /* Wait for the outstanding mailbox command to complete */
        while (phba->sli.mbox_active) {
                /* Check active mailbox complete status every 2ms */
                msleep(2);
                if (time_after(jiffies, timeout)) {
-                       /* Timeout, marked the outstanding cmd not complete */
+                       /* Timeout, mark the outstanding cmd not complete */
+
+                       /* Sanity check that sli.mbox_active has not completed
+                        * or been cancelled from another context during the
+                        * last 2 ms sleep, so take hbalock to be sure before
+                        * logging.
+                        */
+                       spin_lock_irq(&phba->hbalock);
+                       if (phba->sli.mbox_active) {
+                               mboxq = phba->sli.mbox_active;
+                               cmd = mboxq->u.mb.mbxCommand;
+                               subsys = lpfc_sli_config_mbox_subsys_get(phba,
+                                                                        mboxq);
+                               opcode = lpfc_sli_config_mbox_opcode_get(phba,
+                                                                        mboxq);
+                               sli_flag = psli->sli_flag;
+                               spin_unlock_irq(&phba->hbalock);
+                               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+                                               "2352 Mailbox command x%x "
+                                               "(x%x/x%x) sli_flag x%x could "
+                                               "not complete\n",
+                                               cmd, subsys, opcode,
+                                               sli_flag);
+                       } else {
+                               spin_unlock_irq(&phba->hbalock);
+                       }
+
                        rc = 1;
                        break;
                }
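
The new timeout logging above is careful about the race with normal completion: mbox_active is re-checked under hbalock after the 2 ms sleep, the fields needed for the message are snapshotted while the lock is held, and the log call itself runs unlocked. The check-copy-unlock idiom, distilled (pr_err stands in for the driver's logging macro):

    spin_lock_irq(&phba->hbalock);
    if (phba->sli.mbox_active) {
            /* snapshot under the lock ... */
            u8 cmd = phba->sli.mbox_active->u.mb.mbxCommand;

            spin_unlock_irq(&phba->hbalock);
            /* ... log after dropping it */
            pr_err("mailbox x%x did not complete\n", cmd);
    } else {
            spin_unlock_irq(&phba->hbalock);
    }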
@@ -9763,6 +10437,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
                                *pcmd == ELS_CMD_SCR ||
                                *pcmd == ELS_CMD_RDF ||
+                               *pcmd == ELS_CMD_EDC ||
                                *pcmd == ELS_CMD_RSCN_XMT ||
                                *pcmd == ELS_CMD_FDISC ||
                                *pcmd == ELS_CMD_LOGO ||
@@ -10097,8 +10772,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
                bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
-               pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
-                                       iocbq->context2)->virt);
                if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                                bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
                                bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
@@ -11619,6 +12292,7 @@ void
 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
 {
+       struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
        IOCB_t *irsp = &rspiocb->iocb;
 
        /* ELS cmd tag <ulpIoTag> completes */
@@ -11627,11 +12301,16 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        "x%x x%x x%x\n",
                        irsp->ulpIoTag, irsp->ulpStatus,
                        irsp->un.ulpWord[4], irsp->ulpTimeout);
-       lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
+       /*
+        * Drop the ndlp reference only after freeing the iocb:
+        * sli_release_iocb will still access the ndlp if the exchange
+        * is busy.
+        */
        if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
                lpfc_ct_free_iocb(phba, cmdiocb);
        else
                lpfc_els_free_iocb(phba, cmdiocb);
+
+       lpfc_nlp_put(ndlp);
 }
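
The reordering above closes a use-after-free window: the free routines may still dereference the node while releasing an exchange that is busy, so the node reference must be dropped last. The shape of the fix, reduced to three lines:

    struct lpfc_nodelist *ndlp = cmdiocb->context1;  /* cache first */

    lpfc_els_free_iocb(phba, cmdiocb);  /* may still touch ndlp */
    lpfc_nlp_put(ndlp);                 /* drop the reference last */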
 
 /**
@@ -14626,8 +15305,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
 
        switch (cq->poll_mode) {
        case LPFC_IRQ_POLL:
-               irq_poll_sched(&cq->iop);
-               break;
+               /* CGN mgmt is mutually exclusive with softirq processing */
+               if (phba->cmf_active_mode == LPFC_CFG_OFF) {
+                       irq_poll_sched(&cq->iop);
+                       break;
+               }
+               fallthrough;
        case LPFC_QUEUE_WORK:
        default:
                if (is_kdump_kernel())
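
The fallthrough above uses the kernel's fallthrough pseudo-keyword (linux/compiler_attributes.h), which documents the intentional drop into the LPFC_QUEUE_WORK case and satisfies -Wimplicit-fallthrough. Schematically — cmf_active is a hypothetical stand-in for the mode test, and the queue_work() call is a simplification of the dispatch in this function:

    switch (cq->poll_mode) {
    case LPFC_IRQ_POLL:
            if (!cmf_active) {              /* fast path stays in softirq */
                    irq_poll_sched(&cq->iop);
                    break;
            }
            fallthrough;                    /* CMF needs workqueue context */
    case LPFC_QUEUE_WORK:
    default:
            queue_work(phba->wq, &cq->irqwork);
    }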
@@ -20020,6 +20703,91 @@ out:
        return;
 }
 
+/**
+ * lpfc_log_fw_write_cmpl - logs firmware write completion status
+ * @phba: pointer to lpfc hba data structure
+ * @shdr_status: wr_object rsp's status field
+ * @shdr_add_status: wr_object rsp's add_status field
+ * @shdr_add_status_2: wr_object rsp's add_status_2 field
+ * @shdr_change_status: wr_object rsp's change_status field
+ * @shdr_csf: wr_object rsp's csf bit
+ *
+ * This routine is intended to be called after a firmware write completes.
+ * It will log the next action the user must take to instantiate the newly
+ * downloaded firmware, or the reason the image is incompatible.
+ **/
+static void
+lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
+                      u32 shdr_add_status, u32 shdr_add_status_2,
+                      u32 shdr_change_status, u32 shdr_csf)
+{
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "4198 %s: flash_id x%02x, asic_rev x%02x, "
+                       "status x%02x, add_status x%02x, add_status_2 x%02x, "
+                       "change_status x%02x, csf %01x\n", __func__,
+                       phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
+                       shdr_status, shdr_add_status, shdr_add_status_2,
+                       shdr_change_status, shdr_csf);
+
+       if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
+               switch (shdr_add_status_2) {
+               case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                                       "4199 Firmware write failed: "
+                                       "image incompatible with flash x%02x\n",
+                                       phba->sli4_hba.flash_id);
+                       break;
+               case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                                       "4200 Firmware write failed: "
+                                       "image incompatible with ASIC "
+                                       "architecture x%02x\n",
+                                       phba->sli4_hba.asic_rev);
+                       break;
+               default:
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                                       "4210 Firmware write failed: "
+                                       "add_status_2 x%02x\n",
+                                       shdr_add_status_2);
+                       break;
+               }
+       } else if (!shdr_status && !shdr_add_status) {
+               if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+                   shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+                       if (shdr_csf)
+                               shdr_change_status =
+                                                  LPFC_CHANGE_STATUS_PCI_RESET;
+               }
+
+               switch (shdr_change_status) {
+               case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
+                       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                                       "3198 Firmware write complete: System "
+                                       "reboot required to instantiate\n");
+                       break;
+               case (LPFC_CHANGE_STATUS_FW_RESET):
+                       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                                       "3199 Firmware write complete: "
+                                       "Firmware reset required to "
+                                       "instantiate\n");
+                       break;
+               case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
+                       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                                       "3200 Firmware write complete: Port "
+                                       "Migration or PCI Reset required to "
+                                       "instantiate\n");
+                       break;
+               case (LPFC_CHANGE_STATUS_PCI_RESET):
+                       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                                       "3201 Firmware write complete: PCI "
+                                       "Reset required to instantiate\n");
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
 /**
  * lpfc_wr_object - write an object to the firmware
  * @phba: HBA structure that indicates port to create a queue on.
@@ -20046,7 +20814,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
        struct lpfc_mbx_wr_object *wr_object;
        LPFC_MBOXQ_t *mbox;
        int rc = 0, i = 0;
-       uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
+       uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
+       uint32_t shdr_change_status = 0, shdr_csf = 0;
        uint32_t mbox_tmo;
        struct lpfc_dmabuf *dmabuf;
        uint32_t written = 0;
@@ -20100,58 +20869,36 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
                             &wr_object->header.cfg_shdr.response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
                                 &wr_object->header.cfg_shdr.response);
+       shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
+                                  &wr_object->header.cfg_shdr.response);
        if (check_change_status) {
                shdr_change_status = bf_get(lpfc_wr_object_change_status,
                                            &wr_object->u.response);
-
-               if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
-                   shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
-                       shdr_csf = bf_get(lpfc_wr_object_csf,
-                                         &wr_object->u.response);
-                       if (shdr_csf)
-                               shdr_change_status =
-                                                  LPFC_CHANGE_STATUS_PCI_RESET;
-               }
-
-               switch (shdr_change_status) {
-               case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "3198 Firmware write complete: System "
-                                       "reboot required to instantiate\n");
-                       break;
-               case (LPFC_CHANGE_STATUS_FW_RESET):
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "3199 Firmware write complete: Firmware"
-                                       " reset required to instantiate\n");
-                       break;
-               case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "3200 Firmware write complete: Port "
-                                       "Migration or PCI Reset required to "
-                                       "instantiate\n");
-                       break;
-               case (LPFC_CHANGE_STATUS_PCI_RESET):
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "3201 Firmware write complete: PCI "
-                                       "Reset required to instantiate\n");
-                       break;
-               default:
-                       break;
-               }
+               shdr_csf = bf_get(lpfc_wr_object_csf,
+                                 &wr_object->u.response);
        }
+
        if (!phba->sli4_hba.intr_enable)
                mempool_free(mbox, phba->mbox_mem_pool);
        else if (rc != MBX_TIMEOUT)
                mempool_free(mbox, phba->mbox_mem_pool);
-       if (shdr_status || shdr_add_status || rc) {
+       if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3025 Write Object mailbox failed with "
-                               "status x%x add_status x%x, mbx status x%x\n",
-                               shdr_status, shdr_add_status, rc);
+                               "status x%x add_status x%x, add_status_2 x%x, "
+                               "mbx status x%x\n",
+                               shdr_status, shdr_add_status, shdr_add_status_2,
+                               rc);
                rc = -ENXIO;
                *offset = shdr_add_status;
-       } else
+       } else {
                *offset += wr_object->u.response.actual_write_length;
+       }
+
+       if (rc || check_change_status)
+               lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
+                                      shdr_add_status_2, shdr_change_status,
+                                      shdr_csf);
        return rc;
 }
 
@@ -20543,8 +21290,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
        }
 
        /* NVME_FCREQ and NVME_ABTS requests */
-       if (pwqe->iocb_flag & LPFC_IO_NVME ||
-           pwqe->iocb_flag & LPFC_IO_FCP) {
+       if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
                /* Get the IO distribution (hba_wqidx) for WQ assignment. */
                wq = qp->io_wq;
                pring = wq->pring;
@@ -21322,6 +22068,116 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
        return lpfc_cmd;
 }
 
+/**
+ * lpfc_read_object - Retrieve object data from HBA
+ * @phba: The HBA for which this call is being executed.
+ * @rdobject: Pathname of object data we want to read.
+ * @datap: Pointer to the buffer the data will be copied into.
+ * @datasz: Size of the data buffer in bytes.
+ *
+ * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
+ * The data will be truncated if datasz is not large enough.
+ * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
+ * Returns the actual bytes read from the object.
+ */
+int
+lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
+                uint32_t datasz)
+{
+       struct lpfc_mbx_read_object *read_object;
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, eof, j, byte_cnt = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+       struct lpfc_dmabuf *pcmd;
+
+       /* sanity check on the caller's data pointer */
+       if (!datap)
+               return -ENODEV;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_read_object) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_READ_OBJECT,
+                        length, LPFC_SLI4_MBX_EMBED);
+       read_object = &mbox->u.mqe.un.read_object;
+       shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
+
+       bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
+       bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
+       read_object->u.request.rd_object_offset = 0;
+       read_object->u.request.rd_object_cnt = 1;
+
+       memset((void *)read_object->u.request.rd_object_name, 0,
+              LPFC_OBJ_NAME_SZ);
+       scnprintf((char *)read_object->u.request.rd_object_name,
+                 LPFC_OBJ_NAME_SZ, "%s", rdobject);
+       for (j = 0; j < strlen(rdobject); j++)
+               read_object->u.request.rd_object_name[j] =
+                       cpu_to_le32(read_object->u.request.rd_object_name[j]);
+
+       pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
+       if (pcmd)
+               pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+       if (!pcmd || !pcmd->virt) {
+               kfree(pcmd);
+               mempool_free(mbox, phba->mbox_mem_pool);
+               return -ENOMEM;
+       }
+       memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
+       read_object->u.request.rd_object_hbuf[0].pa_lo =
+               putPaddrLow(pcmd->phys);
+       read_object->u.request.rd_object_hbuf[0].pa_hi =
+               putPaddrHigh(pcmd->phys);
+       read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
+
+       mbox->vport = phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       mbox->ctx_buf = NULL;
+       mbox->ctx_ndlp = NULL;
+
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+       if (shdr_status == STATUS_FAILED &&
+           shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+                               "4674 No port cfg file in FW.\n");
+               byte_cnt = -ENOENT;
+       } else if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
+                               "2625 READ_OBJECT mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               byte_cnt = -ENXIO;
+       } else {
+               /* Success */
+               length = read_object->u.response.rd_object_actual_rlen;
+               eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
+                               "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
+                               length, datasz, eof);
+
+               /* Detect when the port config file exists but is empty */
+               if (!length && eof) {
+                       byte_cnt = 0;
+                       goto exit;
+               }
+
+               byte_cnt = length;
+               lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
+       }
+
+ exit:
+       lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+       kfree(pcmd);
+       mempool_free(mbox, phba->mbox_mem_pool);
+       return byte_cnt;
+}
+
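A hedged usage sketch for the new reader; the object name below is illustrative, not a real firmware object, and the buffer must be at most LPFC_BPL_SIZE bytes:

    u32 *data;
    int len;

    data = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
    if (!data)
            return -ENOMEM;

    /* "/driver/example.cfg" is a made-up object name, for illustration only */
    len = lpfc_read_object(phba, "/driver/example.cfg", data, LPFC_BPL_SIZE);
    if (len >= 0) {
            /* len valid bytes of object data are now in data[] */
    } else if (len == -ENOENT) {
            /* object not present in FW - may be expected */
    } else {
            /* mailbox or allocation failure (-ENOMEM / -ENXIO / -ENODEV) */
    }
    kfree(data);
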
 /**
  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
  * @phba: The HBA for which this call is being executed.
index dde8eb9..5161cca 100644 (file)
@@ -107,6 +107,7 @@ struct lpfc_iocbq {
 #define LPFC_IO_NVME_LS                0x400000 /* NVME LS command */
 #define LPFC_IO_NVMET          0x800000 /* NVMET command */
 #define LPFC_IO_VMID            0x1000000 /* VMID tagged IO */
+#define LPFC_IO_CMF            0x4000000 /* CMF command */
 
        uint32_t drvrTimeout;   /* driver timeout in seconds */
        struct lpfc_vport *vport;/* virtual port pointer */
@@ -462,4 +463,5 @@ struct lpfc_io_buf {
        uint64_t ts_isr_cmpl;
        uint64_t ts_data_io;
 #endif
+       uint64_t rx_cmd_start;
 };
index 26f19c9..99c5d1e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -557,6 +557,7 @@ struct lpfc_pc_sli4_params {
        uint16_t mi_value;
 #define LPFC_DFLT_MIB_VAL      2
        uint8_t mib_bde_cnt;
+       uint8_t cmf;
        uint8_t cqv;
        uint8_t mqv;
        uint8_t wqv;
@@ -978,6 +979,8 @@ struct lpfc_sli4_hba {
 #define lpfc_conf_trunk_port3_nd_WORD  conf_trunk
 #define lpfc_conf_trunk_port3_nd_SHIFT 7
 #define lpfc_conf_trunk_port3_nd_MASK  0x1
+       uint8_t flash_id;
+       uint8_t asic_rev;
 };
 
 enum lpfc_sge_type {
index 2d62fd2..a7aba78 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.8.0.10"
+#define LPFC_DRIVER_VERSION "14.0.0.1"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index ec10b24..e4298bf 100644 (file)
@@ -1451,10 +1451,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
         * pthru timeout to the os layer timeout value.
         */
        if (scp->device->type == TYPE_TAPE) {
-               if ((scp->request->timeout / HZ) > 0xFFFF)
+               if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
                        pthru->timeout = cpu_to_le16(0xFFFF);
                else
-                       pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+                       pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
        }
 
        /*
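These megaraid_sas hunks, and the mpt3sas/mpi3mr ones further down, replace direct scmd->request dereferences with the scsi_cmd_to_rq() accessor that accompanies the removal of the back-pointer. Roughly, the accessor recovers the request from the scsi_cmnd being the request's PDU:

    /* sketch of the accessor the converted call sites rely on */
    static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
    {
            return blk_mq_rq_from_pdu(scmd);
    }
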
index 06399c0..26d0cf9 100644 (file)
@@ -402,7 +402,7 @@ megasas_get_msix_index(struct megasas_instance *instance,
                        (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
                                instance->msix_vectors));
        } else if (instance->host->nr_hw_queues > 1) {
-               u32 tag = blk_mq_unique_tag(scmd->request);
+               u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
 
                cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
                        instance->low_latency_index_start;
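blk_mq_unique_tag() packs the hardware queue index into the upper half of the returned value, which is what makes the hwq recoverable here; schematically:

    /* schematic encoding, cf. include/linux/blk-mq.h */
    unique_tag = (hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                 (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
    hwq = unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;  /* blk_mq_unique_tag_to_hwq() */
    tag = unique_tag & BLK_MQ_UNIQUE_TAG_MASK;   /* blk_mq_unique_tag_to_tag() */
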
@@ -3023,7 +3023,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
                io_request->DevHandle = cpu_to_le16(device_id);
                io_request->LUN[1] = scmd->device->lun;
                pRAID_Context->timeout_value =
-                       cpu_to_le16 (scmd->request->timeout / HZ);
+                       cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ);
                cmd->request_desc->SCSIIO.RequestFlags =
                        (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -3086,7 +3086,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
 
        device_id = MEGASAS_DEV_INDEX(scmd);
        pd_index = MEGASAS_PD_INDEX(scmd);
-       os_timeout_value = scmd->request->timeout / HZ;
+       os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ;
        mr_device_priv_data = scmd->device->hostdata;
        cmd->pd_interface = mr_device_priv_data->interface_type;
 
@@ -3381,7 +3381,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
-       cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+       cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag);
 
        if (!cmd) {
                atomic_dec(&instance->fw_outstanding);
@@ -3422,7 +3422,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
         */
        if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
                r1_cmd = megasas_get_cmd_fusion(instance,
-                               (scmd->request->tag + instance->max_fw_cmds));
+                               scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds);
                megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
        }
 
index 6f5dc9e..9787b53 100644 (file)
@@ -183,6 +183,20 @@ enum mpi3mr_iocstate {
        MRIOC_STATE_UNRECOVERABLE,
 };
 
+/* Init type definitions */
+enum mpi3mr_init_type {
+       MPI3MR_IT_INIT = 0,
+       MPI3MR_IT_RESET,
+       MPI3MR_IT_RESUME,
+};
+
+/* Cleanup reason definitions */
+enum mpi3mr_cleanup_reason {
+       MPI3MR_COMPLETE_CLEANUP = 0,
+       MPI3MR_REINIT_FAILURE,
+       MPI3MR_SUSPEND,
+};
+
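The former u8 re_init flag carried two meanings at once; splitting it into these enums lets call sites state both why they are initializing and how much to tear down. As applied in the hunks below:

    /*
     * Old boolean -> new enum mapping used throughout this series:
     *   mpi3mr_init_ioc(mrioc, 0)    -> mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)
     *   mpi3mr_init_ioc(mrioc, 1)    -> MPI3MR_IT_RESET (soft reset) or
     *                                   MPI3MR_IT_RESUME (pm resume)
     *   mpi3mr_cleanup_ioc(mrioc, 0) -> MPI3MR_COMPLETE_CLEANUP
     *   mpi3mr_cleanup_ioc(mrioc, 1) -> MPI3MR_REINIT_FAILURE or MPI3MR_SUSPEND
     */
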
 /* Reset reason code definitions*/
 enum mpi3mr_reset_reason {
        MPI3MR_RESET_FROM_BRINGUP = 1,
@@ -855,8 +869,8 @@ struct delayed_dev_rmhs_node {
 
 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
 u16 admin_req_sz, u8 ignore_reset);
@@ -872,6 +886,7 @@ void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
                                     u64 sense_buf_dma);
 
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
                             struct mpi3_event_notification_reply *event_reply);
 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
index 2dba2b0..4a8316c 100644 (file)
@@ -3205,7 +3205,7 @@ out_failed:
 /**
  * mpi3mr_init_ioc - Initialize the controller
  * @mrioc: Adapter instance reference
- * @re_init: Flag to indicate is this fresh init or re-init
+ * @init_type: Type of initialization (fresh init, reset, or resume)
  *
  * This is the controller initialization routine, executed either
  * after soft reset or from pci probe callback.
@@ -3218,7 +3218,7 @@ out_failed:
  *
  * Return: 0 on success and non-zero on failure.
  */
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 {
        int retval = 0;
        enum mpi3mr_iocstate ioc_state;
@@ -3229,7 +3229,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
 
        mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
        mrioc->change_count = 0;
-       if (!re_init) {
+       if (init_type == MPI3MR_IT_INIT) {
                mrioc->cpu_count = num_online_cpus();
                retval = mpi3mr_setup_resources(mrioc);
                if (retval) {
@@ -3314,7 +3314,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
                goto out_failed;
        }
 
-       if (!re_init) {
+       if (init_type != MPI3MR_IT_RESET) {
                retval = mpi3mr_setup_isr(mrioc, 1);
                if (retval) {
                        ioc_err(mrioc, "Failed to setup ISR error %d\n",
@@ -3332,7 +3332,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
        }
 
        mpi3mr_process_factsdata(mrioc, &facts_data);
-       if (!re_init) {
+       if (init_type == MPI3MR_IT_INIT) {
                retval = mpi3mr_check_reset_dma_mask(mrioc);
                if (retval) {
                        ioc_err(mrioc, "Resetting dma mask failed %d\n",
@@ -3351,7 +3351,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
                goto out_failed;
        }
 
-       if (!re_init) {
+       if (init_type == MPI3MR_IT_INIT) {
                retval = mpi3mr_alloc_chain_bufs(mrioc);
                if (retval) {
                        ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
@@ -3374,7 +3374,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
        writel(mrioc->sbq_host_index,
            &mrioc->sysif_regs->sense_buffer_free_host_index);
 
-       if (!re_init)  {
+       if (init_type != MPI3MR_IT_RESET) {
                retval = mpi3mr_setup_isr(mrioc, 0);
                if (retval) {
                        ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
@@ -3390,7 +3390,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
                goto out_failed;
        }
 
-       if (re_init &&
+       if ((init_type != MPI3MR_IT_INIT) &&
            (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
                retval = -1;
                ioc_err(mrioc,
@@ -3422,7 +3422,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
                goto out_failed;
        }
 
-       if (re_init) {
+       if (init_type != MPI3MR_IT_INIT) {
                ioc_info(mrioc, "Issuing Port Enable\n");
                retval = mpi3mr_issue_port_enable(mrioc, 0);
                if (retval) {
@@ -3434,7 +3434,10 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
        return retval;
 
 out_failed:
-       mpi3mr_cleanup_ioc(mrioc, re_init);
+       if (init_type == MPI3MR_IT_INIT)
+               mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+       else
+               mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
 out_nocleanup:
        return retval;
 }
@@ -3495,7 +3498,7 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
  *
  * Return: Nothing.
  */
-static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
 {
        u16 i;
 
@@ -3710,7 +3713,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
 /**
  * mpi3mr_cleanup_ioc - Cleanup controller
  * @mrioc: Adapter instance reference
- * @re_init: Cleanup due to a reinit or not
+ * @reason: Cleanup reason
  *
  * controller cleanup handler, Message unit reset or soft reset
  * and shutdown notification is issued to the controller and the
@@ -3718,11 +3721,11 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
  *
  * Return: Nothing.
  */
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
 {
        enum mpi3mr_iocstate ioc_state;
 
-       if (!re_init)
+       if (reason == MPI3MR_COMPLETE_CLEANUP)
                mpi3mr_stop_watchdog(mrioc);
 
        mpi3mr_ioc_disable_intr(mrioc);
@@ -3737,11 +3740,11 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
                            MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
                            MPI3MR_RESET_FROM_MUR_FAILURE);
 
-               if (!re_init)
+               if (reason != MPI3MR_REINIT_FAILURE)
                        mpi3mr_issue_ioc_shutdown(mrioc);
        }
 
-       if (!re_init) {
+       if (reason == MPI3MR_COMPLETE_CLEANUP) {
                mpi3mr_free_mem(mrioc);
                mpi3mr_cleanup_resources(mrioc);
        }
@@ -3923,7 +3926,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
        mpi3mr_flush_host_io(mrioc);
        mpi3mr_invalidate_devhandles(mrioc);
        mpi3mr_memset_buffers(mrioc);
-       retval = mpi3mr_init_ioc(mrioc, 1);
+       retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
        if (retval) {
                pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
                    mrioc->name, reset_reason);
index 24ac7dd..2197988 100644 (file)
@@ -50,7 +50,7 @@ static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
        u32 unique_tag;
        u16 host_tag, hw_queue;
 
-       unique_tag = blk_mq_unique_tag(scmd->request);
+       unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
 
        hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
        if (hw_queue >= mrioc->num_op_reply_q)
@@ -1963,7 +1963,6 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
 {
        u16 eedp_flags = 0;
        unsigned char prot_op = scsi_get_prot_op(scmd);
-       unsigned char prot_type = scsi_get_prot_type(scmd);
 
        switch (prot_op) {
        case SCSI_PROT_NORMAL:
@@ -1983,60 +1982,42 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
                scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
                break;
        case SCSI_PROT_READ_PASS:
-               eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
-                   MPI3_EEDPFLAGS_CHK_REF_TAG | MPI3_EEDPFLAGS_CHK_APP_TAG |
-                   MPI3_EEDPFLAGS_CHK_GUARD;
+               eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
                scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
                break;
        case SCSI_PROT_WRITE_PASS:
-               if (scsi_host_get_guard(scmd->device->host)
-                   & SHOST_DIX_GUARD_IP) {
-                       eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN |
-                           MPI3_EEDPFLAGS_CHK_APP_TAG |
-                           MPI3_EEDPFLAGS_CHK_GUARD |
-                           MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+               if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
+                       eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
                        scsiio_req->sgl[0].eedp.application_tag_translation_mask =
                            0xffff;
-               } else {
-                       eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
-                           MPI3_EEDPFLAGS_CHK_REF_TAG |
-                           MPI3_EEDPFLAGS_CHK_APP_TAG |
-                           MPI3_EEDPFLAGS_CHK_GUARD;
-               }
+               } else {
+                       eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
+               }
+
                scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
                break;
        default:
                return;
        }
 
-       if (scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP)
+       if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+               eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
+
+       if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
                eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
 
-       switch (prot_type) {
-       case SCSI_PROT_DIF_TYPE0:
-               eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+       if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
+               eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
+                       MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
                scsiio_req->cdb.eedp32.primary_reference_tag =
-                   cpu_to_be32(t10_pi_ref_tag(scmd->request));
-               break;
-       case SCSI_PROT_DIF_TYPE1:
-       case SCSI_PROT_DIF_TYPE2:
-               eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
-                   MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE |
-                   MPI3_EEDPFLAGS_CHK_GUARD;
-               scsiio_req->cdb.eedp32.primary_reference_tag =
-                   cpu_to_be32(t10_pi_ref_tag(scmd->request));
-               break;
-       case SCSI_PROT_DIF_TYPE3:
-               eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD |
-                   MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
-               break;
-
-       default:
-               scsiio_req->msg_flags &= ~(MPI3_SCSIIO_MSGFLAGS_METASGL_VALID);
-               return;
+                       cpu_to_be32(scsi_prot_ref_tag(scmd));
        }
 
-       switch (scmd->device->sector_size) {
+       if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
+               eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+
+       eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+
+       switch (scsi_prot_interval(scmd)) {
        case 512:
                scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
                break;
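The EEDP rework keys off the per-command scmd->prot_flags set by the midlayer instead of re-deriving policy from the DIF type, and uses the new accessors in place of scmd->request and device->sector_size. Roughly, as of this series (a sketch of the scsi_cmnd.h accessors, not part of this hunk):

    static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
    {
            return scmd->device->sector_size;
    }

    static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
    {
            struct request *rq = blk_mq_rq_from_pdu(scmd);

            return t10_pi_ref_tag(rq);
    }
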
@@ -3451,7 +3432,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
        u16 dev_handle;
        u16 host_tag;
        u32 scsiio_flags = 0;
-       struct request *rq = scmd->request;
+       struct request *rq = scsi_cmd_to_rq(scmd);
        int iprio_class;
 
        sdev_priv_data = scmd->device->hostdata;
@@ -3795,7 +3776,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        mrioc->is_driver_loading = 1;
-       if (mpi3mr_init_ioc(mrioc, 0)) {
+       if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
                ioc_err(mrioc, "failure at %s:%d/%s()!\n",
                    __FILE__, __LINE__, __func__);
                retval = -ENODEV;
@@ -3818,7 +3799,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return retval;
 
 addhost_failed:
-       mpi3mr_cleanup_ioc(mrioc, 0);
+       mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
 out_iocinit_failed:
        destroy_workqueue(mrioc->fwevt_worker_thread);
 out_fwevtthread_failed:
@@ -3870,7 +3851,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
                mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
                mpi3mr_tgtdev_put(tgtdev);
        }
-       mpi3mr_cleanup_ioc(mrioc, 0);
+       mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
 
        spin_lock(&mrioc_list_lock);
        list_del(&mrioc->list);
@@ -3910,7 +3891,7 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
        if (wq)
                destroy_workqueue(wq);
-       mpi3mr_cleanup_ioc(mrioc, 0);
+       mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
 }
 
 #ifdef CONFIG_PM
@@ -3940,7 +3921,7 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
        mpi3mr_cleanup_fwevt_list(mrioc);
        scsi_block_requests(shost);
        mpi3mr_stop_watchdog(mrioc);
-       mpi3mr_cleanup_ioc(mrioc, 1);
+       mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND);
 
        device_state = pci_choose_state(pdev, state);
        ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
@@ -3988,7 +3969,8 @@ static int mpi3mr_resume(struct pci_dev *pdev)
        }
 
        mrioc->stop_drv_processing = 0;
-       mpi3mr_init_ioc(mrioc, 1);
+       mpi3mr_memset_buffers(mrioc);
+       mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME);
        scsi_unblock_requests(shost);
        mpi3mr_start_watchdog(mrioc);
 
index cf4a3a2..6c82435 100644 (file)
@@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode,
        "\t\tdefault - default perf_mode is 'balanced'"
        );
 
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
+       "This parameter is effective only if host_tagset_enable=1.\n\t\t"
+       "When poll_queues are enabled, perf_mode is set to latency mode.\n\t\t"
+       );
+
 enum mpt3sas_perf_mode {
        MPT_PERF_MODE_DEFAULT   = -1,
        MPT_PERF_MODE_BALANCED  = 0,
@@ -709,6 +717,7 @@ _base_fault_reset_work(struct work_struct *work)
                 * and this call is safe since dead ioc will never return any
                 * command back from HW.
                 */
+               mpt3sas_base_pause_mq_polling(ioc);
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
@@ -744,6 +753,7 @@ _base_fault_reset_work(struct work_struct *work)
                        spin_unlock_irqrestore(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        mpt3sas_base_mask_interrupts(ioc);
+                       mpt3sas_base_pause_mq_polling(ioc);
                        _base_clear_outstanding_commands(ioc);
                }
 
@@ -1547,6 +1557,53 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
        return cb_idx;
 }
 
+/**
+ * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
+ *                             when the driver is flushing out the IOs.
+ * @ioc: per adapter object
+ *
+ * Pause polling on the mq poll (io uring) queues when the driver is flushing
+ * out the IOs. Otherwise we may hit a race condition where the same IO is
+ * completed from two paths.
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+       int iopoll_q_count =
+           ioc->reply_queue_count - ioc->iopoll_q_start_index;
+       int qid;
+
+       for (qid = 0; qid < iopoll_q_count; qid++)
+               atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
+
+       /*
+        * wait for current poll to complete.
+        */
+       for (qid = 0; qid < iopoll_q_count; qid++) {
+               while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+                       udelay(500);
+       }
+}
+
+/**
+ * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+       int iopoll_q_count =
+           ioc->reply_queue_count - ioc->iopoll_q_start_index;
+       int qid;
+
+       for (qid = 0; qid < iopoll_q_count; qid++)
+               atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
+}
+
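The pair is meant to bracket any path that flushes outstanding IOs, as the fault-reset and hard-reset hunks in this file do; a minimal usage sketch:

    /* quiesce io_uring poll queues before flushing outstanding IOs */
    mpt3sas_base_pause_mq_polling(ioc);
    _base_clear_outstanding_commands(ioc);
    /* ... reset / reinit work ... */
    mpt3sas_base_resume_mq_polling(ioc);

Note the pause side spins in 500us udelay() steps until any in-flight poll drains.
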
 /**
  * mpt3sas_base_mask_interrupts - disable interrupts
  * @ioc: per adapter object
@@ -1722,7 +1779,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
                                                 MPI2_RPHI_MSIX_INDEX_SHIFT),
                                                &ioc->chip->ReplyPostHostIndex);
                        }
-                       if (!reply_q->irq_poll_scheduled) {
+                       if (!reply_q->is_iouring_poll_q &&
+                           !reply_q->irq_poll_scheduled) {
                                reply_q->irq_poll_scheduled = true;
                                irq_poll_sched(&reply_q->irqpoll);
                        }
@@ -1778,6 +1836,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
        return completed_cmds;
 }
 
+/**
+ * mpt3sas_blk_mq_poll - poll the blk mq poll queue
+ * @shost: Scsi_Host object
+ * @queue_num: hw ctx queue number
+ *
+ * Return the number of entries processed from the poll queue.
+ */
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+       struct MPT3SAS_ADAPTER *ioc =
+           (struct MPT3SAS_ADAPTER *)shost->hostdata;
+       struct adapter_reply_queue *reply_q;
+       int num_entries = 0;
+       int qid = queue_num - ioc->iopoll_q_start_index;
+
+       if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
+           !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
+               return 0;
+
+       reply_q = ioc->io_uring_poll_queues[qid].reply_q;
+
+       num_entries = _base_process_reply_queue(reply_q);
+       atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
+
+       return num_entries;
+}
+
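For the block layer to call this, the scsih host template presumably wires it up as the blk-mq poll callback, with the poll queues exposed as a separate HCTX_TYPE_POLL map; the glue is not in these hunks, so the template name below is an assumption:

    static struct scsi_host_template mpt3sas_driver_template = {
            /* ... */
            .mq_poll        = mpt3sas_blk_mq_poll,  /* polled completions */
    };
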
 /**
  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
  * @irq: irq number (not used)
@@ -1851,6 +1936,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
                return;
 
        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+               if (reply_q->is_iouring_poll_q)
+                       continue;
                irq_poll_init(&reply_q->irqpoll,
                        ioc->hba_queue_depth/4, _base_irqpoll);
                reply_q->irq_poll_scheduled = false;
@@ -1900,6 +1987,12 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
                /* TMs are on msix_index == 0 */
                if (reply_q->msix_index == 0)
                        continue;
+
+               if (reply_q->is_iouring_poll_q) {
+                       _base_process_reply_queue(reply_q);
+                       continue;
+               }
+
                synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
                if (reply_q->irq_poll_scheduled) {
                        /* Calling irq_poll_disable will wait for any pending
@@ -2998,6 +3091,11 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 
        list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
                list_del(&reply_q->list);
+               if (reply_q->is_iouring_poll_q) {
+                       kfree(reply_q);
+                       continue;
+               }
+
                if (ioc->smp_affinity_enable)
                        irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
                            reply_q->msix_index), NULL);
@@ -3019,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 {
        struct pci_dev *pdev = ioc->pdev;
        struct adapter_reply_queue *reply_q;
-       int r;
+       int r, qid;
 
        reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
        if (!reply_q) {
@@ -3031,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
        reply_q->msix_index = index;
 
        atomic_set(&reply_q->busy, 0);
+
+       if (index >= ioc->iopoll_q_start_index) {
+               qid = index - ioc->iopoll_q_start_index;
+               snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
+                   ioc->driver_name, ioc->id, qid);
+               reply_q->is_iouring_poll_q = 1;
+               ioc->io_uring_poll_queues[qid].reply_q = reply_q;
+               goto out;
+       }
+
        if (ioc->msix_enable)
                snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
                    ioc->driver_name, ioc->id, index);
@@ -3045,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
                kfree(reply_q);
                return -EBUSY;
        }
-
+out:
        INIT_LIST_HEAD(&reply_q->list);
        list_add_tail(&reply_q->list, &ioc->reply_queue_list);
        return 0;
@@ -3066,6 +3175,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
        unsigned int cpu, nr_cpus, nr_msix, index = 0;
        struct adapter_reply_queue *reply_q;
        int local_numa_node;
+       int iopoll_q_count = ioc->reply_queue_count -
+           ioc->iopoll_q_start_index;
 
        if (!_base_is_controller_msix_enabled(ioc))
                return;
@@ -3099,7 +3210,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
                list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
                        const cpumask_t *mask;
 
-                       if (reply_q->msix_index < ioc->high_iops_queues)
+                       if (reply_q->msix_index < ioc->high_iops_queues ||
+                           reply_q->msix_index >= ioc->iopoll_q_start_index)
                                continue;
 
                        mask = pci_irq_get_affinity(ioc->pdev,
@@ -3121,13 +3233,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 
 fall_back:
        cpu = cpumask_first(cpu_online_mask);
-       nr_msix -= ioc->high_iops_queues;
+       nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
        index = 0;
 
        list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
                unsigned int i, group = nr_cpus / nr_msix;
 
-               if (reply_q->msix_index < ioc->high_iops_queues)
+               if (reply_q->msix_index < ioc->high_iops_queues ||
+                   reply_q->msix_index >= ioc->iopoll_q_start_index)
                        continue;
 
                if (cpu >= nr_cpus)
@@ -3164,8 +3277,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
 {
        u16 lnksta, speed;
 
+       /*
+        * Disable high iops queues if io uring poll queues are enabled.
+        */
        if (perf_mode == MPT_PERF_MODE_IOPS ||
-           perf_mode == MPT_PERF_MODE_LATENCY) {
+           perf_mode == MPT_PERF_MODE_LATENCY ||
+           ioc->io_uring_poll_queues) {
                ioc->high_iops_queues = 0;
                return;
        }
@@ -3202,6 +3319,7 @@ mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
                return;
        pci_free_irq_vectors(ioc->pdev);
        ioc->msix_enable = 0;
+       kfree(ioc->io_uring_poll_queues);
 }
 
 /**
@@ -3215,18 +3333,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
        int i, irq_flags = PCI_IRQ_MSIX;
        struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
        struct irq_affinity *descp = &desc;
+       /*
+        * Don't allocate msix vectors for poll_queues.
+        * msix_vectors always stays within the FW-supported reply queue range.
+        */
+       int nr_msix_vectors = ioc->iopoll_q_start_index;
 
        if (ioc->smp_affinity_enable)
-               irq_flags |= PCI_IRQ_AFFINITY;
+               irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
        else
                descp = NULL;
 
-       ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
-           ioc->reply_queue_count);
+       ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues,
+           ioc->reply_queue_count, nr_msix_vectors);
 
        i = pci_alloc_irq_vectors_affinity(ioc->pdev,
            ioc->high_iops_queues,
-           ioc->reply_queue_count, irq_flags, descp);
+           nr_msix_vectors, irq_flags, descp);
 
        return i;
 }
@@ -3242,6 +3366,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        int r;
        int i, local_max_msix_vectors;
        u8 try_msix = 0;
+       int iopoll_q_count = 0;
 
        ioc->msix_load_balance = false;
 
@@ -3257,22 +3382,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
        pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
                ioc->cpu_count, max_msix_vectors);
-       if (ioc->is_aero_ioc)
-               _base_check_and_enable_high_iops_queues(ioc,
-                       ioc->msix_vector_count);
+
        ioc->reply_queue_count =
-               min_t(int, ioc->cpu_count + ioc->high_iops_queues,
-               ioc->msix_vector_count);
+               min_t(int, ioc->cpu_count, ioc->msix_vector_count);
 
        if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
                local_max_msix_vectors = (reset_devices) ? 1 : 8;
        else
                local_max_msix_vectors = max_msix_vectors;
 
-       if (local_max_msix_vectors > 0)
-               ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
-                       ioc->reply_queue_count);
-       else if (local_max_msix_vectors == 0)
+       if (local_max_msix_vectors == 0)
                goto try_ioapic;
 
        /*
@@ -3293,14 +3412,77 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        if (ioc->msix_load_balance)
                ioc->smp_affinity_enable = 0;
 
+       if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
+               ioc->shost->host_tagset = 0;
+
+       /*
+        * Enable io uring poll queues only if host_tagset is enabled.
+        */
+       if (ioc->shost->host_tagset)
+               iopoll_q_count = poll_queues;
+
+       if (iopoll_q_count) {
+               ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
+                   sizeof(struct io_uring_poll_queue), GFP_KERNEL);
+               if (!ioc->io_uring_poll_queues)
+                       iopoll_q_count = 0;
+       }
+
+       if (ioc->is_aero_ioc)
+               _base_check_and_enable_high_iops_queues(ioc,
+                   ioc->msix_vector_count);
+
+       /*
+        * Add high iops queues count to reply queue count if high iops queues
+        * are enabled.
+        */
+       ioc->reply_queue_count = min_t(int,
+           ioc->reply_queue_count + ioc->high_iops_queues,
+           ioc->msix_vector_count);
+
+       /*
+        * Adjust the reply queue count in case the reply queue count
+        * exceeds the user-provided MSIx vector count.
+        */
+       if (local_max_msix_vectors > 0)
+               ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
+                   ioc->reply_queue_count);
+       /*
+        * Add the io uring poll queue count to the reply queue count
+        * if io uring poll queues are enabled in the driver.
+        */
+       if (iopoll_q_count) {
+               if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
+                       iopoll_q_count = 0;
+               ioc->reply_queue_count = min_t(int,
+                   ioc->reply_queue_count + iopoll_q_count,
+                   ioc->msix_vector_count);
+       }
+
+       /*
+        * Starting index of io uring poll queues in reply queue list.
+        */
+       ioc->iopoll_q_start_index =
+           ioc->reply_queue_count - iopoll_q_count;
+
        r = _base_alloc_irq_vectors(ioc);
        if (r < 0) {
                ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
                goto try_ioapic;
        }
 
+       /*
+        * Adjust the reply queue count if the number of allocated
+        * MSIx vectors is less than the requested number of
+        * MSIx vectors.
+        */
+       if (r < ioc->iopoll_q_start_index) {
+               ioc->reply_queue_count = r + iopoll_q_count;
+               ioc->iopoll_q_start_index =
+                   ioc->reply_queue_count - iopoll_q_count;
+       }
+
        ioc->msix_enable = 1;
-       ioc->reply_queue_count = r;
        for (i = 0; i < ioc->reply_queue_count; i++) {
                r = _base_request_irq(ioc, i);
                if (r) {
@@ -3320,6 +3502,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        ioc->high_iops_queues = 0;
        ioc_info(ioc, "High IOPs queues : disabled\n");
        ioc->reply_queue_count = 1;
+       ioc->iopoll_q_start_index = ioc->reply_queue_count;
        r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
        if (r < 0) {
                dfailprintk(ioc,
@@ -3416,6 +3599,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        u64 pio_chip = 0;
        phys_addr_t chip_phys = 0;
        struct adapter_reply_queue *reply_q;
+       int iopoll_q_count = 0;
 
        dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
@@ -3489,6 +3673,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        if (r)
                goto out_fail;
 
+       iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
+       for (i = 0; i < iopoll_q_count; i++) {
+               atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
+               atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
+       }
+
        if (!ioc->is_driver_loading)
                _base_init_irqpolls(ioc);
        /* Use the Combined reply queue feature only for SAS3 C0 & higher
@@ -3530,11 +3720,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
                        * 4)));
        }
 
-       list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+               if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
+                       pr_info("%s: enabled: index: %d\n",
+                           reply_q->name, reply_q->msix_index);
+                       continue;
+               }
+
                pr_info("%s: %s enabled: IRQ %d\n",
                        reply_q->name,
                        ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
                        pci_irq_vector(ioc->pdev, reply_q->msix_index));
+       }
 
        ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
                 &chip_phys, ioc->chip, memap_sz);
@@ -3651,7 +3848,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
                    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
 
        if (scmd && ioc->shost->nr_hw_queues > 1) {
-               u32 tag = blk_mq_unique_tag(scmd->request);
+               u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
 
                return blk_mq_unique_tag_to_hwq(tag) +
                        ioc->high_iops_queues;
@@ -3735,7 +3932,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
        u16 smid;
        u32 tag, unique_tag;
 
-       unique_tag = blk_mq_unique_tag(scmd->request);
+       unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
        tag = blk_mq_unique_tag_to_tag(unique_tag);
 
        /*
@@ -5168,6 +5365,73 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
                    &ioc->diag_trigger_mpi, 1);
 }
 
+/**
+ * _base_assign_fw_reported_qd - Get the FW-reported queue depths for
+ *                             SAS/SATA/NVMe devices; on failure fall back
+ *                             to the default queue depth values.
+ * @ioc : per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
+{
+       Mpi2ConfigReply_t mpi_reply;
+       Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+       Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+       int sz;
+       int rc = 0;
+
+       ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+       ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+       ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
+       ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
+       if (!ioc->is_gen35_ioc)
+               goto out;
+       /* sas iounit page 1 */
+       sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
+       sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+       if (!sas_iounit_pg1) {
+               pr_err("%s: failure at %s:%d/%s()!\n",
+                   ioc->name, __FILE__, __LINE__, __func__);
+               return rc;
+       }
+       rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+           sas_iounit_pg1, sz);
+       if (rc) {
+               pr_err("%s: failure at %s:%d/%s()!\n",
+                   ioc->name, __FILE__, __LINE__, __func__);
+               goto out;
+       }
+       ioc->max_wideport_qd =
+           (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
+           le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
+           MPT3SAS_SAS_QUEUE_DEPTH;
+       ioc->max_narrowport_qd =
+           (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
+           le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
+           MPT3SAS_SAS_QUEUE_DEPTH;
+       ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
+           sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+       /* pcie iounit page 1 */
+       rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
+           &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
+       if (rc) {
+               pr_err("%s: failure at %s:%d/%s()!\n",
+                   ioc->name, __FILE__, __LINE__, __func__);
+               goto out;
+       }
+       ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
+           (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
+           MPT3SAS_NVME_QUEUE_DEPTH;
+out:
+       dinitprintk(ioc, pr_err(
+           "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
+           ioc->max_wideport_qd, ioc->max_narrowport_qd,
+           ioc->max_sata_qd, ioc->max_nvme_qd));
+       kfree(sas_iounit_pg1);
+       return rc;
+}
+
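Each depth falls back to the driver default whenever firmware reports zero; the repeated ternaries are instances of one pattern, shown here as a hypothetical helper (the patch open-codes it instead):

    /* hypothetical helper equivalent to the open-coded ternaries above */
    static u16 qd_or_default(u16 fw_qd, u16 def_qd)
    {
            return fw_qd ? fw_qd : def_qd;
    }
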
 /**
  * _base_static_config_pages - static start of day config pages
  * @ioc: per adapter object
@@ -5237,6 +5501,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
                        ioc_warn(ioc,
                            "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
        }
+       rc = _base_assign_fw_reported_qd(ioc);
+       if (rc)
+               return rc;
        rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
        if (rc)
                return rc;
@@ -8471,6 +8738,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        _base_pre_reset_handler(ioc);
        mpt3sas_wait_for_commands_to_complete(ioc);
        mpt3sas_base_mask_interrupts(ioc);
+       mpt3sas_base_pause_mq_polling(ioc);
        r = mpt3sas_base_make_ioc_ready(ioc, type);
        if (r)
                goto out;
@@ -8512,6 +8780,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        ioc->ioc_reset_count++;
        mutex_unlock(&ioc->reset_in_progress_mutex);
+       mpt3sas_base_resume_mq_polling(ioc);
 
  out_unlocked:
        if ((r == 0) && is_trigger) {
index 0c6c3df..f87c091 100644 (file)
@@ -77,9 +77,9 @@
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION         "37.101.00.00"
-#define MPT3SAS_MAJOR_VERSION          37
-#define MPT3SAS_MINOR_VERSION          101
+#define MPT3SAS_DRIVER_VERSION         "39.100.00.00"
+#define MPT3SAS_MAJOR_VERSION          39
+#define MPT3SAS_MINOR_VERSION          100
 #define MPT3SAS_BUILD_VERSION          0
 #define MPT3SAS_RELEASE_VERSION        00
 
@@ -354,6 +354,7 @@ struct mpt3sas_nvme_cmd {
 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3    12
 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35   16
 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET      (0x10)
+#define MPT3_MIN_IRQS                                  1
 
 /* OEM Identifiers */
 #define MFG10_OEM_ID_INVALID                   (0x00000000)
@@ -575,6 +576,7 @@ struct _sas_device {
        u8      is_chassis_slot_valid;
        u8      connector_name[5];
        struct kref refcount;
+       u8      port_type;
        struct hba_port *port;
        struct sas_rphy *rphy;
 };
@@ -936,6 +938,8 @@ struct _event_ack_list {
  * @os_irq: irq number
  * @irqpoll: irq_poll object
  * @irq_poll_scheduled: Tells whether irq poll is scheduled or not
+ * @is_iouring_poll_q: Tells whether this reply queue is assigned
+ *                     to an io uring poll queue or not
  * @list: this list
 */
 struct adapter_reply_queue {
@@ -949,9 +953,22 @@ struct adapter_reply_queue {
        struct irq_poll         irqpoll;
        bool                    irq_poll_scheduled;
        bool                    irq_line_enable;
+       bool                    is_iouring_poll_q;
        struct list_head        list;
 };
 
+/**
+ * struct io_uring_poll_queue - the io uring poll queue structure
+ * @busy: Tells whether io uring poll queue is busy or not
+ * @pause: Tells whether IOs are paused on io uring poll queue or not
+ * @reply_q: reply queue mapped for io uring poll queue
+ */
+struct io_uring_poll_queue {
+       atomic_t        busy;
+       atomic_t        pause;
+       struct adapter_reply_queue *reply_q;
+};
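The two atomics form a lock-free handshake between the poll path and the pause path: the poller enters only when pause is clear and it can flip busy from 0 to 1, and mpt3sas_base_pause_mq_polling() sets pause first, then waits for busy to drain. The poll-side gate, in sketch form:

    /* poll-side gate, cf. mpt3sas_blk_mq_poll() */
    if (atomic_read(&q->pause) ||
        !atomic_add_unless(&q->busy, 1, 1))   /* 0 -> 1 only */
            return 0;
    /* ... process the mapped reply queue ... */
    atomic_dec(&q->busy);
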
+
 typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
 
 /* SAS3.0 support */
@@ -1176,6 +1193,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
  * @thresh_hold: Max number of reply descriptors processed
  *                             before updating Host Index
+ * @iopoll_q_start_index: starting index of io uring poll queues
+ *                             in reply queue list
  * @drv_internal_flags: Bit map internal to driver
  * @drv_support_bitmap: driver's supported feature bit map
  * @use_32bit_dma: Flag to use 32 bit consistent dma mask
@@ -1372,11 +1391,13 @@ struct MPT3SAS_ADAPTER {
        bool            msix_load_balance;
        u16             thresh_hold;
        u8              high_iops_queues;
+       u8              iopoll_q_start_index;
        u32             drv_internal_flags;
        u32             drv_support_bitmap;
        u32             dma_mask;
        bool            enable_sdev_max_qd;
        bool            use_32bit_dma;
+       struct io_uring_poll_queue *io_uring_poll_queues;
 
        /* internal commands, callback index */
        u8              scsi_io_cb_idx;
@@ -1423,6 +1444,10 @@ struct MPT3SAS_ADAPTER {
        u8              tm_custom_handling;
        u8              nvme_abort_timeout;
        u16             max_shutdown_latency;
+       u16             max_wideport_qd;
+       u16             max_narrowport_qd;
+       u16             max_nvme_qd;
+       u8              max_sata_qd;
 
        /* static config pages */
        struct mpt3sas_facts facts;
@@ -1730,10 +1755,12 @@ do {    ioc_err(ioc, "In func: %s\n", __func__); \
        status, mpi_request, sz); } while (0)
 
 int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
-int
-mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
 void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);
 
 /* scsih shared API */
 struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
@@ -1829,6 +1856,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
 int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
        Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
        u32 form, u32 handle);
+int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+       u16 sz);
 int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
        Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
        u16 sz);
index 83a5c21..0563078 100644 (file)
@@ -1168,6 +1168,43 @@ out:
        return r;
 }
 
+/**
+ * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+       Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+       u16 sz)
+{
+       Mpi2ConfigRequest_t mpi_request;
+       int r;
+
+       memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+       mpi_request.Function = MPI2_FUNCTION_CONFIG;
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+       mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+       mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT;
+       mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION;
+       mpi_request.Header.PageNumber = 1;
+       ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+       if (r)
+               goto out;
+       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+       r = _config_request(ioc, &mpi_request, mpi_reply,
+           MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+out:
+       return r;
+}
+
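Like the other config readers in this file, the new getter follows the two-action MPI config transaction; annotated:

    /* action 1: PAGE_HEADER - handshake that validates page type/version */
    mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
    r = _config_request(ioc, &mpi_request, mpi_reply,
        MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);

    /* action 2: READ_CURRENT - firmware DMAs the page into config_page */
    mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
    r = _config_request(ioc, &mpi_request, mpi_reply,
        MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
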
 /**
  * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
  * @ioc: per adapter object
index b66140e..770b241 100644 (file)
@@ -3820,9 +3820,10 @@ enable_sdev_max_qd_store(struct device *cdev,
                                }
                        } else if (sas_target_priv_data->flags &
                            MPT_TARGET_FLAGS_PCIE_DEVICE)
-                               qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+                               qdepth = ioc->max_nvme_qd;
                        else
-                               qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+                               qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
+                                   ioc->max_wideport_qd : ioc->max_narrowport_qd;
 
                        mpt3sas_scsih_change_queue_depth(sdev, qdepth);
                }
@@ -3918,6 +3919,24 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(sas_device_handle);
 
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute; only works with SATA devices.
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
 /**
  * sas_ncq_prio_enable_show - send prioritized io commands to device
  * @dev: pointer to embedded device
@@ -3960,6 +3979,7 @@ static DEVICE_ATTR_RW(sas_ncq_prio_enable);
 struct device_attribute *mpt3sas_dev_attrs[] = {
        &dev_attr_sas_address,
        &dev_attr_sas_device_handle,
+       &dev_attr_sas_ncq_prio_supported,
        &dev_attr_sas_ncq_prio_enable,
        NULL,
 };
index 8e64a6f..2f82b1e 100644 (file)
@@ -1803,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
         * limit max device queue for SATA to 32 if enable_sdev_max_qd
         * is disabled.
         */
-       if (ioc->enable_sdev_max_qd)
+       if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
                goto not_sata;
 
        sas_device_priv_data = sdev->hostdata;
@@ -2657,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev)
                        return 1;
                }
 
-               qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+               qdepth = ioc->max_nvme_qd;
                ds = "NVMe";
                sdev_printk(KERN_INFO, sdev,
                        "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
@@ -2709,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev)
        sas_device->volume_handle = volume_handle;
        sas_device->volume_wwid = volume_wwid;
        if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
-               qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+               qdepth = (sas_device->port_type > 1) ?
+                       ioc->max_wideport_qd : ioc->max_narrowport_qd;
                ssp_target = 1;
                if (sas_device->device_info &
                                MPI2_SAS_DEVICE_INFO_SEP) {
@@ -2721,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev)
                } else
                        ds = "SSP";
        } else {
-               qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+               qdepth = ioc->max_sata_qd;
                if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
                        ds = "STP";
                else if (sas_device->device_info &
@@ -3304,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd)
        sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
            "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
            scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
-           (scmd->request->timeout / HZ) * 1000);
+           (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
        _scsih_tm_display_info(ioc, scmd);
 
        sas_device_priv_data = scmd->device->hostdata;
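
For context on the conversions above: scsi_cmd_to_rq() recovers the blk-mq
request that owns a scsi_cmnd. Because the command payload is allocated
immediately after its request, this is plain pointer arithmetic rather than a
stored back-pointer. A simplified sketch of the two helpers involved (the
real definitions live in include/linux/blk-mq.h and include/scsi/scsi_cmnd.h):

static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        /* the per-driver payload (here, the scsi_cmnd) follows the request */
        return pdu - sizeof(struct request);
}

static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
        return blk_mq_rq_from_pdu(scmd);
}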
@@ -5047,48 +5048,31 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
        Mpi25SCSIIORequest_t *mpi_request)
 {
        u16 eedp_flags;
-       unsigned char prot_op = scsi_get_prot_op(scmd);
-       unsigned char prot_type = scsi_get_prot_type(scmd);
        Mpi25SCSIIORequest_t *mpi_request_3v =
           (Mpi25SCSIIORequest_t *)mpi_request;
 
-       if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
-               return;
-
-       if (prot_op ==  SCSI_PROT_READ_STRIP)
+       switch (scsi_get_prot_op(scmd)) {
+       case SCSI_PROT_READ_STRIP:
                eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
-       else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
+               break;
+       case SCSI_PROT_WRITE_INSERT:
                eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
-       else
+               break;
+       default:
                return;
+       }
 
-       switch (prot_type) {
-       case SCSI_PROT_DIF_TYPE1:
-       case SCSI_PROT_DIF_TYPE2:
+       if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+               eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 
-               /*
-               * enable ref/guard checking
-               * auto increment ref tag
-               */
+       if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
                eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-                   MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
-                   MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+                       MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
                mpi_request->CDB.EEDP32.PrimaryReferenceTag =
-                   cpu_to_be32(t10_pi_ref_tag(scmd->request));
-               break;
-
-       case SCSI_PROT_DIF_TYPE3:
-
-               /*
-               * enable guard checking
-               */
-               eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-
-               break;
+                       cpu_to_be32(scsi_prot_ref_tag(scmd));
        }
 
-       mpi_request_3v->EEDPBlockSize =
-           cpu_to_le16(scmd->device->sector_size);
+       mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
 
        if (ioc->is_gen35_ioc)
                eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
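
The rewrite stops dispatching on the DIF type and instead keys off the
midlayer's per-command protection flags and helpers. The two helpers adopted
above are thin wrappers, approximately (per include/scsi/scsi_cmnd.h):

static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
{
        return scmd->device->sector_size;       /* protection interval */
}

static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
        return t10_pi_ref_tag(scsi_cmd_to_rq(scmd));    /* initial ref tag */
}

which is why substituting them for the removed scmd->device->sector_size and
t10_pi_ref_tag(scmd->request) expressions preserves behavior.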
@@ -5141,7 +5125,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct MPT3SAS_TARGET *sas_target_priv_data;
        struct _raid_device *raid_device;
-       struct request *rq = scmd->request;
+       struct request *rq = scsi_cmd_to_rq(scmd);
        int class;
        Mpi25SCSIIORequest_t *mpi_request;
        struct _pcie_device *pcie_device = NULL;
@@ -7371,6 +7355,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 
        /* get device name */
        sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+       sas_device->port_type = sas_device_pg0.MaxPortConnections;
+       ioc_info(ioc,
+           "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
+           handle, sas_device->sas_address, sas_device->port_type);
 
        if (ioc->wait_for_discovery_to_complete)
                _scsih_sas_device_init_add(ioc, sas_device);
@@ -9603,6 +9591,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
        }
 }
 
+/**
+ * _scsih_update_device_qdepth - Update device queue depth after a reset.
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+       struct MPT3SAS_DEVICE *sas_device_priv_data;
+       struct MPT3SAS_TARGET *sas_target_priv_data;
+       struct _sas_device *sas_device;
+       struct scsi_device *sdev;
+       u16 qdepth;
+
+       ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+       shost_for_each_device(sdev, ioc->shost) {
+               sas_device_priv_data = sdev->hostdata;
+               if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+                       sas_target_priv_data = sas_device_priv_data->sas_target;
+                       sas_device = sas_device_priv_data->sas_target->sas_dev;
+                       if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+                               qdepth = ioc->max_nvme_qd;
+                       else if (sas_device &&
+                           sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+                               qdepth = (sas_device->port_type > 1) ?
+                                   ioc->max_wideport_qd : ioc->max_narrowport_qd;
+                       else if (sas_device &&
+                           sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+                               qdepth = ioc->max_sata_qd;
+                       else
+                               continue;
+                       mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+               }
+       }
+}
+
 /**
  * _scsih_mark_responding_sas_device - mark a sas_devices as responding
  * @ioc: per adapter object
@@ -10654,6 +10678,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
                _scsih_remove_unresponding_devices(ioc);
                _scsih_del_dirty_vphy(ioc);
                _scsih_del_dirty_port_entries(ioc);
+               if (ioc->is_gen35_ioc)
+                       _scsih_update_device_qdepth(ioc);
                _scsih_scan_for_devices_after_reset(ioc);
                /*
                 * If diag reset has occurred during the driver load
@@ -11178,8 +11204,10 @@ static void scsih_remove(struct pci_dev *pdev)
 
        ioc->remove_host = 1;
 
-       if (!pci_device_is_present(pdev))
+       if (!pci_device_is_present(pdev)) {
+               mpt3sas_base_pause_mq_polling(ioc);
                _scsih_flush_running_cmds(ioc);
+       }
 
        _scsih_fw_event_cleanup_queue(ioc);
 
@@ -11274,8 +11302,10 @@ scsih_shutdown(struct pci_dev *pdev)
 
        ioc->remove_host = 1;
 
-       if (!pci_device_is_present(pdev))
+       if (!pci_device_is_present(pdev)) {
+               mpt3sas_base_pause_mq_polling(ioc);
                _scsih_flush_running_cmds(ioc);
+       }
 
        _scsih_fw_event_cleanup_queue(ioc);
 
@@ -11785,12 +11815,41 @@ static int scsih_map_queues(struct Scsi_Host *shost)
 {
        struct MPT3SAS_ADAPTER *ioc =
            (struct MPT3SAS_ADAPTER *)shost->hostdata;
+       struct blk_mq_queue_map *map;
+       int i, qoff, offset;
+       int nr_msix_vectors = ioc->iopoll_q_start_index;
+       int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
 
-       if (ioc->shost->nr_hw_queues == 1)
+       if (shost->nr_hw_queues == 1)
                return 0;
 
-       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-           ioc->pdev, ioc->high_iops_queues);
+       for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+               map = &shost->tag_set.map[i];
+               map->nr_queues = 0;
+               offset = 0;
+               if (i == HCTX_TYPE_DEFAULT) {
+                       map->nr_queues =
+                           nr_msix_vectors - ioc->high_iops_queues;
+                       offset = ioc->high_iops_queues;
+               } else if (i == HCTX_TYPE_POLL)
+                       map->nr_queues = iopoll_q_count;
+
+               if (!map->nr_queues)
+                       BUG_ON(i == HCTX_TYPE_DEFAULT);
+
+               /*
+                * Poll queues do not have an IRQ (and hence no IRQ
+                * affinity), so use the regular blk-mq CPU mapping.
+                */
+               map->queue_offset = qoff;
+               if (i != HCTX_TYPE_POLL)
+                       blk_mq_pci_map_queues(map, ioc->pdev, offset);
+               else
+                       blk_mq_map_queues(map);
+
+               qoff += map->nr_queues;
+       }
+       return 0;
 }
 
 /* shost template for SAS 2.0 HBA devices */
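
As a worked example of the new mapping (numbers illustrative, not from the
driver): with reply_queue_count = 24 and iopoll_q_start_index = 16 there are
16 interrupt-driven reply queues and 8 poll queues. With high_iops_queues = 8,
the HCTX_TYPE_DEFAULT map gets 16 - 8 = 8 queues, mapped by PCI IRQ affinity
starting at MSI-X vector offset 8; HCTX_TYPE_READ stays empty; and the
HCTX_TYPE_POLL map gets the remaining 8 queues with the regular CPU mapping,
so qoff advances 0 to 8 to 16, matching nr_hw_queues = 16 set at probe time.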
@@ -11861,6 +11920,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
        .track_queue_depth              = 1,
        .cmd_size                       = sizeof(struct scsiio_tracker),
        .map_queues                     = scsih_map_queues,
+       .mq_poll                        = mpt3sas_blk_mq_poll,
 };
 
 /* raid transport support for SAS 3.0 HBA devices */
@@ -11957,6 +12017,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct Scsi_Host *shost = NULL;
        int rv;
        u16 hba_mpi_version;
+       int iopoll_q_count = 0;
 
        /* Determine in which MPI version class this pci device belongs */
        hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
@@ -12204,6 +12265,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_thread_fail;
        }
 
+       shost->host_tagset = 0;
+
+       if (ioc->is_gen35_ioc && host_tagset_enable)
+               shost->host_tagset = 1;
+
        ioc->is_driver_loading = 1;
        if ((mpt3sas_base_attach(ioc))) {
                ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -12226,16 +12292,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        } else
                ioc->hide_drives = 0;
 
-       shost->host_tagset = 0;
        shost->nr_hw_queues = 1;
 
-       if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
-           host_tagset_enable && ioc->smp_affinity_enable) {
-
-               shost->host_tagset = 1;
+       if (shost->host_tagset) {
                shost->nr_hw_queues =
                    ioc->reply_queue_count - ioc->high_iops_queues;
 
+               iopoll_q_count =
+                   ioc->reply_queue_count - ioc->iopoll_q_start_index;
+
+               shost->nr_maps = iopoll_q_count ? 3 : 1;
+
                dev_info(&ioc->pdev->dev,
                    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
                    shost->can_queue, shost->nr_hw_queues);
@@ -12359,6 +12426,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                /* Permanent error, prepare for device removal */
                ioc->pci_error_recovery = 1;
                mpt3sas_base_stop_watchdog(ioc);
+               mpt3sas_base_pause_mq_polling(ioc);
                _scsih_flush_running_cmds(ioc);
                return PCI_ERS_RESULT_DISCONNECT;
        }
index 6bb03d7..4d251bf 100644 (file)
@@ -702,7 +702,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
        mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 
        scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
-                       scmd->request->tag, scmd->cmnd[0], scmd->retries);
+                       scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);
 
        return mhba->instancet->reset_host(mhba);
 }
index 542ed88..a4a8832 100644 (file)
@@ -1263,6 +1263,7 @@ static int myrb_host_reset(struct scsi_cmnd *scmd)
 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
                struct scsi_cmnd *scmd)
 {
+       struct request *rq = scsi_cmd_to_rq(scmd);
        struct myrb_hba *cb = shost_priv(shost);
        struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
        union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1286,7 +1287,7 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
        }
 
        mbox->type3.opcode = MYRB_CMD_DCDB;
-       mbox->type3.id = scmd->request->tag + 3;
+       mbox->type3.id = rq->tag + 3;
        mbox->type3.addr = dcdb_addr;
        dcdb->channel = sdev->channel;
        dcdb->target = sdev->id;
@@ -1305,11 +1306,11 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
                break;
        }
        dcdb->early_status = false;
-       if (scmd->request->timeout <= 10)
+       if (rq->timeout <= 10)
                dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
-       else if (scmd->request->timeout <= 60)
+       else if (rq->timeout <= 60)
                dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
-       else if (scmd->request->timeout <= 600)
+       else if (rq->timeout <= 600)
                dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
        else
                dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
@@ -1550,7 +1551,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
        }
 
        myrb_reset_cmd(cmd_blk);
-       mbox->type5.id = scmd->request->tag + 3;
+       mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
        if (scmd->sc_data_direction == DMA_NONE)
                goto submit;
        nsge = scsi_dma_map(scmd);
index 26326af..07f274a 100644 (file)
@@ -1582,6 +1582,7 @@ static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
 static int myrs_queuecommand(struct Scsi_Host *shost,
                struct scsi_cmnd *scmd)
 {
+       struct request *rq = scsi_cmd_to_rq(scmd);
        struct myrs_hba *cs = shost_priv(shost);
        struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
        union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1628,7 +1629,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
                return SCSI_MLQUEUE_HOST_BUSY;
        cmd_blk->sense_addr = sense_addr;
 
-       timeout = scmd->request->timeout;
+       timeout = rq->timeout;
        if (scmd->cmd_len <= 10) {
                if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
                        struct myrs_ldev_info *ldev_info = sdev->hostdata;
@@ -1644,10 +1645,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
                        mbox->SCSI_10.pdev.target = sdev->id;
                        mbox->SCSI_10.pdev.channel = sdev->channel;
                }
-               mbox->SCSI_10.id = scmd->request->tag + 3;
+               mbox->SCSI_10.id = rq->tag + 3;
                mbox->SCSI_10.control.dma_ctrl_to_host =
                        (scmd->sc_data_direction == DMA_FROM_DEVICE);
-               if (scmd->request->cmd_flags & REQ_FUA)
+               if (rq->cmd_flags & REQ_FUA)
                        mbox->SCSI_10.control.fua = true;
                mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
                mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
@@ -1690,10 +1691,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
                        mbox->SCSI_255.pdev.target = sdev->id;
                        mbox->SCSI_255.pdev.channel = sdev->channel;
                }
-               mbox->SCSI_255.id = scmd->request->tag + 3;
+               mbox->SCSI_255.id = rq->tag + 3;
                mbox->SCSI_255.control.dma_ctrl_to_host =
                        (scmd->sc_data_direction == DMA_FROM_DEVICE);
-               if (scmd->request->cmd_flags & REQ_FUA)
+               if (rq->cmd_flags & REQ_FUA)
                        mbox->SCSI_255.control.fua = true;
                mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
                mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
index c76e9f0..7a4f5d4 100644 (file)
@@ -1453,11 +1453,6 @@ struct head {
 #define  xerr_status   phys.xerr_st
 #define  nego_status   phys.nego_st
 
-#if 0
-#define  sync_status   phys.sync_st
-#define  wide_status   phys.wide_st
-#endif
-
 /*==========================================================
 **
 **      Declaration of structs:     Data structure block
@@ -1980,9 +1975,6 @@ static inline char *ncr_name (struct ncb *np)
 #define        RELOC_SOFTC     0x40000000
 #define        RELOC_LABEL     0x50000000
 #define        RELOC_REGISTER  0x60000000
-#if 0
-#define        RELOC_KVAR      0x70000000
-#endif
 #define        RELOC_LABELH    0x80000000
 #define        RELOC_MASK      0xf0000000
 
@@ -1991,21 +1983,7 @@ static inline char *ncr_name (struct ncb *np)
 #define PADDRH(label)   (RELOC_LABELH | offsetof(struct scripth, label))
 #define        RADDR(label)    (RELOC_REGISTER | REG(label))
 #define        FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
-#if 0
-#define        KVAR(which)     (RELOC_KVAR | (which))
-#endif
 
-#if 0
-#define        SCRIPT_KVAR_JIFFIES     (0)
-#define        SCRIPT_KVAR_FIRST               SCRIPT_KVAR_JIFFIES
-#define        SCRIPT_KVAR_LAST                SCRIPT_KVAR_JIFFIES
-/*
- * Kernel variables referenced in the scripts.
- * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
- */
-static void *script_kvars[] __initdata =
-       { (void *)&jiffies };
-#endif
 
 static struct script script0 __initdata = {
 /*--------------------------< START >-----------------------*/ {
@@ -2162,11 +2140,6 @@ static   struct script script0 __initdata = {
        SCR_COPY (1),
                RADDR (scratcha),
                NADDR (msgout),
-#if 0
-       SCR_COPY (1),
-               RADDR (scratcha),
-               NADDR (msgin),
-#endif
        /*
        **      Anticipate the COMMAND phase.
        **      This is the normal case for initial selection.
@@ -4164,8 +4137,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
        **
        **----------------------------------------------------
        */
-       if (np->settle_time && cmd->request->timeout >= HZ) {
-               u_long tlimit = jiffies + cmd->request->timeout - HZ;
+       if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) {
+               u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ;
                if (time_after(np->settle_time, tlimit))
                        np->settle_time = tlimit;
        }
@@ -4378,10 +4351,6 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
        cp->parity_status               = 0;
 
        cp->xerr_status                 = XE_OK;
-#if 0
-       cp->sync_status                 = tp->sval;
-       cp->wide_status                 = tp->wval;
-#endif
 
        /*----------------------------------------------------
        **
@@ -4553,12 +4522,8 @@ static void ncr_start_reset(struct ncb *np)
 **
 **==========================================================
 */
-static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
+static int ncr_reset_bus (struct ncb *np)
 {
-/*     struct scsi_device        *device    = cmd->device; */
-       struct ccb *cp;
-       int found;
-
 /*
  * Return immediately if reset is in progress.
  */
@@ -4572,24 +4537,6 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
  * delay of 2 seconds will be completed.
  */
        ncr_start_reset(np);
-/*
- * First, look in the wakeup list
- */
-       for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
-               /*
-               **      look for the ccb of this command.
-               */
-               if (cp->host_status == HS_IDLE) continue;
-               if (cp->cmd == cmd) {
-                       found = 1;
-                       break;
-               }
-       }
-/*
- * Then, look in the waiting list
- */
-       if (!found && retrieve_from_waiting_list(0, np, cmd))
-               found = 1;
 /*
  * Wake-up all awaiting commands with DID_RESET.
  */
@@ -4598,103 +4545,10 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
  * Wake-up all pending commands with HS_RESET -> DID_RESET.
  */
        ncr_wakeup(np, HS_RESET);
-/*
- * If the involved command was not in a driver queue, and the 
- * scsi driver told us reset is synchronous, and the command is not 
- * currently in the waiting list, complete it with DID_RESET status,
- * in order to keep it alive.
- */
-       if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
-               set_host_byte(cmd, DID_RESET);
-               ncr_queue_done_cmd(np, cmd);
-       }
 
        return SUCCESS;
 }
 
-#if 0 /* unused and broken.. */
-/*==========================================================
-**
-**
-**     Abort an SCSI command.
-**     This is called from the generic SCSI driver.
-**
-**
-**==========================================================
-*/
-static int ncr_abort_command (struct ncb *np, struct scsi_cmnd *cmd)
-{
-/*     struct scsi_device        *device    = cmd->device; */
-       struct ccb *cp;
-       int found;
-       int retv;
-
-/*
- * First, look for the scsi command in the waiting list
- */
-       if (remove_from_waiting_list(np, cmd)) {
-               set_host_byte(cmd, DID_ABORT);
-               ncr_queue_done_cmd(np, cmd);
-               return SCSI_ABORT_SUCCESS;
-       }
-
-/*
- * Then, look in the wakeup list
- */
-       for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
-               /*
-               **      look for the ccb of this command.
-               */
-               if (cp->host_status == HS_IDLE) continue;
-               if (cp->cmd == cmd) {
-                       found = 1;
-                       break;
-               }
-       }
-
-       if (!found) {
-               return SCSI_ABORT_NOT_RUNNING;
-       }
-
-       if (np->settle_time) {
-               return SCSI_ABORT_SNOOZE;
-       }
-
-       /*
-       **      If the CCB is active, patch schedule jumps for the 
-       **      script to abort the command.
-       */
-
-       switch(cp->host_status) {
-       case HS_BUSY:
-       case HS_NEGOTIATE:
-               printk ("%s: abort ccb=%p (cancel)\n", ncr_name (np), cp);
-                       cp->start.schedule.l_paddr =
-                               cpu_to_scr(NCB_SCRIPTH_PHYS (np, cancel));
-               retv = SCSI_ABORT_PENDING;
-               break;
-       case HS_DISCONNECT:
-               cp->restart.schedule.l_paddr =
-                               cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
-               retv = SCSI_ABORT_PENDING;
-               break;
-       default:
-               retv = SCSI_ABORT_NOT_RUNNING;
-               break;
-
-       }
-
-       /*
-       **      If there are no requests, the script
-       **      processor will sleep on SEL_WAIT_RESEL.
-       **      Let's wake it up, since it may have to work.
-       */
-       OUTB (nc_istat, SIGP);
-
-       return retv;
-}
-#endif
-
 static void ncr_detach(struct ncb *np)
 {
        struct ccb *cp;
@@ -5453,27 +5307,6 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
        */
        fak = (kpc - 1) / div_10M[div] + 1;
 
-#if 0  /* This optimization does not seem very useful */
-
-       per = (fak * div_10M[div]) / clk;
-
-       /*
-       **      Why not to try the immediate lower divisor and to choose 
-       **      the one that allows the fastest output speed ?
-       **      We don't want input speed too much greater than output speed.
-       */
-       if (div >= 1 && fak < 8) {
-               u_long fak2, per2;
-               fak2 = (kpc - 1) / div_10M[div-1] + 1;
-               per2 = (fak2 * div_10M[div-1]) / clk;
-               if (per2 < per && fak2 <= 8) {
-                       fak = fak2;
-                       per = per2;
-                       --div;
-               }
-       }
-#endif
-
        if (fak < 4) fak = 4;   /* Should never happen, too bad ... */
 
        /*
@@ -5511,10 +5344,6 @@ static void ncr_set_sync_wide_status (struct ncb *np, u_char target)
        for (cp = np->ccb; cp; cp = cp->link_ccb) {
                if (!cp->cmd) continue;
                if (scmd_id(cp->cmd) != target) continue;
-#if 0
-               cp->sync_status = tp->sval;
-               cp->wide_status = tp->wval;
-#endif
                cp->phys.select.sel_scntl3 = tp->wval;
                cp->phys.select.sel_sxfer  = tp->sval;
        }
@@ -8125,7 +7954,7 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
         */
 
        spin_lock_irqsave(&np->smp_lock, flags);
-       sts = ncr_reset_bus(np, cmd, 1);
+       sts = ncr_reset_bus(np);
 
        done_list     = np->done_list;
        np->done_list = NULL;
@@ -8136,30 +7965,6 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
        return sts;
 }
 
-#if 0 /* unused and broken */
-static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
-{
-       struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
-       int sts;
-       unsigned long flags;
-       struct scsi_cmnd *done_list;
-
-       printk("ncr53c8xx_abort\n");
-
-       NCR_LOCK_NCB(np, flags);
-
-       sts = ncr_abort_command(np, cmd);
-out:
-       done_list     = np->done_list;
-       np->done_list = NULL;
-       NCR_UNLOCK_NCB(np, flags);
-
-       ncr_flush_done_cmds(done_list);
-
-       return sts;
-}
-#endif
-
 
 /*
 **     Scsi command waiting list management.
index e42acf3..33df6a9 100644 (file)
@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
                goto fail_disable;
 
        if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
-                           "fdomain_cs"))
+                           "fdomain_cs")) {
+               ret = -EBUSY;
                goto fail_disable;
+       }
 
        sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
        if (!sh) {
index 17c0f26..6369050 100644 (file)
@@ -1323,7 +1323,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
        void *pMessage;
        unsigned long flags;
        int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
-       int rv = -1;
+       int rv;
 
        WARN_ON(q_index >= PM8001_MAX_INB_NUM);
        spin_lock_irqsave(&circularQ->iq_lock, flags);
index 6b5b6a7..3404782 100644 (file)
@@ -1162,13 +1162,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
                return;
        }
 
-       if (!sc_cmd->request) {
-               QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
-                   "sc_cmd=%p.\n", sc_cmd);
-               return;
-       }
-
-       if (!sc_cmd->request->q) {
+       if (!scsi_cmd_to_rq(sc_cmd)->q) {
                QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
                   "is not valid, sc_cmd=%p.\n", sc_cmd);
                return;
index 85f41ab..42d0d94 100644 (file)
@@ -3004,7 +3004,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
 {
        u32 *list;
        int i;
-       int status = 0, rc;
+       int status;
        u32 *pbl;
        dma_addr_t page;
        int num_pages;
@@ -3016,7 +3016,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
         */
        if (!qedf->num_queues) {
                QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
-               return 1;
+               return -ENOMEM;
        }
 
        /*
@@ -3024,7 +3024,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
         * addresses of our queues
         */
        if (!qedf->p_cpuq) {
-               status = 1;
+               status = -EINVAL;
                QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
                goto mem_alloc_failure;
        }
@@ -3040,8 +3040,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
                   "qedf->global_queues=%p.\n", qedf->global_queues);
 
        /* Allocate DMA coherent buffers for BDQ */
-       rc = qedf_alloc_bdq(qedf);
-       if (rc) {
+       status = qedf_alloc_bdq(qedf);
+       if (status) {
                QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
                goto mem_alloc_failure;
        }
index 71333d3..d01cd82 100644 (file)
@@ -609,14 +609,7 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
                goto error;
        }
 
-       if (!sc_cmd->request) {
-               QEDI_WARN(&qedi->dbg_ctx,
-                         "sc_cmd->request is NULL, sc_cmd=%p.\n",
-                         sc_cmd);
-               goto error;
-       }
-
-       if (!sc_cmd->request->q) {
+       if (!scsi_cmd_to_rq(sc_cmd)->q) {
                QEDI_WARN(&qedi->dbg_ctx,
                          "request->q is NULL so request is not valid, sc_cmd=%p.\n",
                          sc_cmd);
@@ -936,17 +929,11 @@ exit_fp_process:
 
 static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
 {
-       struct iscsi_db_data dbell = { 0 };
-
-       dbell.agg_flags = 0;
+       qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
 
-       dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
-       dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
-       dbell.params |=
-                  DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
-
-       dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
-       writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+       /* wmb - Make sure fw idx is coherent */
+       wmb();
+       writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell);
 
        /* Make sure fw write idx is coherent, and include both memory barriers
         * as a failsafe as for some architectures the call is the same but on
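
The rewritten ring routine follows the usual publish-then-ring pattern:
update the shadow doorbell copy in coherent memory, order it with wmb(), then
issue the MMIO write. A generic sketch under assumed names (the function and
struct below are hypothetical, not qedi's):

/* Publish the new producer index, then ring the doorbell. */
static void ring_sq_doorbell(struct my_ep *ep, u16 new_prod)
{
        ep->db_data.sq_prod = new_prod;         /* 1. publish in memory */
        wmb();                                  /* 2. order store vs. MMIO */
        writel(*(u32 *)&ep->db_data, ep->p_doorbell);   /* 3. ring */
}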
index 97f8376..c526042 100644 (file)
@@ -499,8 +499,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
 
 static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 {
-       struct qedi_ctx *qedi = qedi_ep->qedi;
        struct qed_iscsi_params_offload *conn_info;
+       struct qedi_ctx *qedi = qedi_ep->qedi;
        int rval;
        int i;
 
@@ -577,10 +577,37 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
                  "Default cq index [%d], mss [%d]\n",
                  conn_info->default_cq, conn_info->mss);
 
+       /* Prepare the doorbell parameters */
+       qedi_ep->db_data.agg_flags = 0;
+       qedi_ep->db_data.params = 0;
+       SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_CMD,
+                 DB_AGG_CMD_MAX);
+       SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_ISCSI_SQ_PROD_CMD);
+       SET_FIELD(qedi_ep->db_data.params, ISCSI_DB_DATA_BYPASS_EN, 1);
+
+       /* Register doorbell with doorbell recovery mechanism */
+       rval = qedi_ops->common->db_recovery_add(qedi->cdev,
+                                                qedi_ep->p_doorbell,
+                                                &qedi_ep->db_data,
+                                                DB_REC_WIDTH_32B,
+                                                DB_REC_KERNEL);
+       if (rval) {
+               kfree(conn_info);
+               return rval;
+       }
+
        rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
-       if (rval)
+       if (rval) {
+               /* delete doorbell from doorbell recovery mechanism */
+               rval = qedi_ops->common->db_recovery_del(qedi->cdev,
+                                                        qedi_ep->p_doorbell,
+                                                        &qedi_ep->db_data);
+
                QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
                         rval, qedi_ep);
+       }
 
        kfree(conn_info);
        return rval;
@@ -1109,6 +1136,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
            test_bit(QEDI_IN_RECOVERY, &qedi->flags))
                goto ep_release_conn;
 
+       /* Delete doorbell from doorbell recovery mechanism */
+       ret = qedi_ops->common->db_recovery_del(qedi->cdev,
+                                              qedi_ep->p_doorbell,
+                                              &qedi_ep->db_data);
+
        ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
        if (ret) {
                QEDI_WARN(&qedi->dbg_ctx,
index 7587352..a31c5de 100644 (file)
@@ -80,6 +80,7 @@ struct qedi_endpoint {
        u32 handle;
        u32 fw_cid;
        void __iomem *p_doorbell;
+       struct iscsi_db_data db_data;
 
        /* Send queue management */
        struct iscsi_wqe *sq;
index 0b0acb8..e6dc0b4 100644 (file)
@@ -1621,7 +1621,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
 {
        u32 *list;
        int i;
-       int status = 0, rc;
+       int status;
        u32 *pbl;
        dma_addr_t page;
        int num_pages;
@@ -1632,14 +1632,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
         */
        if (!qedi->num_queues) {
                QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
-               return 1;
+               return -ENOMEM;
        }
 
        /* Make sure we allocated the PBL that will contain the physical
         * addresses of our queues
         */
        if (!qedi->p_cpuq) {
-               status = 1;
+               status = -EINVAL;
                goto mem_alloc_failure;
        }
 
@@ -1654,13 +1654,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
                  "qedi->global_queues=%p.\n", qedi->global_queues);
 
        /* Allocate DMA coherent buffers for BDQ */
-       rc = qedi_alloc_bdq(qedi);
-       if (rc)
+       status = qedi_alloc_bdq(qedi);
+       if (status)
                goto mem_alloc_failure;
 
        /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
-       rc = qedi_alloc_nvm_iscsi_cfg(qedi);
-       if (rc)
+       status = qedi_alloc_nvm_iscsi_cfg(qedi);
+       if (status)
                goto mem_alloc_failure;
 
        /* Allocate a CQ and an associated PBL for each MSI-X
index 928da90..d0b4e06 100644 (file)
@@ -490,7 +490,6 @@ __setup("qla1280=", qla1280_setup);
 #define        CMD_SNSLEN(Cmnd)        SCSI_SENSE_BUFFERSIZE
 #define        CMD_RESULT(Cmnd)        Cmnd->result
 #define        CMD_HANDLE(Cmnd)        Cmnd->host_scribble
-#define CMD_REQUEST(Cmnd)      Cmnd->request->cmd
 
 #define CMD_HOST(Cmnd)         Cmnd->device->host
 #define SCSI_BUS_32(Cmnd)      Cmnd->device->channel
@@ -2827,7 +2826,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
        memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
        /* Set ISP command timeout. */
-       pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+       pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
 
        /* Set device target ID and LUN */
        pkt->lun = SCSI_LUN_32(cmd);
@@ -3082,7 +3081,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
        memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
        /* Set ISP command timeout. */
-       pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+       pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
 
        /* Set device target ID and LUN */
        pkt->lun = SCSI_LUN_32(cmd);
@@ -3981,7 +3980,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
           qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
           } */
        printk("  tag=%d, transfersize=0x%x \n",
-              cmd->tag, cmd->transfersize);
+              scsi_cmd_to_rq(cmd)->tag, cmd->transfersize);
        printk("  SP=0x%p\n", CMD_SP(cmd));
        printk(" underflow size = 0x%x, direction=0x%x\n",
               cmd->underflow, cmd->sc_data_direction);
index 17d5bc1..cbc1303 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
                qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-               qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+               qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+               qla_edif.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
index 3aa9869..d09776b 100644 (file)
@@ -1887,6 +1887,30 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
        return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
 }
 
+static ssize_t
+qla2x00_mpi_pause_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int rval = 0;
+
+       if (sscanf(buf, "%d", &rval) != 1)
+               return -EINVAL;
+
+       ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n");
+
+       rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001);
+
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n");
+               count = 0;
+       }
+
+       return count;
+}
+
+static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store);
+
 /* ----- */
 
 static ssize_t
@@ -2435,6 +2459,7 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
     qla2x00_port_speed_store);
 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
+static DEVICE_ATTR_RO(edif_doorbell);
 
 
 struct device_attribute *qla2x00_host_attrs[] = {
@@ -2480,6 +2505,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_port_no,
        &dev_attr_fw_attr,
        &dev_attr_dport_diagnostics,
+       &dev_attr_edif_doorbell,
+       &dev_attr_mpi_pause,
        NULL, /* reserve for qlini_mode */
        NULL, /* reserve for ql2xiniexchg */
        NULL, /* reserve for ql2xexchoffld */
@@ -2706,12 +2733,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
         * final cleanup of firmware resources (PCBs and XCBs).
         */
        if (fcport->loop_id != FC_NO_LOOP_ID) {
-               if (IS_FWI2_CAPABLE(fcport->vha->hw))
-                       fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
-                           fcport->loop_id, fcport->d_id.b.domain,
-                           fcport->d_id.b.area, fcport->d_id.b.al_pa);
-               else
+               if (IS_FWI2_CAPABLE(fcport->vha->hw)) {
+                       if (fcport->loop_id != FC_NO_LOOP_ID)
+                               fcport->logout_on_delete = 1;
+
+                       qlt_schedule_sess_for_deletion(fcport);
+               } else {
                        qla2x00_port_logout(fcport->vha, fcport);
+               }
        }
 }
 
@@ -3107,6 +3136,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        qla2x00_wait_for_sess_deletion(vha);
 
        qla_nvme_delete(vha);
+       qla_enode_stop(vha);
+       qla_edb_stop(vha);
+
        vha->flags.delete_progress = 1;
 
        qlt_remove_target(ha, vha);
index d42b2ad..4b5d28d 100644 (file)
@@ -25,6 +25,10 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
        struct bsg_job *bsg_job = sp->u.bsg_job;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 
+       ql_dbg(ql_dbg_user, sp->vha, 0x7009,
+           "%s: sp hdl %x, result=%x bsg ptr %p\n",
+           __func__, sp->handle, res, bsg_job);
+
        sp->free(sp);
 
        bsg_reply->result = res;
@@ -53,11 +57,19 @@ void qla2x00_bsg_sp_free(srb_t *sp)
                            bsg_job->reply_payload.sg_list,
                            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        } else {
-               dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-                   bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 
-               dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-                   bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+               if (sp->remap.remapped) {
+                       dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+                           sp->remap.rsp.dma);
+                       dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+                           sp->remap.req.dma);
+               } else {
+                       dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+                               bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+                       dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+                               bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+               }
        }
 
        if (sp->type == SRB_CT_CMD ||
@@ -266,6 +278,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
        int req_sg_cnt, rsp_sg_cnt;
        int rval =  (DID_ERROR << 16);
        uint16_t nextlid = 0;
+       uint32_t els_cmd = 0;
 
        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                rport = fc_bsg_to_rport(bsg_job);
@@ -279,6 +292,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
                vha = shost_priv(host);
                ha = vha->hw;
                type = "FC_BSG_HST_ELS_NOLOGIN";
+               els_cmd = bsg_request->rqst_data.h_els.command_code;
+               if (els_cmd == ELS_AUTH_ELS)
+                       return qla_edif_process_els(vha, bsg_job);
        }
 
        if (!vha->flags.online) {
@@ -2768,10 +2784,13 @@ qla2x00_manage_host_port(struct bsg_job *bsg_job)
 }
 
 static int
-qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
 {
        struct fc_bsg_request *bsg_request = bsg_job->request;
 
+       ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
+           __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
+
        switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
        case QL_VND_LOOPBACK:
                return qla2x00_process_loopback(bsg_job);
@@ -2840,6 +2859,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
        case QL_VND_DPORT_DIAGNOSTICS:
                return qla2x00_do_dport_diagnostics(bsg_job);
 
+       case QL_VND_EDIF_MGMT:
+               return qla_edif_app_mgmt(bsg_job);
+
        case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
                return qla2x00_get_flash_image_status(bsg_job);
 
@@ -2897,12 +2919,19 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
                ql_dbg(ql_dbg_user, vha, 0x709f,
                    "BSG: ISP abort active/needed -- cmd=%d.\n",
                    bsg_request->msgcode);
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                return -EBUSY;
        }
 
+       if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               return -EIO;
+       }
+
 skip_chip_chk:
-       ql_dbg(ql_dbg_user, vha, 0x7000,
-           "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
+       ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+           "Entered %s msgcode=0x%x. bsg ptr %px\n",
+           __func__, bsg_request->msgcode, bsg_job);
 
        switch (bsg_request->msgcode) {
        case FC_BSG_RPT_ELS:
@@ -2913,7 +2942,7 @@ skip_chip_chk:
                ret = qla2x00_process_ct(bsg_job);
                break;
        case FC_BSG_HST_VENDOR:
-               ret = qla2x00_process_vendor_specific(bsg_job);
+               ret = qla2x00_process_vendor_specific(vha, bsg_job);
                break;
        case FC_BSG_HST_ADD_RPORT:
        case FC_BSG_HST_DEL_RPORT:
@@ -2922,6 +2951,10 @@ skip_chip_chk:
                ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
                break;
        }
+
+       ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
+           "%s done with return %x\n", __func__, ret);
+
        return ret;
 }
 
@@ -2936,6 +2969,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
        unsigned long flags;
        struct req_que *req;
 
+       ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
+           __func__, bsg_job);
        /* find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (que = 0; que < ha->max_req_queues; que++) {
@@ -2945,27 +2980,26 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
 
                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
-                       if (sp) {
-                               if (((sp->type == SRB_CT_CMD) ||
-                                       (sp->type == SRB_ELS_CMD_HST) ||
-                                       (sp->type == SRB_FXIOCB_BCMD))
-                                       && (sp->u.bsg_job == bsg_job)) {
-                                       req->outstanding_cmds[cnt] = NULL;
-                                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-                                       if (ha->isp_ops->abort_command(sp)) {
-                                               ql_log(ql_log_warn, vha, 0x7089,
-                                                   "mbx abort_command "
-                                                   "failed.\n");
-                                               bsg_reply->result = -EIO;
-                                       } else {
-                                               ql_dbg(ql_dbg_user, vha, 0x708a,
-                                                   "mbx abort_command "
-                                                   "success.\n");
-                                               bsg_reply->result = 0;
-                                       }
-                                       spin_lock_irqsave(&ha->hardware_lock, flags);
-                                       goto done;
+                       if (sp &&
+                           (sp->type == SRB_CT_CMD ||
+                            sp->type == SRB_ELS_CMD_HST ||
+                            sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
+                            sp->type == SRB_FXIOCB_BCMD) &&
+                           sp->u.bsg_job == bsg_job) {
+                               req->outstanding_cmds[cnt] = NULL;
+                               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+                               if (ha->isp_ops->abort_command(sp)) {
+                                       ql_log(ql_log_warn, vha, 0x7089,
+                                           "mbx abort_command failed.\n");
+                                       bsg_reply->result = -EIO;
+                               } else {
+                                       ql_dbg(ql_dbg_user, vha, 0x708a,
+                                           "mbx abort_command success.\n");
+                                       bsg_reply->result = 0;
                                }
+                               spin_lock_irqsave(&ha->hardware_lock, flags);
+                               goto done;
+
                        }
                }
        }
index 0274e99..dd793cf 100644 (file)
@@ -31,6 +31,7 @@
 #define QL_VND_DPORT_DIAGNOSTICS       0x19
 #define QL_VND_GET_PRIV_STATS_EX       0x1A
 #define QL_VND_SS_GET_FLASH_IMAGE_STATUS       0x1E
+#define QL_VND_EDIF_MGMT                0X1F
 #define QL_VND_MANAGE_HOST_STATS       0x23
 #define QL_VND_GET_HOST_STATS          0x24
 #define QL_VND_GET_TGT_STATS           0x25
@@ -294,4 +295,6 @@ struct qla_active_regions {
        uint8_t reserved[32];
 } __packed;
 
+#include "qla_edif_bsg.h"
+
 #endif
index f2d0559..25549a8 100644 (file)
@@ -12,8 +12,7 @@
  * ----------------------------------------------------------------------
  * | Module Init and Probe        |       0x0199       |                |
  * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff |
- * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
- * |                             |                    | 0x211a         |
+ * | Device Discovery             |       0x2134       | 0x210e-0x2115  |
  * |                              |                    | 0x211c-0x2128  |
  * |                              |                    | 0x212c-0x2134  |
  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
index 9eb708e..f1f6c74 100644 (file)
@@ -367,6 +367,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
 #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
 #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
 #define ql_dbg_tgt_dif  0x00000800 /* Target mode dif */
+#define ql_dbg_edif    0x00000400 /* edif and purex debug */
 
 extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
        uint32_t, void **);
index 2f67ec1..be2eb75 100644 (file)
@@ -49,6 +49,28 @@ typedef struct {
        uint8_t domain;
 } le_id_t;
 
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+       uint32_t b24 : 24;
+       struct {
+#ifdef __BIG_ENDIAN
+               uint8_t domain;
+               uint8_t area;
+               uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+               uint8_t al_pa;
+               uint8_t area;
+               uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+               uint8_t rsvd_1;
+       } b;
+} port_id_t;
+#define INVALID_PORT_ID        0xFFFFFF
+
 #include "qla_bsg.h"
 #include "qla_dsd.h"
 #include "qla_nx.h"
@@ -319,6 +341,13 @@ struct name_list_extended {
        u32                     size;
        u8                      sent;
 };
+
+struct els_reject {
+       struct fc_els_ls_rjt *c;
+       dma_addr_t  cdma;
+       u16 size;
+};
+
 /*
  * Timeout timer counts in seconds
  */
@@ -345,6 +374,8 @@ struct name_list_extended {
 #define FW_MAX_EXCHANGES_CNT (32 * 1024)
 #define REDUCE_EXCHANGES_CNT  (8 * 1024)
 
+#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16)
+
 struct req_que;
 struct qla_tgt_sess;
 
@@ -370,32 +401,10 @@ struct srb_cmd {
 #define SRB_CRC_CTX_DSD_VALID          BIT_5   /* DIF: dsd_list valid */
 #define SRB_WAKEUP_ON_COMP             BIT_6
 #define SRB_DIF_BUNDL_DMA_VALID                BIT_7   /* DIF: DMA list valid */
+#define SRB_EDIF_CLEANUP_DELETE                BIT_9
 
 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
 #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
-
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
-       uint32_t b24 : 24;
-
-       struct {
-#ifdef __BIG_ENDIAN
-               uint8_t domain;
-               uint8_t area;
-               uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
-               uint8_t al_pa;
-               uint8_t area;
-               uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
-               uint8_t rsvd_1;
-       } b;
-} port_id_t;
-#define INVALID_PORT_ID        0xFFFFFF
 #define ISP_REG16_DISCONNECT 0xFFFF
 
 static inline le_id_t be_id_to_le(be_id_t id)
@@ -483,6 +492,7 @@ struct srb_iocb {
 #define SRB_LOGIN_SKIP_PRLI    BIT_2
 #define SRB_LOGIN_NVME_PRLI    BIT_3
 #define SRB_LOGIN_PRLI_ONLY    BIT_4
+#define SRB_LOGIN_FCSP         BIT_5
                        uint16_t data[2];
                        u32 iop[2];
                } logio;
@@ -587,6 +597,10 @@ struct srb_iocb {
                        u16 cmd;
                        u16 vp_index;
                } ctrlvp;
+               struct {
+                       struct edif_sa_ctl      *sa_ctl;
+                       struct qla_sa_update_frame sa_frame;
+               } sa_update;
        } u;
 
        struct timer_list timer;
@@ -617,6 +631,21 @@ struct srb_iocb {
 #define SRB_PRLI_CMD   21
 #define SRB_CTRL_VP    22
 #define SRB_PRLO_CMD   23
+#define SRB_SA_UPDATE  25
+#define SRB_ELS_CMD_HST_NOLOGIN 26
+#define SRB_SA_REPLACE 27
+
+struct qla_els_pt_arg {
+       u8 els_opcode;
+       u8 vp_idx;
+       __le16 nport_handle;
+       u16 control_flags;
+       __le32 rx_xchg_address;
+       port_id_t did;
+       u32 tx_len, tx_byte_count, rx_len, rx_byte_count;
+       dma_addr_t tx_addr, rx_addr;
+
+};
 
 enum {
        TYPE_SRB,
@@ -630,6 +659,13 @@ struct iocb_resource {
        u16 iocb_cnt;
 };
 
+struct bsg_cmd {
+       struct bsg_job *bsg_job;
+       union {
+               struct qla_els_pt_arg els_arg;
+       } u;
+};
+
 typedef struct srb {
        /*
         * Do not move cmd_type field, it needs to
@@ -662,7 +698,21 @@ typedef struct srb {
                struct srb_iocb iocb_cmd;
                struct bsg_job *bsg_job;
                struct srb_cmd scmd;
+               struct bsg_cmd bsg_cmd;
        } u;
+       struct {
+               bool remapped;
+               struct {
+                       dma_addr_t dma;
+                       void *buf;
+                       uint len;
+               } req;
+               struct {
+                       dma_addr_t dma;
+                       void *buf;
+                       uint len;
+               } rsp;
+       } remap;
        /*
         * Report completion status @res and call sp_put(@sp). @res is
         * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
@@ -2294,6 +2344,7 @@ struct imm_ntfy_from_isp {
                        __le16  nport_handle;
                        uint16_t reserved_2;
                        __le16  flags;
+#define NOTIFY24XX_FLAGS_FCSP          BIT_5
 #define NOTIFY24XX_FLAGS_GLOBAL_TPRLO   BIT_1
 #define NOTIFY24XX_FLAGS_PUREX_IOCB     BIT_0
                        __le16  srr_rx_id;
@@ -2377,11 +2428,9 @@ struct mbx_24xx_entry {
  */
 typedef enum {
        FCT_UNKNOWN,
-       FCT_RSCN,
-       FCT_SWITCH,
-       FCT_BROADCAST,
-       FCT_INITIATOR,
-       FCT_TARGET,
+       FCT_BROADCAST = 0x01,
+       FCT_INITIATOR = 0x02,
+       FCT_TARGET    = 0x04,
        FCT_NVME_INITIATOR = 0x10,
        FCT_NVME_TARGET = 0x20,
        FCT_NVME_DISCOVERY = 0x40,
@@ -2424,6 +2473,7 @@ enum discovery_state {
        DSC_LOGIN_COMPLETE,
        DSC_ADISC,
        DSC_DELETE_PEND,
+       DSC_LOGIN_AUTH_PEND,
 };
 
 enum login_state {     /* FW control Target side */
@@ -2467,6 +2517,8 @@ typedef struct fc_port {
        unsigned int n2n_flag:1;
        unsigned int explicit_logout:1;
        unsigned int prli_pend_timer:1;
+       unsigned int do_prli_nvme:1;
+
        uint8_t nvme_flag;
 
        uint8_t node_name[WWN_SIZE];
@@ -2563,6 +2615,33 @@ typedef struct fc_port {
        u64 tgt_short_link_down_cnt;
        u64 tgt_link_down_time;
        u64 dev_loss_tmo;
+       /*
+        * EDIF parameters for encryption.
+        */
+       struct {
+               uint32_t        enable:1;       /* device is edif enabled/req'd */
+               uint32_t        app_stop:2;
+               uint32_t        app_started:1;
+               uint32_t        aes_gmac:1;
+               uint32_t        app_sess_online:1;
+               uint32_t        tx_sa_set:1;
+               uint32_t        rx_sa_set:1;
+               uint32_t        tx_sa_pending:1;
+               uint32_t        rx_sa_pending:1;
+               uint32_t        tx_rekey_cnt;
+               uint32_t        rx_rekey_cnt;
+               uint64_t        tx_bytes;
+               uint64_t        rx_bytes;
+               uint8_t         auth_state;
+               uint16_t        authok:1;
+               uint16_t        rekey_cnt;
+               struct list_head edif_indx_list;
+               spinlock_t  indx_list_lock;
+
+               struct list_head tx_sa_list;
+               struct list_head rx_sa_list;
+               spinlock_t      sa_list_lock;
+       } edif;
 } fc_port_t;
 
 enum {
@@ -2604,7 +2683,8 @@ static const char * const port_dstate_str[] = {
        "UPD_FCPORT",
        "LOGIN_COMPLETE",
        "ADISC",
-       "DELETE_PEND"
+       "DELETE_PEND",
+       "LOGIN_AUTH_PEND",
 };
 
 /*
@@ -2616,6 +2696,8 @@ static const char * const port_dstate_str[] = {
 #define FCF_ASYNC_SENT         BIT_3
 #define FCF_CONF_COMP_SUPPORTED BIT_4
 #define FCF_ASYNC_ACTIVE       BIT_5
+#define FCF_FCSP_DEVICE                BIT_6
+#define FCF_EDIF_DELETE                BIT_7
 
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID          0x1000
@@ -2707,7 +2789,7 @@ static const char * const port_dstate_str[] = {
 /*
  * FDMI HBA attribute types.
  */
-#define FDMI1_HBA_ATTR_COUNT                   9
+#define FDMI1_HBA_ATTR_COUNT                   10
 #define FDMI2_HBA_ATTR_COUNT                   17
 
 #define FDMI_HBA_NODE_NAME                     0x1
@@ -3386,6 +3468,7 @@ enum qla_work_type {
        QLA_EVT_SP_RETRY,
        QLA_EVT_IIDMA,
        QLA_EVT_ELS_PLOGI,
+       QLA_EVT_SA_REPLACE,
 };
 
 
@@ -3444,6 +3527,11 @@ struct qla_work_evt {
                        u8 fc4_type;
                        srb_t *sp;
                } gpnft;
+               struct {
+                       struct edif_sa_ctl      *sa_ctl;
+                       fc_port_t *fcport;
+                       uint16_t nport_handle;
+               } sa_update;
         } u;
 };
 
@@ -3845,7 +3933,6 @@ struct qlt_hw_data {
        int num_act_qpairs;
 #define DEFAULT_NAQP 2
        spinlock_t atio_lock ____cacheline_aligned;
-       struct btree_head32 host_map;
 };
 
 #define MAX_QFULL_CMDS_ALLOC   8192
@@ -3935,7 +4022,9 @@ struct qla_hw_data {
                uint32_t        scm_supported_f:1;
                                /* Enabled in Driver */
                uint32_t        scm_enabled:1;
-               uint32_t        max_req_queue_warned:1;
+               uint32_t        edif_hw:1;
+               uint32_t        edif_enabled:1;
+               uint32_t        n2n_fw_acc_sec:1;
                uint32_t        plogi_template_valid:1;
                uint32_t        port_isolated:1;
        } flags;
@@ -4347,6 +4436,7 @@ struct qla_hw_data {
        /* Cisco fabric attached */
 #define FW_ATTR_EXT0_SCM_CISCO         0x00002000
 #define FW_ATTR_EXT0_NVME2     BIT_13
+#define FW_ATTR_EXT0_EDIF      BIT_5
        uint16_t        fw_attributes_ext[2];
        uint32_t        fw_memory_size;
        uint32_t        fw_transfer_size;
@@ -4619,8 +4709,24 @@ struct qla_hw_data {
        struct qla_hw_data_stat stat;
        pci_error_state_t pci_error_state;
        u64 prev_cmd_cnt;
+       struct dma_pool *purex_dma_pool;
+       struct btree_head32 host_map;
+
+#define EDIF_NUM_SA_INDEX      512
+#define EDIF_TX_SA_INDEX_BASE  EDIF_NUM_SA_INDEX
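+       /* rx sa_indexes run 0..EDIF_NUM_SA_INDEX - 1; tx sa_indexes are
+        * biased by EDIF_TX_SA_INDEX_BASE and tracked in a separate bitmap
+        */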
+       void *edif_rx_sa_id_map;
+       void *edif_tx_sa_id_map;
+       spinlock_t sadb_fp_lock;
+
+       struct list_head sadb_tx_index_list;
+       struct list_head sadb_rx_index_list;
+       spinlock_t sadb_lock;   /* protects list */
+       struct els_reject elsrej;
+       u8 edif_post_stop_cnt_down;
 };
 
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
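+/* sized to hold an enode header plus a max ELS payload, rounded to a cacheline */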
+
 struct active_regions {
        uint8_t global;
        struct {
@@ -4659,6 +4765,8 @@ struct purex_item {
        } iocb;
 };
 
+#include "qla_edif.h"
+
 #define SCM_FLAG_RDF_REJECT            0x00
 #define SCM_FLAG_RDF_COMPLETED         0x01
 
@@ -4888,6 +4996,8 @@ typedef struct scsi_qla_host {
        u64 reset_cmd_err_cnt;
        u64 link_down_time;
        u64 short_link_down_cnt;
+       struct edif_dbell e_dbell;
+       struct pur_core pur_cinfo;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -5058,6 +5168,9 @@ struct secure_flash_update_block_pk {
 #define QLA_BUSY                       0x107
 #define QLA_ALREADY_REGISTERED         0x109
 #define QLA_OS_TIMER_EXPIRED           0x10a
+#define QLA_ERR_NO_QPAIR               0x10b
+#define QLA_ERR_NOT_FOUND              0x10c
+#define QLA_ERR_FROM_FW                        0x10d
 
 #define NVRAM_DELAY()          udelay(10)
 
@@ -5088,6 +5201,43 @@ enum nexus_wait_type {
        WAIT_LUN,
 };
 
+#define INVALID_EDIF_SA_INDEX  0xffff
+#define RX_DELETE_NO_EDIF_SA_INDEX     0xfffe
+
+#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE
+
+/* edif hash element */
+struct edif_list_entry {
+       uint16_t handle;                        /* nport_handle */
+       uint32_t update_sa_index;
+       uint32_t delete_sa_index;
+       uint32_t count;                         /* counter for filtering sa_index */
+#define EDIF_ENTRY_FLAGS_CLEANUP       0x01    /* this index is being cleaned up */
+       uint32_t flags;                         /* used by sadb cleanup code */
+       fc_port_t *fcport;                      /* needed by rx delay timer function */
+       struct timer_list timer;                /* rx delay timer */
+       struct list_head next;
+};
+
+#define EDIF_TX_INDX_BASE 512
+#define EDIF_RX_INDX_BASE 0
+#define EDIF_RX_DELETE_FILTER_COUNT 3  /* delay queuing rx delete until this many */
+
+/* entry in the sa_index free pool */
+
+struct sa_index_pair {
+       uint16_t sa_index;
+       uint32_t spi;
+};
+
+/* edif sa_index data structure */
+struct edif_sa_index_entry {
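+       /* two slots per handle: the in-use key and, during rekey, its
+        * pending replacement
+        */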
+       struct sa_index_pair sa_pair[2];
+       fc_port_t *fcport;
+       uint16_t handle;
+       struct list_head next;
+};
+
 /* Refer to SNIA SFF 8247 */
 struct sff_8247_a0 {
        u8 txid;        /* transceiver id */
@@ -5203,9 +5353,12 @@ struct sff_8247_a0 {
 #define NVME_FCP_TARGET(fcport) \
        (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
 
+#define NVME_PRIORITY(ha, fcport) \
+       (NVME_FCP_TARGET(fcport) && \
+        (ha->fc4_type_priority == FC4_PRIORITY_NVME))
+
 #define NVME_TARGET(ha, fcport) \
-       ((NVME_FCP_TARGET(fcport) && \
-       (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+       (fcport->do_prli_nvme || \
        NVME_ONLY_TARGET(fcport)) \
 
 #define PRLI_PHASE(_cls) \
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
new file mode 100644 (file)
index 0000000..ad746c6
--- /dev/null
@@ -0,0 +1,3461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c)  2021     Marvell
+ */
+#include "qla_def.h"
+#include "qla_edif.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <scsi/scsi_tcq.h>
+
+static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+               struct list_head *sa_list);
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+               struct qla_sa_update_frame *sa_frame);
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+               uint16_t sa_index);
+static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
+
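+/* edif doorbell event node, queued for delivery to the authentication app */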
+struct edb_node {
+       struct  list_head       list;
+       uint32_t                ntype;
+       union {
+               port_id_t       plogi_did;
+               uint32_t        async;
+               port_id_t       els_sid;
+               struct edif_sa_update_aen       sa_aen;
+       } u;
+};
+
+static struct els_sub_cmd {
+       uint16_t cmd;
+       const char *str;
+} sc_str[] = {
+       {SEND_ELS, "send ELS"},
+       {SEND_ELS_REPLY, "send ELS Reply"},
+       {PULL_ELS, "retrieve ELS"},
+};
+
+const char *sc_to_str(uint16_t cmd)
+{
+       int i;
+       struct els_sub_cmd *e;
+
+       for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
+               e = sc_str + i;
+               if (cmd == e->cmd)
+                       return e->str;
+       }
+       return "unknown";
+}
+
+static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
+               uint16_t handle)
+{
+       struct edif_list_entry *entry;
+       struct edif_list_entry *tentry;
+       struct list_head *indx_list = &fcport->edif.edif_indx_list;
+
+       list_for_each_entry_safe(entry, tentry, indx_list, next) {
+               if (entry->handle == handle)
+                       return entry;
+       }
+       return NULL;
+}
+
+/* timeout handler - fires when there is no traffic and a delayed rx sa_index delete is still pending */
+static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
+{
+       struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
+       fc_port_t *fcport = edif_entry->fcport;
+       struct scsi_qla_host *vha = fcport->vha;
+       struct  edif_sa_ctl *sa_ctl;
+       uint16_t nport_handle;
+       unsigned long flags = 0;
+
+       ql_dbg(ql_dbg_edif, vha, 0x3069,
+           "%s:  nport_handle 0x%x,  SA REPL Delay Timeout, %8phC portid=%06x\n",
+           __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
+
+       /*
+        * if delete_sa_index is valid then no one has serviced this
+        * delayed delete
+        */
+       spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+
+       /*
+        * delete_sa_index is invalidated when we find the new sa_index in
+        * the incoming data stream.  If it is not invalidated then we are
+        * still looking for the new sa_index because there is no I/O and we
+        * need to just force the rx delete and move on.  Otherwise
+        * we could get another rekey which will result in an error 66.
+        */
+       if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+               uint16_t delete_sa_index = edif_entry->delete_sa_index;
+
+               edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+               nport_handle = edif_entry->handle;
+               spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+               sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+                   delete_sa_index, 0);
+
+               if (sa_ctl) {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
+                           __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
+                           nport_handle);
+
+                       sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+                       set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+                       qla_post_sa_replace_work(fcport->vha, fcport,
+                           nport_handle, sa_ctl);
+
+               } else {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: sa_ctl not found for delete_sa_index: %d\n",
+                           __func__, edif_entry->delete_sa_index);
+               }
+       } else {
+               spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+       }
+}
+
+/*
+ * create a new list entry for this nport handle and
+ * add an sa_update index to the list - called for sa_update
+ */
+static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
+               uint16_t sa_index, uint16_t handle)
+{
+       struct edif_list_entry *entry;
+       unsigned long flags = 0;
+
+       /* if the entry exists, then just update the sa_index */
+       entry = qla_edif_list_find_sa_index(fcport, handle);
+       if (entry) {
+               entry->update_sa_index = sa_index;
+               entry->count = 0;
+               return 0;
+       }
+
+       /*
+        * This is the normal path - there should be no existing entry
+        * when update is called.  The exception is at startup
+        * when update is called for the first two sa_indexes
+        * followed by a delete of the first sa_index
+        */
+       entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
+       if (!entry)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&entry->next);
+       entry->handle = handle;
+       entry->update_sa_index = sa_index;
+       entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+       entry->count = 0;
+       entry->flags = 0;
+       timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
+       spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+       list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
+       spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+       return 0;
+}
+
+/* remove an entry from the list */
+static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
+{
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+       list_del(&entry->next);
+       spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
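+/* defer the sa_ctl replace/delete out of timer context via a QLA_EVT_SA_REPLACE work event */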
+int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+        fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
+{
+       struct qla_work_evt *e;
+
+       e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
+       if (!e)
+               return QLA_FUNCTION_FAILED;
+
+       e->u.sa_update.fcport = fcport;
+       e->u.sa_update.sa_ctl = sa_ctl;
+       e->u.sa_update.nport_handle = nport_handle;
+       fcport->flags |= FCF_ASYNC_ACTIVE;
+       return qla2x00_post_work(vha, e);
+}
+
+static void
+qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port  *fcport)
+{
+       ql_dbg(ql_dbg_edif, vha, 0x2058,
+           "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
+           fcport->node_name, fcport->port_name, fcport->d_id.b24);
+
+       fcport->edif.tx_rekey_cnt = 0;
+       fcport->edif.rx_rekey_cnt = 0;
+
+       fcport->edif.tx_bytes = 0;
+       fcport->edif.rx_bytes = 0;
+}
+
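+/*
+ * Common edif bsg gate: require edif and the app doorbell to be active,
+ * and service PULL_ELS requests inline.  Returns 0 if the caller should
+ * continue with the request, -EIO if the bsg job was completed here.
+ */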
+static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+               fc_port_t *fcport)
+{
+       struct extra_auth_els *p;
+       struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+       struct qla_bsg_auth_els_request *req =
+           (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+       if (!vha->hw->flags.edif_enabled) {
+               ql_dbg(ql_dbg_edif, vha, 0x9105,
+                   "%s edif not enabled\n", __func__);
+               goto done;
+       }
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s doorbell not enabled\n", __func__);
+               goto done;
+       }
+
+       p = &req->e;
+
+       /* Get response */
+       if (p->sub_cmd == PULL_ELS) {
+               struct qla_bsg_auth_els_reply *rpl =
+                       (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+               qla_pur_get_pending(vha, fcport, bsg_job);
+
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                       "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
+                       __func__, sc_to_str(p->sub_cmd), fcport->port_name,
+                       fcport->d_id.b24, rpl->rx_xchg_address,
+                       rpl->r.reply_payload_rcv_len, bsg_job);
+
+               goto done;
+       }
+       return 0;
+
+done:
+
+       bsg_job_done(bsg_job, bsg_reply->result,
+                       bsg_reply->reply_payload_rcv_len);
+       return -EIO;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
+{
+       fc_port_t *f, *tf;
+
+       f = NULL;
+       list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+               if ((f->flags & FCF_FCSP_DEVICE)) {
+                       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
+                           "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
+                           f->node_name, f->port_name,
+                           f->d_id.b24, id->b24);
+                       if (f->d_id.b24 == id->b24)
+                               return f;
+               }
+       }
+       return NULL;
+}
+
+/**
+ * qla_edif_app_check(): check for valid application id.
+ * @vha: host adapter pointer
+ * @appid: application id
+ * Return: false = fail, true = pass
+ */
+static bool
+qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
+{
+       /* check that the app is allowed/known to the driver */
+
+       if (appid.app_vid == EDIF_APP_ID) {
+               ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
+               return true;
+       }
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+           __func__, appid.app_vid);
+
+       return false;
+}
+
+static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state,
+               int waitonly)
+{
+       int cnt, max_cnt = 200;
+       bool traced = false;
+
+       fcport->keep_nport_handle = 1;
+
+       if (!waitonly) {
+               qla2x00_set_fcport_disc_state(fcport, state);
+               qlt_schedule_sess_for_deletion(fcport);
+       } else {
+               qla2x00_set_fcport_disc_state(fcport, state);
+       }
+
+       ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+               "%s: waiting for session, max_cnt=%u\n",
+               __func__, max_cnt);
+
+       cnt = 0;
+
+       if (waitonly) {
+               /* Marker wait min 10 msecs. */
+               msleep(50);
+               cnt += 50;
+       }
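+       /* poll in 20ms steps until the session reaches the requested state
+        * (or LOGIN_COMPLETE when only waiting), enters LOGIN_AUTH_PEND, or
+        * the iteration count exceeds max_cnt
+        */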
+       while (1) {
+               if (!traced) {
+                       ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+                           "%s: session sleep.\n",
+                           __func__);
+                       traced = true;
+               }
+               msleep(20);
+               cnt++;
+               if (waitonly && (fcport->disc_state == state ||
+                       fcport->disc_state == DSC_LOGIN_COMPLETE))
+                       break;
+               if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+                       break;
+               if (cnt > max_cnt)
+                       break;
+       }
+
+       if (!waitonly) {
+               ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+                   "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
+                   __func__, fcport->port_name, fcport->loop_id,
+                   fcport->d_id.b24, fcport, fcport->disc_state, cnt);
+       } else {
+               ql_dbg(ql_dbg_edif, fcport->vha, 0xf086,
+                   "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n",
+                   __func__, fcport->port_name, fcport->loop_id,
+                   fcport->d_id.b24, fcport, fcport->disc_state, cnt);
+       }
+}
+
+static void
+qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
+       int index)
+{
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+       list_del(&sa_ctl->next);
+       spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+       if (index >= EDIF_TX_SA_INDEX_BASE)
+               fcport->edif.tx_rekey_cnt--;
+       else
+               fcport->edif.rx_rekey_cnt--;
+       kfree(sa_ctl);
+}
+
+/* return an index to the freepool */
+static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
+               uint16_t sa_index)
+{
+       void *sa_id_map;
+       struct scsi_qla_host *vha = fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
+       u16 lsa_index = sa_index;
+
+       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+           "%s: entry\n", __func__);
+
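+       /* tx sa_indexes are stored biased by EDIF_TX_SA_INDEX_BASE; rebase
+        * before clearing the in-use bit in the tx bitmap
+        */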
+       if (dir) {
+               sa_id_map = ha->edif_tx_sa_id_map;
+               lsa_index -= EDIF_TX_SA_INDEX_BASE;
+       } else {
+               sa_id_map = ha->edif_rx_sa_id_map;
+       }
+
+       spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+       clear_bit(lsa_index, sa_id_map);
+       spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: index %d added to free pool\n", __func__, sa_index);
+}
+
+static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
+       struct fc_port *fcport, struct edif_sa_index_entry *entry,
+       int pdir)
+{
+       struct edif_list_entry *edif_entry;
+       struct  edif_sa_ctl *sa_ctl;
+       int i, dir;
+       int key_cnt = 0;
+
+       for (i = 0; i < 2; i++) {
+               if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
+                       continue;
+
+               if (fcport->loop_id != entry->handle) {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
+                           __func__, i, entry->handle, fcport->loop_id,
+                           entry->sa_pair[i].sa_index);
+               }
+
+               /* release the sa_ctl */
+               sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+                               entry->sa_pair[i].sa_index, pdir);
+               if (sa_ctl &&
+                   qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
+                       qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+               } else {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
+               }
+
+               /* Release the index */
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                       "%s: freeing sa_index %d, nph: 0x%x\n",
+                       __func__, entry->sa_pair[i].sa_index, entry->handle);
+
+               dir = (entry->sa_pair[i].sa_index <
+                       EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+               qla_edif_add_sa_index_to_freepool(fcport, dir,
+                       entry->sa_pair[i].sa_index);
+
+               /* Delete timer on RX */
+               if (pdir != SAU_FLG_TX) {
+                       edif_entry =
+                               qla_edif_list_find_sa_index(fcport, entry->handle);
+                       if (edif_entry) {
+                               ql_dbg(ql_dbg_edif, vha, 0x5033,
+                                   "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+                                   __func__, edif_entry, edif_entry->update_sa_index,
+                                   edif_entry->delete_sa_index);
+                               qla_edif_list_delete_sa_index(fcport, edif_entry);
+                               /*
+                                * valid delete_sa_index indicates there is a rx
+                                * delayed delete queued
+                                */
+                               if (edif_entry->delete_sa_index !=
+                                               INVALID_EDIF_SA_INDEX) {
+                                       del_timer(&edif_entry->timer);
+
+                                       /* build and send the aen */
+                                       fcport->edif.rx_sa_set = 1;
+                                       fcport->edif.rx_sa_pending = 0;
+                                       qla_edb_eventcreate(vha,
+                                                       VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+                                                       QL_VND_SA_STAT_SUCCESS,
+                                                       QL_VND_RX_SA_KEY, fcport);
+                               }
+                               ql_dbg(ql_dbg_edif, vha, 0x5033,
+                                   "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+                                   __func__, edif_entry, edif_entry->update_sa_index,
+                                   edif_entry->delete_sa_index);
+
+                               kfree(edif_entry);
+                       }
+               }
+               key_cnt++;
+       }
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: %d %s keys released\n",
+           __func__, key_cnt, pdir ? "tx" : "rx");
+}
+
+/* find and release all outstanding sadb sa_indices */
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+       struct edif_sa_index_entry *entry, *tmp;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+           "%s: Starting...\n", __func__);
+
+       spin_lock_irqsave(&ha->sadb_lock, flags);
+
+       list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+               if (entry->fcport == fcport) {
+                       list_del(&entry->next);
+                       spin_unlock_irqrestore(&ha->sadb_lock, flags);
+                       __qla2x00_release_all_sadb(vha, fcport, entry, 0);
+                       kfree(entry);
+                       spin_lock_irqsave(&ha->sadb_lock, flags);
+                       break;
+               }
+       }
+
+       list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+               if (entry->fcport == fcport) {
+                       list_del(&entry->next);
+                       spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+                       __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
+
+                       kfree(entry);
+                       spin_lock_irqsave(&ha->sadb_lock, flags);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&ha->sadb_lock, flags);
+}
+
+/**
+ * qla_edif_app_start - application has announced its presence
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ *
+ * Set/activate doorbell.  Reset current sessions and re-login with
+ * secure flag.
+ */
+static int
+qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       int32_t                 rval = 0;
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       struct app_start        appstart;
+       struct app_start_reply  appreply;
+       struct fc_port  *fcport, *tf;
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__);
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &appstart,
+           sizeof(struct app_start));
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
+            __func__, appstart.app_info.app_vid, appstart.app_start_flags);
+
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               /* mark doorbell as active since an app is now present */
+               vha->e_dbell.db_flags = EDB_ACTIVE;
+       } else {
+               ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
+                    __func__);
+       }
+
+       if (N2N_TOPO(vha->hw)) {
+               if (vha->hw->flags.n2n_fw_acc_sec)
+                       set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+               else
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               qla2xxx_wake_dpc(vha);
+       } else {
+               list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+                       ql_dbg(ql_dbg_edif, vha, 0xf084,
+                              "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
+                              __func__, fcport, fcport->port_name,
+                              fcport->loop_id, fcport->d_id.b24,
+                              fcport->logout_on_delete);
+
+                       ql_dbg(ql_dbg_edif, vha, 0xf084,
+                              "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
+                              fcport->keep_nport_handle,
+                              fcport->send_els_logo, fcport->disc_state,
+                              fcport->edif.auth_state, fcport->edif.app_stop);
+
+                       if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+                               break;
+                       if (!(fcport->flags & FCF_FCSP_DEVICE))
+                               continue;
+
+                       fcport->edif.app_started = 1;
+                       if (fcport->edif.app_stop ||
+                           (fcport->disc_state != DSC_LOGIN_COMPLETE &&
+                            fcport->disc_state != DSC_LOGIN_PEND &&
+                            fcport->disc_state != DSC_DELETED)) {
+                               /* no activity */
+                               fcport->edif.app_stop = 0;
+
+                               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                                      "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+                                      __func__, fcport->port_name);
+                               fcport->edif.app_sess_online = 1;
+                               qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+                       }
+                       qla_edif_sa_ctl_init(vha, fcport);
+               }
+       }
+
+       if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+               /* mark as active since an app is now present */
+               vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
+       } else {
+               ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
+                    __func__);
+       }
+
+       appreply.host_support_edif = vha->hw->flags.edif_enabled;
+       appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
+       appreply.edif_edb_active = vha->e_dbell.db_flags;
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+           sizeof(struct app_start_reply);
+
+       SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, &appreply,
+           sizeof(struct app_start_reply));
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d,
+           "%s app start completed with 0x%x\n",
+           __func__, rval);
+
+       return rval;
+}
+
+/**
+ * qla_edif_app_stop - app has announced it's exiting.
+ * @vha: host adapter pointer
+ * @bsg_job: user space command pointer
+ *
+ * Free any in-flight messages, clear all doorbell events
+ * to the application.  Reject any messages related to security.
+ */
+static int
+qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       struct app_stop         appstop;
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       struct fc_port  *fcport, *tf;
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &appstop,
+           sizeof(struct app_stop));
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
+           __func__, appstop.app_info.app_vid);
+
+       /* Call db stop and enode stop functions */
+
+       /* if we leave these running, short waits remain operational (< 16 secs) */
+       qla_enode_stop(vha);        /* stop enode */
+       qla_edb_stop(vha);          /* stop db */
+
+       list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+               if (!(fcport->flags & FCF_FCSP_DEVICE))
+                       continue;
+
+               if (fcport->flags & FCF_FCSP_DEVICE) {
+                       ql_dbg(ql_dbg_edif, vha, 0xf084,
+                           "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
+                           __func__, fcport,
+                           fcport->port_name, fcport->loop_id, fcport->d_id.b24,
+                           fcport->logout_on_delete, fcport->keep_nport_handle,
+                           fcport->send_els_logo);
+
+                       if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+                               break;
+
+                       fcport->edif.app_stop = 1;
+                       ql_dbg(ql_dbg_edif, vha, 0x911e,
+                               "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+                               __func__, fcport->port_name);
+
+                       fcport->send_els_logo = 1;
+                       qlt_schedule_sess_for_deletion(fcport);
+
+                       /* qla_edif_flush_sa_ctl_lists(fcport); */
+                       fcport->edif.app_started = 0;
+               }
+       }
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+       /* no return interface to app - it assumes we cleaned up ok */
+
+       return 0;
+}
+
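+/*
+ * Rekey helper - report whether both the tx and rx SA indexes have been
+ * set (required before PRLI completes).  Clears the set/pending flags and
+ * returns 0 when both are in place; returns 1 otherwise.
+ */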
+static int
+qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
+               struct app_plogi_reply *appplogireply)
+{
+       int     ret = 0;
+
+       if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
+                   __func__, fcport->port_name, fcport->edif.tx_sa_set,
+                   fcport->edif.rx_sa_set);
+               appplogireply->prli_status = 0;
+               ret = 1;
+       } else  {
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+                   fcport->port_name);
+               fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+               fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+               appplogireply->prli_status = 1;
+       }
+       return ret;
+}
+
+/**
+ * qla_edif_app_authok - authentication by app succeeded.  Driver can proceed
+ *   with prli
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       int32_t                 rval = 0;
+       struct auth_complete_cmd appplogiok;
+       struct app_plogi_reply  appplogireply = {0};
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       fc_port_t               *fcport = NULL;
+       port_id_t               portid = {0};
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &appplogiok,
+           sizeof(struct auth_complete_cmd));
+
+       switch (appplogiok.type) {
+       case PL_TYPE_WWPN:
+               fcport = qla2x00_find_fcport_by_wwpn(vha,
+                   appplogiok.u.wwpn, 0);
+               if (!fcport)
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s wwpn lookup failed: %8phC\n",
+                           __func__, appplogiok.u.wwpn);
+               break;
+       case PL_TYPE_DID:
+               fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
+               if (!fcport)
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s d_id lookup failed: %x\n", __func__,
+                           portid.b24);
+               break;
+       default:
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s undefined type: %x\n", __func__,
+                   appplogiok.type);
+               break;
+       }
+
+       if (!fcport) {
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               goto errstate_exit;
+       }
+
+       /*
+        * if port is online then this is a REKEY operation
+        * Only do sa update checking
+        */
+       if (atomic_read(&fcport->state) == FCS_ONLINE) {
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s Skipping PRLI complete based on rekey\n", __func__);
+               appplogireply.prli_status = 1;
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+               qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
+               goto errstate_exit;
+       }
+
+       /* make sure in AUTH_PENDING or else reject */
+       if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s wwpn %8phC is not in auth pending state (%x)\n",
+                   __func__, fcport->port_name, fcport->disc_state);
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+               appplogireply.prli_status = 0;
+               goto errstate_exit;
+       }
+
+       SET_DID_STATUS(bsg_reply->result, DID_OK);
+       appplogireply.prli_status = 1;
+       fcport->edif.authok = 1;
+       if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n",
+                   __func__, fcport->port_name, fcport->edif.tx_sa_set,
+                   fcport->edif.rx_sa_set);
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+               appplogireply.prli_status = 0;
+               goto errstate_exit;
+
+       } else {
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+                   fcport->port_name);
+               fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+               fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+       }
+
+       if (qla_ini_mode_enabled(vha)) {
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
+                   __func__, fcport->port_name);
+               qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1);
+               qla24xx_post_prli_work(vha, fcport);
+       }
+
+errstate_exit:
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, &appplogireply,
+           sizeof(struct app_plogi_reply));
+
+       return rval;
+}
+
+/**
+ * qla_edif_app_authfail - authentication by app has failed.  Driver is given
+ *   notice to tear down current session.
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       int32_t                 rval = 0;
+       struct auth_complete_cmd appplogifail;
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       fc_port_t               *fcport = NULL;
+       port_id_t               portid = {0};
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &appplogifail,
+           sizeof(struct auth_complete_cmd));
+
+       /*
+        * TODO: edif: app has failed this plogi. Inform driver to
+        * take any action (if any).
+        */
+       switch (appplogifail.type) {
+       case PL_TYPE_WWPN:
+               fcport = qla2x00_find_fcport_by_wwpn(vha,
+                   appplogifail.u.wwpn, 0);
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+               break;
+       case PL_TYPE_DID:
+               fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
+               if (!fcport)
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s d_id lookup failed: %x\n", __func__,
+                           portid.b24);
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+               break;
+       default:
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s undefined type: %x\n", __func__,
+                   appplogifail.type);
+               bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               rval = -1;
+               break;
+       }
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d,
+           "%s fcport is 0x%p\n", __func__, fcport);
+
+       if (fcport) {
+               /* set/reset edif values and flags */
+               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                   "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
+                   __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
+
+               if (qla_ini_mode_enabled(fcport->vha)) {
+                       fcport->send_els_logo = 1;
+                       qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0);
+               }
+       }
+
+       return rval;
+}
+
+/**
+ * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
+ *   [initiator|target] mode).  It can be for a specific session with a given
+ *   nport id or for all sessions.
+ * @vha: host adapter pointer
+ * @bsg_job: user request pointer
+ */
+static int
+qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       int32_t                 rval = 0;
+       int32_t                 num_cnt;
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       struct app_pinfo_req    app_req;
+       struct app_pinfo_reply  *app_reply;
+       port_id_t               tdid;
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &app_req,
+           sizeof(struct app_pinfo_req));
+
+       num_cnt = app_req.num_ports;    /* num of ports alloc'd by app */
+
+       app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
+           sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL);
+       if (!app_reply) {
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               rval = -1;
+       } else {
+               struct fc_port  *fcport = NULL, *tf;
+               uint32_t        pcnt = 0;
+
+               list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+                       if (!(fcport->flags & FCF_FCSP_DEVICE))
+                               continue;
+
+                       tdid = app_req.remote_pid;
+
+                       ql_dbg(ql_dbg_edif, vha, 0x2058,
+                           "APP request entry - portid=%06x.\n", tdid.b24);
+
+                       /* Ran out of space */
+                       if (pcnt > app_req.num_ports)
+                               break;
+
+                       if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
+                               continue;
+
+                       app_reply->ports[pcnt].rekey_count =
+                               fcport->edif.rekey_cnt;
+
+                       app_reply->ports[pcnt].remote_type =
+                               VND_CMD_RTYPE_UNKNOWN;
+                       if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
+                               app_reply->ports[pcnt].remote_type |=
+                                       VND_CMD_RTYPE_TARGET;
+                       if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
+                               app_reply->ports[pcnt].remote_type |=
+                                       VND_CMD_RTYPE_INITIATOR;
+
+                       app_reply->ports[pcnt].remote_pid = fcport->d_id;
+
+                       ql_dbg(ql_dbg_edif, vha, 0x2058,
+                           "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
+                           fcport->node_name, fcport->port_name, pcnt,
+                           fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
+
+                       switch (fcport->edif.auth_state) {
+                       case VND_CMD_AUTH_STATE_ELS_RCVD:
+                               if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
+                                       fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
+                                       app_reply->ports[pcnt].auth_state =
+                                               VND_CMD_AUTH_STATE_NEEDED;
+                               } else {
+                                       app_reply->ports[pcnt].auth_state =
+                                               VND_CMD_AUTH_STATE_ELS_RCVD;
+                               }
+                               break;
+                       default:
+                               app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
+                               break;
+                       }
+
+                       memcpy(app_reply->ports[pcnt].remote_wwpn,
+                           fcport->port_name, 8);
+
+                       app_reply->ports[pcnt].remote_state =
+                               (atomic_read(&fcport->state) ==
+                                   FCS_ONLINE ? 1 : 0);
+
+                       pcnt++;
+
+                       if (tdid.b24 != 0)
+                               break;
+               }
+               app_reply->port_count = pcnt;
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+       }
+
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, app_reply,
+           sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt);
+
+       kfree(app_reply);
+
+       return rval;
+}
+
+/**
+ * qla_edif_app_getstats - app would like to read various statistics info
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int32_t
+qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       int32_t                 rval = 0;
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       uint32_t ret_size, size;
+
+       struct app_sinfo_req    app_req;
+       struct app_stats_reply  *app_reply;
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &app_req,
+           sizeof(struct app_sinfo_req));
+       if (app_req.num_ports == 0) {
+               ql_dbg(ql_dbg_async, vha, 0x911d,
+                  "%s app did not indicate number of ports to return\n",
+                   __func__);
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               rval = -1;
+       }
+
+       size = sizeof(struct app_stats_reply) +
+           (sizeof(struct app_sinfo) * app_req.num_ports);
+
+       if (size > bsg_job->reply_payload.payload_len)
+               ret_size = bsg_job->reply_payload.payload_len;
+       else
+               ret_size = size;
+
+       app_reply = kzalloc(size, GFP_KERNEL);
+       if (!app_reply) {
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               rval = -1;
+       } else {
+               struct fc_port  *fcport = NULL, *tf;
+               uint32_t        pcnt = 0;
+
+               list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+                       if (fcport->edif.enable) {
+                               if (pcnt > app_req.num_ports)
+                                       break;
+
+                               app_reply->elem[pcnt].rekey_count =
+                                   fcport->edif.rekey_cnt;
+                               app_reply->elem[pcnt].tx_bytes =
+                                   fcport->edif.tx_bytes;
+                               app_reply->elem[pcnt].rx_bytes =
+                                   fcport->edif.rx_bytes;
+
+                               memcpy(app_reply->elem[pcnt].remote_wwpn,
+                                   fcport->port_name, 8);
+
+                               pcnt++;
+                       }
+               }
+               app_reply->elem_count = pcnt;
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+       }
+
+       bsg_reply->reply_payload_rcv_len =
+           sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+              bsg_job->reply_payload.sg_cnt, app_reply, ret_size);
+
+       kfree(app_reply);
+
+       return rval;
+}
+
+int32_t
+qla_edif_app_mgmt(struct bsg_job *bsg_job)
+{
+       struct fc_bsg_request   *bsg_request = bsg_job->request;
+       struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
+       struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+       scsi_qla_host_t         *vha = shost_priv(host);
+       struct app_id           appcheck;
+       bool done = true;
+       int32_t         rval = 0;
+       uint32_t        vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+           __func__, vnd_sc);
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &appcheck,
+           sizeof(struct app_id));
+
+       if (!vha->hw->flags.edif_enabled ||
+               test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
+                   __func__, bsg_job, vha->dpc_flags);
+
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               goto done;
+       }
+
+       if (!qla_edif_app_check(vha, appcheck)) {
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s app checked failed.\n",
+                   __func__);
+
+               bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               goto done;
+       }
+
+       switch (vnd_sc) {
+       case QL_VND_SC_SA_UPDATE:
+               done = false;
+               rval = qla24xx_sadb_update(bsg_job);
+               break;
+       case QL_VND_SC_APP_START:
+               rval = qla_edif_app_start(vha, bsg_job);
+               break;
+       case QL_VND_SC_APP_STOP:
+               rval = qla_edif_app_stop(vha, bsg_job);
+               break;
+       case QL_VND_SC_AUTH_OK:
+               rval = qla_edif_app_authok(vha, bsg_job);
+               break;
+       case QL_VND_SC_AUTH_FAIL:
+               rval = qla_edif_app_authfail(vha, bsg_job);
+               break;
+       case QL_VND_SC_GET_FCINFO:
+               rval = qla_edif_app_getfcinfo(vha, bsg_job);
+               break;
+       case QL_VND_SC_GET_STATS:
+               rval = qla_edif_app_getstats(vha, bsg_job);
+               break;
+       default:
+               ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
+                   __func__,
+                   bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
+               rval = EXT_STATUS_INVALID_PARAM;
+               bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               break;
+       }
+
+done:
+       if (done) {
+               ql_dbg(ql_dbg_user, vha, 0x7009,
+                   "%s: %d  bsg ptr done %p\n", __func__, __LINE__, bsg_job);
+               bsg_job_done(bsg_job, bsg_reply->result,
+                   bsg_reply->reply_payload_rcv_len);
+       }
+
+       return rval;
+}
+
+static struct edif_sa_ctl *
+qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
+       int dir)
+{
+       struct  edif_sa_ctl *sa_ctl;
+       struct qla_sa_update_frame *sap;
+       int     index = sa_frame->fast_sa_index;
+       unsigned long flags = 0;
+
+       sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
+       if (!sa_ctl) {
+               /* couldn't get space */
+               ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+                   "unable to allocate SA CTL\n");
+               return NULL;
+       }
+
+       /*
+        * need to allocate sa_index here and save it
+        * in both sa_ctl->index and sa_frame->fast_sa_index;
+        * If alloc fails then delete sa_ctl and return NULL
+        */
+       INIT_LIST_HEAD(&sa_ctl->next);
+       sap = &sa_ctl->sa_frame;
+       *sap = *sa_frame;
+       sa_ctl->index = index;
+       sa_ctl->fcport = fcport;
+       sa_ctl->flags = 0;
+       sa_ctl->state = 0L;
+       ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+           "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
+           __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
+       spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+       if (dir == SAU_FLG_TX)
+               list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
+       else
+               list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
+       spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+
+       return sa_ctl;
+}
+
+void
+qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
+{
+       struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
+
+       list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
+           next) {
+               list_del(&sa_ctl->next);
+               kfree(sa_ctl);
+       }
+
+       list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
+           next) {
+               list_del(&sa_ctl->next);
+               kfree(sa_ctl);
+       }
+
+       spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
+}
+
+struct edif_sa_ctl *
+qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
+{
+       struct edif_sa_ctl *sa_ctl, *tsa_ctl;
+       struct list_head *sa_list;
+
+       if (dir == SAU_FLG_TX)
+               sa_list = &fcport->edif.tx_sa_list;
+       else
+               sa_list = &fcport->edif.rx_sa_list;
+
+       list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
+               if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
+                   sa_ctl->index == index)
+                       return sa_ctl;
+       }
+       return NULL;
+}
+
+/* add the sa to the correct list */
+static int
+qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
+       struct qla_sa_update_frame *sa_frame)
+{
+       struct edif_sa_ctl *sa_ctl = NULL;
+       int dir;
+       uint16_t sa_index;
+
+       dir = (sa_frame->flags & SAU_FLG_TX);
+
+       /* map the spi to an sa_index */
+       sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
+       if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
+               /* process rx delete */
+               ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+                   "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
+                   __func__, fcport->loop_id, sa_frame->spi);
+
+               /* build and send the aen */
+               fcport->edif.rx_sa_set = 1;
+               fcport->edif.rx_sa_pending = 0;
+               qla_edb_eventcreate(fcport->vha,
+                   VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+                   QL_VND_SA_STAT_SUCCESS,
+                   QL_VND_RX_SA_KEY, fcport);
+
+               /* force a return of good bsg status */
+               return RX_DELETE_NO_EDIF_SA_INDEX;
+       } else if (sa_index == INVALID_EDIF_SA_INDEX) {
+               ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+                   "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
+                   __func__, sa_frame->spi, dir);
+               return INVALID_EDIF_SA_INDEX;
+       }
+
+       ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+           "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
+           __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
+
+       /* This is a local copy of sa_frame. */
+       sa_frame->fast_sa_index = sa_index;
+       /* create the sa_ctl */
+       sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
+       if (!sa_ctl) {
+               ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+                   "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
+                   __func__, sa_frame->spi, dir, sa_index);
+               return -1;
+       }
+
+       set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
+
+       if (dir == SAU_FLG_TX)
+               fcport->edif.tx_rekey_cnt++;
+       else
+               fcport->edif.rx_rekey_cnt++;
+
+       ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
+           "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
+           __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
+           fcport->edif.tx_rekey_cnt,
+           fcport->edif.rx_rekey_cnt, fcport->loop_id);
+
+       return 0;
+}
+
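+/* sa_frame.flags values that select which per-direction byte counter to reset */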
+#define QLA_SA_UPDATE_FLAGS_RX_KEY      0x0
+#define QLA_SA_UPDATE_FLAGS_TX_KEY      0x2
+
+int
+qla24xx_sadb_update(struct bsg_job *bsg_job)
+{
+       struct  fc_bsg_reply    *bsg_reply = bsg_job->reply;
+       struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+       scsi_qla_host_t *vha = shost_priv(host);
+       fc_port_t               *fcport = NULL;
+       srb_t                   *sp = NULL;
+       struct edif_list_entry *edif_entry = NULL;
+       int                     found = 0;
+       int                     rval = 0;
+       int result = 0;
+       struct qla_sa_update_frame sa_frame;
+       struct srb_iocb *iocb_cmd;
+
+       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
+           "%s entered, vha: 0x%p\n", __func__, vha);
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &sa_frame,
+           sizeof(struct qla_sa_update_frame));
+
+       /* Check if host is online */
+       if (!vha->flags.online) {
+               ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
+               rval = -EIO;
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               goto done;
+       }
+
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
+               rval = -EIO;
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               goto done;
+       }
+
+       fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
+       if (fcport) {
+               found = 1;
+               if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
+                       fcport->edif.tx_bytes = 0;
+               if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
+                       fcport->edif.rx_bytes = 0;
+       }
+
+       if (!found) {
+               ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
+                   sa_frame.port_id.b24);
+               rval = -EINVAL;
+               SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
+               goto done;
+       }
+
+       /* make sure the nport_handle is valid */
+       if (fcport->loop_id == FC_NO_LOOP_ID) {
+               ql_dbg(ql_dbg_edif, vha, 0x70e1,
+                   "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
+                   __func__, fcport->port_name, sa_frame.spi,
+                   fcport->disc_state);
+               rval = -EINVAL;
+               SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
+               goto done;
+       }
+
+       /* allocate and queue an sa_ctl */
+       result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
+
+       /* failure of bsg */
+       if (result == INVALID_EDIF_SA_INDEX) {
+               ql_dbg(ql_dbg_edif, vha, 0x70e1,
+                   "%s: %8phN, skipping update.\n",
+                   __func__, fcport->port_name);
+               rval = -EINVAL;
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               goto done;
+
+       /* rx delete failure */
+       } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
+               ql_dbg(ql_dbg_edif, vha, 0x70e1,
+                   "%s: %8phN, skipping rx delete.\n",
+                   __func__, fcport->port_name);
+               SET_DID_STATUS(bsg_reply->result, DID_OK);
+               goto done;
+       }
+
+       ql_dbg(ql_dbg_edif, vha, 0x70e1,
+           "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
+           __func__, fcport->port_name, sa_frame.fast_sa_index,
+           sa_frame.flags);
+
+       /* looking for rx index and delete */
+       if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+           (sa_frame.flags & SAU_FLG_INV)) {
+               uint16_t nport_handle = fcport->loop_id;
+               uint16_t sa_index = sa_frame.fast_sa_index;
+
+               /*
+                * Make sure we have an existing rx key; otherwise process
+                * this as a straight delete, just like TX.
+                * This is NOT a normal case; it indicates error recovery
+                * or key cleanup by the ipsec code above us.
+                */
+               edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
+               if (!edif_entry) {
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
+                           __func__, fcport->loop_id, sa_index);
+                       goto force_rx_delete;
+               }
+
+               /*
+                * if we have a forced delete for rx, remove the sa_index from the edif list
+                * and proceed with normal delete.  The rx delay timer should not be running
+                */
+               if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
+                       qla_edif_list_delete_sa_index(fcport, edif_entry);
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
+                           __func__, fcport->loop_id, sa_index);
+                       kfree(edif_entry);
+                       goto force_rx_delete;
+               }
+
+               /*
+                * delayed rx delete
+                *
+                * if delete_sa_index is not invalid then there is already
+                * a delayed index in progress, return bsg bad status
+                */
+               if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
+                       struct edif_sa_ctl *sa_ctl;
+
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
+                           __func__, edif_entry->handle, edif_entry->delete_sa_index);
+
+                       /* free up the sa_ctl that was allocated with the sa_index */
+                       sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
+                           (sa_frame.flags & SAU_FLG_TX));
+                       if (sa_ctl) {
+                               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                                   "%s: freeing sa_ctl for index %d\n",
+                                   __func__, sa_ctl->index);
+                               qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+                       }
+
+                       /* release the sa_index */
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: freeing sa_index %d, nph: 0x%x\n",
+                           __func__, sa_index, nport_handle);
+                       qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
+
+                       rval = -EINVAL;
+                       SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+                       goto done;
+               }
+
+               fcport->edif.rekey_cnt++;
+
+               /* configure and start the rx delay timer */
+               edif_entry->fcport = fcport;
+               edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
+
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
+                   __func__, edif_entry, sa_index, nport_handle);
+
+               /*
+                * Start the timer when we queue the delayed rx delete.
+                * This is an activity timer that goes off if we have not
+                * received packets with the new sa_index
+                */
+               add_timer(&edif_entry->timer);
+
+               /*
+                * sa_delete for rx key with an active rx key including this one
+                * add the delete rx sa index to the hash so we can look for it
+                * in the rsp queue.  Do this after making any changes to the
+                * edif_entry as part of the rx delete.
+                */
+
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
+                   __func__, sa_index, nport_handle, bsg_job);
+
+               edif_entry->delete_sa_index = sa_index;
+
+               bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+               bsg_reply->result = DID_OK << 16;
+
+               goto done;
+
+       /*
+        * rx index and update
+        * add the index to the list and continue with normal update
+        */
+       } else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
+           ((sa_frame.flags & SAU_FLG_INV) == 0)) {
+               /* sa_update for rx key */
+               uint32_t nport_handle = fcport->loop_id;
+               uint16_t sa_index = sa_frame.fast_sa_index;
+               int result;
+
+               /*
+                * add the update rx sa index to the hash so we can look for it
+                * in the rsp queue and continue normally
+                */
+
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s:  adding update sa_index %d, lid 0x%x to edif_list\n",
+                   __func__, sa_index, nport_handle);
+
+               result = qla_edif_list_add_sa_update_index(fcport, sa_index,
+                   nport_handle);
+               if (result) {
+                       ql_dbg(ql_dbg_edif, vha, 0x911d,
+                           "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
+                           __func__, sa_index, nport_handle);
+               }
+       }
+       if (sa_frame.flags & SAU_FLG_GMAC_MODE)
+               fcport->edif.aes_gmac = 1;
+       else
+               fcport->edif.aes_gmac = 0;
+
+force_rx_delete:
+       /*
+        * sa_update for both rx and tx keys, sa_delete for tx key
+        * immediately process the request
+        */
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp) {
+               rval = -ENOMEM;
+               SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+               goto done;
+       }
+
+       sp->type = SRB_SA_UPDATE;
+       sp->name = "bsg_sa_update";
+       sp->u.bsg_job = bsg_job;
+       /* sp->free = qla2x00_bsg_sp_free; */
+       sp->free = qla2x00_rel_sp;
+       sp->done = qla2x00_bsg_job_done;
+       iocb_cmd = &sp->u.iocb_cmd;
+       iocb_cmd->u.sa_update.sa_frame  = sa_frame;
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_dbg_edif, vha, 0x70e3,
+                   "qla2x00_start_sp failed=%d.\n", rval);
+
+               qla2x00_rel_sp(sp);
+               rval = -EIO;
+               SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+               goto done;
+       }
+
+       ql_dbg(ql_dbg_edif, vha, 0x911d,
+           "%s:  %s sent, hdl=%x, portid=%06x.\n",
+           __func__, sp->name, sp->handle, fcport->d_id.b24);
+
+       fcport->edif.rekey_cnt++;
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+       return 0;
+
+/*
+ * send back error status
+ */
+done:
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       ql_dbg(ql_dbg_edif, vha, 0x911d,
+           "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
+           __func__, bsg_reply->result, bsg_job);
+       bsg_job_done(bsg_job, bsg_reply->result,
+           bsg_reply->reply_payload_rcv_len);
+
+       return 0;
+}
+
+static void
+qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
+{
+       node->ntype = N_UNDEF;
+       kfree(node);
+}
+
+/**
+ * qla_enode_init - initialize enode structs & lock
+ * @vha: host adapter pointer
+ *
+ * should only be called when the driver is attaching
+ */
+void
+qla_enode_init(scsi_qla_host_t *vha)
+{
+       struct  qla_hw_data *ha = vha->hw;
+       char    name[32];
+
+       if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
+               /* list still active - error */
+               ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
+                   __func__);
+               return;
+       }
+
+       /* initialize lock which protects pur_core & init list */
+       spin_lock_init(&vha->pur_cinfo.pur_lock);
+       INIT_LIST_HEAD(&vha->pur_cinfo.head);
+
+       snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
+           ha->pdev->device);
+}
+
+/**
+ * qla_enode_stop - stop and clear enode data
+ * @vha: host adapter pointer
+ *
+ * called when the app has notified the driver it is exiting
+ */
+void
+qla_enode_stop(scsi_qla_host_t *vha)
+{
+       unsigned long flags;
+       struct enode *node, *q;
+
+       if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+               /* doorbell list not enabled */
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s enode not active\n", __func__);
+               return;
+       }
+
+       /* grab lock so list doesn't move */
+       spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+       vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
+
+       /* the list should be empty at this point; free anything left over */
+       list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
+               ql_dbg(ql_dbg_edif, vha, 0x910f,
+                   "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
+                   node->dinfo.nodecnt);
+               list_del_init(&node->list);
+               qla_enode_free(vha, node);
+       }
+       spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+}
+
+/*
+ *  allocate enode struct and populate buffer
+ *  returns: enode pointer with buffers
+ *           NULL on error
+ */
+static struct enode *
+qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+       struct enode            *node;
+       struct purexevent       *purex;
+
+       node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
+       if (!node)
+               return NULL;
+
+       purex = &node->u.purexinfo;
+       purex->msgp = (u8 *)(node + 1);
+       purex->msgp_len = ELS_MAX_PAYLOAD;
+
+       node->ntype = ntype;
+       INIT_LIST_HEAD(&node->list);
+       return node;
+}
+
+static void
+qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
+{
+       unsigned long flags;
+
+       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
+           "%s add enode for type=%x, cnt=%x\n",
+           __func__, ptr->ntype, ptr->dinfo.nodecnt);
+
+       spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+       list_add_tail(&ptr->list, &vha->pur_cinfo.head);
+       spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+       return;
+}
+
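+/*
+ * locate an enode by type-specific keys (for N_PUREX, p1 is the s_id and
+ * p2 is PUR_GET); a matching, completed node is unlinked and returned
+ */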
+static struct enode *
+qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
+{
+       struct enode            *node_rtn = NULL;
+       struct enode            *list_node = NULL;
+       unsigned long           flags;
+       struct list_head        *pos, *q;
+       uint32_t                sid;
+       uint32_t                rw_flag;
+       struct purexevent       *purex;
+
+       /* secure the list from moving under us */
+       spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
+
+       list_for_each_safe(pos, q, &vha->pur_cinfo.head) {
+               list_node = list_entry(pos, struct enode, list);
+
+               /* node type determines what p1 and p2 are */
+               purex = &list_node->u.purexinfo;
+               sid = p1;
+               rw_flag = p2;
+
+               if (purex->pur_info.pur_sid.b24 == sid) {
+                       if (purex->pur_info.pur_pend == 1 &&
+                           rw_flag == PUR_GET) {
+                               /*
+                                * if the receive is still in progress
+                                * and this is a read/get, we can't
+                                * transfer the payload yet
+                                */
+                               ql_dbg(ql_dbg_edif, vha, 0x9106,
+                                   "%s purex xfer in progress for sid=%x\n",
+                                   __func__, sid);
+                       } else {
+                               /* found it and it's complete */
+                               node_rtn = list_node;
+                               list_del(pos);
+                               break;
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
+
+       return node_rtn;
+}
+
+/**
+ * qla_pur_get_pending - read/return authentication message sent
+ *  from remote port
+ * @vha: host adapter pointer
+ * @fcport: session pointer
+ * @bsg_job: user request the message is copied to.
+ */
+static int
+qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+       struct bsg_job *bsg_job)
+{
+       struct enode            *ptr;
+       struct purexevent       *purex;
+       struct qla_bsg_auth_els_reply *rpl =
+           (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+       bsg_job->reply_len = sizeof(*rpl);
+
+       ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
+       if (!ptr) {
+               ql_dbg(ql_dbg_edif, vha, 0x9111,
+                   "%s no enode data found for %8phN sid=%06x\n",
+                   __func__, fcport->port_name, fcport->d_id.b24);
+               SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
+               return -EIO;
+       }
+
+       /*
+        * enode is now off the linked list and is ours to deal with
+        */
+       purex = &ptr->u.purexinfo;
+
+       /* Copy info back to caller */
+       rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
+
+       SET_DID_STATUS(rpl->r.result, DID_OK);
+       rpl->r.reply_payload_rcv_len =
+           sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
+               bsg_job->reply_payload.sg_cnt, purex->msgp,
+               purex->pur_info.pur_bytes_rcvd, 0);
+
+       /* data copy / passback completed - destroy enode */
+       qla_enode_free(vha, ptr);
+
+       return 0;
+}
+
+/* it is assumed the qpair lock is held */
+static int
+qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
+       struct qla_els_pt_arg *a)
+{
+       struct els_entry_24xx *els_iocb;
+
+       els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+       if (!els_iocb) {
+               ql_log(ql_log_warn, vha, 0x700c,
+                   "qla2x00_alloc_iocbs failed.\n");
+               return QLA_FUNCTION_FAILED;
+       }
+
+       qla_els_pt_iocb(vha, els_iocb, a);
+
+       ql_dbg(ql_dbg_edif, vha, 0x0183,
+           "Sending ELS reject...\n");
+       ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
+           vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
+       /* flush iocb to mem before notifying hw doorbell */
+       wmb();
+       qla2x00_start_iocbs(vha, qp->req);
+       return 0;
+}
+
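+/**
+ * qla_edb_init - initialize edif doorbell list & lock
+ * @vha: host adapter pointer
+ *
+ * re-initialization while the doorbell is EDB_ACTIVE is rejected
+ */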
+void
+qla_edb_init(scsi_qla_host_t *vha)
+{
+       if (vha->e_dbell.db_flags == EDB_ACTIVE) {
+               /* list already init'd - error */
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "edif db already initialized, cannot reinit\n");
+               return;
+       }
+
+       /* initialize lock which protects doorbell & init list */
+       spin_lock_init(&vha->e_dbell.db_lock);
+       INIT_LIST_HEAD(&vha->e_dbell.head);
+
+       /* create and initialize doorbell */
+       init_completion(&vha->e_dbell.dbell);
+}
+
+static void
+qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+       /*
+        * Releases the space held by this edb node entry.
+        * This function does _not_ free the edb node itself.
+        * NB: the edb node entry passed should not be on any list.
+        *
+        * Currently for the doorbell there's no additional cleanup
+        * needed, but this is here as a placeholder for future use.
+        */
+
+       if (!node) {
+               ql_dbg(ql_dbg_edif, vha, 0x09122,
+                   "%s error - no valid node passed\n", __func__);
+               return;
+       }
+
+       node->ntype = N_UNDEF;
+}
+
+/* function called when app is stopping */
+
+void
+qla_edb_stop(scsi_qla_host_t *vha)
+{
+       unsigned long flags;
+       struct edb_node *node, *q;
+
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               /* doorbell list not enabled */
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s doorbell not enabled\n", __func__);
+               return;
+       }
+
+       /* grab lock so list doesn't move */
+       spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+       vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
+       /* the list should be empty at this point; free anything left over */
+       list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
+               ql_dbg(ql_dbg_edif, vha, 0x910f,
+                   "%s freeing edb_node type=%x\n",
+                   __func__, node->ntype);
+               qla_edb_node_free(vha, node);
+               list_del(&node->list);
+
+               kfree(node);
+       }
+       spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+       /* wake up doorbell waiters - they'll be dismissed with error code */
+       complete_all(&vha->e_dbell.dbell);
+}
+
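+/* allocate an edb node of the given type; returns NULL on failure */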
+static struct edb_node *
+qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
+{
+       struct edb_node *node;
+
+       node = kzalloc(sizeof(*node), GFP_ATOMIC);
+       if (!node) {
+               /* couldn't get space */
+               ql_dbg(ql_dbg_edif, vha, 0x9100,
+                   "edb node unable to be allocated\n");
+               return NULL;
+       }
+
+       node->ntype = ntype;
+       INIT_LIST_HEAD(&node->list);
+       return node;
+}
+
+/* adds an already allocated edb node to the doorbell list */
+static bool
+qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
+{
+       unsigned long           flags;
+
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               /* doorbell list not enabled */
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s doorbell not enabled\n", __func__);
+               return false;
+       }
+
+       spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+       list_add_tail(&ptr->list, &vha->e_dbell.head);
+       spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+       /* ring doorbell for waiters */
+       complete(&vha->e_dbell.dbell);
+
+       return true;
+}
+
+/* adds event to doorbell list */
+void
+qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
+       uint32_t data, uint32_t data2, fc_port_t        *sfcport)
+{
+       struct edb_node *edbnode;
+       fc_port_t *fcport = sfcport;
+       port_id_t id;
+
+       if (!vha->hw->flags.edif_enabled) {
+               /* edif not enabled */
+               return;
+       }
+
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               if (fcport)
+                       fcport->edif.auth_state = dbtype;
+               /* doorbell list not enabled */
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s doorbell not enabled (type=%d)\n", __func__, dbtype);
+               return;
+       }
+
+       edbnode = qla_edb_node_alloc(vha, dbtype);
+       if (!edbnode) {
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s unable to alloc db node\n", __func__);
+               return;
+       }
+
+       if (!fcport) {
+               id.b.domain = (data >> 16) & 0xff;
+               id.b.area = (data >> 8) & 0xff;
+               id.b.al_pa = data & 0xff;
+               ql_dbg(ql_dbg_edif, vha, 0x09222,
+                   "%s: Arrived s_id: %06x\n", __func__,
+                   id.b24);
+               fcport = qla2x00_find_fcport_by_pid(vha, &id);
+               if (!fcport) {
+                       ql_dbg(ql_dbg_edif, vha, 0x09102,
+                           "%s can't find fcport for sid= 0x%x - ignoring\n",
+                       __func__, id.b24);
+                       kfree(edbnode);
+                       return;
+               }
+       }
+
+       /* populate the edb node */
+       switch (dbtype) {
+       case VND_CMD_AUTH_STATE_NEEDED:
+       case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+               edbnode->u.plogi_did.b24 = fcport->d_id.b24;
+               break;
+       case VND_CMD_AUTH_STATE_ELS_RCVD:
+               edbnode->u.els_sid.b24 = fcport->d_id.b24;
+               break;
+       case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+               edbnode->u.sa_aen.port_id = fcport->d_id;
+               edbnode->u.sa_aen.status =  data;
+               edbnode->u.sa_aen.key_type =  data2;
+               break;
+       default:
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                       "%s unknown type: %x\n", __func__, dbtype);
+               qla_edb_node_free(vha, edbnode);
+               kfree(edbnode);
+               edbnode = NULL;
+               break;
+       }
+
+       if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+               ql_dbg(ql_dbg_edif, vha, 0x09102,
+                   "%s unable to add dbnode\n", __func__);
+               qla_edb_node_free(vha, edbnode);
+               kfree(edbnode);
+               return;
+       }
+       if (edbnode && fcport)
+               fcport->edif.auth_state = dbtype;
+       ql_dbg(ql_dbg_edif, vha, 0x09102,
+           "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+}
+
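+/* pop the oldest entry off the doorbell fifo; returns NULL if list empty */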
+static struct edb_node *
+qla_edb_getnext(scsi_qla_host_t *vha)
+{
+       unsigned long   flags;
+       struct edb_node *edbnode = NULL;
+
+       spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+       /* db nodes are fifo - no qualifications done */
+       if (!list_empty(&vha->e_dbell.head)) {
+               edbnode = list_first_entry(&vha->e_dbell.head,
+                   struct edb_node, list);
+               list_del(&edbnode->list);
+       }
+
+       spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+       return edbnode;
+}
+
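+/*
+ * timer-driven edif housekeeping for N2N with fw-accelerated security:
+ * once the app doorbell is no longer active, count down and then request
+ * an ISP abort to turn off the auto 'PLOGI ACC + secure' fw behavior
+ */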
+void
+qla_edif_timer(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
+               if (vha->e_dbell.db_flags != EDB_ACTIVE &&
+                   ha->edif_post_stop_cnt_down) {
+                       ha->edif_post_stop_cnt_down--;
+
+                       /*
+                        * turn off the auto 'PLOGI ACC + secure=1' feature
+                        * (Additional FW Option[3], BIT_15) via a chip
+                        * reset once the countdown expires
+                        */
+                       if (ha->edif_post_stop_cnt_down == 0) {
+                               ql_dbg(ql_dbg_async, vha, 0x911d,
+                                      "%s chip reset to turn off PLOGI ACC + secure\n",
+                                      __func__);
+                               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                       }
+               } else {
+                       ha->edif_post_stop_cnt_down = 60;
+               }
+       }
+}
+
+/*
+ * the app uses a separate thread to read this. It'll wait until the doorbell
+ * is rung by the driver or the max wait time has expired
+ */
+ssize_t
+edif_doorbell_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       struct edb_node *dbnode = NULL;
+       struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
+       uint32_t dat_siz, buf_size, sz;
+
+       /* TODO: app currently hardcoded to 256. Will transition to bsg */
+       sz = 256;
+
+       /* stop new threads from waiting if we're not init'd */
+       if (vha->e_dbell.db_flags != EDB_ACTIVE) {
+               ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
+                   "%s error - edif db not enabled\n", __func__);
+               return 0;
+       }
+
+       if (!vha->hw->flags.edif_enabled) {
+               /* edif not enabled */
+               ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
+                   "%s error - edif not enabled\n", __func__);
+               return -1;
+       }
+
+       buf_size = 0;
+       while ((sz - buf_size) >= sizeof(struct edb_node)) {
+               /* remove the next item from the doorbell list */
+               dat_siz = 0;
+               dbnode = qla_edb_getnext(vha);
+               if (dbnode) {
+                       ap->event_code = dbnode->ntype;
+                       switch (dbnode->ntype) {
+                       case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+                       case VND_CMD_AUTH_STATE_NEEDED:
+                               ap->port_id = dbnode->u.plogi_did;
+                               dat_siz += sizeof(ap->port_id);
+                               break;
+                       case VND_CMD_AUTH_STATE_ELS_RCVD:
+                               ap->port_id = dbnode->u.els_sid;
+                               dat_siz += sizeof(ap->port_id);
+                               break;
+                       case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+                               ap->port_id = dbnode->u.sa_aen.port_id;
+                               memcpy(ap->event_data, &dbnode->u,
+                                               sizeof(struct edif_sa_update_aen));
+                               dat_siz += sizeof(struct edif_sa_update_aen);
+                               break;
+                       default:
+                               /* unknown node type, rtn unknown ntype */
+                               ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
+                               memcpy(ap->event_data, &dbnode->ntype, 4);
+                               dat_siz += 4;
+                               break;
+                       }
+
+                       ql_dbg(ql_dbg_edif, vha, 0x09102,
+                               "%s Doorbell consumed : type=%d %p\n",
+                               __func__, dbnode->ntype, dbnode);
+                       /* we're done with the db node, so free it up */
+                       qla_edb_node_free(vha, dbnode);
+                       kfree(dbnode);
+               } else {
+                       break;
+               }
+
+               ap->event_data_size = dat_siz;
+               /* 8bytes = ap->event_code + ap->event_data_size */
+               buf_size += dat_siz + 8;
+               ap = (struct edif_app_dbell *)(buf + buf_size);
+       }
+       return buf_size;
+}
+
+static void qla_noop_sp_done(srb_t *sp, int res)
+{
+       sp->free(sp);
+}
+
+/*
+ * Called from work queue
+ * build and send the sa_update iocb to delete an rx sa_index
+ */
+int
+qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
+{
+       srb_t *sp;
+       fc_port_t       *fcport = NULL;
+       struct srb_iocb *iocb_cmd = NULL;
+       int rval = QLA_SUCCESS;
+       struct  edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
+       uint16_t nport_handle = e->u.sa_update.nport_handle;
+
+       ql_dbg(ql_dbg_edif, vha, 0x70e6,
+           "%s: starting,  sa_ctl: %p\n", __func__, sa_ctl);
+
+       if (!sa_ctl) {
+               ql_dbg(ql_dbg_edif, vha, 0x70e6,
+                   "sa_ctl allocation failed\n");
+               return -ENOMEM;
+       }
+
+       fcport = sa_ctl->fcport;
+
+       /* Alloc SRB structure */
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp) {
+               ql_dbg(ql_dbg_edif, vha, 0x70e6,
+                "SRB allocation failed\n");
+               return -ENOMEM;
+       }
+
+       fcport->flags |= FCF_ASYNC_SENT;
+       iocb_cmd = &sp->u.iocb_cmd;
+       iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
+
+       ql_dbg(ql_dbg_edif, vha, 0x3073,
+           "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
+           fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
+       /*
+        * if this is a sadb cleanup delete, mark it so the isr can
+        * take the correct action
+        */
+       if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
+               /* mark this srb as a cleanup delete */
+               sp->flags |= SRB_EDIF_CLEANUP_DELETE;
+               ql_dbg(ql_dbg_edif, vha, 0x70e6,
+                   "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
+       }
+
+       sp->type = SRB_SA_REPLACE;
+       sp->name = "SA_REPLACE";
+       sp->fcport = fcport;
+       sp->free = qla2x00_rel_sp;
+       sp->done = qla_noop_sp_done;
+
+       rval = qla2x00_start_sp(sp);
+
+       if (rval != QLA_SUCCESS)
+               rval = QLA_FUNCTION_FAILED;
+
+       return rval;
+}
+
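+/*
+ * build an SA_UPDATE IOCB from the sa_frame stashed in the srb: the
+ * SAU_FLG_INV/SAU_FLG_TX flag pair selects rx/tx update vs. delete,
+ * and SAU_FLG_KEY256 selects the 256- vs. 128-bit key copy
+ */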
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+       int     itr = 0;
+       struct  scsi_qla_host           *vha = sp->vha;
+       struct  qla_sa_update_frame     *sa_frame =
+               &sp->u.iocb_cmd.u.sa_update.sa_frame;
+       u8 flags = 0;
+
+       switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
+       case 0:
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, sa_frame->fast_sa_index);
+               break;
+       case 1:
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, sa_frame->fast_sa_index);
+               flags |= SA_FLAG_INVALIDATE;
+               break;
+       case 2:
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, sa_frame->fast_sa_index);
+               flags |= SA_FLAG_TX;
+               break;
+       case 3:
+               ql_dbg(ql_dbg_edif, vha, 0x911d,
+                   "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, sa_frame->fast_sa_index);
+               flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
+               break;
+       }
+
+       sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+       sa_update_iocb->entry_count = 1;
+       sa_update_iocb->sys_define = 0;
+       sa_update_iocb->entry_status = 0;
+       sa_update_iocb->handle = sp->handle;
+       sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+       sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+       sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+       sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+       sa_update_iocb->flags = flags;
+       sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
+       sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
+       sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
+
+       sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
+       if (sp->fcport->edif.aes_gmac)
+               sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
+
+       if (sa_frame->flags & SAU_FLG_KEY256) {
+               sa_update_iocb->sa_control |= SA_CNTL_KEY256;
+               for (itr = 0; itr < 32; itr++)
+                       sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+       } else {
+               sa_update_iocb->sa_control |= SA_CNTL_KEY128;
+               for (itr = 0; itr < 16; itr++)
+                       sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
+       }
+
+       ql_dbg(ql_dbg_edif, vha, 0x921d,
+           "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
+           __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+           sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
+           sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
+           sp->fcport->edif.aes_gmac);
+
+       if (sa_frame->flags & SAU_FLG_TX)
+               sp->fcport->edif.tx_sa_pending = 1;
+       else
+               sp->fcport->edif.rx_sa_pending = 1;
+
+       sp->fcport->vha->qla_stats.control_requests++;
+}
+
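+/*
+ * build an SA_UPDATE IOCB that only invalidates the sa_index referenced
+ * by the sa_ctl (the work-queue driven rx replace/delete path)
+ */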
+void
+qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
+{
+       struct  scsi_qla_host           *vha = sp->vha;
+       struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
+       struct  edif_sa_ctl             *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
+       uint16_t nport_handle = sp->fcport->loop_id;
+
+       sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
+       sa_update_iocb->entry_count = 1;
+       sa_update_iocb->sys_define = 0;
+       sa_update_iocb->entry_status = 0;
+       sa_update_iocb->handle = sp->handle;
+
+       sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
+
+       sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
+       sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+       sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
+       sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+
+       /* Invalidate the index. salt, spi, control & key are ignored */
+       sa_update_iocb->flags = SA_FLAG_INVALIDATE;
+       sa_update_iocb->salt = 0;
+       sa_update_iocb->spi = 0;
+       sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
+       sa_update_iocb->sa_control = 0;
+
+       ql_dbg(ql_dbg_edif, vha, 0x921d,
+           "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
+           __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
+           sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
+           sa_update_iocb->sa_index, sp->handle);
+
+       sp->fcport->vha->qla_stats.control_requests++;
+}
+
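+/*
+ * handle a received AUTH ELS (purex): validate length and edif state,
+ * copy the payload into an enode for the app to fetch, and ring the
+ * doorbell; on any failure the exchange is rejected with an LS_RJT
+ */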
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
+{
+       struct purex_entry_24xx *p = *pkt;
+       struct enode            *ptr;
+       int             sid;
+       u16 totlen;
+       struct purexevent       *purex;
+       struct scsi_qla_host *host = NULL;
+       int rc;
+       struct fc_port *fcport;
+       struct qla_els_pt_arg a;
+       be_id_t beid;
+
+       memset(&a, 0, sizeof(a));
+
+       a.els_opcode = ELS_AUTH_ELS;
+       a.nport_handle = p->nport_handle;
+       a.rx_xchg_address = p->rx_xchg_addr;
+       a.did.b.domain = p->s_id[2];
+       a.did.b.area   = p->s_id[1];
+       a.did.b.al_pa  = p->s_id[0];
+       a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
+       a.tx_addr = vha->hw->elsrej.cdma;
+       a.vp_idx = vha->vp_idx;
+       a.control_flags = EPD_ELS_RJT;
+
+       sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
+
+       totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
+       if (le16_to_cpu(p->status_flags) & 0x8000) {
+               totlen = le16_to_cpu(p->trunc_frame_size);
+               qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+               __qla_consume_iocb(vha, pkt, rsp);
+               return;
+       }
+
+       if (totlen > MAX_PAYLOAD) {
+               ql_dbg(ql_dbg_edif, vha, 0x0910d,
+                   "%s WARNING: oversize ELS frame received (totlen=%x)\n",
+                   __func__, totlen);
+               qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+               __qla_consume_iocb(vha, pkt, rsp);
+               return;
+       }
+
+       if (!vha->hw->flags.edif_enabled) {
+               /* edif support not enabled */
+               ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
+                   __func__);
+               qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+               __qla_consume_iocb(vha, pkt, rsp);
+               return;
+       }
+
+       ptr = qla_enode_alloc(vha, N_PUREX);
+       if (!ptr) {
+               ql_dbg(ql_dbg_edif, vha, 0x09109,
+                   "WARNING: enode alloc failed for sid=%x\n",
+                   sid);
+               qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+               __qla_consume_iocb(vha, pkt, rsp);
+               return;
+       }
+
+       purex = &ptr->u.purexinfo;
+       purex->pur_info.pur_sid = a.did;
+       purex->pur_info.pur_pend = 0;
+       purex->pur_info.pur_bytes_rcvd = totlen;
+       purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
+       purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
+       purex->pur_info.pur_did.b.domain =  p->d_id[2];
+       purex->pur_info.pur_did.b.area =  p->d_id[1];
+       purex->pur_info.pur_did.b.al_pa =  p->d_id[0];
+       purex->pur_info.vp_idx = p->vp_idx;
+
+       rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
+               purex->msgp_len);
+       if (rc) {
+               qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+               qla_enode_free(vha, ptr);
+               return;
+       }
+       beid.al_pa = purex->pur_info.pur_did.b.al_pa;
+       beid.area   = purex->pur_info.pur_did.b.area;
+       beid.domain = purex->pur_info.pur_did.b.domain;
+       host = qla_find_host_by_d_id(vha, beid);
+       if (!host) {
+               ql_log(ql_log_fatal, vha, 0x508b,
+                   "%s Drop ELS due to unable to find host %06x\n",
+                   __func__, purex->pur_info.pur_did.b24);
+
+               qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
+               qla_enode_free(vha, ptr);
+               return;
+       }
+
+       fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
+
+       if (host->e_dbell.db_flags != EDB_ACTIVE ||
+           (fcport && EDIF_SESSION_DOWN(fcport))) {
+               ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
+                   __func__, host->e_dbell.db_flags,
+                   fcport ? fcport->d_id.b24 : 0);
+
+               qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+               qla_enode_free(host, ptr);
+               return;
+       }
+
+       /* add the local enode to the list */
+       qla_enode_add(host, ptr);
+
+       ql_dbg(ql_dbg_edif, host, 0x0910c,
+           "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
+           __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
+           purex->pur_info.pur_did.b24, p->rx_xchg_addr);
+
+       qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
+}
+
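+/*
+ * grab a free sa_index from the tx or rx free pool (dir != 0 selects tx,
+ * which is offset by EDIF_TX_SA_INDEX_BASE); returns INVALID_EDIF_SA_INDEX
+ * when the pool is exhausted
+ */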
+static uint16_t  qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
+{
+       struct scsi_qla_host *vha = fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       void *sa_id_map;
+       unsigned long flags = 0;
+       u16 sa_index;
+
+       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+           "%s: entry\n", __func__);
+
+       if (dir)
+               sa_id_map = ha->edif_tx_sa_id_map;
+       else
+               sa_id_map = ha->edif_rx_sa_id_map;
+
+       spin_lock_irqsave(&ha->sadb_fp_lock, flags);
+       sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
+       if (sa_index >=  EDIF_NUM_SA_INDEX) {
+               spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+               return INVALID_EDIF_SA_INDEX;
+       }
+       set_bit(sa_index, sa_id_map);
+       spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
+
+       if (dir)
+               sa_index += EDIF_TX_SA_INDEX_BASE;
+
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: index retrieved from free pool %d\n", __func__, sa_index);
+
+       return sa_index;
+}
+
+/* find an sadb entry for an nport_handle */
+static struct edif_sa_index_entry *
+qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
+               struct list_head *sa_list)
+{
+       struct edif_sa_index_entry *entry;
+       struct edif_sa_index_entry *tentry;
+       struct list_head *indx_list = sa_list;
+
+       list_for_each_entry_safe(entry, tentry, indx_list, next) {
+               if (entry->handle == nport_handle)
+                       return entry;
+       }
+       return NULL;
+}
+
+/* remove an sa_index from the nport_handle and return it to the free pool */
+static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
+               uint16_t sa_index)
+{
+       struct edif_sa_index_entry *entry;
+       struct list_head *sa_list;
+       int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+       int slot = 0;
+       int free_slot_count = 0;
+       scsi_qla_host_t *vha = fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
+
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: entry\n", __func__);
+
+       if (dir)
+               sa_list = &ha->sadb_tx_index_list;
+       else
+               sa_list = &ha->sadb_rx_index_list;
+
+       entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+       if (!entry) {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: no entry found for nport_handle 0x%x\n",
+                   __func__, nport_handle);
+               return -1;
+       }
+
+       spin_lock_irqsave(&ha->sadb_lock, flags);
+       /*
+        * each tx/rx direction has up to 2 sa indexes/slots: 1 slot for
+        * in-flight traffic, the other is used at re-key time.
+        */
+       for (slot = 0; slot < 2; slot++) {
+               if (entry->sa_pair[slot].sa_index == sa_index) {
+                       entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
+                       entry->sa_pair[slot].spi = 0;
+                       free_slot_count++;
+                       qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
+               } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+                       free_slot_count++;
+               }
+       }
+
+       if (free_slot_count == 2) {
+               list_del(&entry->next);
+               kfree(entry);
+       }
+       spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: sa_index %d removed, free_slot_count: %d\n",
+           __func__, sa_index, free_slot_count);
+
+       return 0;
+}
+
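+/*
+ * process an SA_UPDATE response IOCB: stop any pending rx delete timer,
+ * post success/failure doorbell events to the app, and on deletes release
+ * the sa_ctl and return the sa_index to the free pool
+ */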
+void
+qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
+       struct sa_update_28xx *pkt)
+{
+       const char *func = "SA_UPDATE_RESPONSE_IOCB";
+       srb_t *sp;
+       struct edif_sa_ctl *sa_ctl;
+       int old_sa_deleted = 1;
+       uint16_t nport_handle;
+       struct scsi_qla_host *vha;
+
+       sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
+
+       if (!sp) {
+               ql_dbg(ql_dbg_edif, v, 0x3063,
+                       "%s: no sp found for pkt\n", __func__);
+               return;
+       }
+       /* use sp->vha due to npiv */
+       vha = sp->vha;
+
+       switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
+       case 0:
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, pkt->sa_index);
+               break;
+       case 1:
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, pkt->sa_index);
+               break;
+       case 2:
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, pkt->sa_index);
+               break;
+       case 3:
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
+                   __func__, vha, pkt->sa_index);
+               break;
+       }
+
+       /*
+        * dig the nport handle out of the iocb; fcport->loop_id cannot be
+        * trusted to be correct during cleanup sa_update iocbs.
+        */
+       nport_handle = sp->fcport->loop_id;
+
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
+           __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
+           nport_handle, pkt->sa_index, pkt->flags, sp->handle);
+
+       /* if rx delete, remove the timer */
+       if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) ==  SA_FLAG_INVALIDATE) {
+               struct edif_list_entry *edif_entry;
+
+               sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+               edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
+               if (edif_entry) {
+                       ql_dbg(ql_dbg_edif, vha, 0x5033,
+                           "%s: removing edif_entry %p, new sa_index: 0x%x\n",
+                           __func__, edif_entry, pkt->sa_index);
+                       qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
+                       del_timer(&edif_entry->timer);
+
+                       ql_dbg(ql_dbg_edif, vha, 0x5033,
+                           "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
+                           __func__, edif_entry, pkt->sa_index);
+
+                       kfree(edif_entry);
+               }
+       }
+
+       /*
+        * if this is a delete for either tx or rx, make sure it succeeded.
+        * The new_sa_info field should be 0xffff on success
+        */
+       if (pkt->flags & SA_FLAG_INVALIDATE)
+               old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
+
+       /* Process update and delete the same way */
+
+       /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
+       if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
+               sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: nph 0x%x, sa_index %d removed from fw\n",
+                   __func__, sp->fcport->loop_id, pkt->sa_index);
+
+       } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
+           old_sa_deleted) {
+               /*
+                * Note: we are only keeping track of the latest SA,
+                * so we know when we can start enabling encryption per I/O.
+                * If all SAs get deleted, let FW reject the IOCB.
+                *
+                * TODO: edif: don't set enabled here I think
+                * TODO: edif: prli complete is where it should be set
+                */
+               ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+                       "SA(%x)updated for s_id %02x%02x%02x\n",
+                       pkt->new_sa_info,
+                       pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+               sp->fcport->edif.enable = 1;
+               if (pkt->flags & SA_FLAG_TX) {
+                       sp->fcport->edif.tx_sa_set = 1;
+                       sp->fcport->edif.tx_sa_pending = 0;
+                       qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+                               QL_VND_SA_STAT_SUCCESS,
+                               QL_VND_TX_SA_KEY, sp->fcport);
+               } else {
+                       sp->fcport->edif.rx_sa_set = 1;
+                       sp->fcport->edif.rx_sa_pending = 0;
+                       qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+                               QL_VND_SA_STAT_SUCCESS,
+                               QL_VND_RX_SA_KEY, sp->fcport);
+               }
+       } else {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
+                   __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
+                   pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
+
+               if (pkt->flags & SA_FLAG_TX)
+                       qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+                               (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+                               QL_VND_TX_SA_KEY, sp->fcport);
+               else
+                       qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+                               (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
+                               QL_VND_RX_SA_KEY, sp->fcport);
+       }
+
+       /* for delete, release sa_ctl, sa_index */
+       if (pkt->flags & SA_FLAG_INVALIDATE) {
+               /* release the sa_ctl */
+               sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
+                   le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
+               if (sa_ctl &&
+                   qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
+                       (pkt->flags & SA_FLAG_TX)) != NULL) {
+                       ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
+                           "%s: freeing sa_ctl for index %d\n",
+                           __func__, sa_ctl->index);
+                       qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
+               } else {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: sa_ctl NOT freed, sa_ctl: %p\n",
+                           __func__, sa_ctl);
+               }
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: freeing sa_index %d, nph: 0x%x\n",
+                   __func__, le16_to_cpu(pkt->sa_index), nport_handle);
+               qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+                   le16_to_cpu(pkt->sa_index));
+       /*
+        * check for a failed sa_update and remove
+        * the sadb entry.
+        */
+       } else if (pkt->u.comp_sts) {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: freeing sa_index %d, nph: 0x%x\n",
+                   __func__, pkt->sa_index, nport_handle);
+               qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
+                   le16_to_cpu(pkt->sa_index));
+               switch (le16_to_cpu(pkt->u.comp_sts)) {
+               case CS_PORT_EDIF_UNAVAIL:
+               case CS_PORT_EDIF_LOGOUT:
+                       qlt_schedule_sess_for_deletion(sp->fcport);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       sp->done(sp, 0);
+}
+
+/**
+ * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Return: non-zero if a failure occurred, else zero.
+ */
+int
+qla28xx_start_scsi_edif(srb_t *sp)
+{
+       int             nseg;
+       unsigned long   flags;
+       struct scsi_cmnd *cmd;
+       uint32_t        *clr_ptr;
+       uint32_t        index, i;
+       uint32_t        handle;
+       uint16_t        cnt;
+       int16_t        req_cnt;
+       uint16_t        tot_dsds;
+       __be32 *fcp_dl;
+       uint8_t additional_cdb_len;
+       struct ct6_dsd *ctx;
+       struct scsi_qla_host *vha = sp->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct cmd_type_6 *cmd_pkt;
+       struct dsd64    *cur_dsd;
+       uint8_t         avail_dsds = 0;
+       struct scatterlist *sg;
+       struct req_que *req = sp->qpair->req;
+       spinlock_t *lock = sp->qpair->qp_lock_ptr;
+
+       /* Setup device pointers. */
+       cmd = GET_CMD_SP(sp);
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
+                       QLA_SUCCESS) {
+                       ql_log(ql_log_warn, vha, 0x300c,
+                           "qla2x00_marker failed for cmd=%p.\n", cmd);
+                       return QLA_FUNCTION_FAILED;
+               }
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
+               handle++;
+               if (handle == req->num_outstanding_cmds)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+       if (index == req->num_outstanding_cmds)
+               goto queuing_error;
+
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+       } else {
+               nseg = 0;
+       }
+
+       tot_dsds = nseg;
+       req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+       if (req->cnt < (req_cnt + 2)) {
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   rd_reg_dword(req->req_q_out);
+               if (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                           (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+       }
+
+       ctx = sp->u.scmd.ct6_ctx =
+           mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+       if (!ctx) {
+               ql_log(ql_log_fatal, vha, 0x3010,
+                   "Failed to allocate ctx for cmd=%p.\n", cmd);
+               goto queuing_error;
+       }
+
+       memset(ctx, 0, sizeof(struct ct6_dsd));
+       ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+           GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+       if (!ctx->fcp_cmnd) {
+               ql_log(ql_log_fatal, vha, 0x3011,
+                   "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+               goto queuing_error;
+       }
+
+       /* Initialize the DSD list and dma handle */
+       INIT_LIST_HEAD(&ctx->dsd_list);
+       ctx->dsd_use_cnt = 0;
+
+       if (cmd->cmd_len > 16) {
+               additional_cdb_len = cmd->cmd_len - 16;
+               if ((cmd->cmd_len % 4) != 0) {
+                       /*
+                        * SCSI commands bigger than 16 bytes must be
+                        * a multiple of 4
+                        */
+                       ql_log(ql_log_warn, vha, 0x3012,
+                           "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+                           cmd->cmd_len, cmd);
+                       goto queuing_error_fcp_cmnd;
+               }
+               ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+       } else {
+               additional_cdb_len = 0;
+               ctx->fcp_cmnd_len = 12 + 16 + 4;
+       }
+
+       cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+       cmd_pkt->handle = make_handle(req->id, handle);
+
+       /*
+        * Zero out remaining portion of packet.
+        * tagged queuing modifier -- default is TSK_SIMPLE (0).
+        */
+       clr_ptr = (uint32_t *)cmd_pkt + 2;
+       memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+       cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+       /* No data transfer */
+       if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+               cmd_pkt->byte_count = cpu_to_le32(0);
+               goto no_dsds;
+       }
+
+       /* Set transfer direction */
+       if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+               cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+               vha->qla_stats.output_requests++;
+               sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
+       } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+               cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+               vha->qla_stats.input_requests++;
+               sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
+       }
+
+       cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+       cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
+
+       /* One DSD is available in the Command Type 6 IOCB */
+       avail_dsds = 1;
+       cur_dsd = &cmd_pkt->fcp_dsd;
+
+       /* Load data segments */
+       scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+               dma_addr_t      sle_dma;
+               cont_a64_entry_t *cont_pkt;
+
+               /* Allocate additional continuation packets? */
+               if (avail_dsds == 0) {
+                       /*
+                        * Five DSDs are available in the Continuation
+                        * Type 1 IOCB.
+                        */
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
+                       cur_dsd = cont_pkt->dsd;
+                       avail_dsds = 5;
+               }
+
+               sle_dma = sg_dma_address(sg);
+               put_unaligned_le64(sle_dma, &cur_dsd->address);
+               cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
+               cur_dsd++;
+               avail_dsds--;
+       }
+
+no_dsds:
+       /* Set NPORT-ID and LUN number */
+       cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+       cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+       cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+       cmd_pkt->vp_index = sp->vha->vp_idx;
+
+       cmd_pkt->entry_type = COMMAND_TYPE_6;
+
+       /* Set total data segment count. */
+       cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+       int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+       host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+       /* build FCP_CMND IU */
+       int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+       ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+       if (cmd->sc_data_direction == DMA_TO_DEVICE)
+               ctx->fcp_cmnd->additional_cdb_len |= 1;
+       else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+               ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+       /* Populate the FCP_PRIO. */
+       if (ha->flags.fcp_prio_enabled)
+               ctx->fcp_cmnd->task_attribute |=
+                   sp->fcport->fcp_prio << 3;
+
+       memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+       fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+           additional_cdb_len);
+       *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+       cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+       put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
+
+       sp->flags |= SRB_FCP_CMND_DMA_VALID;
+       cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+       /* Set total data segment count. */
+       cmd_pkt->entry_count = (uint8_t)req_cnt;
+       cmd_pkt->entry_status = 0;
+
+       /* Build command packet. */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+
+       /* Adjust ring index. */
+       wmb();
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else {
+               req->ring_ptr++;
+       }
+
+       sp->qpair->cmd_cnt++;
+       /* Set chip new ring index. */
+       wrt_reg_dword(req->req_q_in, req->ring_index);
+
+       spin_unlock_irqrestore(lock, flags);
+
+       return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+       dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+       if (tot_dsds)
+               scsi_dma_unmap(cmd);
+
+       if (sp->u.scmd.ct6_ctx) {
+               mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
+               sp->u.scmd.ct6_ctx = NULL;
+       }
+       spin_unlock_irqrestore(lock, flags);
+
+       return QLA_FUNCTION_FAILED;
+}
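
For illustration only (not part of this patch): the req->cnt refresh above is
the standard circular request-ring space check -- "in" is the driver's next
write slot (req->ring_index), "out" is the last index the firmware consumed,
and the path insists on a two-entry cushion (req_cnt + 2). A minimal,
self-contained userspace model of that arithmetic, with hypothetical names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the req->cnt recalculation in the IOCB path above. */
static uint16_t ring_free_slots(uint16_t length, uint16_t in, uint16_t out)
{
	if (in < out)
		return out - in;
	return length - (in - out);
}

int main(void)
{
	/* 32-entry ring, producer at 30, consumer at 2: 4 slots left. */
	assert(ring_free_slots(32, 30, 2) == 4);
	/* Producer behind consumer: simple difference. */
	assert(ring_free_slots(32, 5, 9) == 4);
	printf("ring accounting ok\n");
	return 0;
}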
+
+/**********************************************
+ * edif update/delete sa_index list functions *
+ **********************************************/
+
+/* clear the edif_indx_list for this port */
+void qla_edif_list_del(fc_port_t *fcport)
+{
+       struct edif_list_entry *indx_lst;
+       struct edif_list_entry *tindx_lst;
+       struct list_head *indx_list = &fcport->edif.edif_indx_list;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+       list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
+               list_del(&indx_lst->next);
+               kfree(indx_lst);
+       }
+       spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+}
+
+/******************
+ * SADB functions *
+ ******************/
+
+/* allocate/retrieve an sa_index for a given spi */
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+               struct qla_sa_update_frame *sa_frame)
+{
+       struct edif_sa_index_entry *entry;
+       struct list_head *sa_list;
+       uint16_t sa_index;
+       int dir = sa_frame->flags & SAU_FLG_TX;
+       int slot = 0;
+       int free_slot = -1;
+       scsi_qla_host_t *vha = fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags = 0;
+       uint16_t nport_handle = fcport->loop_id;
+
+       ql_dbg(ql_dbg_edif, vha, 0x3063,
+           "%s: entry  fc_port: %p, nport_handle: 0x%x\n",
+           __func__, fcport, nport_handle);
+
+       if (dir)
+               sa_list = &ha->sadb_tx_index_list;
+       else
+               sa_list = &ha->sadb_rx_index_list;
+
+       entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+       if (!entry) {
+               if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
+                       ql_dbg(ql_dbg_edif, vha, 0x3063,
+                           "%s: rx delete request with no entry\n", __func__);
+                       return RX_DELETE_NO_EDIF_SA_INDEX;
+               }
+
+               /* if there is no entry for this nport, add one */
+               entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
+               if (!entry)
+                       return INVALID_EDIF_SA_INDEX;
+
+               sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+               if (sa_index == INVALID_EDIF_SA_INDEX) {
+                       kfree(entry);
+                       return INVALID_EDIF_SA_INDEX;
+               }
+
+               INIT_LIST_HEAD(&entry->next);
+               entry->handle = nport_handle;
+               entry->fcport = fcport;
+               entry->sa_pair[0].spi = sa_frame->spi;
+               entry->sa_pair[0].sa_index = sa_index;
+               entry->sa_pair[1].spi = 0;
+               entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
+               spin_lock_irqsave(&ha->sadb_lock, flags);
+               list_add_tail(&entry->next, sa_list);
+               spin_unlock_irqrestore(&ha->sadb_lock, flags);
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
+                   __func__, nport_handle, sa_frame->spi, sa_index);
+
+               return sa_index;
+       }
+
+       spin_lock_irqsave(&ha->sadb_lock, flags);
+
+       /* see if we already have an entry for this spi */
+       for (slot = 0; slot < 2; slot++) {
+               if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+                       free_slot = slot;
+               } else {
+                       if (entry->sa_pair[slot].spi == sa_frame->spi) {
+                               spin_unlock_irqrestore(&ha->sadb_lock, flags);
+                               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                                   "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
+                                   __func__, slot, entry->handle, sa_frame->spi,
+                                   entry->sa_pair[slot].sa_index);
+                               return entry->sa_pair[slot].sa_index;
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+       /* both slots are used */
+       if (free_slot == -1) {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
+                   __func__, entry->handle, sa_frame->spi);
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: Slot 0  spi: 0x%x  sa_index: %d,  Slot 1  spi: 0x%x  sa_index: %d\n",
+                   __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
+                   entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
+
+               return INVALID_EDIF_SA_INDEX;
+       }
+
+       /* there is at least one free slot, use it */
+       sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+       if (sa_index == INVALID_EDIF_SA_INDEX) {
+               ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+                   "%s: empty freepool!!\n", __func__);
+               return INVALID_EDIF_SA_INDEX;
+       }
+
+       spin_lock_irqsave(&ha->sadb_lock, flags);
+       entry->sa_pair[free_slot].spi = sa_frame->spi;
+       entry->sa_pair[free_slot].sa_index = sa_index;
+       spin_unlock_irqrestore(&ha->sadb_lock, flags);
+       ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
+           "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
+           __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
+
+       return sa_index;
+}
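
qla_edif_sadb_get_sa_index() keeps at most two SPI/sa_index pairs per
N_PORT handle: a lookup returns the existing sa_index when the SPI matches,
and otherwise remembers a free slot for the caller to fill. A standalone
model of that two-slot search (illustrative only; all names are invented):

#include <stdint.h>
#include <stdio.h>

#define INVALID_INDEX 0xffff

struct sa_pair { uint32_t spi; uint16_t sa_index; };

/* Return the existing sa_index for "spi", or INVALID_INDEX; *free_slot is
 * set to a usable slot, or -1 when both slots are occupied. */
static uint16_t find_spi(const struct sa_pair pair[2], uint32_t spi,
			 int *free_slot)
{
	int slot;

	*free_slot = -1;
	for (slot = 0; slot < 2; slot++) {
		if (pair[slot].sa_index == INVALID_INDEX)
			*free_slot = slot;
		else if (pair[slot].spi == spi)
			return pair[slot].sa_index;
	}
	return INVALID_INDEX;
}

int main(void)
{
	struct sa_pair pair[2] = {
		{ .spi = 0x100, .sa_index = 7 },
		{ .spi = 0,     .sa_index = INVALID_INDEX },
	};
	int free_slot;
	uint16_t idx;

	idx = find_spi(pair, 0x100, &free_slot);
	printf("hit: sa_index=%u\n", idx);			/* 7 */
	idx = find_spi(pair, 0x200, &free_slot);
	printf("miss: idx=0x%x free_slot=%d\n", idx, free_slot); /* 0xffff, 1 */
	return 0;
}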
+
+/* release any sadb entries -- only done at teardown */
+void qla_edif_sadb_release(struct qla_hw_data *ha)
+{
+       struct list_head *pos;
+       struct list_head *tmp;
+       struct edif_sa_index_entry *entry;
+
+       list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) {
+               entry = list_entry(pos, struct edif_sa_index_entry, next);
+               list_del(&entry->next);
+               kfree(entry);
+       }
+
+       list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) {
+               entry = list_entry(pos, struct edif_sa_index_entry, next);
+               list_del(&entry->next);
+               kfree(entry);
+       }
+}
+
+/**************************
+ * sadb freepool functions
+ **************************/
+
+/* build the rx and tx sa_index free pools -- only done at adapter init */
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
+{
+       ha->edif_tx_sa_id_map =
+           kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+
+       if (!ha->edif_tx_sa_id_map) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+                   "Unable to allocate memory for sadb tx.\n");
+               return -ENOMEM;
+       }
+
+       ha->edif_rx_sa_id_map =
+           kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
+       if (!ha->edif_rx_sa_id_map) {
+               kfree(ha->edif_tx_sa_id_map);
+               ha->edif_tx_sa_id_map = NULL;
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
+                   "Unable to allocate memory for sadb rx.\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
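
The two maps allocated above are long-word bitmaps sized for
EDIF_NUM_SA_INDEX entries; the freepool draws in
qla_edif_get_sa_index_from_freepool() presumably go through the usual
kernel bitmap helpers (find_first_zero_bit()/set_bit()). A plain-C sketch
of the same idea, with a stand-in size -- illustrative only:

#include <limits.h>
#include <stdio.h>

#define NUM_SA_INDEX	64	/* stand-in for EDIF_NUM_SA_INDEX */
#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define MAP_LONGS	((NUM_SA_INDEX + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Find the first clear bit, set it, and return its index; -1 if full. */
static int alloc_sa_index(unsigned long *map)
{
	unsigned int i;

	for (i = 0; i < NUM_SA_INDEX; i++) {
		unsigned long bit = 1UL << (i % BITS_PER_LONG);

		if (!(map[i / BITS_PER_LONG] & bit)) {
			map[i / BITS_PER_LONG] |= bit;
			return (int)i;
		}
	}
	return -1;	/* freepool empty */
}

static void free_sa_index(unsigned long *map, unsigned int i)
{
	map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
	unsigned long map[MAP_LONGS] = { 0 };
	int a = alloc_sa_index(map);
	int b = alloc_sa_index(map);

	printf("%d %d\n", a, b);		/* 0 1 */
	free_sa_index(map, 0);
	printf("%d\n", alloc_sa_index(map));	/* 0 again */
	return 0;
}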
+
+/* release the free pools -- only done during adapter teardown */
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
+{
+       kfree(ha->edif_tx_sa_id_map);
+       ha->edif_tx_sa_id_map = NULL;
+       kfree(ha->edif_rx_sa_id_map);
+       ha->edif_rx_sa_id_map = NULL;
+}
+
+static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+               fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
+{
+       struct edif_list_entry *edif_entry;
+       struct edif_sa_ctl *sa_ctl;
+       uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
+       unsigned long flags = 0;
+       uint16_t nport_handle = fcport->loop_id;
+       uint16_t cached_nport_handle;
+
+       spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+       edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
+       if (!edif_entry) {
+               spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+               return;         /* no pending delete for this handle */
+       }
+
+       /*
+        * bail out if there is no pending delete for this index or the
+        * iocb does not match the rx sa_index
+        */
+       if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
+           edif_entry->update_sa_index != sa_index) {
+               spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+               return;
+       }
+
+       /*
+        * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
+        * transfers before queueing the RX delete
+        */
+       if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
+               spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+               return;
+       }
+
+       ql_dbg(ql_dbg_edif, vha, 0x5033,
+           "%s: invalidating delete_sa_index,  update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
+           __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
+
+       delete_sa_index = edif_entry->delete_sa_index;
+       edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+       cached_nport_handle = edif_entry->handle;
+       spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+       /* sanity check on the nport handle */
+       if (nport_handle != cached_nport_handle) {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
+                   __func__, nport_handle, cached_nport_handle);
+       }
+
+       /* find the sa_ctl for the delete and schedule the delete */
+       sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
+       if (sa_ctl) {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
+                   __func__, sa_ctl, sa_index);
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
+                   delete_sa_index,
+                   edif_entry->update_sa_index, nport_handle, handle);
+
+               sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+               set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+               qla_post_sa_replace_work(fcport->vha, fcport,
+                   nport_handle, sa_ctl);
+       } else {
+               ql_dbg(ql_dbg_edif, vha, 0x3063,
+                   "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
+                   __func__, delete_sa_index);
+       }
+}
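
The count++ check above is a simple event filter: the deferred RX SA delete
stays parked until EDIF_RX_DELETE_FILTER_COUNT completions have been seen
for the new sa_index, and only then is the delete handed to the work queue.
The same logic in miniature (standalone, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define FILTER_COUNT 3	/* stand-in for EDIF_RX_DELETE_FILTER_COUNT */

struct pending_delete {
	int count;	/* completions observed so far */
	bool armed;	/* a delete is queued for the old index */
};

/* Returns true on the completion that should trigger the SA delete. */
static bool rx_completion_seen(struct pending_delete *pd)
{
	if (!pd->armed)
		return false;
	if (pd->count++ < FILTER_COUNT)
		return false;		/* keep waiting */
	pd->armed = false;		/* consume the pending delete */
	return true;
}

int main(void)
{
	struct pending_delete pd = { .count = 0, .armed = true };
	int i;

	for (i = 1; i <= 5; i++)
		printf("completion %d -> fire=%d\n", i,
		       rx_completion_seen(&pd));	/* fires on #4 */
	return 0;
}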
+
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+               srb_t *sp, struct sts_entry_24xx *sts24)
+{
+       fc_port_t *fcport = sp->fcport;
+       /* sa_index used by this iocb */
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       uint32_t handle;
+
+       handle = (uint32_t)LSW(sts24->handle);
+
+       /* find out if this status iocb is for a scsi read */
+       if (cmd->sc_data_direction != DMA_FROM_DEVICE)
+               return;
+
+       return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
+          le16_to_cpu(sts24->edif_sa_index));
+}
+
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+               struct ctio7_from_24xx *pkt)
+{
+       __chk_edif_rx_sa_delete_pending(vha, fcport,
+           pkt->handle, le16_to_cpu(pkt->edif_sa_index));
+}
+
+static void qla_parse_auth_els_ctl(struct srb *sp)
+{
+       struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
+       struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
+       struct fc_bsg_request *request = bsg_job->request;
+       struct qla_bsg_auth_els_request *p =
+           (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+       a->tx_len = a->tx_byte_count = sp->remap.req.len;
+       a->tx_addr = sp->remap.req.dma;
+       a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
+       a->rx_addr = sp->remap.rsp.dma;
+
+       if (p->e.sub_cmd == SEND_ELS_REPLY) {
+               a->control_flags = p->e.extra_control_flags << 13;
+               a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
+               if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
+                       a->els_opcode = ELS_LS_ACC;
+               else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
+                       a->els_opcode = ELS_LS_RJT;
+       }
+       a->did = sp->fcport->d_id;
+       a->els_opcode = request->rqst_data.h_els.command_code;
+       a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       a->vp_idx = sp->vha->vp_idx;
+}
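
The extra_control_flags << 13 shift above lines the 2-bit BSG value up with
what appears to be the EPD field at bits 14:13 of the ELS IOCB control
flags (compare EPD_RX_XCHG == (3 << 13) in the qla_fw.h hunk further down);
treat the exact layout as an assumption. A quick standalone check of the
packing:

#include <stdint.h>
#include <stdio.h>

#define BSG_CTL_FLAG_LS_ACC	1	/* from qla_edif_bsg.h below */
#define BSG_CTL_FLAG_LS_RJT	2
#define EPD_SHIFT		13	/* assumed field position */

int main(void)
{
	uint16_t acc = (uint16_t)(BSG_CTL_FLAG_LS_ACC << EPD_SHIFT);
	uint16_t rjt = (uint16_t)(BSG_CTL_FLAG_LS_RJT << EPD_SHIFT);

	printf("LS_ACC -> 0x%04x, LS_RJT -> 0x%04x\n", acc, rjt);
	/* LS_ACC -> 0x2000, LS_RJT -> 0x4000 */
	return 0;
}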
+
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+       struct fc_bsg_request *bsg_request = bsg_job->request;
+       struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+       fc_port_t *fcport = NULL;
+       struct qla_hw_data *ha = vha->hw;
+       srb_t *sp;
+       int rval =  (DID_ERROR << 16);
+       port_id_t d_id;
+       struct qla_bsg_auth_els_request *p =
+           (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+       d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
+       d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
+       d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
+
+       /* find matching d_id in fcport list */
+       fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
+       if (!fcport) {
+               ql_dbg(ql_dbg_edif, vha, 0x911a,
+                   "%s fcport not found for portid=%06x.\n",
+                   __func__, d_id.b24);
+               SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+               return -EIO;
+       }
+
+       if (qla_bsg_check(vha, bsg_job, fcport))
+               return 0;
+
+       if (fcport->loop_id == FC_NO_LOOP_ID) {
+               ql_dbg(ql_dbg_edif, vha, 0x910d,
+                   "%s ELS code %x, no loop id.\n", __func__,
+                   bsg_request->rqst_data.r_els.els_code);
+               SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+               return -ENXIO;
+       }
+
+       if (!vha->flags.online) {
+               ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
+               SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+               rval = -EIO;
+               goto done;
+       }
+
+       /* pass through is supported only for ISP 4Gb or higher */
+       if (!IS_FWI2_CAPABLE(ha)) {
+               ql_dbg(ql_dbg_user, vha, 0x7001,
+                   "ELS passthru not supported for ISP23xx based adapters.\n");
+               SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+               rval = -EPERM;
+               goto done;
+       }
+
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp) {
+               ql_dbg(ql_dbg_user, vha, 0x7004,
+                   "Failed get sp pid=%06x\n", fcport->d_id.b24);
+               rval = -ENOMEM;
+               SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+               goto done;
+       }
+
+       sp->remap.req.len = bsg_job->request_payload.payload_len;
+       sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
+           GFP_KERNEL, &sp->remap.req.dma);
+       if (!sp->remap.req.buf) {
+               ql_dbg(ql_dbg_user, vha, 0x7005,
+                   "Failed allocate request dma len=%x\n",
+                   bsg_job->request_payload.payload_len);
+               rval = -ENOMEM;
+               SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+               goto done_free_sp;
+       }
+
+       sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
+       sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
+           GFP_KERNEL, &sp->remap.rsp.dma);
+       if (!sp->remap.rsp.buf) {
+               ql_dbg(ql_dbg_user, vha, 0x7006,
+                   "Failed allocate response dma len=%x\n",
+                   bsg_job->reply_payload.payload_len);
+               rval = -ENOMEM;
+               SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+               goto done_free_remap_req;
+       }
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
+           sp->remap.req.len);
+       sp->remap.remapped = true;
+
+       sp->type = SRB_ELS_CMD_HST_NOLOGIN;
+       sp->name = "SPCN_BSG_HST_NOLOGIN";
+       sp->u.bsg_cmd.bsg_job = bsg_job;
+       qla_parse_auth_els_ctl(sp);
+
+       sp->free = qla2x00_bsg_sp_free;
+       sp->done = qla2x00_bsg_job_done;
+
+       rval = qla2x00_start_sp(sp);
+
+       ql_dbg(ql_dbg_edif, vha, 0x700a,
+           "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+           __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+           p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+           sp->handle, sp->remap.req.len, bsg_job);
+
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x700e,
+                   "qla2x00_start_sp failed = %d\n", rval);
+               SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+               rval = -EIO;
+               goto done_free_remap_rsp;
+       }
+       return rval;
+
+done_free_remap_rsp:
+       dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+           sp->remap.rsp.dma);
+done_free_remap_req:
+       dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+           sp->remap.req.dma);
+done_free_sp:
+       qla2x00_rel_sp(sp);
+
+done:
+       return rval;
+}
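
qla_edif_process_els() unwinds with the classic staged goto ladder: each
resource gets a matching label, and a failure jumps to the label that
releases exactly what was acquired so far, in reverse order. The pattern in
miniature (malloc stands in for the dma_pool_alloc() calls; illustrative
only):

#include <stdio.h>
#include <stdlib.h>

static int submit(void) { return 0; }	/* stand-in for qla2x00_start_sp() */

static int process(void)
{
	char *req_buf, *rsp_buf;

	req_buf = malloc(64);
	if (!req_buf)
		return -1;

	rsp_buf = malloc(64);
	if (!rsp_buf)
		goto err_free_req;

	if (submit() != 0)
		goto err_free_rsp;

	/* Success: in the driver the DMA buffers stay attached to the
	 * request and are released later by the completion path. */
	free(rsp_buf);
	free(req_buf);
	return 0;

err_free_rsp:
	free(rsp_buf);
err_free_req:
	free(req_buf);
	return -1;
}

int main(void)
{
	printf("process() = %d\n", process());
	return 0;
}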
+
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
+{
+       if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) {
+               ql_dbg(ql_dbg_disc, vha, 0xf09c,
+                       "%s: sess %8phN send port_offline event\n",
+                       __func__, sess->port_name);
+               sess->edif.app_sess_online = 0;
+               qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
+                   sess->d_id.b24, 0, sess);
+               qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+       }
+}
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
new file mode 100644 (file)
index 0000000..9e8f28d
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c)  2021    Marvell
+ */
+#ifndef __QLA_EDIF_H
+#define __QLA_EDIF_H
+
+struct qla_scsi_host;
+#define EDIF_APP_ID 0x73730001
+
+#define EDIF_MAX_INDEX 2048
+struct edif_sa_ctl {
+       struct list_head next;
+       uint16_t        del_index;
+       uint16_t        index;
+       uint16_t        slot;
+       uint16_t        flags;
+#define        EDIF_SA_CTL_FLG_REPL            BIT_0
+#define        EDIF_SA_CTL_FLG_DEL             BIT_1
+#define EDIF_SA_CTL_FLG_CLEANUP_DEL BIT_4
+       // Invalidate-index bit; mirrors QLA_SA_UPDATE_FLAGS_DELETE
+       unsigned long   state;
+#define EDIF_SA_CTL_USED       1       /* Active Sa update  */
+#define EDIF_SA_CTL_PEND       2       /* Waiting for slot */
+#define EDIF_SA_CTL_REPL       3       /* Active Replace and Delete */
+#define EDIF_SA_CTL_DEL                4       /* Delete Pending */
+       struct fc_port  *fcport;
+       struct bsg_job *bsg_job;
+       struct qla_sa_update_frame sa_frame;
+};
+
+enum enode_flags_t {
+       ENODE_ACTIVE = 0x1,
+};
+
+struct pur_core {
+       enum enode_flags_t      enode_flags;
+       spinlock_t              pur_lock;
+       struct  list_head       head;
+};
+
+enum db_flags_t {
+       EDB_ACTIVE = 0x1,
+};
+
+struct edif_dbell {
+       enum db_flags_t         db_flags;
+       spinlock_t              db_lock;
+       struct  list_head       head;
+       struct  completion      dbell;
+};
+
+#define SA_UPDATE_IOCB_TYPE            0x71    /* Security Association Update IOCB entry */
+struct sa_update_28xx {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System Defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* IOCB System handle. */
+
+       union {
+               __le16 nport_handle;  /* in: N_PORT handle. */
+               __le16 comp_sts;              /* out: completion status */
+#define CS_PORT_EDIF_UNAVAIL   0x28
+#define CS_PORT_EDIF_LOGOUT    0x29
+#define CS_PORT_EDIF_SUPP_NOT_RDY 0x64
+#define CS_PORT_EDIF_INV_REQ      0x66
+       } u;
+       uint8_t vp_index;
+       uint8_t reserved_1;
+       uint8_t port_id[3];
+       uint8_t flags;
+#define SA_FLAG_INVALIDATE BIT_0
+#define SA_FLAG_TX        BIT_1 // 1=tx, 0=rx
+
+       uint8_t sa_key[32];     /* 256 bit key */
+       __le32 salt;
+       __le32 spi;
+       uint8_t sa_control;
+#define SA_CNTL_ENC_FCSP        (1 << 3)
+#define SA_CNTL_ENC_OPD         (2 << 3)
+#define SA_CNTL_ENC_MSK         (3 << 3)  // mask bits 4,3
+#define SA_CNTL_AES_GMAC       (1 << 2)
+#define SA_CNTL_KEY256          (2 << 0)
+#define SA_CNTL_KEY128          0
+
+       uint8_t reserved_2;
+       __le16 sa_index;   // bits 11-15 reserved
+       __le16 old_sa_info;
+       __le16 new_sa_info;
+};
+
+#define        NUM_ENTRIES     256
+#define        MAX_PAYLOAD     1024
+#define        PUR_GET         1
+
+struct dinfo {
+       int             nodecnt;
+       int             lstate;
+};
+
+struct pur_ninfo {
+       unsigned int    pur_pend:1;
+       port_id_t       pur_sid;
+       port_id_t       pur_did;
+       uint8_t         vp_idx;
+       short           pur_bytes_rcvd;
+       unsigned short  pur_nphdl;
+       unsigned int    pur_rx_xchg_address;
+};
+
+struct purexevent {
+       struct  pur_ninfo       pur_info;
+       unsigned char           *msgp;
+       u32                     msgp_len;
+};
+
+#define        N_UNDEF         0
+#define        N_PUREX         1
+struct enode {
+       struct list_head        list;
+       struct dinfo            dinfo;
+       uint32_t                ntype;
+       union {
+               struct purexevent       purexinfo;
+       } u;
+};
+
+#define EDIF_SESSION_DOWN(_s) \
+       (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+        _s->disc_state == DSC_DELETED || \
+        !_s->edif.app_sess_online))
+
+#endif /* __QLA_EDIF_H */
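
For reference, a standalone snippet composing the sa_update_28xx sa_control
byte from the encoding defines above (values copied from this header;
illustrative only -- which bits apply to a given SA is up to the
application):

#include <stdint.h>
#include <stdio.h>

#define SA_CNTL_ENC_FCSP	(1 << 3)
#define SA_CNTL_AES_GMAC	(1 << 2)
#define SA_CNTL_KEY256		(2 << 0)

int main(void)
{
	uint8_t sa_control = SA_CNTL_ENC_FCSP | SA_CNTL_AES_GMAC |
			     SA_CNTL_KEY256;

	printf("sa_control = 0x%02x\n", sa_control);	/* 0x0e */
	return 0;
}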
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
new file mode 100644 (file)
index 0000000..58b718d
--- /dev/null
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (C) 2018- Marvell
+ *
+ */
+#ifndef __QLA_EDIF_BSG_H
+#define __QLA_EDIF_BSG_H
+
+/* BSG Vendor specific commands */
+#define        ELS_MAX_PAYLOAD         1024
+#ifndef        WWN_SIZE
+#define WWN_SIZE               8
+#endif
+#define        VND_CMD_APP_RESERVED_SIZE       32
+
+enum auth_els_sub_cmd {
+       SEND_ELS = 0,
+       SEND_ELS_REPLY,
+       PULL_ELS,
+};
+
+struct extra_auth_els {
+       enum auth_els_sub_cmd sub_cmd;
+       uint32_t        extra_rx_xchg_address;
+       uint8_t         extra_control_flags;
+#define BSG_CTL_FLAG_INIT       0
+#define BSG_CTL_FLAG_LS_ACC     1
+#define BSG_CTL_FLAG_LS_RJT     2
+#define BSG_CTL_FLAG_TRM        3
+       uint8_t         extra_rsvd[3];
+} __packed;
+
+struct qla_bsg_auth_els_request {
+       struct fc_bsg_request r;
+       struct extra_auth_els e;
+};
+
+struct qla_bsg_auth_els_reply {
+       struct fc_bsg_reply r;
+       uint32_t rx_xchg_address;
+};
+
+struct app_id {
+       int             app_vid;
+       uint8_t         app_key[32];
+} __packed;
+
+struct app_start_reply {
+       uint32_t        host_support_edif;
+       uint32_t        edif_enode_active;
+       uint32_t        edif_edb_active;
+       uint32_t        reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_start {
+       struct app_id   app_info;
+       uint32_t        prli_to;
+       uint32_t        key_shred;
+       uint8_t         app_start_flags;
+       uint8_t         reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+} __packed;
+
+struct app_stop {
+       struct app_id   app_info;
+       char            buf[16];
+} __packed;
+
+struct app_plogi_reply {
+       uint32_t        prli_status;
+       uint8_t         reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define        RECFG_TIME      1
+#define        RECFG_BYTES     2
+
+struct app_rekey_cfg {
+       struct app_id app_info;
+       uint8_t  rekey_mode;
+       port_id_t d_id;
+       uint8_t  force;
+       union {
+               int64_t bytes;
+               int64_t time;
+       } rky_units;
+
+       uint8_t         reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo_req {
+       struct app_id app_info;
+       uint8_t  num_ports;
+       port_id_t remote_pid;
+       uint8_t  reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_pinfo {
+       port_id_t remote_pid;
+       uint8_t remote_wwpn[WWN_SIZE];
+       uint8_t remote_type;
+#define        VND_CMD_RTYPE_UNKNOWN           0
+#define        VND_CMD_RTYPE_TARGET            1
+#define        VND_CMD_RTYPE_INITIATOR         2
+       uint8_t remote_state;
+       uint8_t auth_state;
+       uint8_t rekey_mode;
+       int64_t rekey_count;
+       int64_t rekey_config_value;
+       int64_t rekey_consumed_value;
+
+       uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+/* AUTH States */
+#define        VND_CMD_AUTH_STATE_UNDEF        0
+#define        VND_CMD_AUTH_STATE_SESSION_SHUTDOWN     1
+#define        VND_CMD_AUTH_STATE_NEEDED       2
+#define        VND_CMD_AUTH_STATE_ELS_RCVD     3
+#define        VND_CMD_AUTH_STATE_SAUPDATE_COMPL 4
+
+struct app_pinfo_reply {
+       uint8_t         port_count;
+       uint8_t         reserved[VND_CMD_APP_RESERVED_SIZE];
+       struct app_pinfo ports[0];
+} __packed;
+
+struct app_sinfo_req {
+       struct app_id   app_info;
+       uint8_t         num_ports;
+       uint8_t         reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct app_sinfo {
+       uint8_t remote_wwpn[WWN_SIZE];
+       int64_t rekey_count;
+       uint8_t rekey_mode;
+       int64_t tx_bytes;
+       int64_t rx_bytes;
+} __packed;
+
+struct app_stats_reply {
+       uint8_t         elem_count;
+       struct app_sinfo elem[0];
+} __packed;
+
+struct qla_sa_update_frame {
+       struct app_id   app_info;
+       uint16_t        flags;
+#define SAU_FLG_INV            0x01    /* delete key */
+#define SAU_FLG_TX             0x02    /* 1=tx, 0 = rx */
+#define SAU_FLG_FORCE_DELETE   0x08
+#define SAU_FLG_GMAC_MODE      0x20    /*
+                                        * GMAC mode is cleartext for the IO
+                                        * (i.e. NULL encryption)
+                                        */
+#define SAU_FLG_KEY128          0x40
+#define SAU_FLG_KEY256          0x80
+       uint16_t        fast_sa_index:10,
+                       reserved:6;
+       uint32_t        salt;
+       uint32_t        spi;
+       uint8_t         sa_key[32];
+       uint8_t         node_name[WWN_SIZE];
+       uint8_t         port_name[WWN_SIZE];
+       port_id_t       port_id;
+} __packed;
+
+// used for edif mgmt bsg interface
+#define        QL_VND_SC_UNDEF         0
+#define        QL_VND_SC_SA_UPDATE     1
+#define        QL_VND_SC_APP_START     2
+#define        QL_VND_SC_APP_STOP      3
+#define        QL_VND_SC_AUTH_OK       4
+#define        QL_VND_SC_AUTH_FAIL     5
+#define        QL_VND_SC_REKEY_CONFIG  6
+#define        QL_VND_SC_GET_FCINFO    7
+#define        QL_VND_SC_GET_STATS     8
+
+/* Application interface data structure for rtn data */
+#define        EXT_DEF_EVENT_DATA_SIZE 64
+struct edif_app_dbell {
+       uint32_t        event_code;
+       uint32_t        event_data_size;
+       union  {
+               port_id_t       port_id;
+               uint8_t         event_data[EXT_DEF_EVENT_DATA_SIZE];
+       };
+} __packed;
+
+struct edif_sa_update_aen {
+       port_id_t port_id;
+       uint32_t key_type;      /* Tx (1) or RX (2) */
+       uint32_t status;        /* 0 success, 1 failed, 2 timeout, 3 error */
+       uint8_t         reserved[16];
+} __packed;
+
+#define        QL_VND_SA_STAT_SUCCESS  0
+#define        QL_VND_SA_STAT_FAILED   1
+#define        QL_VND_SA_STAT_TIMEOUT  2
+#define        QL_VND_SA_STAT_ERROR    3
+
+#define        QL_VND_RX_SA_KEY        1
+#define        QL_VND_TX_SA_KEY        2
+
+/* App defines for plogi auth'd ok and plogi auth bad requests */
+struct auth_complete_cmd {
+       struct app_id app_info;
+#define PL_TYPE_WWPN    1
+#define PL_TYPE_DID     2
+       uint32_t    type;
+       union {
+               uint8_t  wwpn[WWN_SIZE];
+               port_id_t d_id;
+       } u;
+       uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+#define RX_DELAY_DELETE_TIMEOUT 20
+
+#endif /* __QLA_EDIF_BSG_H */
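
As a usage illustration, packing qla_sa_update_frame.flags with the
SAU_FLG_* values defined above -- for example, invalidating (deleting) a
256-bit TX key. Standalone and hypothetical; a real caller would fill in
the rest of the frame and send it through the vendor-specific bsg command:

#include <stdint.h>
#include <stdio.h>

#define SAU_FLG_INV	0x01	/* delete key */
#define SAU_FLG_TX	0x02	/* 1=tx, 0=rx */
#define SAU_FLG_KEY256	0x80

int main(void)
{
	uint16_t flags = SAU_FLG_TX | SAU_FLG_INV | SAU_FLG_KEY256;

	printf("flags = 0x%04x (tx delete, 256-bit key)\n", flags); /* 0x0083 */
	return 0;
}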
index 49df418..073d06e 100644 (file)
@@ -82,10 +82,11 @@ struct port_database_24xx {
        uint8_t port_name[WWN_SIZE];
        uint8_t node_name[WWN_SIZE];
 
-       uint8_t reserved_3[4];
+       uint8_t reserved_3[2];
+       uint16_t nvme_first_burst_size;
        uint16_t prli_nvme_svc_param_word_0;    /* Bits 15-0 of word 0 */
        uint16_t prli_nvme_svc_param_word_3;    /* Bits 15-0 of word 3 */
-       uint16_t nvme_first_burst_size;
+       uint8_t secure_login;
        uint8_t reserved_4[14];
 };
 
@@ -489,6 +490,9 @@ struct cmd_type_6 {
        struct scsi_lun lun;            /* FCP LUN (BE). */
 
        __le16  control_flags;          /* Control flags. */
+#define CF_NEW_SA                      BIT_12
+#define CF_EN_EDIF                     BIT_9
+#define CF_ADDITIONAL_PARAM_BLK                BIT_8
 #define CF_DIF_SEG_DESCR_ENABLE                BIT_3
 #define CF_DATA_SEG_DESCR_ENABLE       BIT_2
 #define CF_READ_DATA                   BIT_1
@@ -611,6 +615,7 @@ struct sts_entry_24xx {
        union {
                __le16 reserved_1;
                __le16  nvme_rsp_pyld_len;
+               __le16 edif_sa_index;    /* edif sa_index used for initiator read data */
        };
 
        __le16  state_flags;            /* State flags. */
@@ -805,6 +810,7 @@ struct els_entry_24xx {
 #define EPD_RX_XCHG            (3 << 13)
 #define ECF_CLR_PASSTHRU_PEND  BIT_12
 #define ECF_INCL_FRAME_HDR     BIT_11
+#define ECF_SEC_LOGIN          BIT_3
 
        union {
                struct {
@@ -896,6 +902,7 @@ struct logio_entry_24xx {
 #define LCF_FCP2_OVERRIDE      BIT_9   /* Set/Reset word 3 of PRLI. */
 #define LCF_CLASS_2            BIT_8   /* Enable class 2 during PLOGI. */
 #define LCF_FREE_NPORT         BIT_7   /* Release NPORT handle after LOGO. */
+#define LCF_COMMON_FEAT                BIT_7   /* PLOGI - Set Common Features Field */
 #define LCF_EXPL_LOGO          BIT_6   /* Perform an explicit LOGO. */
 #define LCF_NVME_PRLI          BIT_6   /* Perform NVME FC4 PRLI */
 #define LCF_SKIP_PRLI          BIT_5   /* Skip PRLI after PLOGI. */
@@ -920,6 +927,8 @@ struct logio_entry_24xx {
        uint8_t rsp_size;               /* Response size in 32bit words. */
 
        __le32  io_parameter[11];       /* General I/O parameters. */
+#define LIO_COMM_FEAT_FCSP     BIT_21
+#define LIO_COMM_FEAT_CIO      BIT_31
 #define LSC_SCODE_NOLINK       0x01
 #define LSC_SCODE_NOIOCB       0x02
 #define LSC_SCODE_NOXCB                0x03
index 2f867da..1c3f055 100644 (file)
@@ -12,6 +12,7 @@
  * Global Function Prototypes in qla_init.c source file.
  */
 extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 
 extern int qla2100_pci_config(struct scsi_qla_host *);
 extern int qla2300_pci_config(struct scsi_qla_host *);
@@ -130,6 +131,18 @@ void qla24xx_free_purex_item(struct purex_item *item);
 extern bool qla24xx_risc_firmware_invalid(uint32_t *);
 void qla_init_iocb_limit(scsi_qla_host_t *);
 
+void qla_edif_list_del(fc_port_t *fcport);
+void qla_edif_sadb_release(struct qla_hw_data *ha);
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha);
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha);
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+               srb_t *sp, struct sts_entry_24xx *sts24);
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+               struct ctio7_from_24xx *ctio);
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport);
+int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
+const char *sc_to_str(uint16_t cmd);
 
 /*
  * Global Data in qla_os.c source file.
@@ -175,6 +188,7 @@ extern int ql2xenablemsix;
 extern int qla2xuseresexchforels;
 extern int ql2xdifbundlinginternalbuffers;
 extern int ql2xfulldump_on_mpifail;
+extern int ql2xsecenable;
 extern int ql2xenforce_iocb_limit;
 extern int ql2xabts_wait_nvme;
 
@@ -236,6 +250,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
                               struct purex_item *pkt);
 void qla_pci_set_eeh_busy(struct scsi_qla_host *);
 void qla_schedule_eeh_work(struct scsi_qla_host *);
+struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport,
+                                                 int index, int dir);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -280,7 +296,10 @@ extern int  qla2x00_vp_abort_isp(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_iocb.c source file.
  */
-
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+       struct els_entry_24xx *pkt, struct qla_els_pt_arg *a);
+cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha,
+               struct req_que *que);
 extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -310,6 +329,8 @@ extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
        struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
+       struct qla_work_evt *e);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -578,6 +599,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
 fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
 fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
 fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
+void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
 
 /*
  * Global Function Prototypes in qla_sup.c source file.
@@ -640,6 +662,8 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
 
 extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
 extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
+       struct rsp_que **rsp, u8 *buf, u32 buf_len);
 
 /*
  * Global Function Prototypes in qla_dbg.c source file.
@@ -879,6 +903,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
        dma_addr_t, size_t, uint32_t);
 extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
        uint16_t *, uint16_t *);
+extern int qla24xx_sadb_update(struct bsg_job *bsg_job);
+extern int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+        fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl);
 
 /* 83xx related functions */
 void qla83xx_fw_dump(scsi_qla_host_t *vha);
@@ -923,6 +950,7 @@ extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
 extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
        response_t *);
 
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id);
 int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
        struct imm_ntfy_from_isp *, int);
 void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
@@ -935,7 +963,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
 void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
-void qlt_remove_target_resources(struct qla_hw_data *);
+void qla_remove_hostmap(struct qla_hw_data *ha);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
 void qlt_set_mode(struct scsi_qla_host *);
 int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
@@ -950,6 +978,25 @@ extern void qla_nvme_abort_process_comp_status
 
 /* nvme.c */
 void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+
+/* qla_edif.c */
+fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
+void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
+               fc_port_t *fcport);
+void qla_edb_stop(scsi_qla_host_t *vha);
+ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
+int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
+void qla_enode_init(scsi_qla_host_t *vha);
+void qla_enode_stop(scsi_qla_host_t *vha);
+void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport);
+void qla_edb_init(scsi_qla_host_t *vha);
+void qla_edif_timer(scsi_qla_host_t *vha);
+int qla28xx_start_scsi_edif(srb_t *sp);
+void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb);
+void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp);
+void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+               struct sa_update_28xx *pkt);
 void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
 
 #define QLA2XX_HW_ERROR                        BIT_0
index 5b6e04a..ebc8fdb 100644 (file)
@@ -632,7 +632,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
        ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
        ct_req->req.rft_id.fc4_types[2] = 0x01;         /* FCP-3 */
 
-       if (vha->flags.nvme_enabled)
+       if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
                ct_req->req.rft_id.fc4_types[6] = 1;    /* NVMe type 28h */
 
        sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
@@ -1730,8 +1730,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
        size += alen;
        ql_dbg(ql_dbg_disc, vha, 0x20a8,
            "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
-       if (callopt == CALLOPT_FDMI1)
-               goto done;
        /* OS Name and Version */
        eiter = entries + size;
        eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
@@ -1754,6 +1752,8 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
        size += alen;
        ql_dbg(ql_dbg_disc, vha, 0x20a9,
            "OS VERSION = %s.\n", eiter->a.os_version);
+       if (callopt == CALLOPT_FDMI1)
+               goto done;
        /* MAX CT Payload Length */
        eiter = entries + size;
        eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
@@ -2826,6 +2826,10 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
        if (fcport->disc_state == DSC_DELETE_PEND)
                return;
 
+       /* We will figure out what happens after AUTH completes */
+       if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+               return;
+
        if (ea->sp->gen2 != fcport->login_gen) {
                /* target side must have changed it. */
                ql_dbg(ql_dbg_disc, vha, 0x20d3,
@@ -3498,7 +3502,16 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                                continue;
                        fcport->scan_state = QLA_FCPORT_FOUND;
                        fcport->last_rscn_gen = fcport->rscn_gen;
+                       fcport->fc4_type = rp->fc4type;
                        found = true;
+
+                       if (fcport->scan_needed) {
+                               if (NVME_PRIORITY(vha->hw, fcport))
+                                       fcport->do_prli_nvme = 1;
+                               else
+                                       fcport->do_prli_nvme = 0;
+                       }
+
                        /*
                         * If device was not a fabric device before.
                         */
index f8f4711..1e4e3e8 100644 (file)
@@ -34,7 +34,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *);
 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
 static int qla84xx_init_chip(scsi_qla_host_t *);
 static int qla25xx_init_queues(struct qla_hw_data *);
-static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
                                      struct event_arg *ea);
 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
@@ -158,7 +157,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
        sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
                                  GFP_ATOMIC);
        if (!sp)
-               return rval;
+               return QLA_MEMORY_ALLOC_FAILED;
 
        abt_iocb = &sp->u.iocb_cmd;
        sp->type = SRB_ABT_CMD;
@@ -191,7 +190,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
        if (wait) {
                wait_for_completion(&abt_iocb->u.abt.comp);
                rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
-                       QLA_SUCCESS : QLA_FUNCTION_FAILED;
+                       QLA_SUCCESS : QLA_ERR_FROM_FW;
                sp->free(sp);
        }
 
@@ -293,22 +292,6 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res)
        sp->free(sp);
 }
 
-static inline bool
-fcport_is_smaller(fc_port_t *fcport)
-{
-       if (wwn_to_u64(fcport->port_name) <
-           wwn_to_u64(fcport->vha->port_name))
-               return true;
-       else
-               return false;
-}
-
-static inline bool
-fcport_is_bigger(fc_port_t *fcport)
-{
-       return !fcport_is_smaller(fcport);
-}
-
 int
 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
@@ -343,19 +326,28 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
        sp->done = qla2x00_async_login_sp_done;
-       if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
+       if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
                lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
-       else
-               lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+       } else {
+               if (vha->hw->flags.edif_enabled &&
+                   vha->e_dbell.db_flags & EDB_ACTIVE) {
+                       lio->u.logio.flags |=
+                               (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
+                       ql_dbg(ql_dbg_disc, vha, 0x2072,
+                           "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n",
+                           fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24);
+               } else {
+                       lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+               }
+       }
 
        if (NVME_TARGET(vha->hw, fcport))
                lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
 
-       ql_log(ql_log_warn, vha, 0x2072,
-              "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n",
+       ql_dbg(ql_dbg_disc, vha, 0x2072,
+              "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
               fcport->port_name, sp->handle, fcport->loop_id,
-              fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
-              fcport->login_retry);
+              fcport->d_id.b24, fcport->login_retry);
 
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
@@ -378,7 +370,7 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
 {
        sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        sp->fcport->login_gen++;
-       qlt_logo_completion_handler(sp->fcport, res);
+       qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
        sp->free(sp);
 }
 
@@ -404,10 +396,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
        sp->done = qla2x00_async_logout_sp_done;
 
        ql_dbg(ql_dbg_disc, vha, 0x2070,
-           "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
+           "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
                fcport->d_id.b.area, fcport->d_id.b.al_pa,
-               fcport->port_name);
+               fcport->port_name, fcport->explicit_logout);
 
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
@@ -692,11 +684,11 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 
        fcport = ea->fcport;
        ql_dbg(ql_dbg_disc, vha, 0xffff,
-           "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+           "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
            __func__, fcport->port_name, fcport->disc_state,
            fcport->fw_login_state, ea->rc,
            fcport->login_gen, fcport->last_login_gen,
-           fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+           fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
 
        if (fcport->disc_state == DSC_DELETE_PEND)
                return;
@@ -810,7 +802,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                default:
                        switch (current_login_state) {
                        case DSC_LS_PRLI_COMP:
-                               ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+                               ql_dbg(ql_dbg_disc,
                                    vha, 0x20e4, "%s %d %8phC post gpdb\n",
                                    __func__, __LINE__, fcport->port_name);
 
@@ -822,6 +814,13 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                qla2x00_post_async_adisc_work(vha, fcport,
                                    data);
                                break;
+                       case DSC_LS_PLOGI_COMP:
+                               if (vha->hw->flags.edif_enabled) {
+                                       /* check to see if App support Secure */
+                                       qla24xx_post_gpdb_work(vha, fcport, 0);
+                                       break;
+                               }
+                               fallthrough;
                        case DSC_LS_PORT_UNAVAIL:
                        default:
                                if (fcport->loop_id == FC_NO_LOOP_ID) {
@@ -849,6 +848,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                 */
                                qla2x00_set_fcport_disc_state(fcport,
                                    DSC_DELETED);
+                               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                                break;
                        case DSC_LS_PRLI_COMP:
                                if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -861,6 +861,12 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                    data);
                                break;
                        case DSC_LS_PLOGI_COMP:
+                               if (vha->hw->flags.edif_enabled &&
+                                   vha->e_dbell.db_flags & EDB_ACTIVE) {
+                                       /* check to see if App support secure or not */
+                                       qla24xx_post_gpdb_work(vha, fcport, 0);
+                                       break;
+                               }
                                if (fcport_is_bigger(fcport)) {
                                        /* local adapter is smaller */
                                        if (fcport->loop_id != FC_NO_LOOP_ID)
@@ -1191,7 +1197,7 @@ done:
        sp->free(sp);
 }
 
-static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
        struct qla_work_evt *e;
 
@@ -1214,7 +1220,7 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
        struct event_arg ea;
 
        ql_dbg(ql_dbg_disc, vha, 0x2129,
-           "%s %8phC res %\n", __func__,
+           "%s %8phC res %x\n", __func__,
            sp->fcport->port_name, res);
 
        sp->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -1227,6 +1233,8 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
                ea.iop[0] = lio->u.logio.iop[0];
                ea.iop[1] = lio->u.logio.iop[1];
                ea.sp = sp;
+               if (res == QLA_OS_TIMER_EXPIRED)
+                       ea.data[0] = QLA_OS_TIMER_EXPIRED;
 
                qla24xx_handle_prli_done_event(vha, &ea);
        }
@@ -1418,6 +1426,57 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 }
 
+static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+       struct port_database_24xx *pd)
+{
+       int rc = 0;
+
+       if (pd->secure_login) {
+               ql_dbg(ql_dbg_disc, vha, 0x104d,
+                   "Secure Login established on %8phC\n",
+                   fcport->port_name);
+               fcport->flags |= FCF_FCSP_DEVICE;
+       } else {
+               ql_dbg(ql_dbg_disc, vha, 0x104d,
+                   "non-Secure Login %8phC\n",
+                   fcport->port_name);
+               fcport->flags &= ~FCF_FCSP_DEVICE;
+       }
+       if (vha->hw->flags.edif_enabled) {
+               if (fcport->flags & FCF_FCSP_DEVICE) {
+                       qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
+                       /* Start edif prli timer & ring doorbell for app */
+                       fcport->edif.rx_sa_set = 0;
+                       fcport->edif.tx_sa_set = 0;
+                       fcport->edif.rx_sa_pending = 0;
+                       fcport->edif.tx_sa_pending = 0;
+
+                       qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+                           fcport->d_id.b24);
+
+                       if (vha->e_dbell.db_flags ==  EDB_ACTIVE) {
+                               ql_dbg(ql_dbg_disc, vha, 0x20ef,
+                                   "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
+                                   __func__, __LINE__, fcport->port_name);
+                               fcport->edif.app_started = 1;
+                               fcport->edif.app_sess_online = 1;
+
+                               qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
+                                   fcport->d_id.b24, 0, fcport);
+                       }
+
+                       rc = 1;
+               } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+                       ql_dbg(ql_dbg_disc, vha, 0x2117,
+                           "%s %d %8phC post prli\n",
+                           __func__, __LINE__, fcport->port_name);
+                       qla24xx_post_prli_work(vha, fcport);
+                       rc = 1;
+               }
+       }
+       return rc;
+}
+
 static
 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 {
@@ -1431,12 +1490,15 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
        fcport->flags &= ~FCF_ASYNC_SENT;
 
        ql_dbg(ql_dbg_disc, vha, 0x20d2,
-           "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+           "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
            fcport->port_name, fcport->disc_state, pd->current_login_state,
            fcport->fc4_type, ea->rc);
 
-       if (fcport->disc_state == DSC_DELETE_PEND)
+       if (fcport->disc_state == DSC_DELETE_PEND) {
+               ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
+                      __func__, __LINE__, fcport->port_name);
                return;
+       }
 
        if (NVME_TARGET(vha->hw, fcport))
                ls = pd->current_login_state >> 4;
@@ -1453,6 +1515,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
        } else if (ea->sp->gen1 != fcport->rscn_gen) {
                qla_rscn_replay(fcport);
                qlt_schedule_sess_for_deletion(fcport);
+               ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+                      __func__, __LINE__, fcport->port_name, ls);
                return;
        }
 
@@ -1460,8 +1524,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
        case PDS_PRLI_COMPLETE:
                __qla24xx_parse_gpdb(vha, fcport, pd);
                break;
-       case PDS_PLOGI_PENDING:
        case PDS_PLOGI_COMPLETE:
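+               /*
+                * A secure-capable port completes login via the EDIF
+                * authentication path; otherwise fall through and retry
+                * discovery from GNL below.
+                */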
+               if (qla_chk_secure_login(vha, fcport, pd)) {
+                       ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+                              __func__, __LINE__, fcport->port_name, ls);
+                       return;
+               }
+               fallthrough;
+       case PDS_PLOGI_PENDING:
        case PDS_PRLI_PENDING:
        case PDS_PRLI2_PENDING:
                /* Set discovery state back to GNL to Relogin attempt */
@@ -1470,6 +1540,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
                        qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                }
+               ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
+                      __func__, __LINE__, fcport->port_name, ls);
                return;
        case PDS_LOGO_PENDING:
        case PDS_PORT_UNAVAILABLE:
@@ -1538,11 +1610,12 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
        u16 sec;
 
        ql_dbg(ql_dbg_disc, vha, 0x20d8,
-           "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
+           "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
            __func__, fcport->port_name, fcport->disc_state,
            fcport->fw_login_state, fcport->login_pause, fcport->flags,
            fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
-           fcport->login_gen, fcport->loop_id, fcport->scan_state);
+           fcport->login_gen, fcport->loop_id, fcport->scan_state,
+           fcport->fc4_type);
 
        if (fcport->scan_state != QLA_FCPORT_FOUND)
                return 0;
@@ -1715,6 +1788,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 
        fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
        if (fcport) {
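+               /*
+                * FCP2 devices (e.g. tape) must not have their session
+                * torn down on an RSCN; the delete is deferred instead.
+                */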
+               if (fcport->flags & FCF_FCP2_DEVICE) {
+                       ql_dbg(ql_dbg_disc, vha, 0x2115,
+                              "Delaying session delete for FCP2 portid=%06x %8phC ",
+                              fcport->d_id.b24, fcport->port_name);
+                       return;
+               }
                fcport->scan_needed = 1;
                fcport->rscn_gen++;
        }
@@ -1758,6 +1837,13 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
                                      struct event_arg *ea)
 {
+       if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
+           vha->hw->flags.edif_enabled) {
+           /* check to see if the app supports secure login */
+               qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+               return;
+       }
+
        /* for pure Target Mode, PRLI will not be initiated */
        if (vha->host->active_mode == MODE_TARGET)
                return;
@@ -1902,7 +1988,7 @@ qla24xx_async_abort_command(srb_t *sp)
 
        if (handle == req->num_outstanding_cmds) {
                /* Command not found. */
-               return QLA_FUNCTION_FAILED;
+               return QLA_ERR_NOT_FOUND;
        }
        if (sp->type == SRB_FXIOCB_DCMD)
                return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
@@ -1914,6 +2000,7 @@ qla24xx_async_abort_command(srb_t *sp)
 static void
 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 {
+       struct srb *sp;
+
        WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
                  ea->data[0]);
 
@@ -1941,22 +2028,27 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                        break;
                }
 
+               sp = ea->sp;
                ql_dbg(ql_dbg_disc, vha, 0x2118,
-                      "%s %d %8phC priority %s, fc4type %x\n",
+                      "%s %d %8phC priority %s, fc4type %x prev try %s\n",
                       __func__, __LINE__, ea->fcport->port_name,
                       vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
-                      "FCP" : "NVMe", ea->fcport->fc4_type);
+                      "FCP" : "NVMe", ea->fcport->fc4_type,
+                      (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
+                       "NVME" : "FCP");
 
-               if (N2N_TOPO(vha->hw)) {
-                       if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) {
-                               ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
-                               ea->fcport->fc4_type |= FS_FC4TYPE_FCP;
-                       } else {
-                               ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
-                               ea->fcport->fc4_type |= FS_FC4TYPE_NVME;
-                       }
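+               /*
+                * PRLI failed: for a dual FCP/NVMe capable target, flip
+                * do_prli_nvme so the next attempt tries the other
+                * protocol.
+                */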
+               if (NVME_FCP_TARGET(ea->fcport)) {
+                       if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
+                               ea->fcport->do_prli_nvme = 0;
+                       else
+                               ea->fcport->do_prli_nvme = 1;
+               } else {
+                       ea->fcport->do_prli_nvme = 0;
+               }
 
-                       if (ea->fcport->n2n_link_reset_cnt < 3) {
+               if (N2N_TOPO(vha->hw)) {
+                       if (ea->fcport->n2n_link_reset_cnt <
+                           vha->hw->login_retry_count) {
                                ea->fcport->n2n_link_reset_cnt++;
                                vha->relogin_jif = jiffies + 2 * HZ;
                                /*
@@ -1964,6 +2056,7 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                                 * state machine
                                 */
                                set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
                        } else {
                                ql_log(ql_log_warn, vha, 0x2119,
                                       "%s %d %8phC Unable to reconnect\n",
@@ -1975,19 +2068,6 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                         * switch connect. login failed. Take connection down
                         * and allow relogin to retrigger
                         */
-                       if (NVME_FCP_TARGET(ea->fcport)) {
-                               ql_dbg(ql_dbg_disc, vha, 0x2118,
-                                      "%s %d %8phC post %s prli\n",
-                                      __func__, __LINE__,
-                                      ea->fcport->port_name,
-                                      (ea->fcport->fc4_type & FS_FC4TYPE_NVME)
-                                      ? "NVMe" : "FCP");
-                               if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
-                                       ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
-                               else
-                                       ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
-                       }
-
                        ea->fcport->flags &= ~FCF_ASYNC_SENT;
                        ea->fcport->keep_nport_handle = 0;
                        ea->fcport->logout_on_delete = 1;
@@ -2053,26 +2133,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
                 * requests.
                 */
-               if (NVME_TARGET(vha->hw, ea->fcport)) {
-                       ql_dbg(ql_dbg_disc, vha, 0x2117,
-                               "%s %d %8phC post prli\n",
-                               __func__, __LINE__, ea->fcport->port_name);
-                       qla24xx_post_prli_work(vha, ea->fcport);
-               } else {
-                       ql_dbg(ql_dbg_disc, vha, 0x20ea,
-                           "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
-                           __func__, __LINE__, ea->fcport->port_name,
-                           ea->fcport->loop_id, ea->fcport->d_id.b24);
-
+               if (vha->hw->flags.edif_enabled) {
                        set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                        ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
                        ea->fcport->logout_on_delete = 1;
                        ea->fcport->send_els_logo = 0;
-                       ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+                       ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
                        qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+               } else {
+                       if (NVME_TARGET(vha->hw, fcport)) {
+                               ql_dbg(ql_dbg_disc, vha, 0x2117,
+                                   "%s %d %8phC post prli\n",
+                                   __func__, __LINE__, fcport->port_name);
+                               qla24xx_post_prli_work(vha, fcport);
+                       } else {
+                               ql_dbg(ql_dbg_disc, vha, 0x20ea,
+                                   "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
+                                   __func__, __LINE__, fcport->port_name,
+                                   fcport->loop_id, fcport->d_id.b24);
+
+                               set_bit(fcport->loop_id, vha->hw->loop_id_map);
+                               spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+                               fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+                               fcport->logout_on_delete = 1;
+                               fcport->send_els_logo = 0;
+                               fcport->fw_login_state = DSC_LS_PRLI_COMP;
+                               spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+                               qla24xx_post_gpdb_work(vha, fcport, 0);
+                       }
                }
                break;
        case MBS_COMMAND_ERROR:
@@ -3877,7 +3969,8 @@ enable_82xx_npiv:
                }
 
                /* Enable PUREX PASSTHRU */
-               if (ql2xrdpenable || ha->flags.scm_supported_f)
+               if (ql2xrdpenable || ha->flags.scm_supported_f ||
+                   ha->flags.edif_enabled)
                        qla25xx_set_els_cmds_supported(vha);
        } else
                goto failed;
@@ -4062,7 +4155,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
        }
 
        /* Move PUREX, ABTS RX & RIDA to ATIOQ */
-       if (ql2xmvasynctoatio &&
+       if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
            (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
                if (qla_tgt_mode_enabled(vha) ||
                    qla_dual_mode_enabled(vha))
@@ -4081,16 +4174,30 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
                    qla_dual_mode_enabled(vha))
                        ha->fw_options[2] |= BIT_4;
                else
-                       ha->fw_options[2] &= ~BIT_4;
+                       ha->fw_options[2] &= ~(BIT_4);
 
                /* Reserve 1/2 of emergency exchanges for ELS.*/
                if (qla2xuseresexchforels)
                        ha->fw_options[2] |= BIT_8;
                else
                        ha->fw_options[2] &= ~BIT_8;
+
+               /*
+                * N2N: set Secure=1 in the PLOGI ACC so the firmware
+                * shall not send a PRLI after the PLOGI ACC.
+                */
+               if (ha->flags.edif_enabled &&
+                   vha->e_dbell.db_flags & EDB_ACTIVE) {
+                       ha->fw_options[3] |= BIT_15;
+                       ha->flags.n2n_fw_acc_sec = 1;
+               } else {
+                       ha->fw_options[3] &= ~BIT_15;
+                       ha->flags.n2n_fw_acc_sec = 0;
+               }
        }
 
-       if (ql2xrdpenable || ha->flags.scm_supported_f)
+       if (ql2xrdpenable || ha->flags.scm_supported_f ||
+           ha->flags.edif_enabled)
                ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
 
        /* Enable Async 8130/8131 events -- transceiver insertion/removal */
@@ -4289,8 +4396,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
-
        if (IS_QLAFX00(ha)) {
                rval = qlafx00_init_firmware(vha, ha->init_cb_size);
                goto next_check;
@@ -4299,6 +4404,12 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
        /* Update any ISP specific firmware options before initialization. */
        ha->isp_ops->update_fw_options(vha);
 
+       ql_dbg(ql_dbg_init, vha, 0x00d1,
+              "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
+              le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
+              le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
+              le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
+
        if (ha->flags.npiv_supported) {
                if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
                        ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -4531,11 +4642,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        /* initialize */
        ha->min_external_loopid = SNS_FIRST_LOOP_ID;
        ha->operating_mode = LOOP;
-       ha->switch_cap = 0;
 
        switch (topo) {
        case 0:
                ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
+               ha->switch_cap = 0;
                ha->current_topology = ISP_CFG_NL;
                strcpy(connect_type, "(Loop)");
                break;
@@ -4549,6 +4660,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 
        case 2:
                ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
+               ha->switch_cap = 0;
                ha->operating_mode = P2P;
                ha->current_topology = ISP_CFG_N;
                strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -4565,6 +4677,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        default:
                ql_dbg(ql_dbg_disc, vha, 0x200f,
                    "HBA in unknown topology %x, using NL.\n", topo);
+               ha->switch_cap = 0;
                ha->current_topology = ISP_CFG_NL;
                strcpy(connect_type, "(Loop)");
                break;
@@ -4577,7 +4690,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
        id.b.al_pa = al_pa;
        id.b.rsvd_1 = 0;
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       if (!(topo == 2 && ha->flags.n2n_bigger))
+       if (vha->hw->flags.edif_enabled) {
+               if (topo != 2)
+                       qlt_update_host_map(vha, id);
+       } else if (!(topo == 2 && ha->flags.n2n_bigger))
                qlt_update_host_map(vha, id);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -5071,6 +5187,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
        INIT_LIST_HEAD(&fcport->sess_cmd_list);
        spin_lock_init(&fcport->sess_cmd_lock);
 
+       spin_lock_init(&fcport->edif.sa_list_lock);
+       INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
+       INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
+
+       if (vha->e_dbell.db_flags == EDB_ACTIVE)
+               fcport->edif.app_started = 1;
+
+       spin_lock_init(&fcport->edif.indx_list_lock);
+       INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
+
        return fcport;
 }
 
@@ -5084,8 +5210,13 @@ qla2x00_free_fcport(fc_port_t *fcport)
 
                fcport->ct_desc.ct_sns = NULL;
        }
+
+       qla_edif_flush_sa_ctl_lists(fcport);
        list_del(&fcport->list);
        qla2x00_clear_loop_id(fcport);
+
+       qla_edif_list_del(fcport);
+
        kfree(fcport);
 }
 
@@ -5204,6 +5335,16 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
                            "LOOP READY.\n");
                        ha->flags.fw_init_done = 1;
 
+                       if (ha->flags.edif_enabled &&
+                           !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
+                           N2N_TOPO(vha->hw)) {
+                               /*
+                                * use port online to wake up app to get ready
+                                * for authentication
+                                */
+                               qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0);
+                       }
+
                        /*
                         * Process any ATIO queue entries that came in
                         * while we weren't online.
@@ -5223,7 +5364,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
                    "%s *** FAILED ***.\n", __func__);
        } else {
                ql_dbg(ql_dbg_disc, vha, 0x206b,
-                   "%s: exiting normally.\n", __func__);
+                   "%s: exiting normally. local port wwpn %8phN id %06x)\n",
+                   __func__, vha->port_name, vha->d_id.b24);
        }
 
        /* Restore state if a resync event occurred during processing */
@@ -5243,6 +5385,8 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
        unsigned long flags;
        fc_port_t *fcport;
 
+       ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
+
        if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 
@@ -6459,13 +6603,13 @@ void
 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
        fc_port_t *fcport;
-       struct scsi_qla_host *vha;
+       struct scsi_qla_host *vha, *tvp;
        struct qla_hw_data *ha = base_vha->hw;
        unsigned long flags;
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Go with deferred removal of rport references. */
-       list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
+       list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
                atomic_inc(&vha->vref_count);
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (fcport->drport &&
@@ -6810,7 +6954,8 @@ void
 qla2x00_quiesce_io(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp;
+       struct scsi_qla_host *vp, *tvp;
+       unsigned long flags;
 
        ql_dbg(ql_dbg_dpc, vha, 0x401d,
            "Quiescing I/O - ha=%p.\n", ha);
@@ -6819,8 +6964,18 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha);
-               list_for_each_entry(vp, &ha->vp_list, list)
+
+               spin_lock_irqsave(&ha->vport_slock, flags);
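+               /*
+                * Take a vref on each vport so it cannot go away while
+                * vport_slock is dropped around the mark-lost call.
+                */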
+               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
+                       atomic_inc(&vp->vref_count);
+                       spin_unlock_irqrestore(&ha->vport_slock, flags);
+
                        qla2x00_mark_all_devices_lost(vp);
+
+                       spin_lock_irqsave(&ha->vport_slock, flags);
+                       atomic_dec(&vp->vref_count);
+               }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer,
@@ -6835,7 +6990,7 @@ void
 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp;
+       struct scsi_qla_host *vp, *tvp;
        unsigned long flags;
        fc_port_t *fcport;
        u16 i;
@@ -6903,7 +7058,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
                qla2x00_mark_all_devices_lost(vha);
 
                spin_lock_irqsave(&ha->vport_slock, flags);
-               list_for_each_entry(vp, &ha->vp_list, list) {
+               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -6925,7 +7080,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
                fcport->scan_state = 0;
        }
        spin_lock_irqsave(&ha->vport_slock, flags);
-       list_for_each_entry(vp, &ha->vp_list, list) {
+       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                atomic_inc(&vp->vref_count);
                spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -6969,7 +7124,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
        int rval;
        uint8_t        status = 0;
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp;
+       struct scsi_qla_host *vp, *tvp;
        struct req_que *req = ha->req_q_map[0];
        unsigned long flags;
 
@@ -7125,7 +7280,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
                qla2x00_configure_hba(vha);
                spin_lock_irqsave(&ha->vport_slock, flags);
-               list_for_each_entry(vp, &ha->vp_list, list) {
+               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                        if (vp->vp_idx) {
                                atomic_inc(&vp->vref_count);
                                spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -8810,7 +8965,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 {
        int status, rval;
        struct qla_hw_data *ha = vha->hw;
-       struct scsi_qla_host *vp;
+       struct scsi_qla_host *vp, *tvp;
        unsigned long flags;
 
        status = qla2x00_init_rings(vha);
@@ -8882,7 +9037,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                    "qla82xx_restart_isp succeeded.\n");
 
                spin_lock_irqsave(&ha->vport_slock, flags);
-               list_for_each_entry(vp, &ha->vp_list, list) {
+               list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                        if (vp->vp_idx) {
                                atomic_inc(&vp->vref_count);
                                spin_unlock_irqrestore(&ha->vport_slock, flags);
index 82937c6..5f3b799 100644 (file)
@@ -478,3 +478,19 @@ bool qla_pci_disconnected(struct scsi_qla_host *vha,
        }
        return ret;
 }
+
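+/*
+ * In N2N (point-to-point) topology the two ports' WWPNs determine
+ * which side drives the login; these helpers order the remote port's
+ * WWPN against the local port's.
+ */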
+static inline bool
+fcport_is_smaller(fc_port_t *fcport)
+{
+       return wwn_to_u64(fcport->port_name) <
+              wwn_to_u64(fcport->vha->port_name);
+}
+
+static inline bool
+fcport_is_bigger(fc_port_t *fcport)
+{
+       return !fcport_is_smaller(fcport);
+}
index d0ee843..9d4ad1d 100644 (file)
@@ -118,7 +118,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  *
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
-static inline cont_a64_entry_t *
+cont_a64_entry_t *
 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 {
        cont_a64_entry_t *cont_pkt;
@@ -145,7 +145,6 @@ inline int
 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       uint8_t guard = scsi_host_get_guard(cmd->device->host);
 
        /* We always use DIFF Bundling for best performance */
        *fw_prot_opts = 0;
@@ -166,7 +165,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
-               if (guard & SHOST_DIX_GUARD_IP)
+               if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -176,6 +175,9 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
                break;
        }
 
+       if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
+               *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
        return scsi_prot_sg_count(cmd);
 }
 
@@ -772,74 +774,19 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
 {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
-       switch (scsi_get_prot_type(cmd)) {
-       case SCSI_PROT_DIF_TYPE0:
-               /*
-                * No check for ql2xenablehba_err_chk, as it would be an
-                * I/O error if hba tag generation is not done.
-                */
-               pkt->ref_tag = cpu_to_le32((uint32_t)
-                   (0xffffffff & scsi_get_lba(cmd)));
-
-               if (!qla2x00_hba_err_chk_enabled(sp))
-                       break;
+       pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
 
+       if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
+           qla2x00_hba_err_chk_enabled(sp)) {
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
-               break;
-
-       /*
-        * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
-        * match LBA in CDB + N
-        */
-       case SCSI_PROT_DIF_TYPE2:
-               pkt->app_tag = cpu_to_le16(0);
-               pkt->app_tag_mask[0] = 0x0;
-               pkt->app_tag_mask[1] = 0x0;
-
-               pkt->ref_tag = cpu_to_le32((uint32_t)
-                   (0xffffffff & scsi_get_lba(cmd)));
-
-               if (!qla2x00_hba_err_chk_enabled(sp))
-                       break;
-
-               /* enable ALL bytes of the ref tag */
-               pkt->ref_tag_mask[0] = 0xff;
-               pkt->ref_tag_mask[1] = 0xff;
-               pkt->ref_tag_mask[2] = 0xff;
-               pkt->ref_tag_mask[3] = 0xff;
-               break;
-
-       /* For Type 3 protection: 16 bit GUARD only */
-       case SCSI_PROT_DIF_TYPE3:
-               pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
-                       pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
-                                                               0x00;
-               break;
-
-       /*
-        * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
-        * 16 bit app tag.
-        */
-       case SCSI_PROT_DIF_TYPE1:
-               pkt->ref_tag = cpu_to_le32((uint32_t)
-                   (0xffffffff & scsi_get_lba(cmd)));
-               pkt->app_tag = cpu_to_le16(0);
-               pkt->app_tag_mask[0] = 0x0;
-               pkt->app_tag_mask[1] = 0x0;
-
-               if (!qla2x00_hba_err_chk_enabled(sp))
-                       break;
-
-               /* enable ALL bytes of the ref tag */
-               pkt->ref_tag_mask[0] = 0xff;
-               pkt->ref_tag_mask[1] = 0xff;
-               pkt->ref_tag_mask[2] = 0xff;
-               pkt->ref_tag_mask[3] = 0xff;
-               break;
        }
+
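+       /* The app tag is never checked: zero it and mask it off. */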
+       pkt->app_tag = cpu_to_le16(0);
+       pkt->app_tag_mask[0] = 0x0;
+       pkt->app_tag_mask[1] = 0x0;
 }
 
 int
@@ -905,7 +852,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
-               prot_int = cmd->device->sector_size;
+               prot_int = scsi_prot_interval(cmd);
 
                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
@@ -1605,6 +1552,9 @@ qla24xx_start_scsi(srb_t *sp)
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
 
+       if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
+               return qla28xx_start_scsi_edif(sp);
+
        /* Setup device pointers. */
        req = vha->req;
        rsp = req->rsp;
@@ -1963,6 +1913,9 @@ qla2xxx_start_scsi_mq(srb_t *sp)
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair = sp->qpair;
 
+       if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
+               return qla28xx_start_scsi_edif(sp);
+
        /* Acquire qpair specific lock */
        spin_lock_irqsave(&qpair->qp_lock, flags);
 
@@ -2466,6 +2419,12 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
                        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
                if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
                        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+               if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
+                       logio->control_flags |=
+                           cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
+                       logio->io_parameter[0] =
+                           cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
+               }
        }
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2789,7 +2748,10 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
        els_iocb->s_id[0] = vha->d_id.b.domain;
 
        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
-               els_iocb->control_flags = 0;
+               if (vha->hw->flags.edif_enabled)
+                       els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
+               else
+                       els_iocb->control_flags = 0;
                els_iocb->tx_byte_count = els_iocb->tx_len =
                        cpu_to_le32(sizeof(struct els_plogi_payload));
                put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
@@ -2806,7 +2768,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
                    (uint8_t *)els_iocb,
                    sizeof(*els_iocb));
        } else {
-               els_iocb->control_flags = cpu_to_le16(1 << 13);
                els_iocb->tx_byte_count =
                        cpu_to_le32(sizeof(struct els_logo_payload));
                put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -3030,7 +2991,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
        elsio = &sp->u.iocb_cmd;
        ql_dbg(ql_dbg_io, vha, 0x3073,
-           "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+              "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
 
        sp->type = SRB_ELS_DCMD;
        sp->name = "ELS_DCMD";
@@ -3073,6 +3034,13 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        elsio->u.els_plogi.els_cmd = els_opcode;
        elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
 
+       if (els_opcode == ELS_DCMD_PLOGI && vha->hw->flags.edif_enabled &&
+           vha->e_dbell.db_flags & EDB_ACTIVE) {
+               struct fc_els_flogi *p = ptr;
+
+               p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
+       }
+
        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
        ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
            (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
@@ -3106,6 +3074,43 @@ done:
        return rval;
 }
 
+/* it is assumed the qpair lock is held */
+void qla_els_pt_iocb(struct scsi_qla_host *vha,
+       struct els_entry_24xx *els_iocb,
+       struct qla_els_pt_arg *a)
+{
+       els_iocb->entry_type = ELS_IOCB_TYPE;
+       els_iocb->entry_count = 1;
+       els_iocb->sys_define = 0;
+       els_iocb->entry_status = 0;
+       els_iocb->handle = QLA_SKIP_HANDLE;
+       els_iocb->nport_handle = a->nport_handle;
+       els_iocb->rx_xchg_address = a->rx_xchg_address;
+       els_iocb->tx_dsd_count = cpu_to_le16(1);
+       els_iocb->vp_index = a->vp_idx;
+       els_iocb->sof_type = EST_SOFI3;
+       els_iocb->rx_dsd_count = cpu_to_le16(0);
+       els_iocb->opcode = a->els_opcode;
+
+       els_iocb->d_id[0] = a->did.b.al_pa;
+       els_iocb->d_id[1] = a->did.b.area;
+       els_iocb->d_id[2] = a->did.b.domain;
+       /* For the S_ID the byte order differs from the D_ID */
+       els_iocb->s_id[1] = vha->d_id.b.al_pa;
+       els_iocb->s_id[2] = vha->d_id.b.area;
+       els_iocb->s_id[0] = vha->d_id.b.domain;
+
+       els_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+       els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+       els_iocb->tx_len = cpu_to_le32(a->tx_len);
+       put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
+
+       els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
+       els_iocb->rx_len = cpu_to_le32(a->rx_len);
+       put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
+}
+
 static void
 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 {
@@ -3700,6 +3705,16 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
        nack->u.isp24.srr_reject_code = 0;
        nack->u.isp24.srr_reject_code_expl = 0;
        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+       if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+           (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
+           sp->vha->hw->flags.edif_enabled) {
+               ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
+                   "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
+                   sp->name, sp->handle, sp->fcport->loop_id,
+                   sp->fcport->d_id.b24);
+               nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+       }
 }
 
 /*
@@ -3804,6 +3819,10 @@ qla2x00_start_sp(srb_t *sp)
        case SRB_ELS_CMD_HST:
                qla24xx_els_iocb(sp, pkt);
                break;
+       case SRB_ELS_CMD_HST_NOLOGIN:
+               qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
+               ((struct els_entry_24xx *)pkt)->handle = sp->handle;
+               break;
        case SRB_CT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_ct_iocb(sp, pkt) :
@@ -3851,6 +3870,12 @@ qla2x00_start_sp(srb_t *sp)
        case SRB_PRLO_CMD:
                qla24xx_prlo_iocb(sp, pkt);
                break;
+       case SRB_SA_UPDATE:
+               qla24xx_sa_update_iocb(sp, pkt);
+               break;
+       case SRB_SA_REPLACE:
+               qla24xx_sa_replace_iocb(sp, pkt);
+               break;
        default:
                break;
        }
index d9fb093..ece6026 100644 (file)
@@ -169,6 +169,149 @@ qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
        dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
 }
 
+/**
+ * __qla_consume_iocb - tell the firmware that the driver has processed
+ *   (consumed) the head IOCB along with its continuation IOCBs from the
+ *   provided response queue.
+ * @vha: host adapter pointer
+ * @pkt: pointer to the current packet.  On return, this pointer shall move
+ *       to the next packet.
+ * @rsp: response queue pointer.
+ *
+ * It is assumed @pkt is the head IOCB, not a continuation IOCB.
+ */
+void __qla_consume_iocb(struct scsi_qla_host *vha,
+       void **pkt, struct rsp_que **rsp)
+{
+       struct rsp_que *rsp_q = *rsp;
+       response_t *new_pkt;
+       uint16_t entry_count_remaining;
+       struct purex_entry_24xx *purex = *pkt;
+
+       entry_count_remaining = purex->entry_count;
+       while (entry_count_remaining > 0) {
+               new_pkt = rsp_q->ring_ptr;
+               *pkt = new_pkt;
+
+               rsp_q->ring_index++;
+               if (rsp_q->ring_index == rsp_q->length) {
+                       rsp_q->ring_index = 0;
+                       rsp_q->ring_ptr = rsp_q->ring;
+               } else {
+                       rsp_q->ring_ptr++;
+               }
+
+               new_pkt->signature = RESPONSE_PROCESSED;
+               /* flush signature */
+               wmb();
+               --entry_count_remaining;
+       }
+}
+
+/**
+ * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
+ *    and save it to the provided buffer
+ * @vha: host adapter pointer
+ * @pkt: pointer to the PUREX IOCB
+ * @rsp: response queue
+ * @buf: buffer to copy the extracted ELS payload into
+ * @buf_len: buffer length
+ */
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
+       void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
+{
+       struct purex_entry_24xx *purex = *pkt;
+       struct rsp_que *rsp_q = *rsp;
+       sts_cont_entry_t *new_pkt;
+       uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+       uint16_t buffer_copy_offset = 0;
+       uint16_t entry_count_remaining;
+       u16 tpad;
+
+       entry_count_remaining = purex->entry_count;
+       total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
+               - PURX_ELS_HEADER_SIZE;
+
+       /*
+        * the payload may not end on a 4-byte boundary.  Round up /
+        * pad to leave room for the byte swap before saving the data
+        */
+       tpad = roundup(total_bytes, 4);
+
+       if (buf_len < tpad) {
+               ql_dbg(ql_dbg_async, vha, 0x5084,
+                   "%s buffer is too small %d < %d\n",
+                   __func__, buf_len, tpad);
+               __qla_consume_iocb(vha, pkt, rsp);
+               return -EIO;
+       }
+
+       pending_bytes = total_bytes = tpad;
+       no_bytes = (pending_bytes > sizeof(purex->els_frame_payload))  ?
+           sizeof(purex->els_frame_payload) : pending_bytes;
+
+       memcpy(buf, &purex->els_frame_payload[0], no_bytes);
+       buffer_copy_offset += no_bytes;
+       pending_bytes -= no_bytes;
+       --entry_count_remaining;
+
+       ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+       /* flush signature */
+       wmb();
+
+       do {
+               while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+                       new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+                       *pkt = new_pkt;
+
+                       if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+                               ql_log(ql_log_warn, vha, 0x507a,
+                                   "Unexpected IOCB type, partial data 0x%x\n",
+                                   buffer_copy_offset);
+                               break;
+                       }
+
+                       rsp_q->ring_index++;
+                       if (rsp_q->ring_index == rsp_q->length) {
+                               rsp_q->ring_index = 0;
+                               rsp_q->ring_ptr = rsp_q->ring;
+                       } else {
+                               rsp_q->ring_ptr++;
+                       }
+                       no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+                           sizeof(new_pkt->data) : pending_bytes;
+                       if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+                               memcpy((buf + buffer_copy_offset), new_pkt->data,
+                                   no_bytes);
+                               buffer_copy_offset += no_bytes;
+                               pending_bytes -= no_bytes;
+                               --entry_count_remaining;
+                       } else {
+                               ql_log(ql_log_warn, vha, 0x5044,
+                                   "Attempt to copy more that we got, optimizing..%x\n",
+                                   buffer_copy_offset);
+                               memcpy((buf + buffer_copy_offset), new_pkt->data,
+                                   total_bytes - buffer_copy_offset);
+                       }
+
+                       ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+                       /* flush signature */
+                       wmb();
+               }
+
+               if (pending_bytes != 0 || entry_count_remaining != 0) {
+                       ql_log(ql_log_fatal, vha, 0x508b,
+                           "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
+                           total_bytes, entry_count_remaining);
+                       return -EIO;
+               }
+       } while (entry_count_remaining > 0);
+
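+       /*
+        * The ELS payload arrives big-endian; swap it in place
+        * (total_bytes was padded to a 4-byte multiple above).
+        */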
+       be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
+
+       return 0;
+}
+
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
  * @irq: interrupt number
@@ -505,7 +648,7 @@ const char *
 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
 {
        static const char *const link_speeds[] = {
-               "1", "2", "?", "4", "8", "16", "32", "10"
+               "1", "2", "?", "4", "8", "16", "32", "64", "10"
        };
 #define        QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
 
@@ -1727,6 +1870,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
        srb_t *sp;
        uint16_t index;
 
+       if (pkt->handle == QLA_SKIP_HANDLE)
+               return NULL;
+
        index = LSW(pkt->handle);
        if (index >= req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x5031,
@@ -1971,7 +2117,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 }
 
 static void
-qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
     struct sts_entry_24xx *pkt, int iocb_type)
 {
        struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
@@ -1982,18 +2128,58 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
        struct fc_bsg_reply *bsg_reply;
        uint16_t comp_status;
        uint32_t fw_status[3];
-       int res;
+       int res, logit = 1;
        struct srb_iocb *els;
+       uint n;
+       scsi_qla_host_t *vha;
+       struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
 
-       sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+       sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
        if (!sp)
                return;
+       bsg_job = sp->u.bsg_job;
+       vha = sp->vha;
 
        type = NULL;
+
+       comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+       fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+       fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
        switch (sp->type) {
        case SRB_ELS_CMD_RPT:
        case SRB_ELS_CMD_HST:
+               type = "rpt hst";
+               break;
+       case SRB_ELS_CMD_HST_NOLOGIN:
                type = "els";
+               {
+                       struct els_entry_24xx *els = (void *)pkt;
+                       struct qla_bsg_auth_els_request *p =
+                               (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+                       ql_dbg(ql_dbg_user, vha, 0x700f,
+                            "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
+                            __func__, sc_to_str(p->e.sub_cmd),
+                            e->d_id[2], e->d_id[1], e->d_id[0],
+                            comp_status, p->e.extra_rx_xchg_address, bsg_job);
+
+                       if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
+                               if (sp->remap.remapped) {
+                                       n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+                                               bsg_job->reply_payload.sg_cnt,
+                                               sp->remap.rsp.buf,
+                                               sp->remap.rsp.len);
+                                       ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
+                                          "%s: SG copied %x of %x\n",
+                                          __func__, n, sp->remap.rsp.len);
+                               } else {
+                                       ql_dbg(ql_dbg_user, vha, 0x700f,
+                                          "%s: NOT REMAPPED (error)...!!!\n",
+                                          __func__);
+                               }
+                       }
+               }
                break;
        case SRB_CT_CMD:
                type = "ct pass-through";
@@ -2023,10 +2209,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                return;
        }
 
-       comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
-       fw_status[1] = le32_to_cpu(ese->error_subcode_1);
-       fw_status[2] = le32_to_cpu(ese->error_subcode_2);
-
        if (iocb_type == ELS_IOCB_TYPE) {
                els = &sp->u.iocb_cmd;
                els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
@@ -2040,15 +2222,53 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
                                res =  DID_OK << 16;
                                els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
                                        ese->total_byte_count));
+
+                               if (sp->remap.remapped &&
+                                   ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
+                                       ql_dbg(ql_dbg_user, vha, 0x503f,
+                                           "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
+                                           __func__, e->s_id[0], e->s_id[2], e->s_id[1],
+                                           e->d_id[2], e->d_id[1], e->d_id[0]);
+                                       logit = 0;
+                               }
+
+                       } else if (comp_status == CS_PORT_LOGGED_OUT) {
+                               els->u.els_plogi.len = 0;
+                               res = DID_IMM_RETRY << 16;
+                               qlt_schedule_sess_for_deletion(sp->fcport);
                        } else {
                                els->u.els_plogi.len = 0;
                                res = DID_ERROR << 16;
                        }
+
+                       if (logit) {
+                               if (sp->remap.remapped &&
+                                   ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+                                       ql_dbg(ql_dbg_user, vha, 0x503f,
+                                           "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
+                                           type, sp->handle, comp_status);
+
+                                       ql_dbg(ql_dbg_user, vha, 0x503f,
+                                           "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+                                           fw_status[1], fw_status[2],
+                                           le32_to_cpu(((struct els_sts_entry_24xx *)
+                                               pkt)->total_byte_count),
+                                           e->s_id[0], e->s_id[2], e->s_id[1],
+                                           e->d_id[2], e->d_id[1], e->d_id[0]);
+                               } else {
+                                       ql_log(ql_log_info, vha, 0x503f,
+                                           "%s IOCB Done hdl=%x comp_status=0x%x\n",
+                                           type, sp->handle, comp_status);
+                                       ql_log(ql_log_info, vha, 0x503f,
+                                           "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+                                           fw_status[1], fw_status[2],
+                                           le32_to_cpu(((struct els_sts_entry_24xx *)
+                                               pkt)->total_byte_count),
+                                           e->s_id[0], e->s_id[2], e->s_id[1],
+                                           e->d_id[2], e->d_id[1], e->d_id[0]);
+                               }
+                       }
                }
-               ql_dbg(ql_dbg_disc, vha, 0x503f,
-                   "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
-                   type, sp->handle, comp_status, fw_status[1], fw_status[2],
-                   le32_to_cpu(ese->total_byte_count));
                goto els_ct_done;
        }
 
@@ -2107,6 +2327,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        struct srb_iocb *lio;
        uint16_t *data;
        uint32_t iop[2];
+       int logit = 1;
 
        sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
        if (!sp)
@@ -2153,6 +2374,10 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                if (sp->type != SRB_LOGIN_CMD)
                        goto logio_done;
 
+               lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
+               if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
+                       fcport->flags |= FCF_FCSP_DEVICE;
+
                iop[0] = le32_to_cpu(logio->io_parameter[0]);
                if (iop[0] & BIT_4) {
                        fcport->port_type = FCT_TARGET;
@@ -2180,9 +2405,11 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        case LSC_SCODE_PORTID_USED:
                data[0] = MBS_PORT_ID_USED;
                data[1] = LSW(iop[1]);
+               logit = 0;
                break;
        case LSC_SCODE_NPORT_USED:
                data[0] = MBS_LOOP_ID_USED;
+               logit = 0;
                break;
        case LSC_SCODE_CMD_FAILED:
                if (iop[1] == 0x0606) {
@@ -2215,12 +2442,20 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
                break;
        }
 
-       ql_log(ql_log_warn, sp->vha, 0x5037,
-              "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
-              type, sp->handle, fcport->d_id.b24, fcport->port_name,
-              le16_to_cpu(logio->comp_status),
-              le32_to_cpu(logio->io_parameter[0]),
-              le32_to_cpu(logio->io_parameter[1]));
+       if (logit)
+               ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
+                      "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+                      type, sp->handle, fcport->d_id.b24, fcport->port_name,
+                      le16_to_cpu(logio->comp_status),
+                      le32_to_cpu(logio->io_parameter[0]),
+                      le32_to_cpu(logio->io_parameter[1]));
+       else
+               ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
+                      "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+                      type, sp->handle, fcport->d_id.b24, fcport->port_name,
+                      le16_to_cpu(logio->comp_status),
+                      le32_to_cpu(logio->io_parameter[0]),
+                      le32_to_cpu(logio->io_parameter[1]));
 
 logio_done:
        sp->done(sp, 0);
@@ -2417,6 +2652,15 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
        case CS_PORT_UNAVAILABLE:
        case CS_PORT_LOGGED_OUT:
                fcport->nvme_flag |= NVME_FLAG_RESETTING;
+               if (atomic_read(&fcport->state) == FCS_ONLINE) {
+                       ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+                              "Port to be marked lost on fcport=%06x, current "
+                              "port state= %s comp_status %x.\n",
+                              fcport->d_id.b24, port_state_str[FCS_ONLINE],
+                              comp_status);
+
+                       qlt_schedule_sess_for_deletion(fcport);
+               }
                fallthrough;
        case CS_ABORTED:
        case CS_PORT_BUSY:
@@ -2969,9 +3213,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                return;
        }
 
+       /* Fast path completion. */
+       qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
        sp->qpair->cmd_completion_cnt++;
 
-       /* Fast path completion. */
        if (comp_status == CS_COMPLETE && scsi_status == 0) {
                qla2x00_process_completed_request(vha, req, handle);
 
@@ -3366,6 +3611,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
                }
                break;
 
+       case SA_UPDATE_IOCB_TYPE:
        case ABTS_RESP_24XX:
        case CTIO_TYPE7:
        case CTIO_CRC2:
@@ -3452,6 +3698,63 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
        sp->done(sp, comp_status);
 }
 
+/**
+ * qla_chk_cont_iocb_avail - check that all continuation IOCBs are available
+ *   before IOCB processing can start.
+ * @vha: host adapter pointer
+ * @rsp: response queue
+ * @pkt: head IOCB describing how many continuation IOCBs follow
+ * Return: 0 if all IOCBs have arrived, non-zero otherwise.
+ */
+static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
+       struct rsp_que *rsp, response_t *pkt)
+{
+       int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
+       response_t *end_pkt;
+       int rc = 0;
+       u32 rsp_q_in;
+
+       if (pkt->entry_count == 1)
+               return rc;
+
+       /* ring_index was pre-incremented; set it back to the current pkt */
+       if (rsp->ring_index == 0)
+               start_pkt_ring_index = rsp->length - 1;
+       else
+               start_pkt_ring_index = rsp->ring_index - 1;
+
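+       /* index of this packet's last IOCB, allowing for ring wrap */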
+       if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
+               end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
+                       rsp->length - 1;
+       else
+               end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
+
+       end_pkt = rsp->ring + end_pkt_ring_index;
+
+       /* next pkt = end_pkt + 1 */
+       n_ring_index = end_pkt_ring_index + 1;
+       if (n_ring_index >= rsp->length)
+               n_ring_index = 0;
+
+       rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
+               rd_reg_dword(rsp->rsp_q_in);
+
+       /*
+        * All IOCBs have arrived when rsp_q_in has either wrapped around
+        * or points at/beyond the entry following end_pkt.
+        */
+       if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
+                       rsp_q_in >= n_ring_index)
+               /* all IOCBs arrived. */
+               rc = 0;
+       else
+               rc = -EIO;
+
+       ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
+           "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
+           __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
+           rsp_q_in, rc);
+
+       return rc;
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @vha: SCSI driver HA context
@@ -3592,12 +3895,26 @@ process_err:
                                                 qla27xx_process_purex_fpin);
                                break;
 
+                       case ELS_AUTH_ELS:
+                               if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+                                       ql_dbg(ql_dbg_init, vha, 0x5091,
+                                           "Defer processing ELS opcode %#x...\n",
+                                           purex_entry->els_frame_payload[3]);
+                                       return;
+                               }
+                               qla24xx_auth_els(vha, (void **)&pkt, &rsp);
+                               break;
                        default:
                                ql_log(ql_log_warn, vha, 0x509c,
                                       "Discarding ELS Request opcode 0x%x\n",
                                       purex_entry->els_frame_payload[3]);
                        }
                        break;
+               case SA_UPDATE_IOCB_TYPE:
+                       qla28xx_sa_update_iocb_entry(vha, rsp->req,
+                               (struct sa_update_28xx *)pkt);
+                       break;
+
                default:
                        /* Type Not Supported. */
                        ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -4201,6 +4518,8 @@ skip_msi:
                ql_dbg(ql_dbg_init, vha, 0x0125,
                    "INTa mode: Enabled.\n");
                ha->flags.mr_intr_valid = 1;
+               /* Set max_qpairs to 0, as neither MSI-X nor MSI is enabled */
+               ha->max_qpairs = 0;
        }
 
 clear_risc_ints:
index 9f3ad8a..7811c49 100644 (file)
@@ -663,6 +663,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
 }
 
 #define        NVME_ENABLE_FLAG        BIT_3
+#define        EDIF_HW_SUPPORT         BIT_10
 
 /*
  * qla2x00_execute_fw
@@ -739,7 +740,7 @@ again:
                        mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
 
                mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
-               mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
+               mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
        } else {
                mcp->mb[1] = LSW(risc_addr);
                mcp->out_mb |= MBX_1;
@@ -795,6 +796,12 @@ again:
                }
        }
 
+       if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
+               ha->flags.edif_hw = 1;
+               ql_log(ql_log_info, vha, 0xffff,
+                   "%s: edif HW\n", __func__);
+       }
+
 done:
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
            "Done %s.\n", __func__);
@@ -1130,6 +1137,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                               ha->fw_attributes_ext[0]);
                        vha->flags.nvme2_enabled = 1;
                }
+
+               if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
+                   (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
+                       ha->flags.edif_enabled = 1;
+                       ql_log(ql_log_info, vha, 0xffff,
+                              "%s: edif is enabled\n", __func__);
+               }
        }
 
        if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
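
Taken together, the two hunks above gate EDIF on three independent switches: the EDIF_HW_SUPPORT bit returned in mailbox 5 at EXECUTE FW, the ql2xsecenable module parameter, and the firmware attribute bit. A compact sketch of the combined predicate with mocked-up state; the struct, helper name, and the FW_ATTR_EXT0_EDIF bit value here are illustrative assumptions, not driver definitions:

#include <stdbool.h>
#include <stdint.h>

/* Mock of the relevant adapter state; the real fields live in
 * struct qla_hw_data. */
struct edif_caps {
    bool is_qla28xx;        /* IS_QLA28XX(ha) */
    bool edif_hw;           /* MB5 EDIF_HW_SUPPORT seen at EXECUTE FW */
    uint32_t fw_attr_ext0;  /* ha->fw_attributes_ext[0] */
};

#define FW_ATTR_EXT0_EDIF (1u << 5)  /* assumed bit value, illustrative */

static bool edif_should_enable(const struct edif_caps *c, int ql2xsecenable)
{
    /* all four gates from the diff must hold */
    return c->is_qla28xx && c->edif_hw && ql2xsecenable &&
           (c->fw_attr_ext0 & FW_ATTR_EXT0_EDIF);
}

int main(void)
{
    struct edif_caps c = { true, true, FW_ATTR_EXT0_EDIF };
    return !edif_should_enable(&c, 1);  /* exits 0 when enabled */
}
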
@@ -3231,7 +3245,7 @@ qla24xx_abort_command(srb_t *sp)
        if (sp->qpair)
                req = sp->qpair->req;
        else
-               return QLA_FUNCTION_FAILED;
+               return QLA_ERR_NO_QPAIR;
 
        if (ql2xasynctmfenable)
                return qla24xx_async_abort_command(sp);
@@ -3244,7 +3258,7 @@ qla24xx_abort_command(srb_t *sp)
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
        if (handle == req->num_outstanding_cmds) {
                /* Command not found. */
-               return QLA_FUNCTION_FAILED;
+               return QLA_ERR_NOT_FOUND;
        }
 
        abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
@@ -4035,6 +4049,10 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                fcport->scan_state = QLA_FCPORT_FOUND;
                                fcport->n2n_flag = 1;
                                fcport->keep_nport_handle = 1;
+                               fcport->login_retry = vha->hw->login_retry_count;
+                               fcport->fc4_type = FS_FC4TYPE_FCP;
+                               if (vha->flags.nvme_enabled)
+                                       fcport->fc4_type |= FS_FC4TYPE_NVME;
 
                                if (wwn_to_u64(vha->port_name) >
                                    wwn_to_u64(fcport->port_name)) {
@@ -4172,6 +4190,16 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                rptid_entry->u.f2.remote_nport_id[1];
                        fcport->d_id.b.al_pa =
                                rptid_entry->u.f2.remote_nport_id[0];
+
+                       /*
+                        * When the remote port sends PRLO, FW sends up RIDA
+                        * Format 2 as an indication of session loss. In
+                        * other words, the FW state changes from PRLI
+                        * complete back to PLOGI complete. Delete the
+                        * session and let relogin drive the reconnect.
+                        */
+                       if (atomic_read(&fcport->state) == FCS_ONLINE)
+                               qlt_schedule_sess_for_deletion(fcport);
                }
        }
 }
@@ -4946,7 +4974,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
        return rval;
 }
 
-#define PUREX_CMD_COUNT        2
+#define PUREX_CMD_COUNT        4
 int
 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
 {
@@ -4954,6 +4982,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
        uint8_t *els_cmd_map;
+       uint8_t active_cnt = 0;
        dma_addr_t els_cmd_map_dma;
        uint8_t cmd_opcode[PUREX_CMD_COUNT];
        uint8_t i, index, purex_bit;
@@ -4975,10 +5004,20 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
        }
 
        /* List of Purex ELS */
-       cmd_opcode[0] = ELS_FPIN;
-       cmd_opcode[1] = ELS_RDP;
+       if (ql2xrdpenable) {
+               cmd_opcode[active_cnt] = ELS_RDP;
+               active_cnt++;
+       }
+       if (ha->flags.scm_supported_f) {
+               cmd_opcode[active_cnt] = ELS_FPIN;
+               active_cnt++;
+       }
+       if (ha->flags.edif_enabled) {
+               cmd_opcode[active_cnt] = ELS_AUTH_ELS;
+               active_cnt++;
+       }
 
-       for (i = 0; i < PUREX_CMD_COUNT; i++) {
+       for (i = 0; i < active_cnt; i++) {
                index = cmd_opcode[i] / 8;
                purex_bit = cmd_opcode[i] % 8;
                els_cmd_map[index] |= 1 << purex_bit;
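
The bitmap the loop fills is dense: one bit per possible 8-bit ELS opcode, so byte index = opcode / 8 and bit = opcode % 8. A self-contained toy of the same encoding, with opcode values matching the kernel's fc_els.h definitions:

#include <stdint.h>
#include <stdio.h>

/* One bit per 8-bit ELS opcode: 256 opcodes -> 32 map bytes. */
enum { ELS_FPIN = 0x16, ELS_RDP = 0x18, ELS_AUTH_ELS = 0x90 };

int main(void)
{
    uint8_t map[32] = { 0 };
    const uint8_t opcodes[] = { ELS_RDP, ELS_FPIN, ELS_AUTH_ELS };

    for (size_t i = 0; i < sizeof(opcodes); i++)
        map[opcodes[i] / 8] |= 1u << (opcodes[i] % 8);

    /* ELS_AUTH_ELS (0x90) lands in byte 18, bit 0 */
    printf("byte 18 = 0x%02x\n", map[ELS_AUTH_ELS / 8]);
    return 0;
}
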
@@ -6588,6 +6627,12 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
        fcport->d_id.b.al_pa = pd->port_id[2];
        fcport->d_id.b.rsvd_1 = 0;
 
+       ql_dbg(ql_dbg_disc, vha, 0x2062,
+            "%8phC SVC Param w3 %02x%02x",
+            fcport->port_name,
+            pd->prli_svc_param_word_3[1],
+            pd->prli_svc_param_word_3[0]);
+
        if (NVME_TARGET(vha->hw, fcport)) {
                fcport->port_type = FCT_NVME;
                if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
index c7caf32..1c02405 100644 (file)
@@ -65,7 +65,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;
-       u8 i;
+       u32 i, bailout;
 
        mutex_lock(&ha->vport_lock);
        /*
@@ -75,21 +75,29 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
-       for (i = 0; i < 10; i++) {
-               if (wait_event_timeout(vha->vref_waitq,
-                   !atomic_read(&vha->vref_count), HZ) > 0)
+       bailout = 0;
+       for (i = 0; i < 500; i++) {
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               if (atomic_read(&vha->vref_count) == 0) {
+                       list_del(&vha->list);
+                       qlt_update_vp_map(vha, RESET_VP_IDX);
+                       bailout = 1;
+               }
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+               if (bailout)
                        break;
+               else
+                       msleep(20);
        }
-
-       spin_lock_irqsave(&ha->vport_slock, flags);
-       if (atomic_read(&vha->vref_count)) {
-               ql_dbg(ql_dbg_vport, vha, 0xfffa,
-                   "vha->vref_count=%u timeout\n", vha->vref_count.counter);
-               vha->vref_count = (atomic_t)ATOMIC_INIT(0);
+       if (!bailout) {
+               ql_log(ql_log_info, vha, 0xfffa,
+                       "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+               spin_lock_irqsave(&ha->vport_slock, flags);
+               list_del(&vha->list);
+               qlt_update_vp_map(vha, RESET_VP_IDX);
+               spin_unlock_irqrestore(&ha->vport_slock, flags);
        }
-       list_del(&vha->list);
-       qlt_update_vp_map(vha, RESET_VP_IDX);
-       spin_unlock_irqrestore(&ha->vport_slock, flags);
 
        vp_id = vha->vp_idx;
        ha->num_vhosts--;
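
The rewritten wait replaces a single wait_event_timeout() with an explicit poll: up to 500 iterations of a 20 ms sleep (about 10 seconds), unlinking the vport as soon as the reference count drains and forcing the unlink with a log message on timeout. The control flow, reduced to a standalone sketch with the locking elided; nothing here is driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

/* Poll-with-bailout: returns true once *refs drains to zero, false
 * after ~10s (500 x 20ms), at which point the caller unlinks anyway
 * and logs the stuck reference. */
static bool wait_refs_drained(atomic_int *refs)
{
    for (int i = 0; i < 500; i++) {
        if (atomic_load(refs) == 0)
            return true;    /* safe to unlink now */
        usleep(20 * 1000);  /* msleep(20) equivalent */
    }
    return false;
}

int main(void)
{
    atomic_int refs = 0;
    return wait_refs_drained(&refs) ? 0 : 1;
}
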
@@ -158,6 +166,10 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
        int ret = QLA_SUCCESS;
        fc_port_t *fcport;
 
+       if (vha->hw->flags.edif_enabled)
+               /* delete sessions and flush sa_indexes */
+               qla2x00_wait_for_sess_deletion(vha);
+
        if (vha->hw->flags.fw_started)
                ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
@@ -166,7 +178,8 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
        list_for_each_entry(fcport, &vha->vp_fcports, list)
                fcport->logout_on_delete = 0;
 
-       qla2x00_mark_all_devices_lost(vha);
+       if (!vha->hw->flags.edif_enabled)
+               qla2x00_wait_for_sess_deletion(vha);
 
        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -257,13 +270,13 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 void
 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-       scsi_qla_host_t *vha;
+       scsi_qla_host_t *vha, *tvp;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;
 
        spin_lock_irqsave(&ha->vport_slock, flags);
-       list_for_each_entry(vha, &ha->vp_list, list) {
+       list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        if (test_bit(VPORT_DELETE, &vha->dpc_flags))
                                continue;
@@ -416,7 +429,7 @@ void
 qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
-       scsi_qla_host_t *vp;
+       scsi_qla_host_t *vp, *tvp;
        unsigned long flags = 0;
 
        if (vha->vp_idx)
@@ -430,7 +443,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
                return;
 
        spin_lock_irqsave(&ha->vport_slock, flags);
-       list_for_each_entry(vp, &ha->vp_list, list) {
+       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
index 3e5c70a..1c5da2d 100644 (file)
@@ -91,8 +91,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
        struct qla_hw_data *ha;
        struct qla_qpair *qpair;
 
-       if (!qidx)
-               qidx++;
+       /* Map admin queue and 1st IO queue to index 0 */
+       if (qidx)
+               qidx--;
 
        vha = (struct scsi_qla_host *)lport->private;
        ha = vha->hw;
@@ -108,19 +109,24 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
                return -EINVAL;
        }
 
-       if (ha->queue_pair_map[qidx]) {
-               *handle = ha->queue_pair_map[qidx];
-               ql_log(ql_log_info, vha, 0x2121,
-                   "Returning existing qpair of %p for idx=%x\n",
-                   *handle, qidx);
-               return 0;
-       }
+       /* Use base qpair if max_qpairs is 0 */
+       if (!ha->max_qpairs) {
+               qpair = ha->base_qpair;
+       } else {
+               if (ha->queue_pair_map[qidx]) {
+                       *handle = ha->queue_pair_map[qidx];
+                       ql_log(ql_log_info, vha, 0x2121,
+                              "Returning existing qpair of %p for idx=%x\n",
+                              *handle, qidx);
+                       return 0;
+               }
 
-       qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
-       if (qpair == NULL) {
-               ql_log(ql_log_warn, vha, 0x2122,
-                   "Failed to allocate qpair\n");
-               return -EINVAL;
+               qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+               if (!qpair) {
+                       ql_log(ql_log_warn, vha, 0x2122,
+                              "Failed to allocate qpair\n");
+                       return -EINVAL;
+               }
        }
        *handle = qpair;
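
The queue-index handling above encodes a simple remap: the FC-NVMe transport numbers the admin queue 0 and IO queues from 1, while the driver folds the admin queue and the first IO queue onto hardware qpair 0 (or onto the base qpair when no extra qpairs exist). The mapping in isolation, as a sketch with an illustrative name:

#include <stdio.h>

/* Admin queue (qidx 0) and the first IO queue (qidx 1) share
 * hardware qpair 0; later IO queues shift down by one. */
static unsigned int nvme_qidx_to_qpair_idx(unsigned int qidx)
{
    return qidx ? qidx - 1 : 0;
}

int main(void)
{
    for (unsigned int q = 0; q < 4; q++)
        printf("qidx %u -> qpair %u\n", q, nvme_qidx_to_qpair_idx(q));
    return 0;  /* 0->0, 1->0, 2->1, 3->2 */
}
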
 
@@ -221,13 +227,13 @@ static void qla_nvme_abort_work(struct work_struct *work)
        srb_t *sp = priv->sp;
        fc_port_t *fcport = sp->fcport;
        struct qla_hw_data *ha = fcport->vha->hw;
-       int rval;
+       int rval, abts_done_called = 1;
 
        ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
-              "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
-              __func__, sp, sp->handle, fcport, fcport->deleted);
+              "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
+              __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
 
-       if (!ha->flags.fw_started || fcport->deleted)
+       if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
                goto out;
 
        if (ha->flags.host_shutting_down) {
@@ -245,12 +251,20 @@ static void qla_nvme_abort_work(struct work_struct *work)
            __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
            sp, sp->handle, fcport, rval);
 
+       /*
+        * If async tmf is enabled, the abort callback is called only on
+        * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
+        */
+       if (ql2xasynctmfenable &&
+           rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
+               abts_done_called = 0;
+
        /*
         * Return before decreasing the kref so that I/O requests are
         * waited on until the ABTS completes. The kref is decreased
         * in qla24xx_abort_sp_done().
         */
-       if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
+       if (abts_done_called && ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
                return;
 out:
        /* kref_get was done before work was schedule. */
@@ -463,6 +477,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
        } else if (fd->io_dir == 0) {
                cmd_pkt->control_flags = 0;
        }
+
+       if (sp->fcport->edif.enable && fd->io_dir != 0)
+               cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+
        /* Set BIT_13 of control flags for Async event */
        if (vha->flags.nvme2_enabled &&
            cmd->sqe.common.opcode == nvme_admin_async_event) {
@@ -727,18 +745,9 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
 
        WARN_ON(vha->nvme_local_port);
 
-       if (ha->max_req_queues < 3) {
-               if (!ha->flags.max_req_queue_warned)
-                       ql_log(ql_log_info, vha, 0x2120,
-                              "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
-                              __func__, ha->max_req_queues);
-               ha->flags.max_req_queue_warned = 1;
-               return ret;
-       }
-
        qla_nvme_fc_transport.max_hw_queues =
            min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
-               (uint8_t)(ha->max_req_queues - 2));
+               (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
 
        pinfo.node_name = wwn_to_u64(vha->node_name);
        pinfo.port_name = wwn_to_u64(vha->port_name);
@@ -803,14 +812,14 @@ void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *ori
        case CS_PORT_LOGGED_OUT:
        /* BA_RJT was received for the ABTS */
        case CS_PORT_CONFIG_CHG:
-               ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
+               ql_dbg(ql_dbg_async, vha, 0xf09d,
                       "Abort I/O IOCB completed with error, comp_status=%x\n",
                comp_status);
                break;
 
        /* BA_RJT was received for the ABTS */
        case CS_REJECT_RECEIVED:
-               ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
+               ql_dbg(ql_dbg_async, vha, 0xf09e,
                       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
                        abt->fw.ba_rjt_vendorUnique);
                ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
@@ -819,18 +828,18 @@ void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *ori
                break;
 
        case CS_COMPLETE:
-               ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
+               ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
                       "IOCB request is completed successfully comp_status=%x\n",
                comp_status);
                break;
 
        case CS_IOCB_ERROR:
-               ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
+               ql_dbg(ql_dbg_async, vha, 0xf0a0,
                       "IOCB request is failed, comp_status=%x\n", comp_status);
                break;
 
        default:
-               ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
+               ql_dbg(ql_dbg_async, vha, 0xf0a1,
                       "Invalid Abort IO IOCB Completion Status %x\n",
                comp_status);
                break;
index 615e44a..11aad97 100644 (file)
@@ -2166,7 +2166,6 @@ qla82xx_poll(int irq, void *dev_id)
        struct qla_hw_data *ha;
        struct rsp_que *rsp;
        struct device_reg_82xx __iomem *reg;
-       int status = 0;
        uint32_t stat;
        uint32_t host_int = 0;
        uint16_t mb[8];
@@ -2195,7 +2194,6 @@ qla82xx_poll(int irq, void *dev_id)
                case 0x10:
                case 0x11:
                        qla82xx_mbx_completion(vha, MSW(stat));
-                       status |= MBX_INTERRUPT;
                        break;
                case 0x12:
                        mb[0] = MSW(stat);
index cedd558..d2e40aa 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/blk-mq-pci.h>
 #include <linux/refcount.h>
+#include <linux/crash_dump.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -53,6 +54,11 @@ static struct kmem_cache *ctx_cachep;
  */
 uint ql_errlev = 0x8001;
 
+int ql2xsecenable;
+module_param(ql2xsecenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xsecenable,
+       "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");
+
 static int ql2xenableclass2;
 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xenableclass2,
@@ -849,7 +855,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                uint16_t hwq;
                struct qla_qpair *qpair = NULL;
 
-               tag = blk_mq_unique_tag(cmd->request);
+               tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
                hwq = blk_mq_unique_tag_to_hwq(tag);
                qpair = ha->queue_pair_map[hwq];
 
@@ -1120,12 +1126,28 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        int res;
+       /* Return 0 to keep sleeping, non-zero to wake */
 
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        ql_dbg(ql_dbg_init, vha, 0x00ec,
            "tgt %p, fcport_count=%d\n",
            vha, vha->fcport_count);
        res = (vha->fcport_count == 0);
+       if (res) {
+               struct fc_port *fcport;
+
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       if (fcport->deleted != QLA_SESS_DELETED) {
+                               /* Session(s) may not be fully logged in
+                                * (i.e. fcport_count=0), but session
+                                * deletion thread(s) may still be in flight.
+                                */
+
+                               res = 0;
+                               break;
+                       }
+               }
+       }
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 
        return res;
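
The strengthened predicate matters because fcport_count can reach zero while deletion work items are still running; the waiter must also see every port marked QLA_SESS_DELETED before proceeding. Modeled standalone below; the structs and the enum value are mocks, not driver definitions:

#include <stdbool.h>
#include <stdio.h>

enum { QLA_SESS_DELETED = 2 };  /* assumed value, illustrative */

struct mock_fcport { int deleted; };

/* Wake only when the count is zero AND no teardown is in flight. */
static bool all_sessions_gone(int fcport_count,
                              const struct mock_fcport *ports, int n)
{
    if (fcport_count != 0)
        return false;
    for (int i = 0; i < n; i++)
        if (ports[i].deleted != QLA_SESS_DELETED)
            return false;  /* deletion still in flight */
    return true;
}

int main(void)
{
    struct mock_fcport ports[] = { { QLA_SESS_DELETED }, { 0 } };
    printf("%d\n", all_sessions_gone(0, ports, 2));  /* prints 0 */
    return 0;
}
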
@@ -1367,18 +1389,27 @@ static char *reset_errors[] = {
 };
 
 static int
-__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
-    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
+qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       struct scsi_device *sdev = cmd->device;
+       scsi_qla_host_t *vha = shost_priv(sdev->host);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+       struct qla_hw_data *ha = vha->hw;
        int err;
 
+       if (qla2x00_isp_reg_stat(ha)) {
+               ql_log(ql_log_info, vha, 0x803e,
+                   "PCI/Register disconnect, exiting.\n");
+               qla_pci_set_eeh_busy(vha);
+               return FAILED;
+       }
+
        if (!fcport) {
                return FAILED;
        }
 
-       err = fc_block_scsi_eh(cmd);
+       err = fc_block_rport(rport);
        if (err != 0)
                return err;
 
@@ -1386,8 +1417,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                return SUCCESS;
 
        ql_log(ql_log_info, vha, 0x8009,
-           "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
-           cmd->device->id, cmd->device->lun, cmd);
+           "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
+           sdev->id, sdev->lun, cmd);
 
        err = 0;
        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -1396,67 +1427,100 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                goto eh_reset_failed;
        }
        err = 2;
-       if (do_reset(fcport, cmd->device->lun, 1)
+       if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
                != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x800c,
                    "do_reset failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
        err = 3;
-       if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
-           cmd->device->lun, type) != QLA_SUCCESS) {
+       if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+           sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x800d,
                    "wait for pending cmds failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
 
        ql_log(ql_log_info, vha, 0x800e,
-           "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
-           vha->host_no, cmd->device->id, cmd->device->lun, cmd);
+           "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
+           vha->host_no, sdev->id, sdev->lun, cmd);
 
        return SUCCESS;
 
 eh_reset_failed:
        ql_log(ql_log_info, vha, 0x800f,
-           "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
-           reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+           "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
+           reset_errors[err], vha->host_no, sdev->id, sdev->lun,
            cmd);
        vha->reset_cmd_err_cnt++;
        return FAILED;
 }
 
 static int
-qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
 {
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+       struct scsi_device *sdev = cmd->device;
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
        struct qla_hw_data *ha = vha->hw;
+       fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+       int err;
 
        if (qla2x00_isp_reg_stat(ha)) {
-               ql_log(ql_log_info, vha, 0x803e,
+               ql_log(ql_log_info, vha, 0x803f,
                    "PCI/Register disconnect, exiting.\n");
                qla_pci_set_eeh_busy(vha);
                return FAILED;
        }
 
-       return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
-           ha->isp_ops->lun_reset);
-}
+       if (!fcport) {
+               return FAILED;
+       }
 
-static int
-qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
-{
-       scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-       struct qla_hw_data *ha = vha->hw;
+       err = fc_block_rport(rport);
+       if (err != 0)
+               return err;
 
-       if (qla2x00_isp_reg_stat(ha)) {
-               ql_log(ql_log_info, vha, 0x803f,
-                   "PCI/Register disconnect, exiting.\n");
-               qla_pci_set_eeh_busy(vha);
-               return FAILED;
+       if (fcport->deleted)
+               return SUCCESS;
+
+       ql_log(ql_log_info, vha, 0x8009,
+           "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
+           sdev->id, cmd);
+
+       err = 0;
+       if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x800a,
+                   "Wait for hba online failed for cmd=%p.\n", cmd);
+               goto eh_reset_failed;
        }
+       err = 2;
+       if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x800c,
+                   "target_reset failed for cmd=%p.\n", cmd);
+               goto eh_reset_failed;
+       }
+       err = 3;
+       if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
+           0, WAIT_TARGET) != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x800d,
+                   "wait for pending cmds failed for cmd=%p.\n", cmd);
+               goto eh_reset_failed;
+       }
+
+       ql_log(ql_log_info, vha, 0x800e,
+           "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
+           vha->host_no, sdev->id, cmd);
+
+       return SUCCESS;
 
-       return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
-           ha->isp_ops->target_reset);
+eh_reset_failed:
+       ql_log(ql_log_info, vha, 0x800f,
+           "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
+           reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+           cmd);
+       vha->reset_cmd_err_cnt++;
+       return FAILED;
 }
 
 /**************************************************************************
@@ -1478,7 +1542,6 @@ static int
 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 {
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
-       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
        int ret = FAILED;
        unsigned int id;
        uint64_t lun;
@@ -1494,15 +1557,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
        id = cmd->device->id;
        lun = cmd->device->lun;
 
-       if (!fcport) {
-               return ret;
-       }
-
-       ret = fc_block_scsi_eh(cmd);
-       if (ret != 0)
-               return ret;
-       ret = FAILED;
-
        if (qla2x00_chip_is_down(vha))
                return ret;
 
@@ -1742,7 +1796,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
                }
 
                spin_lock_irqsave(qp->qp_lock_ptr, *flags);
-               if (ret_cmd && blk_mq_request_started(cmd->request))
+               if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
                        sp->done(sp, res);
        } else {
                sp->done(sp, res);
@@ -2818,6 +2872,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                        return ret;
        }
 
+       if (is_kdump_kernel()) {
+               ql2xmqsupport = 0;
+               ql2xallocfwdump = 0;
+       }
+
        /* This may fail but that's ok */
        pci_enable_pcie_error_reporting(pdev);
 
@@ -2835,6 +2894,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        spin_lock_init(&ha->tgt.sess_lock);
        spin_lock_init(&ha->tgt.atio_lock);
 
+       spin_lock_init(&ha->sadb_lock);
+       INIT_LIST_HEAD(&ha->sadb_tx_index_list);
+       INIT_LIST_HEAD(&ha->sadb_rx_index_list);
+
+       spin_lock_init(&ha->sadb_fp_lock);
+
+       if (qla_edif_sadb_build_free_pool(ha)) {
+               kfree(ha);
+               goto disable_device;
+       }
+
        atomic_set(&ha->nvme_active_aen_cnt, 0);
 
        /* Clear our data area */
@@ -3033,8 +3103,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->portnum = PCI_FUNC(ha->pdev->devfn);
                ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
-               req_length = REQUEST_ENTRY_CNT_24XX;
-               rsp_length = RESPONSE_ENTRY_CNT_2300;
+               req_length = REQUEST_ENTRY_CNT_83XX;
+               rsp_length = RESPONSE_ENTRY_CNT_83XX;
                ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
@@ -3460,6 +3530,8 @@ skip_dpc:
        return 0;
 
 probe_failed:
+       qla_enode_stop(base_vha);
+       qla_edb_stop(base_vha);
        if (base_vha->gnl.l) {
                dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
                                base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3762,6 +3834,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
                base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
        base_vha->gnl.l = NULL;
+       qla_enode_stop(base_vha);
+       qla_edb_stop(base_vha);
 
        vfree(base_vha->scan.l);
 
@@ -3795,7 +3869,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
        qla2x00_free_sysfs_attr(base_vha, true);
 
        fc_remove_host(base_vha->host);
-       qlt_remove_target_resources(ha);
 
        scsi_remove_host(base_vha->host);
 
@@ -3867,6 +3940,9 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 
        qla82xx_md_free(vha);
 
+       qla_edif_sadb_release_free_pool(ha);
+       qla_edif_sadb_release(ha);
+
        qla2x00_free_queues(ha);
 }
 
@@ -3919,6 +3995,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
                qla2x00_schedule_rport_del(vha, fcport);
        }
+
        /*
         * We may need to retry the login, so don't change the state of the
         * port but do the retries.
@@ -3941,6 +4018,16 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
            "Mark all dev lost\n");
 
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
+               if (fcport->loop_id != FC_NO_LOOP_ID &&
+                   (fcport->flags & FCF_FCP2_DEVICE) &&
+                   fcport->port_type == FCT_TARGET &&
+                   !qla2x00_reset_active(vha)) {
+                       ql_dbg(ql_dbg_disc, vha, 0x211a,
+                              "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
+                              fcport->flags, fcport->port_type,
+                              fcport->d_id.b24, fcport->port_name);
+                       continue;
+               }
                fcport->scan_state = 0;
                qlt_schedule_sess_for_deletion(fcport);
        }
@@ -3972,15 +4059,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        struct req_que **req, struct rsp_que **rsp)
 {
        char    name[16];
+       int rc;
 
        ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
                &ha->init_cb_dma, GFP_KERNEL);
        if (!ha->init_cb)
                goto fail;
 
-       if (qlt_mem_alloc(ha) < 0)
+       rc = btree_init32(&ha->host_map);
+       if (rc)
                goto fail_free_init_cb;
 
+       if (qlt_mem_alloc(ha) < 0)
+               goto fail_free_btree;
+
        ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
                qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
        if (!ha->gid_list)
@@ -3990,7 +4082,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        if (!ha->srb_mempool)
                goto fail_free_gid_list;
 
-       if (IS_P3P_TYPE(ha)) {
+       if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
                /* Allocate cache for CT6 Ctx. */
                if (!ctx_cachep) {
                        ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -4024,7 +4116,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
            "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
            ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
 
-       if (IS_P3P_TYPE(ha) || ql2xenabledif) {
+       if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
                ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
                        DSD_LIST_DMA_POOL_SIZE, 8, 0);
                if (!ha->dl_dma_pool) {
@@ -4264,8 +4356,36 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                goto fail_flt_buffer;
        }
 
+       /* allocate the purex dma pool */
+       ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+           MAX_PAYLOAD, 8, 0);
+
+       if (!ha->purex_dma_pool) {
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+                   "Unable to allocate purex_dma_pool.\n");
+               goto fail_flt;
+       }
+
+       ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
+       ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
+           ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
+
+       if (!ha->elsrej.c) {
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+                   "Alloc failed for els reject cmd.\n");
+               goto fail_elsrej;
+       }
+       ha->elsrej.c->er_cmd = ELS_LS_RJT;
+       ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
+       ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
        return 0;
 
+fail_elsrej:
+       dma_pool_destroy(ha->purex_dma_pool);
+fail_flt:
+       dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+           ha->flt, ha->flt_dma);
+
 fail_flt_buffer:
        dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
            ha->sfp_data, ha->sfp_data_dma);
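
The new fail_elsrej/fail_flt labels extend the allocation function's unwind ladder: allocations are made top-down and released bottom-up, so each label frees exactly what was live when its step failed. The idiom in miniature (a toy, not driver code):

#include <stdlib.h>

/* Each allocation gets a matching label; a failure at step N jumps
 * to the label that releases steps N-1..1 in reverse order. */
static int setup(char **a, char **b, char **c)
{
    *a = malloc(16);
    if (!*a)
        goto fail;
    *b = malloc(16);    /* e.g. the purex DMA pool */
    if (!*b)
        goto fail_a;
    *c = malloc(16);    /* e.g. the elsrej buffer */
    if (!*c)
        goto fail_b;
    return 0;

fail_b:
    free(*b);
fail_a:
    free(*a);
fail:
    return -1;
}

int main(void)
{
    char *a, *b, *c;

    if (setup(&a, &b, &c))
        return 1;
    free(c); free(b); free(a);  /* normal teardown, reverse order */
    return 0;
}
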
@@ -4356,6 +4476,8 @@ fail_free_gid_list:
        ha->gid_list_dma = 0;
 fail_free_tgt_mem:
        qlt_mem_free(ha);
+fail_free_btree:
+       btree_destroy32(&ha->host_map);
 fail_free_init_cb:
        dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
        ha->init_cb_dma);
@@ -4772,10 +4894,21 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        ha->dif_bundl_pool = NULL;
 
        qlt_mem_free(ha);
+       qla_remove_hostmap(ha);
 
        if (ha->init_cb)
                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
                        ha->init_cb, ha->init_cb_dma);
+
+       dma_pool_destroy(ha->purex_dma_pool);
+       ha->purex_dma_pool = NULL;
+
+       if (ha->elsrej.c) {
+               dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+                   ha->elsrej.c, ha->elsrej.cdma);
+               ha->elsrej.c = NULL;
+       }
+
        ha->init_cb = NULL;
        ha->init_cb_dma = 0;
 
@@ -4837,6 +4970,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        spin_lock_init(&vha->cmd_list_lock);
        init_waitqueue_head(&vha->fcport_waitQ);
        init_waitqueue_head(&vha->vref_waitq);
+       qla_enode_init(vha);
+       qla_edb_init(vha);
 
        vha->gnl.size = sizeof(struct get_name_list_extended) *
                        (ha->max_loop_id + 1);
@@ -5080,6 +5216,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
                            WWN_SIZE);
 
                        fcport->fc4_type = e->u.new_sess.fc4_type;
+                       if (NVME_PRIORITY(vha->hw, fcport))
+                               fcport->do_prli_nvme = 1;
+                       else
+                               fcport->do_prli_nvme = 0;
+
                        if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
                                fcport->dm_login_expire = jiffies +
                                        QLA_N2N_WAIT_TIME * HZ;
@@ -5327,6 +5468,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                        qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
                            e->u.fcport.fcport, false);
                        break;
+               case QLA_EVT_SA_REPLACE:
+                       qla24xx_issue_sa_replace_iocb(vha, e);
+                       break;
                }
 
                if (rc == EAGAIN) {
@@ -5376,6 +5520,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
                if (atomic_read(&fcport->state) != FCS_ONLINE &&
                    fcport->login_retry) {
                        if (fcport->scan_state != QLA_FCPORT_FOUND ||
+                           fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
                            fcport->disc_state == DSC_LOGIN_COMPLETE)
                                continue;
 
@@ -7234,6 +7379,10 @@ qla2x00_timer(struct timer_list *t)
                }
        }
 
+       /* check if edif is running */
+       if (vha->hw->flags.edif_enabled)
+               qla_edif_timer(vha);
+
        /* Process any deferred work. */
        if (!list_empty(&vha->work_list)) {
                unsigned long flags;
@@ -7430,7 +7579,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
        struct qla_qpair *qpair = NULL;
-       struct scsi_qla_host *vp;
+       struct scsi_qla_host *vp, *tvp;
        fc_port_t *fcport;
        int i;
        unsigned long flags;
@@ -7461,7 +7610,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
        qla2x00_mark_all_devices_lost(vha);
 
        spin_lock_irqsave(&ha->vport_slock, flags);
-       list_for_each_entry(vp, &ha->vp_list, list) {
+       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                atomic_inc(&vp->vref_count);
                spin_unlock_irqrestore(&ha->vport_slock, flags);
                qla2x00_mark_all_devices_lost(vp);
@@ -7475,7 +7624,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
                fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
        spin_lock_irqsave(&ha->vport_slock, flags);
-       list_for_each_entry(vp, &ha->vp_list, list) {
+       list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
                atomic_inc(&vp->vref_count);
                spin_unlock_irqrestore(&ha->vport_slock, flags);
                list_for_each_entry(fcport, &vp->vp_fcports, list)
@@ -7887,7 +8036,7 @@ qla2x00_module_init(void)
        BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
        BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
        BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
-       BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
+       BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
        BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
        BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
        BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
index 060c892..a0aeba6 100644 (file)
@@ -2936,7 +2936,6 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
                liter += dburst - 1;
                faddr += dburst - 1;
                dwptr += dburst - 1;
-               continue;
        }
 
 write_protect:
index eb47140..b3478ed 100644 (file)
@@ -184,8 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
        return QLA_SUCCESS;
 }
 
-static inline
-struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
                                            be_id_t d_id)
 {
        struct scsi_qla_host *host;
@@ -198,7 +197,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
 
        key = be_to_port_id(d_id).b24;
 
-       host = btree_lookup32(&vha->hw->tgt.host_map, key);
+       host = btree_lookup32(&vha->hw->host_map, key);
        if (!host)
                ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
                    "Unable to find host %06x\n", key);
@@ -299,7 +298,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
                        goto abort;
                }
 
-               host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+               host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
                if (host != NULL) {
                        ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
                            "Requeuing unknown ATIO_TYPE7 %p\n", u);
@@ -348,7 +347,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
        {
-               struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+               struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
                    atio->u.isp24.fcp_hdr.d_id);
                if (unlikely(NULL == host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe03e,
@@ -577,6 +576,18 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
                sp->fcport->logout_on_delete = 1;
                sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
                sp->fcport->send_els_logo = 0;
+
+               if (sp->fcport->flags & FCF_FCSP_DEVICE) {
+                       ql_dbg(ql_dbg_edif, vha, 0x20ef,
+                           "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
+                           sp->fcport->port_name);
+                       qla2x00_set_fcport_disc_state(sp->fcport,
+                           DSC_LOGIN_AUTH_PEND);
+                       qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+                           sp->fcport->d_id.b24);
+                       qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
+                           0, sp->fcport);
+               }
                break;
 
        case SRB_NACK_PRLI:
@@ -624,6 +635,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
        case SRB_NACK_PLOGI:
                fcport->fw_login_state = DSC_LS_PLOGI_PEND;
                c = "PLOGI";
+               if (vha->hw->flags.edif_enabled &&
+                   (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
+                       fcport->flags |= FCF_FCSP_DEVICE;
                break;
        case SRB_NACK_PRLI:
                fcport->fw_login_state = DSC_LS_PRLI_PEND;
@@ -693,7 +707,12 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 void qla24xx_delete_sess_fn(struct work_struct *work)
 {
        fc_port_t *fcport = container_of(work, struct fc_port, del_work);
-       struct qla_hw_data *ha = fcport->vha->hw;
+       struct qla_hw_data *ha = NULL;
+
+       if (!fcport || !fcport->vha || !fcport->vha->hw)
+               return;
+
+       ha = fcport->vha->hw;
 
        if (fcport->se_sess) {
                ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -917,6 +936,11 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
        qlt_port_logo_t *tmp;
        int res;
 
+       if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
+               res = 0;
+               goto out;
+       }
+
        mutex_lock(&vha->vha_tgt.tgt_mutex);
 
        list_for_each_entry(tmp, &vha->logo_list, list) {
@@ -937,6 +961,7 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
        list_del(&logo->list);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
+out:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
            "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
            logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
@@ -965,6 +990,21 @@ void qlt_free_session_done(struct work_struct *work)
                sess->send_els_logo);
 
        if (!IS_SW_RESV_ADDR(sess->d_id)) {
+               if (ha->flags.edif_enabled &&
+                   (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+                       sess->edif.authok = 0;
+                       if (!ha->flags.host_shutting_down) {
+                               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                                       "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+                                       __func__, sess->port_name);
+                               qla2x00_release_all_sadb(vha, sess);
+                       } else {
+                               ql_dbg(ql_dbg_edif, vha, 0x911e,
+                                       "%s bypassing release_all_sadb\n",
+                                       __func__);
+                       }
+                       qla_edif_sess_down(vha, sess);
+               }
                qla2x00_mark_device_lost(vha, sess, 0);
 
                if (sess->send_els_logo) {
@@ -972,6 +1012,7 @@ void qlt_free_session_done(struct work_struct *work)
 
                        logo.id = sess->d_id;
                        logo.cmd_count = 0;
+                       INIT_LIST_HEAD(&logo.list);
                        if (!own)
                                qlt_send_first_logo(vha, &logo);
                        sess->send_els_logo = 0;
@@ -982,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work)
 
                        if (!own ||
                             (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
+                               sess->logout_completed = 0;
                                rc = qla2x00_post_async_logout_work(vha, sess,
                                    NULL);
                                if (rc != QLA_SUCCESS)
@@ -1278,8 +1320,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
        qla24xx_chk_fcp_state(sess);
 
        ql_dbg(ql_log_warn, sess->vha, 0xe001,
-           "Scheduling sess %p for deletion %8phC\n",
-           sess, sess->port_name);
+           "Scheduling sess %p for deletion %8phC fc4_type %x\n",
+           sess, sess->port_name, sess->fc4_type);
 
        WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
 }
@@ -1720,6 +1762,12 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
        nack->u.isp24.srr_reject_code_expl = srr_explan;
        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
 
+       /* TODO qualify this with EDIF enable */
+       if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
+           (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+               nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
+       }
+
        ql_dbg(ql_dbg_tgt, vha, 0xe005,
            "qla_target(%d): Sending 24xx Notify Ack %d\n",
            vha->vp_idx, nack->u.isp24.status);
@@ -2571,6 +2619,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
        struct ctio7_to_24xx *pkt;
        struct atio_from_isp *atio = &prm->cmd->atio;
        uint16_t temp;
+       struct qla_tgt_cmd      *cmd = prm->cmd;
 
        pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
        prm->pkt = pkt;
@@ -2603,6 +2652,15 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
        pkt->u.status0.ox_id = cpu_to_le16(temp);
        pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
 
+       if (cmd->edif) {
+               if (cmd->dma_data_direction == DMA_TO_DEVICE)
+                       prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
+               if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+                       prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
+
+               pkt->u.status0.edif_flags |= EF_EN_EDIF;
+       }
+
        return 0;
 }
 
@@ -3293,8 +3351,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        if (xmit_type & QLA_TGT_XMIT_STATUS) {
                                pkt->u.status0.scsi_status =
                                    cpu_to_le16(prm.rq_result);
-                               pkt->u.status0.residual =
-                                   cpu_to_le32(prm.residual);
+                               if (!cmd->edif)
+                                       pkt->u.status0.residual =
+                                               cpu_to_le32(prm.residual);
+
                                pkt->u.status0.flags |= cpu_to_le16(
                                    CTIO7_FLAGS_SEND_STATUS);
                                if (qlt_need_explicit_conf(cmd, 0)) {
@@ -3941,6 +4001,12 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
        if (cmd == NULL)
                return;
 
+       if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
+           cmd->sess) {
+               qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
+                   (struct ctio7_from_24xx *)ctio);
+       }
+
        se_cmd = &cmd->se_cmd;
        cmd->cmd_sent_to_fw = 0;
 
@@ -4011,6 +4077,16 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
                        qlt_handle_dif_error(qpair, cmd, ctio);
                        return;
                }
+
+               case CTIO_FAST_AUTH_ERR:
+               case CTIO_FAST_INCOMP_PAD_LEN:
+               case CTIO_FAST_INVALID_REQ:
+               case CTIO_FAST_SPI_ERR:
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+                           "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
+                           vha->vp_idx, status, cmd->state, se_cmd);
+                       break;
+
                default:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
                            "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
@@ -4312,6 +4388,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
        qlt_assign_qpair(vha, cmd);
        cmd->reset_count = vha->hw->base_qpair->chip_reset;
        cmd->vp_idx = vha->vp_idx;
+       cmd->edif = sess->edif.enable;
 
        return cmd;
 }
@@ -4727,6 +4804,34 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
                goto out;
        }
 
+       if (vha->hw->flags.edif_enabled &&
+           !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
+           iocb->u.isp24.status_subcode == ELS_PLOGI &&
+           !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                       "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
+                       __func__, __LINE__, loop_id, port_id.b24);
+               qlt_send_term_imm_notif(vha, iocb, 1);
+               goto out;
+       }
+
+       if (vha->hw->flags.edif_enabled) {
+               if (!(vha->e_dbell.db_flags & EDB_ACTIVE)) {
+                       ql_dbg(ql_dbg_disc, vha, 0xffff,
+                              "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
+                              __func__, __LINE__, loop_id, port_id.b24);
+                       qlt_send_term_imm_notif(vha, iocb, 1);
+                       goto out;
+               } else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
+                          !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
+                       ql_dbg(ql_dbg_disc, vha, 0xffff,
+                              "%s %d Term INOT due to unsecured lid=%d, NportID %06X ",
+                              __func__, __LINE__, loop_id, port_id.b24);
+                       qlt_send_term_imm_notif(vha, iocb, 1);
+                       goto out;
+               }
+       }
+
        pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
        if (!pla) {
                ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
@@ -4792,6 +4897,20 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
        qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
        sess->d_id = port_id;
        sess->login_gen++;
+       sess->loop_id = loop_id;
+
+       if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
+               /* remote port has assigned Port ID */
+               if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
+                       vha->d_id = sess->d_id;
+
+               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                   "%s %8phC - send port online\n",
+                   __func__, sess->port_name);
+
+               qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+                   sess->d_id.b24);
+       }
 
        if (iocb->u.isp24.status_subcode == ELS_PRLI) {
                sess->fw_login_state = DSC_LS_PRLI_PEND;
@@ -4904,6 +5023,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                        sess = qla2x00_find_fcport_by_wwpn(vha,
                            iocb->u.isp24.port_name, 1);
 
+                       if (vha->hw->flags.edif_enabled && sess &&
+                           (!(sess->flags & FCF_FCSP_DEVICE) ||
+                            !sess->edif.authok)) {
+                               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                                      "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
+                                      __func__, __LINE__, iocb->u.isp24.port_name);
+                               qlt_send_term_imm_notif(vha, iocb, 1);
+                               break;
+                       }
+
                        if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
                                ql_dbg(ql_dbg_disc, vha, 0xffff,
                                    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
@@ -4952,6 +5081,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                        bool delete = false;
                        int sec;
 
+                       if (vha->hw->flags.edif_enabled && sess &&
+                           (!(sess->flags & FCF_FCSP_DEVICE) ||
+                            !sess->edif.authok)) {
+                               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                                      "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
+                                      __func__, __LINE__, iocb->u.isp24.port_name);
+                               qlt_send_term_imm_notif(vha, iocb, 1);
+                               break;
+                       }
+
                        spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
                        switch (sess->fw_login_state) {
                        case DSC_LS_PLOGI_PEND:
@@ -5141,7 +5280,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 }
 
 /*
- * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
+ * ha->hardware_lock is supposed to be held on entry.
+ * Might drop it, then reacquire.
  */
 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *iocb)
@@ -6444,15 +6584,15 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
        return 0;
 }
 
-void qlt_remove_target_resources(struct qla_hw_data *ha)
+void qla_remove_hostmap(struct qla_hw_data *ha)
 {
        struct scsi_qla_host *node;
        u32 key = 0;
 
-       btree_for_each_safe32(&ha->tgt.host_map, key, node)
-               btree_remove32(&ha->tgt.host_map, key);
+       btree_for_each_safe32(&ha->host_map, key, node)
+               btree_remove32(&ha->host_map, key);
 
-       btree_destroy32(&ha->tgt.host_map);
+       btree_destroy32(&ha->host_map);
 }
 
 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
@@ -7080,8 +7220,7 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
 void
 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 {
-       int rc;
-
+       mutex_init(&base_vha->vha_tgt.tgt_mutex);
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
@@ -7094,7 +7233,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
                ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
        }
 
-       mutex_init(&base_vha->vha_tgt.tgt_mutex);
        mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
 
        INIT_LIST_HEAD(&base_vha->unknown_atio_list);
@@ -7103,11 +7241,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 
        qlt_clear_mode(base_vha);
 
-       rc = btree_init32(&ha->tgt.host_map);
-       if (rc)
-               ql_log(ql_log_info, base_vha, 0xd03d,
-                   "Unable to initialize ha->host_map btree\n");
-
        qlt_update_vp_map(base_vha, SET_VP_IDX);
 }
 
@@ -7228,21 +7361,20 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
        u32 key;
        int rc;
 
-       if (!QLA_TGT_MODE_ENABLED())
-               return;
-
        key = vha->d_id.b24;
 
        switch (cmd) {
        case SET_VP_IDX:
+               if (!QLA_TGT_MODE_ENABLED())
+                       return;
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
                break;
        case SET_AL_PA:
-               slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+               slot = btree_lookup32(&vha->hw->host_map, key);
                if (!slot) {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
                            "Save vha in host_map %p %06x\n", vha, key);
-                       rc = btree_insert32(&vha->hw->tgt.host_map,
+                       rc = btree_insert32(&vha->hw->host_map,
                                key, vha, GFP_ATOMIC);
                        if (rc)
                                ql_log(ql_log_info, vha, 0xd03e,
@@ -7252,17 +7384,19 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
                }
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
                    "replace existing vha in host_map %p %06x\n", vha, key);
-               btree_update32(&vha->hw->tgt.host_map, key, vha);
+               btree_update32(&vha->hw->host_map, key, vha);
                break;
        case RESET_VP_IDX:
+               if (!QLA_TGT_MODE_ENABLED())
+                       return;
                vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
                break;
        case RESET_AL_PA:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
                   "clear vha in host_map %p %06x\n", vha, key);
-               slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+               slot = btree_lookup32(&vha->hw->host_map, key);
                if (slot)
-                       btree_remove32(&vha->hw->tgt.host_map, key);
+                       btree_remove32(&vha->hw->host_map, key);
                vha->d_id.b24 = 0;
                break;
        }
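
The hunk above moves the 24-bit port-ID btree from the target-only ha->tgt struct to ha->host_map, and gates only the VP-index cases on QLA_TGT_MODE_ENABLED, so the AL_PA cases now run in initiator mode as well. A minimal sketch of the resulting lookup pattern, using qla_find_host_by_d_id() as a hypothetical helper name:

	/* Hypothetical helper: resolve a scsi_qla_host from a 24-bit port ID.
	 * Assumes ha->host_map was initialized with btree_init32() during
	 * adapter setup (not shown in this hunk). */
	static struct scsi_qla_host *qla_find_host_by_d_id(struct qla_hw_data *ha,
							   port_id_t d_id)
	{
		/* btree_lookup32() returns the stored pointer or NULL. */
		return btree_lookup32(&ha->host_map, d_id.b24);
	}
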
index 01620f3..156b950 100644
@@ -176,6 +176,7 @@ struct nack_to_isp {
        uint8_t  reserved[2];
        __le16  ox_id;
 } __packed;
+#define NOTIFY_ACK_FLAGS_FCSP          BIT_5
 #define NOTIFY_ACK_FLAGS_TERMINATE     BIT_3
 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT    0
 #define NOTIFY_ACK_SRR_FLAGS_REJECT    1
@@ -238,6 +239,10 @@ struct ctio_to_2xxx {
 #define CTIO_PORT_LOGGED_OUT           0x29
 #define CTIO_PORT_CONF_CHANGED         0x2A
 #define CTIO_SRR_RECEIVED              0x45
+#define CTIO_FAST_AUTH_ERR             0x63
+#define CTIO_FAST_INCOMP_PAD_LEN       0x65
+#define CTIO_FAST_INVALID_REQ          0x66
+#define CTIO_FAST_SPI_ERR              0x67
 #endif
 
 #ifndef CTIO_RET_TYPE
@@ -408,7 +413,16 @@ struct ctio7_to_24xx {
                struct {
                        __le16  reserved1;
                        __le16 flags;
-                       __le32  residual;
+                       union {
+                               __le32  residual;
+                               struct {
+                                       uint8_t rsvd1;
+                                       uint8_t edif_flags;
+#define EF_EN_EDIF     BIT_0
+#define EF_NEW_SA      BIT_1
+                                       uint16_t rsvd2;
+                               };
+                       };
                        __le16 ox_id;
                        __le16  scsi_status;
                        __le32  relative_offset;
@@ -446,7 +460,7 @@ struct ctio7_from_24xx {
        uint8_t  vp_index;
        uint8_t  reserved1[5];
        __le32  exchange_address;
-       __le16  reserved2;
+       __le16  edif_sa_index;
        __le16  flags;
        __le32  residual;
        __le16  ox_id;
@@ -875,6 +889,7 @@ struct qla_tgt_cmd {
        unsigned int term_exchg:1;
        unsigned int cmd_sent_to_fw:1;
        unsigned int cmd_in_wq:1;
+       unsigned int edif:1;
 
        /*
         * This variable may be set from outside the LIO and I/O completion
index da11829..055040c 100644
@@ -6,9 +6,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.02.00.106-k"
+#define QLA2XXX_VERSION      "10.02.06.200-k"
 
 #define QLA_DRIVER_MAJOR_VER   10
 #define QLA_DRIVER_MINOR_VER   2
-#define QLA_DRIVER_PATCH_VER   0
-#define QLA_DRIVER_BETA_VER    106
+#define QLA_DRIVER_PATCH_VER   6
+#define QLA_DRIVER_BETA_VER    200
index f786ac2..301bc09 100644
@@ -119,8 +119,8 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
                 * the interrupt_handler to think there are responses to be
                 * processed when there aren't.
                 */
-               ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
-               ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+               ha->shadow_regs->req_q_out = cpu_to_le32(0);
+               ha->shadow_regs->rsp_q_in = cpu_to_le32(0);
                wmb();
 
                writel(0, &ha->reg->req_q_in);
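
This and the following qla4xxx hunks are mechanical: cpu_to_le16()/cpu_to_le32() already fold to compile-time constants when given constant arguments, so the __constant_* spellings buy nothing. For illustration:

	/* Both initializers yield the same compile-time constant on any
	 * endianness; the __constant_ prefix is an implementation detail. */
	__le32 zero_a = cpu_to_le32(0);
	__le32 zero_b = __constant_cpu_to_le32(0);	/* equivalent, now dropped */
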
index cbd1e6f..28eab07 100644
@@ -160,7 +160,7 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
 
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                /* No data being transferred */
-               cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+               cmd_entry->ttlByteCnt = cpu_to_le32(0);
                return;
        }
 
@@ -288,7 +288,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
        /* Acquire hardware specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       index = (uint32_t)cmd->request->tag;
+       index = scsi_cmd_to_rq(cmd)->tag;
 
        /*
         * Check to see if adapter is online before placing request on
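
This and many later hunks replace direct cmd->request reads with the new scsi_cmd_to_rq() accessor, which computes the request from the command's placement instead of loading a stored back-pointer. A sketch of such an accessor, assuming the scsi_cmnd is allocated as the request payload directly behind struct request (the real definition lives in include/scsi/scsi_cmnd.h):

	/* Sketch only: with the command embedded right after its request in
	 * a single allocation, the request address can be computed. */
	static inline struct request *scsi_cmd_to_rq_sketch(struct scsi_cmnd *scmd)
	{
		return (struct request *)((void *)scmd - sizeof(struct request));
	}
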
index 187d78a..cd71074 100644
@@ -645,8 +645,8 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
        /* Fill in the request and response queue information. */
        init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
        init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
-       init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
-       init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+       init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH);
+       init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH);
        init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
        init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
        init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
@@ -656,20 +656,20 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
 
        /* Set up required options. */
        init_fw_cb->fw_options |=
-               __constant_cpu_to_le16(FWOPT_SESSION_MODE |
-                                      FWOPT_INITIATOR_MODE);
+               cpu_to_le16(FWOPT_SESSION_MODE |
+                           FWOPT_INITIATOR_MODE);
 
        if (is_qla80XX(ha))
                init_fw_cb->fw_options |=
-                   __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+                   cpu_to_le16(FWOPT_ENABLE_CRBDB);
 
-       init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+       init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE);
 
        init_fw_cb->add_fw_options = 0;
        init_fw_cb->add_fw_options |=
-                       __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+                       cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
        init_fw_cb->add_fw_options |=
-                       __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
+                       cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
 
        if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
                != QLA_SUCCESS) {
@@ -1613,7 +1613,7 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
 
        strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
        strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
-       chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+       chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
 
 exit_get_chap:
        dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
@@ -1655,7 +1655,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
        chap_table->secret_len = strlen(password);
        strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
        strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
-       chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+       chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE);
 
        if (is_qla40XX(ha)) {
                chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
@@ -1721,7 +1721,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
 
        mutex_lock(&ha->chap_sem);
        chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
-       if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+       if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) {
                rval = QLA_ERROR;
                goto exit_unlock_uni_chap;
        }
@@ -1784,7 +1784,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
        for (i = 0; i < max_chap_entries; i++) {
                chap_table = (struct ql4_chap_table *)ha->chap_list + i;
                if (chap_table->cookie !=
-                   __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+                   cpu_to_le16(CHAP_VALID_COOKIE)) {
                        if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
                                free_index = i;
                        continue;
@@ -2105,18 +2105,18 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
 
        if (conn->max_recv_dlength)
                fw_ddb_entry->iscsi_max_rcv_data_seg_len =
-                 __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
+                 cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
 
        if (sess->max_r2t)
                fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
 
        if (sess->first_burst)
                fw_ddb_entry->iscsi_first_burst_len =
-                      __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
+                      cpu_to_le16((sess->first_burst / BYTE_UNITS));
 
        if (sess->max_burst)
                fw_ddb_entry->iscsi_max_burst_len =
-                       __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
+                       cpu_to_le16((sess->max_burst / BYTE_UNITS));
 
        if (sess->time2wait)
                fw_ddb_entry->iscsi_def_time2wait =
index 66a4877..47adff9 100644
@@ -3658,7 +3658,7 @@ qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
                            "Do ROM fast read failed\n");
                        goto done_read;
                }
-               dwptr[i] = __constant_cpu_to_le32(val);
+               dwptr[i] = cpu_to_le32(val);
        }
 
 done_read:
@@ -3721,9 +3721,9 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
                        goto no_flash_data;
        }
 
-       if (*wptr == __constant_cpu_to_le16(0xffff))
+       if (*wptr == cpu_to_le16(0xffff))
                goto no_flash_data;
-       if (flt->version != __constant_cpu_to_le16(1)) {
+       if (flt->version != cpu_to_le16(1)) {
                DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
                        "version=0x%x length=0x%x checksum=0x%x.\n",
                        le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -3826,7 +3826,7 @@ qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
        qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
            hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
 
-       if (*wptr == __constant_cpu_to_le16(0xffff))
+       if (*wptr == cpu_to_le16(0xffff))
                goto no_flash_data;
 
        if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -3883,7 +3883,7 @@ qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
        qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
                        QLA82XX_IDC_PARAM_ADDR , 8);
 
-       if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+       if (*wptr == cpu_to_le32(0xffffffff)) {
                ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
                ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
        } else {
index 6ee7ea4..f1ea65c 100644
@@ -702,7 +702,7 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
 
        *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
        if ((*chap_entry)->cookie !=
-            __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+            cpu_to_le16(CHAP_VALID_COOKIE)) {
                *chap_entry = NULL;
        } else {
                rval = QLA_SUCCESS;
@@ -745,7 +745,7 @@ static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
                chap_table = (struct ql4_chap_table *)ha->chap_list + i;
 
                if ((chap_table->cookie !=
-                   __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+                   cpu_to_le16(CHAP_VALID_COOKIE)) &&
                   (i > MAX_RESRV_CHAP_IDX)) {
                                free_index = i;
                                break;
@@ -794,7 +794,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
        for (i = chap_tbl_idx; i < max_chap_entries; i++) {
                chap_table = (struct ql4_chap_table *)ha->chap_list + i;
                if (chap_table->cookie !=
-                   __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+                   cpu_to_le16(CHAP_VALID_COOKIE))
                        continue;
 
                chap_rec->chap_tbl_idx = i;
@@ -923,7 +923,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
                goto exit_delete_chap;
        }
 
-       chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+       chap_table->cookie = cpu_to_le16(0xFFFF);
 
        offset = FLASH_CHAP_OFFSET |
                        (chap_tbl_idx * sizeof(struct ql4_chap_table));
@@ -6043,7 +6043,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
        for (i = 0; i < max_chap_entries; i++) {
                chap_table = (struct ql4_chap_table *)ha->chap_list + i;
                if (chap_table->cookie !=
-                   __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+                   cpu_to_le16(CHAP_VALID_COOKIE)) {
                        continue;
                }
 
@@ -9282,7 +9282,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
        DEBUG2(printk(KERN_INFO
                      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
                      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
-                     cmd, jiffies, cmd->request->timeout / HZ,
+                     cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));
 
        rval = qla4xxx_isp_check_reg(ha);
@@ -9349,7 +9349,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
        DEBUG2(printk(KERN_INFO
                      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
                      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
-                     ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+                     ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));
 
        rval = qla4xxx_isp_check_reg(ha);
index d84e218..8e7e833 100644
@@ -890,7 +890,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
                cmd->control_flags |= CFLAG_WRITE;
        else
                cmd->control_flags |= CFLAG_READ;
-       cmd->time_out = Cmnd->request->timeout/HZ;
+       cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ;
        memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
 }
 
index d26025c..b241f9e 100644
@@ -190,7 +190,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
                                "(result %x)\n", cmd->result));
 
        good_bytes = scsi_bufflen(cmd);
-       if (!blk_rq_is_passthrough(cmd->request)) {
+       if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
                int old_good_bytes = good_bytes;
                drv = scsi_cmd_to_driver(cmd);
                if (drv->done)
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
new file mode 100644
index 0000000..81c3853
--- /dev/null
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bsg.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/sg.h>
+#include "scsi_priv.h"
+
+#define uptr64(val) ((void __user *)(uintptr_t)(val))
+
+static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+               fmode_t mode, unsigned int timeout)
+{
+       struct scsi_request *sreq;
+       struct request *rq;
+       struct bio *bio;
+       int ret;
+
+       if (hdr->protocol != BSG_PROTOCOL_SCSI  ||
+           hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
+               return -EINVAL;
+       if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+               pr_warn_once("BIDI support in bsg has been removed.\n");
+               return -EOPNOTSUPP;
+       }
+
+       rq = blk_get_request(q, hdr->dout_xfer_len ?
+                            REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+       rq->timeout = timeout;
+
+       ret = -ENOMEM;
+       sreq = scsi_req(rq);
+       sreq->cmd_len = hdr->request_len;
+       if (sreq->cmd_len > BLK_MAX_CDB) {
+               sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
+               if (!sreq->cmd)
+                       goto out_put_request;
+       }
+
+       ret = -EFAULT;
+       if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
+               goto out_free_cmd;
+       ret = -EPERM;
+       if (!scsi_cmd_allowed(sreq->cmd, mode))
+               goto out_free_cmd;
+
+       ret = 0;
+       if (hdr->dout_xfer_len) {
+               ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+                               hdr->dout_xfer_len, GFP_KERNEL);
+       } else if (hdr->din_xfer_len) {
+               ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+                               hdr->din_xfer_len, GFP_KERNEL);
+       }
+
+       if (ret)
+               goto out_free_cmd;
+
+       bio = rq->bio;
+       blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+
+       /*
+        * fill in all the output members
+        */
+       hdr->device_status = sreq->result & 0xff;
+       hdr->transport_status = host_byte(sreq->result);
+       hdr->driver_status = 0;
+       if (scsi_status_is_check_condition(sreq->result))
+               hdr->driver_status = DRIVER_SENSE;
+       hdr->info = 0;
+       if (hdr->device_status || hdr->transport_status || hdr->driver_status)
+               hdr->info |= SG_INFO_CHECK;
+       hdr->response_len = 0;
+
+       if (sreq->sense_len && hdr->response) {
+               int len = min_t(unsigned int, hdr->max_response_len,
+                                       sreq->sense_len);
+
+               if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
+                       ret = -EFAULT;
+               else
+                       hdr->response_len = len;
+       }
+
+       if (rq_data_dir(rq) == READ)
+               hdr->din_resid = sreq->resid_len;
+       else
+               hdr->dout_resid = sreq->resid_len;
+
+       blk_rq_unmap_user(bio);
+
+out_free_cmd:
+       scsi_req_free_cmd(scsi_req(rq));
+out_put_request:
+       blk_put_request(rq);
+       return ret;
+}
+
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
+{
+       return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
+                       dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn);
+}
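
For context, the new scsi_bsg_sg_io_fn() above services sg_io_v4 requests on the per-device bsg node registered by scsi_bsg_register_queue(). A user-space sketch; the device path and buffer sizes are illustrative:

	/* User-space sketch: TEST UNIT READY through a bsg node. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>		/* SG_IO */
	#include <linux/bsg.h>		/* struct sg_io_v4 */

	int main(void)
	{
		unsigned char cdb[6] = { 0 };	/* TEST UNIT READY */
		unsigned char sense[32];
		struct sg_io_v4 hdr = {
			.guard = 'Q',
			.protocol = BSG_PROTOCOL_SCSI,
			.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD,
			.request = (uintptr_t)cdb,
			.request_len = sizeof(cdb),
			.response = (uintptr_t)sense,
			.max_response_len = sizeof(sense),
			.timeout = 10000,	/* ms */
		};
		int fd = open("/dev/bsg/0:0:0:0", O_RDONLY);	/* example node */

		if (fd < 0 || ioctl(fd, SG_IO, &hdr) < 0) {
			perror("bsg SG_IO");
			return 1;
		}
		printf("device_status=%u\n", hdr.device_status);
		return 0;
	}
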
index 9034949..6e50e81 100644
@@ -7,9 +7,18 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/module.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_common.h>
 
+MODULE_LICENSE("GPL v2");
+
+/* Command group 3 is reserved and should never be used.  */
+const unsigned char scsi_command_size_tbl[8] = {
+       6, 10, 10, 12, 16, 12, 10, 10
+};
+EXPORT_SYMBOL(scsi_command_size_tbl);
+
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
  * encouraged once assigned by ANSI/INCITS T10).
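
scsi_command_size_tbl is indexed by the opcode's group code, i.e. its top three bits; the COMMAND_SIZE() macro in <scsi/scsi_common.h> performs exactly this lookup. A one-line illustration:

	/* CDB length from the group code; mirrors COMMAND_SIZE(opcode). */
	static inline unsigned int cdb_len(unsigned char opcode)
	{
		return scsi_command_size_tbl[(opcode >> 5) & 7];
	}
	/* e.g. cdb_len(0x08) == 6 for READ(6), cdb_len(0x28) == 10 for READ(10) */
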
index 58f6936..66f5074 100644
@@ -3076,6 +3076,7 @@ static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
                            unsigned int sectors, u32 ei_lba)
 {
+       int ret = 0;
        unsigned int i;
        sector_t sector;
        struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
@@ -3083,26 +3084,33 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
        struct t10_pi_tuple *sdt;
 
        for (i = 0; i < sectors; i++, ei_lba++) {
-               int ret;
-
                sector = start_sec + i;
                sdt = dif_store(sip, sector);
 
                if (sdt->app_tag == cpu_to_be16(0xffff))
                        continue;
 
-               ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
-                                ei_lba);
-               if (ret) {
-                       dif_errors++;
-                       return ret;
+               /*
+                * Because scsi_debug acts as both initiator and
+                * target, we proceed to verify the PI even if
+                * RDPROTECT=3. This is done so the "initiator" knows
+                * which type of error to return. Otherwise we would
+                * have to iterate over the PI twice.
+                */
+               if (scp->cmnd[1] >> 5) { /* RDPROTECT */
+                       ret = dif_verify(sdt, lba2fake_store(sip, sector),
+                                        sector, ei_lba);
+                       if (ret) {
+                               dif_errors++;
+                               break;
+                       }
                }
        }
 
        dif_copy_prot(scp, start_sec, sectors, true);
        dix_reads++;
 
-       return 0;
+       return ret;
 }
 
 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
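
In these scsi_debug hunks, dif_verify()'s return value selects the sense data built later: 1 denotes a guard-tag (CRC) mismatch and 3 a reference-tag mismatch, matching the ASCQ passed to mk_sense_buffer() in the switch statements below. A minimal sketch of that contract over the standard 8-byte t10_pi_tuple:

	/* Illustrative check mirroring dif_verify()'s return convention:
	 * 1 = guard tag error, 3 = reference tag error, 0 = OK. */
	static int pi_check(const struct t10_pi_tuple *sdt, __be16 csum, sector_t lba)
	{
		if (sdt->guard_tag != csum)
			return 1;
		if (be32_to_cpu(sdt->ref_tag) != (lba & 0xffffffff))
			return 3;
		return 0;
	}
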
@@ -3196,12 +3204,29 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 
        /* DIX + T10 DIF */
        if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
-               int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
-
-               if (prot_ret) {
-                       read_unlock(macc_lckp);
-                       mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
-                       return illegal_condition_result;
+               switch (prot_verify_read(scp, lba, num, ei_lba)) {
+               case 1: /* Guard tag error */
+                       if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+                               read_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+                               return check_condition_result;
+                       } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+                               read_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+                               return illegal_condition_result;
+                       }
+                       break;
+               case 3: /* Reference tag error */
+                       if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
+                               read_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+                               return check_condition_result;
+                       } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+                               read_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+                               return illegal_condition_result;
+                       }
+                       break;
                }
        }
 
@@ -3232,28 +3257,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        return 0;
 }
 
-static void dump_sector(unsigned char *buf, int len)
-{
-       int i, j, n;
-
-       pr_err(">>> Sector Dump <<<\n");
-       for (i = 0 ; i < len ; i += 16) {
-               char b[128];
-
-               for (j = 0, n = 0; j < 16; j++) {
-                       unsigned char c = buf[i+j];
-
-                       if (c >= 0x20 && c < 0x7e)
-                               n += scnprintf(b + n, sizeof(b) - n,
-                                              " %c ", buf[i+j]);
-                       else
-                               n += scnprintf(b + n, sizeof(b) - n,
-                                              "%02x ", buf[i+j]);
-               }
-               pr_err("%04d: %s\n", i, b);
-       }
-}
-
 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
                             unsigned int sectors, u32 ei_lba)
 {
@@ -3299,10 +3302,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
                        sdt = piter.addr + ppage_offset;
                        daddr = diter.addr + dpage_offset;
 
-                       ret = dif_verify(sdt, daddr, sector, ei_lba);
-                       if (ret) {
-                               dump_sector(daddr, sdebug_sector_size);
-                               goto out;
+                       if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
+                               ret = dif_verify(sdt, daddr, sector, ei_lba);
+                               if (ret)
+                                       goto out;
                        }
 
                        sector++;
@@ -3480,12 +3483,29 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 
        /* DIX + T10 DIF */
        if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
-               int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
-
-               if (prot_ret) {
-                       write_unlock(macc_lckp);
-                       mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
-                       return illegal_condition_result;
+               switch (prot_verify_write(scp, lba, num, ei_lba)) {
+               case 1: /* Guard tag error */
+                       if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
+                               write_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+                               return illegal_condition_result;
+                       } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+                               write_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+                               return check_condition_result;
+                       }
+                       break;
+               case 3: /* Reference tag error */
+                       if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
+                               write_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
+                               return illegal_condition_result;
+                       } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
+                               write_unlock(macc_lckp);
+                               mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
+                               return check_condition_result;
+                       }
+                       break;
                }
        }
 
@@ -4702,7 +4722,7 @@ fini:
 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
 {
        u16 hwq;
-       u32 tag = blk_mq_unique_tag(cmnd->request);
+       u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
 
        hwq = blk_mq_unique_tag_to_hwq(tag);
 
@@ -4715,7 +4735,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
 
 static u32 get_tag(struct scsi_cmnd *cmnd)
 {
-       return blk_mq_unique_tag(cmnd->request);
+       return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
 }
 
 /* Queued (deferred) command completions converge here. */
@@ -5364,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 {
        bool new_sd_dp;
        bool inject = false;
-       bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI);
+       bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
        int k, num_in_q, qdepth;
        unsigned long iflags;
        u64 ns_from_boot = 0;
@@ -5567,8 +5587,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
                if (sdebug_statistics)
                        sd_dp->issuing_cpu = raw_smp_processor_id();
                if (unlikely(sd_dp->aborted)) {
-                       sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
-                       blk_abort_request(cmnd->request);
+                       sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
+                                   scsi_cmd_to_rq(cmnd)->tag);
+                       blk_abort_request(scsi_cmd_to_rq(cmnd));
                        atomic_set(&sdeb_inject_pending, 0);
                        sd_dp->aborted = false;
                }
@@ -7394,7 +7415,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
                                               (u32)cmd[k]);
                }
                sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
-                           blk_mq_unique_tag(scp->request), b);
+                           blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
        }
        if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
                return SCSI_MLQUEUE_HOST_BUSY;
index d33355a..c708045 100644
@@ -171,6 +171,7 @@ static struct {
        {"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
        {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
        {"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
+       {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36},
        {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
        {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
        {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
index 58a252c..b6c86cc 100644
@@ -242,7 +242,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
  */
 static void scsi_eh_reset(struct scsi_cmnd *scmd)
 {
-       if (!blk_rq_is_passthrough(scmd->request)) {
+       if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
                struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
                if (sdrv->eh_reset)
                        sdrv->eh_reset(scmd);
@@ -1182,7 +1182,7 @@ static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
 static enum scsi_disposition
 scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
 {
-       if (!blk_rq_is_passthrough(scmd->request)) {
+       if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
                struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
                if (sdrv->eh_action)
                        rtn = sdrv->eh_action(scmd, rtn);
@@ -1750,21 +1750,23 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
  */
 int scsi_noretry_cmd(struct scsi_cmnd *scmd)
 {
+       struct request *req = scsi_cmd_to_rq(scmd);
+
        switch (host_byte(scmd->result)) {
        case DID_OK:
                break;
        case DID_TIME_OUT:
                goto check_type;
        case DID_BUS_BUSY:
-               return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
+               return req->cmd_flags & REQ_FAILFAST_TRANSPORT;
        case DID_PARITY:
-               return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
+               return req->cmd_flags & REQ_FAILFAST_DEV;
        case DID_ERROR:
                if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
                        return 0;
                fallthrough;
        case DID_SOFT_ERROR:
-               return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
+               return req->cmd_flags & REQ_FAILFAST_DRIVER;
        }
 
        if (!scsi_status_is_check_condition(scmd->result))
@@ -1775,8 +1777,7 @@ check_type:
         * assume caller has checked sense and determined
         * the check condition was retryable.
         */
-       if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
-           blk_rq_is_passthrough(scmd->request))
+       if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
                return 1;
 
        return 0;
@@ -2376,7 +2377,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
 
        scmd = (struct scsi_cmnd *)(rq + 1);
        scsi_init_command(dev, scmd);
-       scmd->request = rq;
        scmd->cmnd = scsi_req(rq)->cmd;
 
        scmd->scsi_done         = scsi_reset_provider_done_command;
index 0d13610..6ff2207 100644
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/cdrom.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -63,29 +64,6 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
        return 1;
 }
 
-/*
-
- * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host.
- * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES  variables are used.  
- * 
- * dev is the SCSI device struct ptr, *(int *) arg is the length of the
- * input data, if any, not including the command string & counts, 
- * *((int *)arg + 1) is the output buffer size in bytes.
- * 
- * *(char *) ((int *) arg)[2] the actual command byte.   
- * 
- * Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL.
- * 
- * This size *does not* include the initial lengths that were passed.
- * 
- * The SCSI command is read from the memory location immediately after the
- * length words, and the input data is right after the command.  The SCSI
- * routines know the command size based on the opcode decode.  
- * 
- * The output area is then filled in starting from the command byte. 
- */
-
 static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
                                  int timeout, int retries)
 {
@@ -189,10 +167,732 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
                ? -EFAULT: 0;
 }
 
+static int sg_get_version(int __user *p)
+{
+       static const int sg_version_num = 30527;
+       return put_user(sg_version_num, p);
+}
 
-static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg)
+static int sg_set_timeout(struct scsi_device *sdev, int __user *p)
 {
-       char scsi_cmd[MAX_COMMAND_SIZE];
+       int timeout, err = get_user(timeout, p);
+
+       if (!err)
+               sdev->sg_timeout = clock_t_to_jiffies(timeout);
+
+       return err;
+}
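
The timeout arrives from user space in clock_t ticks and is stored as jiffies. A caller-side fragment, illustrative only:

	/* Illustrative: 30-second default timeout for subsequent SG commands.
	 * The unit is clock ticks, i.e. sysconf(_SC_CLK_TCK) per second. */
	int ticks = 30 * sysconf(_SC_CLK_TCK);
	ioctl(fd, SG_SET_TIMEOUT, &ticks);
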
+
+static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+       int val = min(sdev->sg_reserved_size,
+                     queue_max_bytes(sdev->request_queue));
+
+       return put_user(val, p);
+}
+
+static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p)
+{
+       int size, err = get_user(size, p);
+
+       if (err)
+               return err;
+
+       if (size < 0)
+               return -EINVAL;
+
+       sdev->sg_reserved_size = min_t(unsigned int, size,
+                                      queue_max_bytes(sdev->request_queue));
+       return 0;
+}
+
+/*
+ * This always reports that we are ATAPI, even for a real SCSI drive; it
+ * is not clear that changing this would be worthwhile (callers rarely
+ * care).
+ */
+static int sg_emulated_host(struct request_queue *q, int __user *p)
+{
+       return put_user(1, p);
+}
+
+static int scsi_get_idlun(struct scsi_device *sdev, void __user *argp)
+{
+       struct scsi_idlun v = {
+               .dev_id = (sdev->id & 0xff) +
+                       ((sdev->lun & 0xff) << 8) +
+                       ((sdev->channel & 0xff) << 16) +
+                       ((sdev->host->host_no & 0xff) << 24),
+               .host_unique_id = sdev->host->unique_id
+       };
+       if (copy_to_user(argp, &v, sizeof(struct scsi_idlun)))
+               return -EFAULT;
+       return 0;
+}
+
+static int scsi_send_start_stop(struct scsi_device *sdev, int data)
+{
+       u8 cdb[MAX_COMMAND_SIZE] = { };
+
+       cdb[0] = START_STOP;
+       cdb[4] = data;
+       return ioctl_internal_command(sdev, cdb, START_STOP_TIMEOUT,
+                                     NORMAL_RETRIES);
+}
+
+/*
+ * Check if the given command is allowed.
+ *
+ * Only a subset of commands are allowed for unprivileged users. Commands used
+ * to format the media, update the firmware, etc. are not permitted.
+ */
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode)
+{
+       /* root can do any command. */
+       if (capable(CAP_SYS_RAWIO))
+               return true;
+
+       /* Anybody who can open the device can do a read-safe command */
+       switch (cmd[0]) {
+       /* Basic read-only commands */
+       case TEST_UNIT_READY:
+       case REQUEST_SENSE:
+       case READ_6:
+       case READ_10:
+       case READ_12:
+       case READ_16:
+       case READ_BUFFER:
+       case READ_DEFECT_DATA:
+       case READ_CAPACITY: /* also GPCMD_READ_CDVD_CAPACITY */
+       case READ_LONG:
+       case INQUIRY:
+       case MODE_SENSE:
+       case MODE_SENSE_10:
+       case LOG_SENSE:
+       case START_STOP:
+       case GPCMD_VERIFY_10:
+       case VERIFY_16:
+       case REPORT_LUNS:
+       case SERVICE_ACTION_IN_16:
+       case RECEIVE_DIAGNOSTIC:
+       case MAINTENANCE_IN: /* also GPCMD_SEND_KEY, which is a write command */
+       case GPCMD_READ_BUFFER_CAPACITY:
+       /* Audio CD commands */
+       case GPCMD_PLAY_CD:
+       case GPCMD_PLAY_AUDIO_10:
+       case GPCMD_PLAY_AUDIO_MSF:
+       case GPCMD_PLAY_AUDIO_TI:
+       case GPCMD_PAUSE_RESUME:
+       /* CD/DVD data reading */
+       case GPCMD_READ_CD:
+       case GPCMD_READ_CD_MSF:
+       case GPCMD_READ_DISC_INFO:
+       case GPCMD_READ_DVD_STRUCTURE:
+       case GPCMD_READ_HEADER:
+       case GPCMD_READ_TRACK_RZONE_INFO:
+       case GPCMD_READ_SUBCHANNEL:
+       case GPCMD_READ_TOC_PMA_ATIP:
+       case GPCMD_REPORT_KEY:
+       case GPCMD_SCAN:
+       case GPCMD_GET_CONFIGURATION:
+       case GPCMD_READ_FORMAT_CAPACITIES:
+       case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+       case GPCMD_GET_PERFORMANCE:
+       case GPCMD_SEEK:
+       case GPCMD_STOP_PLAY_SCAN:
+       /* ZBC */
+       case ZBC_IN:
+               return true;
+       /* Basic writing commands */
+       case WRITE_6:
+       case WRITE_10:
+       case WRITE_VERIFY:
+       case WRITE_12:
+       case WRITE_VERIFY_12:
+       case WRITE_16:
+       case WRITE_LONG:
+       case WRITE_LONG_2:
+       case WRITE_SAME:
+       case WRITE_SAME_16:
+       case WRITE_SAME_32:
+       case ERASE:
+       case GPCMD_MODE_SELECT_10:
+       case MODE_SELECT:
+       case LOG_SELECT:
+       case GPCMD_BLANK:
+       case GPCMD_CLOSE_TRACK:
+       case GPCMD_FLUSH_CACHE:
+       case GPCMD_FORMAT_UNIT:
+       case GPCMD_REPAIR_RZONE_TRACK:
+       case GPCMD_RESERVE_RZONE_TRACK:
+       case GPCMD_SEND_DVD_STRUCTURE:
+       case GPCMD_SEND_EVENT:
+       case GPCMD_SEND_OPC:
+       case GPCMD_SEND_CUE_SHEET:
+       case GPCMD_SET_SPEED:
+       case GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL:
+       case GPCMD_LOAD_UNLOAD:
+       case GPCMD_SET_STREAMING:
+       case GPCMD_SET_READ_AHEAD:
+       /* ZBC */
+       case ZBC_OUT:
+               return (mode & FMODE_WRITE);
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(scsi_cmd_allowed);
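
Callers in this series (scsi_bsg_sg_io_fn() and the sg_io path below) consult this table before mapping user data. A minimal sketch of the calling convention:

	/* Sketch: 'mode' is the fmode_t the device file was opened with.
	 * Without CAP_SYS_RAWIO, write-class CDBs also require FMODE_WRITE,
	 * so e.g. WRITE(10) on an O_RDONLY open is rejected with -EPERM. */
	static int check_user_cdb(unsigned char *cdb, fmode_t mode)
	{
		if (!scsi_cmd_allowed(cdb, mode))
			return -EPERM;
		return 0;
	}
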
+
+static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
+               struct sg_io_hdr *hdr, fmode_t mode)
+{
+       struct scsi_request *req = scsi_req(rq);
+
+       if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
+               return -EFAULT;
+       if (!scsi_cmd_allowed(req->cmd, mode))
+               return -EPERM;
+
+       /*
+        * fill in request structure
+        */
+       req->cmd_len = hdr->cmd_len;
+
+       rq->timeout = msecs_to_jiffies(hdr->timeout);
+       if (!rq->timeout)
+               rq->timeout = sdev->sg_timeout;
+       if (!rq->timeout)
+               rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+       if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+               rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+       return 0;
+}
+
+static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+               struct bio *bio)
+{
+       struct scsi_request *req = scsi_req(rq);
+       int r, ret = 0;
+
+       /*
+        * fill in all the output members
+        */
+       hdr->status = req->result & 0xff;
+       hdr->masked_status = status_byte(req->result);
+       hdr->msg_status = COMMAND_COMPLETE;
+       hdr->host_status = host_byte(req->result);
+       hdr->driver_status = 0;
+       if (scsi_status_is_check_condition(hdr->status))
+               hdr->driver_status = DRIVER_SENSE;
+       hdr->info = 0;
+       if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+               hdr->info |= SG_INFO_CHECK;
+       hdr->resid = req->resid_len;
+       hdr->sb_len_wr = 0;
+
+       if (req->sense_len && hdr->sbp) {
+               int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
+
+               if (!copy_to_user(hdr->sbp, req->sense, len))
+                       hdr->sb_len_wr = len;
+               else
+                       ret = -EFAULT;
+       }
+
+       r = blk_rq_unmap_user(bio);
+       if (!ret)
+               ret = r;
+
+       return ret;
+}
+
+static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
+               struct sg_io_hdr *hdr, fmode_t mode)
+{
+       unsigned long start_time;
+       ssize_t ret = 0;
+       int writing = 0;
+       int at_head = 0;
+       struct request *rq;
+       struct scsi_request *req;
+       struct bio *bio;
+
+       if (hdr->interface_id != 'S')
+               return -EINVAL;
+
+       if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9))
+               return -EIO;
+
+       if (hdr->dxfer_len)
+               switch (hdr->dxfer_direction) {
+               default:
+                       return -EINVAL;
+               case SG_DXFER_TO_DEV:
+                       writing = 1;
+                       break;
+               case SG_DXFER_TO_FROM_DEV:
+               case SG_DXFER_FROM_DEV:
+                       break;
+               }
+       if (hdr->flags & SG_FLAG_Q_AT_HEAD)
+               at_head = 1;
+
+       ret = -ENOMEM;
+       rq = blk_get_request(sdev->request_queue, writing ?
+                            REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+       req = scsi_req(rq);
+
+       if (hdr->cmd_len > BLK_MAX_CDB) {
+               req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+               if (!req->cmd)
+                       goto out_put_request;
+       }
+
+       ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
+       if (ret < 0)
+               goto out_free_cdb;
+
+       ret = 0;
+       if (hdr->iovec_count) {
+               struct iov_iter i;
+               struct iovec *iov = NULL;
+
+               ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
+                                  hdr->iovec_count, 0, &iov, &i);
+               if (ret < 0)
+                       goto out_free_cdb;
+
+               /* SG_IO howto says that the shorter of the two wins */
+               iov_iter_truncate(&i, hdr->dxfer_len);
+
+               ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
+               kfree(iov);
+       } else if (hdr->dxfer_len)
+               ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
+                                     hdr->dxfer_len, GFP_KERNEL);
+
+       if (ret)
+               goto out_free_cdb;
+
+       bio = rq->bio;
+       req->retries = 0;
+
+       start_time = jiffies;
+
+       blk_execute_rq(disk, rq, at_head);
+
+       hdr->duration = jiffies_to_msecs(jiffies - start_time);
+
+       ret = scsi_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+       scsi_req_free_cmd(req);
+out_put_request:
+       blk_put_request(rq);
+       return ret;
+}
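
From user space, the v3 SG_IO request that sg_io() above services looks like this sketch (INQUIRY, with illustrative sizes):

	/* User-space sketch: 96-byte INQUIRY via the v3 sg_io_hdr interface. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	static int do_inquiry(int fd)
	{
		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
		unsigned char buf[96], sense[32];
		struct sg_io_hdr io;

		memset(&io, 0, sizeof(io));
		io.interface_id = 'S';
		io.dxfer_direction = SG_DXFER_FROM_DEV;
		io.cmd_len = sizeof(cdb);
		io.cmdp = cdb;
		io.dxfer_len = sizeof(buf);
		io.dxferp = buf;
		io.mx_sb_len = sizeof(sense);
		io.sbp = sense;
		io.timeout = 10000;	/* ms */

		return ioctl(fd, SG_IO, &io);	/* 0 on success; then check io.status */
	}
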
+
+/**
+ * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @q:         request queue to send scsi commands down
+ * @disk:      gendisk to operate on (optional)
+ * @mode:      mode used to open the file through which the ioctl has been
+ *             submitted
+ * @sic:       userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q.  The open @mode is used to perform
+ * fine-grained permission checks that allow users to send down
+ * non-destructive SCSI commands.  If the caller has a struct gendisk
+ * available it should be passed in as @disk to allow the low level
+ * driver to use the information contained in it.  A NULL @disk is
+ * only allowed if the caller knows that the low level driver doesn't
+ * need it (e.g. in the scsi subsystem).
+ *
+ * Notes:
+ *   -  This interface is deprecated - users should use the SG_IO
+ *      interface instead, as this is a more flexible approach to
+ *      performing SCSI commands on a device.
+ *   -  The SCSI command length is determined by examining the 1st byte
+ *      of the given command. There is no way to override this.
+ *   -  Data transfers are limited to PAGE_SIZE
+ *   -  The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ *      accommodate the sense buffer when an error occurs.
+ *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ *      old code will not be surprised.
+ *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ *      a negative return and the Unix error code in 'errno'.
+ *      If the SCSI command succeeds then 0 is returned.
+ *      Positive numbers returned are the compacted SCSI error codes (4
+ *      bytes in one int) where the lowest byte is the SCSI status.
+ */
+static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
+               fmode_t mode, struct scsi_ioctl_command __user *sic)
+{
+       enum { OMAX_SB_LEN = 16 };      /* For backward compatibility */
+       struct request *rq;
+       struct scsi_request *req;
+       int err;
+       unsigned int in_len, out_len, bytes, opcode, cmdlen;
+       char *buffer = NULL;
+
+       if (!sic)
+               return -EINVAL;
+
+       /*
+        * get in and out lengths, verify they don't exceed a page worth of data
+        */
+       if (get_user(in_len, &sic->inlen))
+               return -EFAULT;
+       if (get_user(out_len, &sic->outlen))
+               return -EFAULT;
+       if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+               return -EINVAL;
+       if (get_user(opcode, sic->data))
+               return -EFAULT;
+
+       bytes = max(in_len, out_len);
+       if (bytes) {
+               buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
+               if (!buffer)
+                       return -ENOMEM;
+
+       }
+
+       rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto error_free_buffer;
+       }
+       req = scsi_req(rq);
+
+       cmdlen = COMMAND_SIZE(opcode);
+
+       /*
+        * get command and data to send to device, if any
+        */
+       err = -EFAULT;
+       req->cmd_len = cmdlen;
+       if (copy_from_user(req->cmd, sic->data, cmdlen))
+               goto error;
+
+       if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+               goto error;
+
+       err = -EPERM;
+       if (!scsi_cmd_allowed(req->cmd, mode))
+               goto error;
+
+       /* default; possibly overridden later */
+       req->retries = 5;
+
+       switch (opcode) {
+       case SEND_DIAGNOSTIC:
+       case FORMAT_UNIT:
+               rq->timeout = FORMAT_UNIT_TIMEOUT;
+               req->retries = 1;
+               break;
+       case START_STOP:
+               rq->timeout = START_STOP_TIMEOUT;
+               break;
+       case MOVE_MEDIUM:
+               rq->timeout = MOVE_MEDIUM_TIMEOUT;
+               break;
+       case READ_ELEMENT_STATUS:
+               rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+               break;
+       case READ_DEFECT_DATA:
+               rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+               req->retries = 1;
+               break;
+       default:
+               rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+               break;
+       }
+
+       if (bytes) {
+               err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+               if (err)
+                       goto error;
+       }
+
+       blk_execute_rq(disk, rq, 0);
+
+       err = req->result & 0xff;       /* only 8 bit SCSI status */
+       if (err) {
+               if (req->sense_len && req->sense) {
+                       bytes = (OMAX_SB_LEN > req->sense_len) ?
+                               req->sense_len : OMAX_SB_LEN;
+                       if (copy_to_user(sic->data, req->sense, bytes))
+                               err = -EFAULT;
+               }
+       } else {
+               if (copy_to_user(sic->data, buffer, out_len))
+                       err = -EFAULT;
+       }
+
+error:
+       blk_put_request(rq);
+
+error_free_buffer:
+       kfree(buffer);
+
+       return err;
+}
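
The deprecated buffer layout parsed above, from the user-space side (illustrative only; new code should use SG_IO instead):

	/* Two length words, then the CDB immediately followed by any input
	 * data; reply data is written back starting at the CDB offset. */
	struct {
		unsigned int inlen;		/* bytes sent to the device */
		unsigned int outlen;		/* bytes expected back */
		unsigned char data[6 + 96];	/* CDB, then data area */
	} sic = { 0, 96, { 0x12, 0, 0, 0, 96, 0 } };	/* INQUIRY */

	/* ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sic); on success, sic.data[]
	 * holds the first 96 bytes of INQUIRY data. */
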
+
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+       if (in_compat_syscall()) {
+               struct compat_sg_io_hdr hdr32 =  {
+                       .interface_id    = hdr->interface_id,
+                       .dxfer_direction = hdr->dxfer_direction,
+                       .cmd_len         = hdr->cmd_len,
+                       .mx_sb_len       = hdr->mx_sb_len,
+                       .iovec_count     = hdr->iovec_count,
+                       .dxfer_len       = hdr->dxfer_len,
+                       .dxferp          = (uintptr_t)hdr->dxferp,
+                       .cmdp            = (uintptr_t)hdr->cmdp,
+                       .sbp             = (uintptr_t)hdr->sbp,
+                       .timeout         = hdr->timeout,
+                       .flags           = hdr->flags,
+                       .pack_id         = hdr->pack_id,
+                       .usr_ptr         = (uintptr_t)hdr->usr_ptr,
+                       .status          = hdr->status,
+                       .masked_status   = hdr->masked_status,
+                       .msg_status      = hdr->msg_status,
+                       .sb_len_wr       = hdr->sb_len_wr,
+                       .host_status     = hdr->host_status,
+                       .driver_status   = hdr->driver_status,
+                       .resid           = hdr->resid,
+                       .duration        = hdr->duration,
+                       .info            = hdr->info,
+               };
+
+               if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
+                       return -EFAULT;
+
+               return 0;
+       }
+#endif
+
+       if (copy_to_user(argp, hdr, sizeof(*hdr)))
+               return -EFAULT;
+
+       return 0;
+}
+EXPORT_SYMBOL(put_sg_io_hdr);
+
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+       struct compat_sg_io_hdr hdr32;
+
+       if (in_compat_syscall()) {
+               if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
+                       return -EFAULT;
+
+               *hdr = (struct sg_io_hdr) {
+                       .interface_id    = hdr32.interface_id,
+                       .dxfer_direction = hdr32.dxfer_direction,
+                       .cmd_len         = hdr32.cmd_len,
+                       .mx_sb_len       = hdr32.mx_sb_len,
+                       .iovec_count     = hdr32.iovec_count,
+                       .dxfer_len       = hdr32.dxfer_len,
+                       .dxferp          = compat_ptr(hdr32.dxferp),
+                       .cmdp            = compat_ptr(hdr32.cmdp),
+                       .sbp             = compat_ptr(hdr32.sbp),
+                       .timeout         = hdr32.timeout,
+                       .flags           = hdr32.flags,
+                       .pack_id         = hdr32.pack_id,
+                       .usr_ptr         = compat_ptr(hdr32.usr_ptr),
+                       .status          = hdr32.status,
+                       .masked_status   = hdr32.masked_status,
+                       .msg_status      = hdr32.msg_status,
+                       .sb_len_wr       = hdr32.sb_len_wr,
+                       .host_status     = hdr32.host_status,
+                       .driver_status   = hdr32.driver_status,
+                       .resid           = hdr32.resid,
+                       .duration        = hdr32.duration,
+                       .info            = hdr32.info,
+               };
+
+               return 0;
+       }
+#endif
+
+       if (copy_from_user(hdr, argp, sizeof(*hdr)))
+               return -EFAULT;
+
+       return 0;
+}
+EXPORT_SYMBOL(get_sg_io_hdr);
+
+#ifdef CONFIG_COMPAT
+struct compat_cdrom_generic_command {
+       unsigned char   cmd[CDROM_PACKET_SIZE];
+       compat_caddr_t  buffer;
+       compat_uint_t   buflen;
+       compat_int_t    stat;
+       compat_caddr_t  sense;
+       unsigned char   data_direction;
+       unsigned char   pad[3];
+       compat_int_t    quiet;
+       compat_int_t    timeout;
+       compat_caddr_t  unused;
+};
+#endif
+
+static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
+                                     const void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+       if (in_compat_syscall()) {
+               struct compat_cdrom_generic_command cgc32;
+
+               if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
+                       return -EFAULT;
+
+               *cgc = (struct cdrom_generic_command) {
+                       .buffer         = compat_ptr(cgc32.buffer),
+                       .buflen         = cgc32.buflen,
+                       .stat           = cgc32.stat,
+                       .sense          = compat_ptr(cgc32.sense),
+                       .data_direction = cgc32.data_direction,
+                       .quiet          = cgc32.quiet,
+                       .timeout        = cgc32.timeout,
+                       .unused         = compat_ptr(cgc32.unused),
+               };
+               memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
+               return 0;
+       }
+#endif
+       if (copy_from_user(cgc, arg, sizeof(*cgc)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
+                                     void __user *arg)
+{
+#ifdef CONFIG_COMPAT
+       if (in_compat_syscall()) {
+               struct compat_cdrom_generic_command cgc32 = {
+                       .buffer         = (uintptr_t)(cgc->buffer),
+                       .buflen         = cgc->buflen,
+                       .stat           = cgc->stat,
+                       .sense          = (uintptr_t)(cgc->sense),
+                       .data_direction = cgc->data_direction,
+                       .quiet          = cgc->quiet,
+                       .timeout        = cgc->timeout,
+                       .unused         = (uintptr_t)(cgc->unused),
+               };
+               memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);
+
+               if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
+                       return -EFAULT;
+
+               return 0;
+       }
+#endif
+       if (copy_to_user(arg, cgc, sizeof(*cgc)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk,
+               fmode_t mode, void __user *arg)
+{
+       struct cdrom_generic_command cgc;
+       struct sg_io_hdr hdr;
+       int err;
+
+       err = scsi_get_cdrom_generic_arg(&cgc, arg);
+       if (err)
+               return err;
+
+       cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+       memset(&hdr, 0, sizeof(hdr));
+       hdr.interface_id = 'S';
+       hdr.cmd_len = sizeof(cgc.cmd);
+       hdr.dxfer_len = cgc.buflen;
+       switch (cgc.data_direction) {
+       case CGC_DATA_UNKNOWN:
+               hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+               break;
+       case CGC_DATA_WRITE:
+               hdr.dxfer_direction = SG_DXFER_TO_DEV;
+               break;
+       case CGC_DATA_READ:
+               hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+               break;
+       case CGC_DATA_NONE:
+               hdr.dxfer_direction = SG_DXFER_NONE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       hdr.dxferp = cgc.buffer;
+       hdr.sbp = cgc.sense;
+       if (hdr.sbp)
+               hdr.mx_sb_len = sizeof(struct request_sense);
+       hdr.timeout = jiffies_to_msecs(cgc.timeout);
+       hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
+
+       err = sg_io(sdev, disk, &hdr, mode);
+       if (err == -EFAULT)
+               return -EFAULT;
+
+       if (hdr.status)
+               return -EIO;
+
+       cgc.stat = err;
+       cgc.buflen = hdr.resid;
+       if (scsi_put_cdrom_generic_arg(&cgc, arg))
+               return -EFAULT;
+
+       return err;
+}
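[Annotation] From userspace the packet interface looks like this; a hedged sketch (the opcode and timeout are illustrative), noting that cgc.timeout is supplied in clock_t ticks and converted by clock_t_to_jiffies() above:

        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/cdrom.h>

        static int cdrom_test_unit_ready(int fd)
        {
                struct cdrom_generic_command cgc;

                memset(&cgc, 0, sizeof(cgc));
                cgc.cmd[0] = 0x00;                  /* TEST UNIT READY */
                cgc.data_direction = CGC_DATA_NONE; /* maps to SG_DXFER_NONE above */
                cgc.timeout = 5 * 100;              /* ~5s in USER_HZ clock ticks */
                return ioctl(fd, CDROM_SEND_PACKET, &cgc);
        }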
+
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
+               fmode_t mode, void __user *argp)
+{
+       struct sg_io_hdr hdr;
+       int error;
+
+       error = get_sg_io_hdr(&hdr, argp);
+       if (error)
+               return error;
+       error = sg_io(sdev, disk, &hdr, mode);
+       if (error == -EFAULT)
+               return error;
+       if (put_sg_io_hdr(&hdr, argp))
+               return -EFAULT;
+       return error;
+}
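[Annotation] The same path seen from userspace; a minimal SG_IO sketch issuing a 6-byte INQUIRY (buffer sizes and timeout are illustrative):

        #include <string.h>
        #include <sys/ioctl.h>
        #include <scsi/sg.h>

        static int do_inquiry(int fd, unsigned char *buf, unsigned char len)
        {
                unsigned char cdb[6] = { 0x12, 0, 0, 0, len, 0 };   /* INQUIRY */
                unsigned char sense[32];
                struct sg_io_hdr hdr;

                memset(&hdr, 0, sizeof(hdr));
                hdr.interface_id = 'S';
                hdr.cmd_len = sizeof(cdb);
                hdr.cmdp = cdb;
                hdr.dxfer_direction = SG_DXFER_FROM_DEV;
                hdr.dxfer_len = len;
                hdr.dxferp = buf;
                hdr.mx_sb_len = sizeof(sense);
                hdr.sbp = sense;
                hdr.timeout = 5000;                 /* milliseconds */
                return ioctl(fd, SG_IO, &hdr);
        }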
+
+/**
+ * scsi_ioctl - Dispatch ioctl to scsi device
+ * @sdev: scsi device receiving ioctl
+ * @disk: disk receiving the ioctl
+ * @mode: mode the block/char device is opened with
+ * @cmd: which ioctl is it
+ * @arg: data associated with ioctl
+ *
+ * Description: The scsi_ioctl() function differs from most ioctls in that it
+ * does not take a major/minor number as the dev field.  Rather, it takes
+ * a pointer to a &struct scsi_device.
+ */
+int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
+               int cmd, void __user *arg)
+{
+       struct request_queue *q = sdev->request_queue;
        struct scsi_sense_hdr sense_hdr;
 
        /* Check for deprecated ioctls ... all the ioctls which don't
@@ -212,26 +912,34 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
        }
 
        switch (cmd) {
-       case SCSI_IOCTL_GET_IDLUN: {
-               struct scsi_idlun v = {
-                       .dev_id = (sdev->id & 0xff)
-                                + ((sdev->lun & 0xff) << 8)
-                                + ((sdev->channel & 0xff) << 16)
-                                + ((sdev->host->host_no & 0xff) << 24),
-                       .host_unique_id = sdev->host->unique_id
-               };
-               if (copy_to_user(arg, &v, sizeof(struct scsi_idlun)))
-                       return -EFAULT;
-               return 0;
-       }
+       case SG_GET_VERSION_NUM:
+               return sg_get_version(arg);
+       case SG_SET_TIMEOUT:
+               return sg_set_timeout(sdev, arg);
+       case SG_GET_TIMEOUT:
+               return jiffies_to_clock_t(sdev->sg_timeout);
+       case SG_GET_RESERVED_SIZE:
+               return sg_get_reserved_size(sdev, arg);
+       case SG_SET_RESERVED_SIZE:
+               return sg_set_reserved_size(sdev, arg);
+       case SG_EMULATED_HOST:
+               return sg_emulated_host(q, arg);
+       case SG_IO:
+               return scsi_ioctl_sg_io(sdev, disk, mode, arg);
+       case SCSI_IOCTL_SEND_COMMAND:
+               return sg_scsi_ioctl(q, disk, mode, arg);
+       case CDROM_SEND_PACKET:
+               return scsi_cdrom_send_packet(sdev, disk, mode, arg);
+       case CDROMCLOSETRAY:
+               return scsi_send_start_stop(sdev, 3);
+       case CDROMEJECT:
+               return scsi_send_start_stop(sdev, 2);
+       case SCSI_IOCTL_GET_IDLUN:
+               return scsi_get_idlun(sdev, arg);
        case SCSI_IOCTL_GET_BUS_NUMBER:
                return put_user(sdev->host->host_no, (int __user *)arg);
        case SCSI_IOCTL_PROBE_HOST:
                return ioctl_probe(sdev->host, arg);
-       case SCSI_IOCTL_SEND_COMMAND:
-               if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
-                       return -EACCES;
-               return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
        case SCSI_IOCTL_DOORLOCK:
                return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
        case SCSI_IOCTL_DOORUNLOCK:
@@ -240,66 +948,27 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg
                return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
                                            NORMAL_RETRIES, &sense_hdr);
        case SCSI_IOCTL_START_UNIT:
-               scsi_cmd[0] = START_STOP;
-               scsi_cmd[1] = 0;
-               scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
-               scsi_cmd[4] = 1;
-               return ioctl_internal_command(sdev, scsi_cmd,
-                                    START_STOP_TIMEOUT, NORMAL_RETRIES);
+               return scsi_send_start_stop(sdev, 1);
        case SCSI_IOCTL_STOP_UNIT:
-               scsi_cmd[0] = START_STOP;
-               scsi_cmd[1] = 0;
-               scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0;
-               scsi_cmd[4] = 0;
-               return ioctl_internal_command(sdev, scsi_cmd,
-                                    START_STOP_TIMEOUT, NORMAL_RETRIES);
+               return scsi_send_start_stop(sdev, 0);
         case SCSI_IOCTL_GET_PCI:
                 return scsi_ioctl_get_pci(sdev, arg);
        case SG_SCSI_RESET:
                return scsi_ioctl_reset(sdev, arg);
        }
-       return -ENOIOCTLCMD;
-}
-
-/**
- * scsi_ioctl - Dispatch ioctl to scsi device
- * @sdev: scsi device receiving ioctl
- * @cmd: which ioctl is it
- * @arg: data associated with ioctl
- *
- * Description: The scsi_ioctl() function differs from most ioctls in that it
- * does not take a major/minor number as the dev field.  Rather, it takes
- * a pointer to a &struct scsi_device.
- */
-int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
-       int ret = scsi_ioctl_common(sdev, cmd, arg);
-
-       if (ret != -ENOIOCTLCMD)
-               return ret;
-
-       if (sdev->host->hostt->ioctl)
-               return sdev->host->hostt->ioctl(sdev, cmd, arg);
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL(scsi_ioctl);
 
 #ifdef CONFIG_COMPAT
-int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
-{
-       int ret = scsi_ioctl_common(sdev, cmd, arg);
-
-       if (ret != -ENOIOCTLCMD)
-               return ret;
-
-       if (sdev->host->hostt->compat_ioctl)
+       if (in_compat_syscall()) {
+               if (!sdev->host->hostt->compat_ioctl)
+                       return -EINVAL;
                return sdev->host->hostt->compat_ioctl(sdev, cmd, arg);
-
-       return ret;
-}
-EXPORT_SYMBOL(scsi_compat_ioctl);
+       }
 #endif
+       if (!sdev->host->hostt->ioctl)
+               return -EINVAL;
+       return sdev->host->hostt->ioctl(sdev, cmd, arg);
+}
+EXPORT_SYMBOL(scsi_ioctl);
 
 /*
  * We can process a reset even when a device isn't fully operable.
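[Annotation] For orientation, the SCSI_IOCTL_GET_IDLUN encoding now served via scsi_get_idlun() packs host/channel/id/lun into one word exactly as the removed open-coded version above shows. A hedged userspace decoder (the local struct mirrors the kernel's scsi_idlun layout):

        #include <stdio.h>
        #include <sys/ioctl.h>

        #define SCSI_IOCTL_GET_IDLUN 0x5382

        struct scsi_idlun {
                unsigned int dev_id;    /* id | lun<<8 | channel<<16 | host<<24 */
                unsigned int host_unique_id;
        };

        static int print_hctl(int fd)
        {
                struct scsi_idlun v;

                if (ioctl(fd, SCSI_IOCTL_GET_IDLUN, &v) < 0)
                        return -1;
                printf("[%u:%u:%u:%u]\n", (v.dev_id >> 24) & 0xff,
                       (v.dev_id >> 16) & 0xff, v.dev_id & 0xff,
                       (v.dev_id >> 8) & 0xff);
                return 0;
        }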
index 7456a26..5726738 100644 (file)
@@ -119,13 +119,15 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
 
 static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 {
-       if (cmd->request->rq_flags & RQF_DONTPREP) {
-               cmd->request->rq_flags &= ~RQF_DONTPREP;
+       struct request *rq = scsi_cmd_to_rq(cmd);
+
+       if (rq->rq_flags & RQF_DONTPREP) {
+               rq->rq_flags &= ~RQF_DONTPREP;
                scsi_mq_uninit_cmd(cmd);
        } else {
                WARN_ON_ONCE(true);
        }
-       blk_mq_requeue_request(cmd->request, true);
+       blk_mq_requeue_request(rq, true);
 }
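[Annotation] For reference, the scsi_cmd_to_rq() accessor these scsi_lib.c hunks convert to is, in this series, a thin typed wrapper over the blk-mq PDU conversion, roughly:

        static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
        {
                return blk_mq_rq_from_pdu(scmd);
        }

Because the request and the command are a single blk-mq allocation, this is also why dropping the redundant cmd->request and cmd->tag assignments later in this file is safe.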
 
 /**
@@ -164,7 +166,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
         */
        cmd->result = 0;
 
-       blk_mq_requeue_request(cmd->request, true);
+       blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true);
 }
 
 /**
@@ -478,7 +480,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 
 static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 {
-       if (!blk_rq_is_passthrough(cmd->request)) {
+       if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
                struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
 
                if (drv->uninit_command)
@@ -624,7 +626,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
 
 static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 {
-       struct request *req = cmd->request;
+       struct request *req = scsi_cmd_to_rq(cmd);
        unsigned long wait_for;
 
        if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
@@ -643,7 +645,7 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
 static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
 {
        struct request_queue *q = cmd->device->request_queue;
-       struct request *req = cmd->request;
+       struct request *req = scsi_cmd_to_rq(cmd);
        int level = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
@@ -818,7 +820,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
 {
        bool sense_valid;
        bool sense_current = true;      /* false implies "deferred sense" */
-       struct request *req = cmd->request;
+       struct request *req = scsi_cmd_to_rq(cmd);
        struct scsi_sense_hdr sshdr;
 
        sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -907,7 +909,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
-       struct request *req = cmd->request;
+       struct request *req = scsi_cmd_to_rq(cmd);
        blk_status_t blk_stat = BLK_STS_OK;
 
        if (unlikely(result))   /* a nz result may or may not be an error */
@@ -978,7 +980,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
 blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
        struct scatterlist *last_sg = NULL;
        blk_status_t ret;
@@ -1083,8 +1085,13 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
 static void scsi_initialize_rq(struct request *rq)
 {
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+       struct scsi_request *req = &cmd->req;
+
+       memset(req->__cmd, 0, sizeof(req->__cmd));
+       req->cmd = req->__cmd;
+       req->cmd_len = BLK_MAX_CDB;
+       req->sense_len = 0;
 
-       scsi_req_init(&cmd->req);
        init_rcu_head(&cmd->rcu);
        cmd->jiffies_at_alloc = jiffies;
        cmd->retries = 0;
@@ -1107,7 +1114,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
        void *buf = cmd->sense_buffer;
        void *prot = cmd->prot_sdb;
-       struct request *rq = blk_mq_rq_from_pdu(cmd);
+       struct request *rq = scsi_cmd_to_rq(cmd);
        unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
        unsigned long jiffies_at_alloc;
        int retries, to_clear;
@@ -1533,8 +1540,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
 
        scsi_init_command(sdev, cmd);
 
-       cmd->request = req;
-       cmd->tag = req->tag;
        cmd->prot_op = SCSI_PROT_NORMAL;
        if (blk_rq_bytes(req))
                cmd->sc_data_direction = rq_dma_dir(req);
@@ -1572,12 +1577,12 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
 
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
-       if (unlikely(blk_should_fake_timeout(cmd->request->q)))
+       if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
                return;
        if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
                return;
        trace_scsi_dispatch_cmd_done(cmd);
-       blk_mq_complete_request(cmd->request);
+       blk_mq_complete_request(scsi_cmd_to_rq(cmd));
 }
 
 static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
index 2317717..ed95722 100644 (file)
@@ -28,8 +28,9 @@ static void scsi_log_release_buffer(char *bufptr)
 
 static inline const char *scmd_name(const struct scsi_cmnd *scmd)
 {
-       return scmd->request->rq_disk ?
-               scmd->request->rq_disk->disk_name : NULL;
+       struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
+
+       return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
 }
 
 static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
@@ -91,7 +92,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
        if (!logbuf)
                return;
        off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd),
-                                scmd->request->tag);
+                                scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag);
        if (off < logbuf_len) {
                va_start(args, fmt);
                off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args);
@@ -188,7 +189,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
                return;
 
        off = sdev_format_header(logbuf, logbuf_len,
-                                scmd_name(cmd), cmd->request->tag);
+                                scmd_name(cmd), scsi_cmd_to_rq(cmd)->tag);
        if (off >= logbuf_len)
                goto out_printk;
        off += scnprintf(logbuf + off, logbuf_len - off, "CDB: ");
@@ -210,7 +211,7 @@ void scsi_print_command(struct scsi_cmnd *cmd)
 
                        off = sdev_format_header(logbuf, logbuf_len,
                                                 scmd_name(cmd),
-                                                cmd->request->tag);
+                                                scsi_cmd_to_rq(cmd)->tag);
                        if (!WARN_ON(off > logbuf_len - 58)) {
                                off += scnprintf(logbuf + off, logbuf_len - off,
                                                 "CDB[%02x]: ", k);
@@ -373,7 +374,8 @@ EXPORT_SYMBOL(__scsi_print_sense);
 /* Normalize and print sense buffer in SCSI command */
 void scsi_print_sense(const struct scsi_cmnd *cmd)
 {
-       scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag,
+       scsi_log_print_sense(cmd->device, scmd_name(cmd),
+                            scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag,
                             cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
 }
 EXPORT_SYMBOL(scsi_print_sense);
@@ -391,8 +393,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
        if (!logbuf)
                return;
 
-       off = sdev_format_header(logbuf, logbuf_len,
-                                scmd_name(cmd), cmd->request->tag);
+       off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd),
+                                scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag);
 
        if (off >= logbuf_len)
                goto out_printk;
index eae2235..6d91520 100644 (file)
@@ -7,6 +7,7 @@
 #include <scsi/scsi_device.h>
 #include <linux/sbitmap.h>
 
+struct bsg_device;
 struct request_queue;
 struct request;
 struct scsi_cmnd;
@@ -180,6 +181,8 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
 static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
 #endif
 
+struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev);
+
 extern int scsi_device_max_queue_depth(struct scsi_device *sdev);
 
 /* 
index 5b6996a..fe22191 100644 (file)
@@ -267,6 +267,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
         */
        sdev->borken = 1;
 
+       sdev->sg_reserved_size = INT_MAX;
+
        q = blk_mq_init_queue(&sdev->host->tag_set);
        if (IS_ERR(q)) {
                /* release fn is set up in scsi_sysfs_device_initialise, so
@@ -974,6 +976,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        if (*bflags & BLIST_UNMAP_LIMIT_WS)
                sdev->unmap_limit_for_ws = 1;
 
+       if (*bflags & BLIST_IGN_MEDIA_CHANGE)
+               sdev->ignore_media_change = 1;
+
        sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
 
        if (*bflags & BLIST_TRY_VPD_PAGES)
index c0d3111..8679325 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
+#include <linux/bsg.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -1333,7 +1334,6 @@ static int scsi_target_add(struct scsi_target *starget)
 int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 {
        int error, i;
-       struct request_queue *rq = sdev->request_queue;
        struct scsi_target *starget = sdev->sdev_target;
 
        error = scsi_target_add(starget);
@@ -1372,12 +1372,19 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
        transport_add_device(&sdev->sdev_gendev);
        sdev->is_visible = 1;
 
-       error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
-       if (error)
-               /* we're treating error on bsg register as non-fatal,
-                * so pretend nothing went wrong */
-               sdev_printk(KERN_INFO, sdev,
-                           "Failed to register bsg queue, errno=%d\n", error);
+       if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
+               sdev->bsg_dev = scsi_bsg_register_queue(sdev);
+               if (IS_ERR(sdev->bsg_dev)) {
+                       /*
+                        * We're treating error on bsg register as non-fatal, so
+                        * pretend nothing went wrong.
+                        */
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Failed to register bsg queue, errno=%d\n",
+                                   (int)PTR_ERR(sdev->bsg_dev));
+                       sdev->bsg_dev = NULL;
+               }
+       }
 
        /* add additional host specific attributes */
        if (sdev->host->hostt->sdev_attrs) {
@@ -1439,7 +1446,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
                        sysfs_remove_groups(&sdev->sdev_gendev.kobj,
                                        sdev->host->hostt->sdev_groups);
 
-               bsg_unregister_queue(sdev->request_queue);
+               if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev)
+                       bsg_unregister_queue(sdev->bsg_dev);
                device_unregister(&sdev->sdev_dev);
                transport_remove_device(dev);
                device_del(dev);
index 49748cd..60e406b 100644 (file)
@@ -3804,7 +3804,7 @@ bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
        struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
 
        if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
-               (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
+               (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
                set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
                return false;
        }
index 5af7a10..bd72c38 100644 (file)
@@ -1230,7 +1230,7 @@ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
 {
         if (cmd->flags & SCMD_TAGGED) {
                *msg++ = SIMPLE_QUEUE_TAG;
-               *msg++ = cmd->request->tag;
+               *msg++ = scsi_cmd_to_rq(cmd)->tag;
                return 2;
        }
 
index 610ebba..cbd9999 100644 (file)
@@ -110,6 +110,7 @@ static void sd_shutdown(struct device *);
 static int sd_suspend_system(struct device *);
 static int sd_suspend_runtime(struct device *);
 static int sd_resume(struct device *);
+static int sd_resume_runtime(struct device *);
 static void sd_rescan(struct device *);
 static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
@@ -605,7 +606,7 @@ static const struct dev_pm_ops sd_pm_ops = {
        .poweroff               = sd_suspend_system,
        .restore                = sd_resume,
        .runtime_suspend        = sd_suspend_runtime,
-       .runtime_resume         = sd_resume,
+       .runtime_resume         = sd_resume_runtime,
 };
 
 static struct scsi_driver sd_template = {
@@ -776,8 +777,9 @@ static unsigned int sd_prot_flag_mask(unsigned int prot_op)
 static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
                                           unsigned int dix, unsigned int dif)
 {
-       struct bio *bio = scmd->request->bio;
-       unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
+       struct request *rq = scsi_cmd_to_rq(scmd);
+       struct bio *bio = rq->bio;
+       unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
        unsigned int protect = 0;
 
        if (dix) {                              /* DIX Type 0, 1, 2, 3 */
@@ -868,7 +870,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdp = cmd->device;
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -904,7 +906,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
                bool unmap)
 {
        struct scsi_device *sdp = cmd->device;
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -936,7 +938,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
                bool unmap)
 {
        struct scsi_device *sdp = cmd->device;
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -966,7 +968,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
 
 static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_device *sdp = cmd->device;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
@@ -1063,7 +1065,7 @@ out:
  **/
 static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_device *sdp = cmd->device;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        struct bio *bio = rq->bio;
@@ -1112,7 +1114,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 
        /* flush requests don't perform I/O, zero the S/G table */
@@ -1210,7 +1212,7 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
 
 static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_device *sdp = cmd->device;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
@@ -1324,7 +1326,7 @@ fail:
 
 static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
 
        switch (req_op(rq)) {
        case REQ_OP_DISCARD:
@@ -1370,7 +1372,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
-       struct request *rq = SCpnt->request;
+       struct request *rq = scsi_cmd_to_rq(SCpnt);
        u8 *cmnd;
 
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
@@ -1530,11 +1532,11 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 }
 
 /**
- *     sd_ioctl_common - process an ioctl
+ *     sd_ioctl - process an ioctl
  *     @bdev: target block device
  *     @mode: FMODE_* mask
  *     @cmd: ioctl command number
- *     @p: this is third argument given to ioctl(2) system call.
+ *     @arg: the third argument given to the ioctl(2) system call.
  *     Often contains a pointer.
  *
  *     Returns 0 if successful (some ioctls return positive numbers on
@@ -1543,20 +1545,20 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 *     Note: most ioctls are forwarded to the block subsystem or further
  *     down in the scsi subsystem.
  **/
-static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
-                          unsigned int cmd, void __user *p)
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+                   unsigned int cmd, unsigned long arg)
 {
        struct gendisk *disk = bdev->bd_disk;
        struct scsi_disk *sdkp = scsi_disk(disk);
        struct scsi_device *sdp = sdkp->device;
+       void __user *p = (void __user *)arg;
        int error;
     
        SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
                                    "cmd=0x%x\n", disk->disk_name, cmd));
 
-       error = scsi_verify_blk_ioctl(bdev, cmd);
-       if (error < 0)
-               return error;
+       if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+               return -ENOIOCTLCMD;
 
        /*
         * If we are in the middle of error recovery, don't let anyone
@@ -1567,27 +1569,11 @@ static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
        error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
                        (mode & FMODE_NDELAY) != 0);
        if (error)
-               goto out;
+               return error;
 
        if (is_sed_ioctl(cmd))
                return sed_ioctl(sdkp->opal_dev, cmd, p);
-
-       /*
-        * Send SCSI addressing ioctls directly to mid level, send other
-        * ioctls to block level and then onto mid level if they can't be
-        * resolved.
-        */
-       switch (cmd) {
-               case SCSI_IOCTL_GET_IDLUN:
-               case SCSI_IOCTL_GET_BUS_NUMBER:
-                       error = scsi_ioctl(sdp, cmd, p);
-                       break;
-               default:
-                       error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
-                       break;
-       }
-out:
-       return error;
+       return scsi_ioctl(sdp, disk, mode, cmd, p);
 }
 
 static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1770,34 +1756,6 @@ static void sd_rescan(struct device *dev)
        sd_revalidate_disk(sdkp->disk);
 }
 
-static int sd_ioctl(struct block_device *bdev, fmode_t mode,
-                   unsigned int cmd, unsigned long arg)
-{
-       void __user *p = (void __user *)arg;
-       int ret;
-
-       ret = sd_ioctl_common(bdev, mode, cmd, p);
-       if (ret != -ENOTTY)
-               return ret;
-
-       return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
-}
-
-#ifdef CONFIG_COMPAT
-static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
-                          unsigned int cmd, unsigned long arg)
-{
-       void __user *p = compat_ptr(arg);
-       int ret;
-
-       ret = sd_ioctl_common(bdev, mode, cmd, p);
-       if (ret != -ENOTTY)
-               return ret;
-
-       return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
-}
-#endif
-
 static char sd_pr_type(enum pr_type type)
 {
        switch (type) {
@@ -1898,9 +1856,7 @@ static const struct block_device_operations sd_fops = {
        .release                = sd_release,
        .ioctl                  = sd_ioctl,
        .getgeo                 = sd_getgeo,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl           = sd_compat_ioctl,
-#endif
+       .compat_ioctl           = blkdev_compat_ptr_ioctl,
        .check_events           = sd_check_events,
        .unlock_native_capacity = sd_unlock_native_capacity,
        .report_zones           = sd_zbc_report_zones,
@@ -1921,7 +1877,7 @@ static const struct block_device_operations sd_fops = {
  **/
 static void sd_eh_reset(struct scsi_cmnd *scmd)
 {
-       struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+       struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
 
        /* New SCSI EH run, reset gate variable */
        sdkp->ignore_medium_access_errors = false;
@@ -1941,7 +1897,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
  **/
 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
 {
-       struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+       struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
        struct scsi_device *sdev = scmd->device;
 
        if (!scsi_device_online(sdev) ||
@@ -1982,7 +1938,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
 
 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 {
-       struct request *req = scmd->request;
+       struct request *req = scsi_cmd_to_rq(scmd);
        struct scsi_device *sdev = scmd->device;
        unsigned int transferred, good_bytes;
        u64 start_lba, end_lba, bad_lba;
@@ -2037,8 +1993,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
        unsigned int sector_size = SCpnt->device->sector_size;
        unsigned int resid;
        struct scsi_sense_hdr sshdr;
-       struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
-       struct request *req = SCpnt->request;
+       struct request *req = scsi_cmd_to_rq(SCpnt);
+       struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
        int sense_valid = 0;
        int sense_deferred = 0;
 
@@ -2181,8 +2137,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                         * doesn't have any media in it, don't bother
                         * with any more polling.
                         */
-                       if (media_not_present(sdkp, &sshdr))
+                       if (media_not_present(sdkp, &sshdr)) {
+                               sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
                                return;
+                       }
 
                        if (the_result)
                                sense_valid = scsi_sense_valid(&sshdr);
@@ -3718,6 +3676,25 @@ static int sd_resume(struct device *dev)
        return ret;
 }
 
+static int sd_resume_runtime(struct device *dev)
+{
+       struct scsi_disk *sdkp = dev_get_drvdata(dev);
+       struct scsi_device *sdp = sdkp->device;
+
+       if (sdp->ignore_media_change) {
+               /* clear the device's sense data */
+               static const u8 cmd[10] = { REQUEST_SENSE };
+
+               if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
+                                NULL, sdp->request_queue->rq_timeout, 1, 0,
+                                RQF_PM, NULL))
+                       sd_printk(KERN_NOTICE, sdkp,
+                                 "Failed to clear sense data\n");
+       }
+
+       return sd_resume(dev);
+}
+
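[Annotation] A note on the REQUEST SENSE above: the CDB array is declared as 10 bytes, but scsi_execute() sends only COMMAND_SIZE(REQUEST_SENSE) == 6 of them, and an allocation length of zero transfers no data while still consuming the pending sense (typically the UNIT ATTENTION raised by a spurious media-change event). A hedged equivalent with an explicitly sized CDB:

        static const u8 request_sense_cdb[6] = {
                REQUEST_SENSE,
                0, 0, 0,
                0,      /* allocation length 0: return nothing, just clear sense */
                0,      /* control */
        };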
 /**
  *     init_sd - entry point for this driver (both when built in or when
  *     a module).
index 186b5ff..b9757f2 100644 (file)
@@ -243,7 +243,7 @@ out:
 
 static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t sector = blk_rq_pos(rq);
 
@@ -321,7 +321,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
 blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
                                        unsigned int nr_blocks)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        unsigned int wp_offset, zno = blk_rq_zone_no(rq);
        unsigned long flags;
@@ -386,7 +386,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
 blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
                                         unsigned char op, bool all)
 {
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        sector_t sector = blk_rq_pos(rq);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        sector_t block = sectors_to_logical(sdkp->device, sector);
@@ -442,7 +442,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
                                          unsigned int good_bytes)
 {
        int result = cmd->result;
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        unsigned int zno = blk_rq_zone_no(rq);
        enum req_opf op = req_op(rq);
@@ -516,7 +516,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
                     struct scsi_sense_hdr *sshdr)
 {
        int result = cmd->result;
-       struct request *rq = cmd->request;
+       struct request *rq = scsi_cmd_to_rq(cmd);
 
        if (op_is_zone_mgmt(req_op(rq)) &&
            result &&
index d5889b4..8f05248 100644 (file)
@@ -237,8 +237,9 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
 
        if (sfp->parentdp->device->type == TYPE_SCANNER)
                return 0;
-
-       return blk_verify_command(cmd, filp->f_mode);
+       if (!scsi_cmd_allowed(cmd, filp->f_mode))
+               return -EPERM;
+       return 0;
 }
 
 static int
@@ -1107,7 +1108,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
        case SCSI_IOCTL_SEND_COMMAND:
                if (atomic_read(&sdp->detaching))
                        return -ENODEV;
-               return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
+               return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
        case SG_SET_DEBUG:
                result = get_user(val, ip);
                if (result)
@@ -1163,28 +1164,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
        ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
        if (ret != -ENOIOCTLCMD)
                return ret;
-
-       return scsi_ioctl(sdp->device, cmd_in, p);
-}
-
-#ifdef CONFIG_COMPAT
-static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
-{
-       void __user *p = compat_ptr(arg);
-       Sg_device *sdp;
-       Sg_fd *sfp;
-       int ret;
-
-       if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
-               return -ENXIO;
-
-       ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
-       if (ret != -ENOIOCTLCMD)
-               return ret;
-
-       return scsi_compat_ioctl(sdp->device, cmd_in, p);
+       return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
 }
-#endif
 
 static __poll_t
 sg_poll(struct file *filp, poll_table * wait)
@@ -1439,9 +1420,7 @@ static const struct file_operations sg_fops = {
        .write = sg_write,
        .poll = sg_poll,
        .unlocked_ioctl = sg_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = sg_compat_ioctl,
-#endif
+       .compat_ioctl = compat_ptr_ioctl,
        .open = sg_open,
        .mmap = sg_mmap,
        .release = sg_release,
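[Annotation] sg_compat_ioctl() can go away because the remaining sg ioctls either take pointer arguments that only need compat_ptr() conversion or handle 32-bit layouts inside sg_ioctl_common() itself. A sketch of what the generic helper does (per fs/ioctl.c):

        long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        {
                if (!file->f_op->unlocked_ioctl)
                        return -ENOIOCTLCMD;
                return file->f_op->unlocked_ioctl(file, cmd,
                                        (unsigned long)compat_ptr(arg));
        }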
index cb9e4e9..6f83e2d 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Kernel configuration file for the SMARTPQI
 #
-# Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
 # Copyright (c) 2017-2018 Microsemi Corporation
 # Copyright (c) 2016 Microsemi Corporation
 # Copyright (c) 2016 PMC-Sierra, Inc.
 # HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 
 config SCSI_SMARTPQI
-       tristate "Microsemi PQI Driver"
+       tristate "Microchip PQI Driver"
        depends on PCI && SCSI && !S390
        select SCSI_SAS_ATTRS
        select RAID_ATTRS
        help
-       This driver supports Microsemi PQI controllers.
+       This driver supports Microchip PQI controllers.
 
-       <http://www.microsemi.com>
+       <http://www.microchip.com>
 
        To compile this driver as a module, choose M here: the
        module will be called smartpqi.
index d7dac55..70eca20 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    driver for Microchip PQI-based storage controllers
+ *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
  *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
@@ -59,7 +59,7 @@ struct pqi_device_registers {
 /*
  * controller registers
  *
- * These are defined by the Microsemi implementation.
+ * These are defined by the Microchip implementation.
  *
  * Some registers (those named sis_*) are only used when in
  * legacy SIS mode before we transition the controller into
@@ -415,7 +415,7 @@ struct pqi_event_config {
        u8      reserved[2];
        u8      num_event_descriptors;
        u8      reserved1;
-       struct pqi_event_descriptor descriptors[1];
+       struct pqi_event_descriptor descriptors[];
 };
 
 #define PQI_MAX_EVENT_DESCRIPTORS      255
index dcc0b96..ecb2af3 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    driver for Microchip PQI-based storage controllers
+ *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
  *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION         "2.1.8-045"
+#define DRIVER_VERSION         "2.1.10-020"
 #define DRIVER_MAJOR           2
 #define DRIVER_MINOR           1
-#define DRIVER_RELEASE         8
-#define DRIVER_REVISION                45
+#define DRIVER_RELEASE         10
+#define DRIVER_REVISION                20
 
-#define DRIVER_NAME            "Microsemi PQI Driver (v" \
+#define DRIVER_NAME            "Microchip SmartPQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
 #define DRIVER_NAME_SHORT      "smartpqi"
 
@@ -48,8 +48,8 @@
 #define PQI_POST_RESET_DELAY_SECS                      5
 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS     10
 
-MODULE_AUTHOR("Microsemi");
-MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+MODULE_AUTHOR("Microchip");
+MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
        DRIVER_VERSION);
 MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL");
@@ -1322,6 +1322,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
                                "requested %u bytes, received %u bytes\n",
                                raid_map_size,
                                get_unaligned_le32(&raid_map->structure_size));
+                       rc = -EINVAL;
                        goto error;
                }
        }
@@ -4740,8 +4741,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
 }
 
 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH  \
-       (offsetof(struct pqi_event_config, descriptors) + \
-       (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+       struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
 
 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
        bool enable_events)
@@ -5568,7 +5568,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
 {
        u16 hw_queue;
 
-       hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+       hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
        if (hw_queue > ctrl_info->max_hw_queue_index)
                hw_queue = 0;
 
@@ -5577,7 +5577,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
 
 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
 {
-       if (blk_rq_is_passthrough(scmd->request))
+       if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
                return false;
 
        return scmd->SCp.this_residual == 0;
@@ -6033,8 +6033,10 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
        mutex_lock(&ctrl_info->lun_reset_mutex);
 
        dev_err(&ctrl_info->pci_dev->dev,
-               "resetting scsi %d:%d:%d:%d\n",
-               shost->host_no, device->bus, device->target, device->lun);
+               "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
+               shost->host_no,
+               device->bus, device->target, device->lun,
+               scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
 
        pqi_check_ctrl_health(ctrl_info);
        if (pqi_ctrl_offline(ctrl_info))
@@ -7758,11 +7760,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 
        pqi_init_operational_queues(ctrl_info);
 
-       rc = pqi_request_irqs(ctrl_info);
+       rc = pqi_create_queues(ctrl_info);
        if (rc)
                return rc;
 
-       rc = pqi_create_queues(ctrl_info);
+       rc = pqi_request_irqs(ctrl_info);
        if (rc)
                return rc;
 
@@ -8451,7 +8453,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
        if (id->driver_data)
                ctrl_description = (char *)id->driver_data;
        else
-               ctrl_description = "Microsemi Smart Family Controller";
+               ctrl_description = "Microchip Smart Family Controller";
 
        dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
 }
@@ -8711,6 +8713,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x193d, 0x1107)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x1108)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x193d, 0x1109)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               0x193d, 0x8460)
@@ -9171,6 +9181,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_VENDOR_ID_GIGABYTE, 0x1000)
        },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1dfc, 0x3161)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1cf2, 0x5445)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1cf2, 0x5446)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1cf2, 0x5447)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1cf2, 0x0b27)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1cf2, 0x0b29)
+       },
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+                              0x1cf2, 0x0b45)
+       },
        {
                PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
                               PCI_ANY_ID, PCI_ANY_ID)
index dd628cc..afd9baf 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    driver for Microchip PQI-based storage controllers
+ *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
  *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
index c954620..d63c46a 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    driver for Microchip PQI-based storage controllers
+ *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
  *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
index 12cd2ab..d29c135 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    driver for Microchip PQI-based storage controllers
+ *    Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
  *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
index 6dd0ff1..43a9501 100644 (file)
@@ -33,7 +33,7 @@
 #include "snic_io.h"
 #include "snic.h"
 
-#define snic_cmd_tag(sc)       (((struct scsi_cmnd *) sc)->request->tag)
+#define snic_cmd_tag(sc)       (scsi_cmd_to_rq(sc)->tag)
 
 const char *snic_state_str[] = {
        [SNIC_INIT]     = "SNIC_INIT",
@@ -1636,7 +1636,7 @@ snic_abort_cmd(struct scsi_cmnd *sc)
        u32 start_time = jiffies;
 
        SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
-                      sc, sc->cmnd[0], sc->request, tag);
+                      sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);
 
        if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
                SNIC_HOST_ERR(snic->shost,
@@ -2152,7 +2152,7 @@ snic_device_reset(struct scsi_cmnd *sc)
        int dr_supp = 0;
 
        SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
-                     sc, sc->cmnd[0], sc->request,
+                     sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
                      snic_cmd_tag(sc));
        dr_supp = snic_dev_reset_supported(sc->device);
        if (!dr_supp) {
@@ -2335,7 +2335,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
        spin_lock_irqsave(&snic->snic_lock, flags);
        if (snic_get_state(snic) == SNIC_FWRESET) {
                spin_unlock_irqrestore(&snic->snic_lock, flags);
-               SNIC_HOST_INFO(shost, "reset:prev reset is in progres\n");
+               SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");
 
                msleep(SNIC_HOST_RESET_TIMEOUT);
                ret = SUCCESS;
@@ -2383,11 +2383,11 @@ snic_host_reset(struct scsi_cmnd *sc)
 {
        struct Scsi_Host *shost = sc->device->host;
        u32 start_time  = jiffies;
-       int ret = FAILED;
+       int ret;
 
        SNIC_SCSI_DBG(shost,
                      "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
-                     sc, sc->cmnd[0], sc->request,
+                     sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
                      snic_cmd_tag(sc), CMD_FLAGS(sc));
 
        ret = snic_reset(shost, sc);
@@ -2494,7 +2494,7 @@ cleanup:
                sc->result = DID_TRANSPORT_DISRUPTED << 16;
                SNIC_HOST_INFO(snic->shost,
                               "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
-                              sc, sc->request->tag, CMD_FLAGS(sc), rqi,
+                              sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
                               jiffies_to_msecs(jiffies - st_time));
 
                /* Update IO stats */
index 2942a4e..8b17b35 100644 (file)
@@ -122,6 +122,8 @@ static void get_capabilities(struct scsi_cd *);
 static unsigned int sr_check_events(struct cdrom_device_info *cdi,
                                    unsigned int clearing, int slot);
 static int sr_packet(struct cdrom_device_info *, struct packet_command *);
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+               u32 lba, u32 nr, u8 *last_sense);
 
 static const struct cdrom_device_ops sr_dops = {
        .open                   = sr_open,
@@ -135,8 +137,9 @@ static const struct cdrom_device_ops sr_dops = {
        .get_mcn                = sr_get_mcn,
        .reset                  = sr_reset,
        .audio_ioctl            = sr_audio_ioctl,
-       .capability             = SR_CAPABILITIES,
        .generic_packet         = sr_packet,
+       .read_cdda_bpc          = sr_read_cdda_bpc,
+       .capability             = SR_CAPABILITIES,
 };
 
 static void sr_kref_release(struct kref *kref);
@@ -330,7 +333,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
        int good_bytes = (result == 0 ? this_count : 0);
        int block_sectors = 0;
        long error_sector;
-       struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+       struct request *rq = scsi_cmd_to_rq(SCpnt);
+       struct scsi_cd *cd = scsi_cd(rq->rq_disk);
 
 #ifdef DEBUG
        scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -352,16 +356,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)
                                break;
                        error_sector =
                                get_unaligned_be32(&SCpnt->sense_buffer[3]);
-                       if (SCpnt->request->bio != NULL)
-                               block_sectors =
-                                       bio_sectors(SCpnt->request->bio);
+                       if (rq->bio != NULL)
+                               block_sectors = bio_sectors(rq->bio);
                        if (block_sectors < 4)
                                block_sectors = 4;
                        if (cd->device->sector_size == 2048)
                                error_sector <<= 2;
                        error_sector &= ~(block_sectors - 1);
-                       good_bytes = (error_sector -
-                                     blk_rq_pos(SCpnt->request)) << 9;
+                       good_bytes = (error_sector - blk_rq_pos(rq)) << 9;
                        if (good_bytes < 0 || good_bytes >= this_count)
                                good_bytes = 0;
                        /*
@@ -393,7 +395,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
 {
        int block = 0, this_count, s_size;
        struct scsi_cd *cd;
-       struct request *rq = SCpnt->request;
+       struct request *rq = scsi_cmd_to_rq(SCpnt);
        blk_status_t ret;
 
        ret = scsi_alloc_sgtables(SCpnt);
@@ -558,53 +560,14 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
 static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                          unsigned long arg)
 {
-       struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+       struct gendisk *disk = bdev->bd_disk;
+       struct scsi_cd *cd = scsi_cd(disk);
        struct scsi_device *sdev = cd->device;
        void __user *argp = (void __user *)arg;
        int ret;
 
-       mutex_lock(&cd->lock);
-
-       ret = scsi_ioctl_block_when_processing_errors(sdev, cmd,
-                       (mode & FMODE_NDELAY) != 0);
-       if (ret)
-               goto out;
-
-       scsi_autopm_get_device(sdev);
-
-       /*
-        * Send SCSI addressing ioctls directly to mid level, send other
-        * ioctls to cdrom/block level.
-        */
-       switch (cmd) {
-       case SCSI_IOCTL_GET_IDLUN:
-       case SCSI_IOCTL_GET_BUS_NUMBER:
-               ret = scsi_ioctl(sdev, cmd, argp);
-               goto put;
-       }
-
-       ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
-       if (ret != -ENOSYS)
-               goto put;
-
-       ret = scsi_ioctl(sdev, cmd, argp);
-
-put:
-       scsi_autopm_put_device(sdev);
-
-out:
-       mutex_unlock(&cd->lock);
-       return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
-                         unsigned long arg)
-{
-       struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
-       struct scsi_device *sdev = cd->device;
-       void __user *argp = compat_ptr(arg);
-       int ret;
+       if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
+               return -ENOIOCTLCMD;
 
        mutex_lock(&cd->lock);
 
@@ -615,32 +578,19 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign
 
        scsi_autopm_get_device(sdev);
 
-       /*
-        * Send SCSI addressing ioctls directly to mid level, send other
-        * ioctls to cdrom/block level.
-        */
-       switch (cmd) {
-       case SCSI_IOCTL_GET_IDLUN:
-       case SCSI_IOCTL_GET_BUS_NUMBER:
-               ret = scsi_compat_ioctl(sdev, cmd, argp);
-               goto put;
+       if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
+               ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
+               if (ret != -ENOSYS)
+                       goto put;
        }
-
-       ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp);
-       if (ret != -ENOSYS)
-               goto put;
-
-       ret = scsi_compat_ioctl(sdev, cmd, argp);
+       ret = scsi_ioctl(sdev, disk, mode, cmd, argp);
 
 put:
        scsi_autopm_put_device(sdev);
-
 out:
        mutex_unlock(&cd->lock);
        return ret;
-
 }
-#endif
 
 static unsigned int sr_block_check_events(struct gendisk *disk,
                                          unsigned int clearing)
@@ -665,9 +615,7 @@ static const struct block_device_operations sr_bdops =
        .open           = sr_block_open,
        .release        = sr_block_release,
        .ioctl          = sr_block_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl   = sr_block_compat_ioctl,
-#endif
+       .compat_ioctl   = blkdev_compat_ptr_ioctl,
        .check_events   = sr_block_check_events,
 };
 
@@ -1008,6 +956,57 @@ static int sr_packet(struct cdrom_device_info *cdi,
        return cgc->stat;
 }
 
+static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
+               u32 lba, u32 nr, u8 *last_sense)
+{
+       struct gendisk *disk = cdi->disk;
+       u32 len = nr * CD_FRAMESIZE_RAW;
+       struct scsi_request *req;
+       struct request *rq;
+       struct bio *bio;
+       int ret;
+
+       rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+       req = scsi_req(rq);
+
+       ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
+       if (ret)
+               goto out_put_request;
+
+       req->cmd[0] = GPCMD_READ_CD;
+       req->cmd[1] = 1 << 2;
+       req->cmd[2] = (lba >> 24) & 0xff;
+       req->cmd[3] = (lba >> 16) & 0xff;
+       req->cmd[4] = (lba >>  8) & 0xff;
+       req->cmd[5] = lba & 0xff;
+       req->cmd[6] = (nr >> 16) & 0xff;
+       req->cmd[7] = (nr >>  8) & 0xff;
+       req->cmd[8] = nr & 0xff;
+       req->cmd[9] = 0xf8;
+       req->cmd_len = 12;
+       rq->timeout = 60 * HZ;
+       bio = rq->bio;
+
+       blk_execute_rq(disk, rq, 0);
+       if (scsi_req(rq)->result) {
+               struct scsi_sense_hdr sshdr;
+
+               scsi_normalize_sense(req->sense, req->sense_len,
+                                    &sshdr);
+               *last_sense = sshdr.sense_key;
+               ret = -EIO;
+       }
+
+       if (blk_rq_unmap_user(bio))
+               ret = -EFAULT;
+out_put_request:
+       blk_put_request(rq);
+       return ret;
+}
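[Annotation] The new hook maps the user buffer straight into the request with blk_rq_map_user(), so raw CD-DA frames move without a kernel bounce buffer. A hedged sketch of how the cdrom core might consume it, chunking a large read (loop shape and 8-frame chunk size are illustrative, not the real drivers/cdrom/cdrom.c):

        static int read_cdda_chunked(struct cdrom_device_info *cdi,
                                     void __user *ubuf, u32 lba, u32 nframes)
        {
                u8 last_sense = 0;

                while (nframes) {
                        u32 nr = min_t(u32, nframes, 8);    /* frames per request */
                        int ret = cdi->ops->read_cdda_bpc(cdi, ubuf, lba, nr,
                                                          &last_sense);
                        if (ret)
                                return ret;
                        lba += nr;
                        nframes -= nr;
                        ubuf += nr * CD_FRAMESIZE_RAW;
                }
                return 0;
        }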
+
 /**
  *     sr_kref_release - Called to free the scsi_cd structure
  *     @kref: pointer to embedded kref
index d1abc02..9d04929 100644 (file)
@@ -3494,8 +3494,9 @@ out:
 
 
 /* The ioctl command */
-static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p)
+static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
 {
+       void __user *p = (void __user *)arg;
        int i, cmd_nr, cmd_type, bt;
        int retval = 0;
        unsigned int blk;
@@ -3815,74 +3816,44 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user
                goto out;
        }
        mutex_unlock(&STp->lock);
-       switch (cmd_in) {
-               case SCSI_IOCTL_STOP_UNIT:
-                       /* unload */
-                       retval = scsi_ioctl(STp->device, cmd_in, p);
-                       if (!retval) {
-                               STp->rew_at_close = 0;
-                               STp->ready = ST_NO_TAPE;
-                       }
-                       return retval;
 
-               case SCSI_IOCTL_GET_IDLUN:
-               case SCSI_IOCTL_GET_BUS_NUMBER:
-                       break;
+       switch (cmd_in) {
+       case SG_IO:
+       case SCSI_IOCTL_SEND_COMMAND:
+       case CDROM_SEND_PACKET:
+               if (!capable(CAP_SYS_RAWIO))
+                       return -EPERM;
+               break;
+       default:
+               break;
+       }
 
-               default:
-                       if ((cmd_in == SG_IO ||
-                            cmd_in == SCSI_IOCTL_SEND_COMMAND ||
-                            cmd_in == CDROM_SEND_PACKET) &&
-                           !capable(CAP_SYS_RAWIO))
-                               i = -EPERM;
-                       else
-                               i = scsi_cmd_ioctl(STp->device->request_queue,
-                                                  NULL, file->f_mode, cmd_in,
-                                                  p);
-                       if (i != -ENOTTY)
-                               return i;
-                       break;
+       retval = scsi_ioctl(STp->device, NULL, file->f_mode, cmd_in, p);
+       if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
+               /* unload */
+               STp->rew_at_close = 0;
+               STp->ready = ST_NO_TAPE;
        }
-       return -ENOTTY;
+       return retval;
 
  out:
        mutex_unlock(&STp->lock);
        return retval;
 }
 
-static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
-{
-       void __user *p = (void __user *)arg;
-       struct scsi_tape *STp = file->private_data;
-       int ret;
-
-       ret = st_ioctl_common(file, cmd_in, p);
-       if (ret != -ENOTTY)
-               return ret;
-
-       return scsi_ioctl(STp->device, cmd_in, p);
-}
-
 #ifdef CONFIG_COMPAT
 static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
 {
-       void __user *p = compat_ptr(arg);
-       struct scsi_tape *STp = file->private_data;
-       int ret;
-
        /* argument conversion is handled using put_user_mtpos/put_user_mtget */
        switch (cmd_in) {
        case MTIOCPOS32:
-               return st_ioctl_common(file, MTIOCPOS, p);
+               cmd_in = MTIOCPOS;
+               break;
        case MTIOCGET32:
-               return st_ioctl_common(file, MTIOCGET, p);
+               cmd_in = MTIOCGET;
+               break;
        }
 
-       ret = st_ioctl_common(file, cmd_in, p);
-       if (ret != -ENOTTY)
-               return ret;
-
-       return scsi_compat_ioctl(STp->device, cmd_in, p);
+       return st_ioctl(file, cmd_in, arg);
 }
 #endif
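
After this rework every command that st does not recognise funnels through the
common scsi_ioctl() path, with the CAP_SYS_RAWIO check applied up front for the
passthrough commands. A minimal userspace sketch of the tape ioctl path,
illustrative only (/dev/nst0 is an assumed device node):

	/* Illustrative userspace sketch, not part of the patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>
	#include <unistd.h>

	int main(void)
	{
		struct mtget status;
		int fd = open("/dev/nst0", O_RDONLY);	/* assumed device node */

		if (fd < 0 || ioctl(fd, MTIOCGET, &status) < 0) {
			perror("MTIOCGET");
			return 1;
		}
		printf("drive type 0x%lx, file number %d\n",
		       status.mt_type, status.mt_fileno);
		close(fd);
		return 0;
	}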
 
index 491b435..f1ba7f5 100644 (file)
@@ -540,7 +540,7 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
        msg_h = (struct st_msg_header *)req - 1;
        if (likely(cmd)) {
                msg_h->channel = (u8)cmd->device->channel;
-               msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+               msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
        }
        addr = hba->dma_handle + hba->req_head * hba->rq_size;
        addr += (hba->ccb[tag].sg_count+4)/11;
@@ -690,7 +690,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 
        cmd->scsi_done = done;
 
-       tag = cmd->request->tag;
+       tag = scsi_cmd_to_rq(cmd)->tag;
 
        if (unlikely(tag >= host->can_queue))
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -1246,7 +1246,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
 {
        struct Scsi_Host *host = cmd->device->host;
        struct st_hba *hba = (struct st_hba *)host->hostdata;
-       u16 tag = cmd->request->tag;
+       u16 tag = scsi_cmd_to_rq(cmd)->tag;
        void __iomem *base;
        u32 data;
        int result = SUCCESS;
index 37506b3..ebbbc12 100644 (file)
@@ -710,7 +710,7 @@ static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
         * Cannot return an ID of 0, which is reserved for an unsolicited
         * message from Hyper-V.
         */
-       return (u64)blk_mq_unique_tag(request->cmd->request) + 1;
+       return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
 }
 
 static void handle_sc_creation(struct vmbus_channel *new_sc)
@@ -1211,7 +1211,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
 
                storvsc_log(device, loglevel,
                        "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
-                       request->cmd->request->tag,
+                       scsi_cmd_to_rq(request->cmd)->tag,
                        stor_pkt->vm_srb.cdb[0],
                        vstor_packet->vm_srb.scsi_status,
                        vstor_packet->vm_srb.srb_status,
index 2e3fbc2..f7f724a 100644 (file)
@@ -336,7 +336,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
 {
        int wanted_len = cmd->SCp.this_residual;
 
-       if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
+       if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
                return 0;
 
        return wanted_len;
@@ -366,8 +366,9 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
 }
 
 /* clean up after our dma is done */
-static int sun3scsi_dma_finish(int write_flag)
+static int sun3scsi_dma_finish(enum dma_data_direction data_dir)
 {
+       const bool write_flag = data_dir == DMA_TO_DEVICE;
        unsigned short __maybe_unused count;
        unsigned short fifo;
        int ret = 0;
index 16b65fc..6d0b07b 100644 (file)
@@ -500,8 +500,8 @@ static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd,
         *  Shorten our settle_time if needed for 
         *  this command not to time out.
         */
-       if (np->s.settle_time_valid && cmd->request->timeout) {
-               unsigned long tlimit = jiffies + cmd->request->timeout;
+       if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) {
+               unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout;
                tlimit -= SYM_CONF_TIMER_INTERVAL*2;
                if (time_after(np->s.settle_time, tlimit)) {
                        np->s.settle_time = tlimit;
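
The recurring cmd->request -> scsi_cmd_to_rq(cmd) substitution in stex,
storvsc, sun3_scsi and sym53c8xx replaces the stored back-pointer with an
accessor: a SCSI command is allocated as the blk-mq PDU of its request, so the
request can be recovered by pointer arithmetic. Roughly (hedged sketch of the
accessor):

	/* Rough sketch of the new accessor: no back-pointer is stored, the
	 * request is recovered from the blk-mq PDU layout instead. */
	static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
	{
		return blk_mq_rq_from_pdu(scmd);
	}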
index 2d13795..432df76 100644 (file)
@@ -183,3 +183,19 @@ config SCSI_UFS_CRYPTO
          Enabling this makes it possible for the kernel to use the crypto
          capabilities of the UFS device (if present) to perform crypto
          operations on data being transferred to/from the device.
+
+config SCSI_UFS_HPB
+       bool "Support UFS Host Performance Booster"
+       depends on SCSI_UFSHCD
+       help
+         The UFS HPB feature improves random read performance. It caches
+         the UFS device's L2P (logical-to-physical) map in host DRAM. The driver
+         issues HPB READ commands that carry the physical page number, bypassing
+         the L2P address translation in the device's FTL (flash translation
+         layer).
+
+config SCSI_UFS_FAULT_INJECTION
+       bool "UFS Fault Injection Support"
+       depends on SCSI_UFSHCD && FAULT_INJECTION
+       help
+         Enable fault injection support in the UFS driver. This makes it easier
+         to test the UFS error handler and abort handler.
index 06f3a3f..c407da9 100644 (file)
@@ -8,6 +8,8 @@ ufshcd-core-y                           += ufshcd.o ufs-sysfs.o
 ufshcd-core-$(CONFIG_DEBUG_FS)         += ufs-debugfs.o
 ufshcd-core-$(CONFIG_SCSI_UFS_BSG)     += ufs_bsg.o
 ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO)  += ufshcd-crypto.o
+ufshcd-core-$(CONFIG_SCSI_UFS_HPB)     += ufshpb.o
+ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
 
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
index 908ff39..7da8be2 100644 (file)
@@ -318,11 +318,8 @@ static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
 }
 
 static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
-       .suspend         = ufshcd_pltfrm_suspend,
-       .resume          = ufshcd_pltfrm_resume,
-       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
-       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
-       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
 };
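
This PM ops consolidation repeats across the UFS glue drivers below: the
per-driver dev_get_drvdata() wrappers disappear because ufshcd_system_suspend(),
ufshcd_runtime_suspend() and friends now take a struct device * directly,
letting the generic macros name them verbatim. For reference, the sleep macro
of this era expands roughly to (simplified sketch of include/linux/pm.h):

	/* Simplified sketch of the include/linux/pm.h macro: callbacks are
	 * only filled in when CONFIG_PM_SLEEP is set, so the dev_pm_ops
	 * initializer needs no #ifdef of its own. */
	#ifdef CONFIG_PM_SLEEP
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
		.suspend = suspend_fn, \
		.resume = resume_fn, \
		.freeze = suspend_fn, \
		.thaw = resume_fn, \
		.poweroff = suspend_fn, \
		.restore = resume_fn,
	#else
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
	#endif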
index ec4589a..679289e 100644 (file)
@@ -23,31 +23,6 @@ static int tc_type = TC_G210_INV;
 module_param(tc_type, int, 0);
 MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
 
-static int tc_dwc_g210_pci_suspend(struct device *dev)
-{
-       return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_resume(struct device *dev)
-{
-       return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_suspend(struct device *dev)
-{
-       return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_resume(struct device *dev)
-{
-       return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-
-static int tc_dwc_g210_pci_runtime_idle(struct device *dev)
-{
-       return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-
 /*
  * struct ufs_hba_dwc_vops - UFS DWC specific variant operations
  */
@@ -143,11 +118,8 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
-       .suspend        = tc_dwc_g210_pci_suspend,
-       .resume         = tc_dwc_g210_pci_resume,
-       .runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
-       .runtime_resume  = tc_dwc_g210_pci_runtime_resume,
-       .runtime_idle    = tc_dwc_g210_pci_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
 };
index a1268e4..783ec43 100644 (file)
@@ -84,11 +84,8 @@ static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
 }
 
 static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
-       .suspend        = ufshcd_pltfrm_suspend,
-       .resume         = ufshcd_pltfrm_resume,
-       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
-       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
-       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
 };
 
 static struct platform_driver tc_dwc_g210_pltfm_driver = {
index cf46d6f..a14dd8c 100644 (file)
@@ -260,7 +260,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
        struct ufs_hba *hba = ufs->hba;
        struct list_head *head = &hba->clk_list_head;
        struct ufs_clk_info *clki;
-       u32 pclk_rate;
+       unsigned long pclk_rate;
        u32 f_min, f_max;
        u8 div = 0;
        int ret = 0;
@@ -299,7 +299,7 @@ static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
        }
 
        if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
-               dev_err(hba->dev, "not available pclk range %d\n", pclk_rate);
+               dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
                ret = -EINVAL;
                goto out;
        }
@@ -1287,11 +1287,8 @@ static const struct of_device_id exynos_ufs_of_match[] = {
 };
 
 static const struct dev_pm_ops exynos_ufs_pm_ops = {
-       .suspend        = ufshcd_pltfrm_suspend,
-       .resume         = ufshcd_pltfrm_resume,
-       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
-       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
-       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
 };
index 67505fe..dadf4fd 100644 (file)
@@ -184,7 +184,7 @@ struct exynos_ufs {
        u32 pclk_div;
        u32 pclk_avail_min;
        u32 pclk_avail_max;
-       u32 mclk_rate;
+       unsigned long mclk_rate;
        int avail_ln_rx;
        int avail_ln_tx;
        int rx_sel_idx;
diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/scsi/ufs/ufs-fault-injection.c
new file mode 100644 (file)
index 0000000..7ac7c4e
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+#include <linux/fault-inject.h>
+#include <linux/module.h>
+#include "ufs-fault-injection.h"
+
+static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
+static int ufs_fault_set(const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops ufs_fault_ops = {
+       .get = ufs_fault_get,
+       .set = ufs_fault_set,
+};
+
+enum { FAULT_INJ_STR_SIZE = 80 };
+
+/*
+ * For more details about fault injection, please refer to
+ * Documentation/fault-injection/fault-injection.rst.
+ */
+static char g_trigger_eh_str[FAULT_INJ_STR_SIZE];
+module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644);
+MODULE_PARM_DESC(trigger_eh,
+       "Fault injection. trigger_eh=<interval>,<probability>,<space>,<times>");
+static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr);
+
+static char g_timeout_str[FAULT_INJ_STR_SIZE];
+module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644);
+MODULE_PARM_DESC(timeout,
+       "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
+static DECLARE_FAULT_ATTR(ufs_timeout_attr);
+
+static int ufs_fault_get(char *buffer, const struct kernel_param *kp)
+{
+       const char *fault_str = kp->arg;
+
+       return sysfs_emit(buffer, "%s\n", fault_str);
+}
+
+static int ufs_fault_set(const char *val, const struct kernel_param *kp)
+{
+       struct fault_attr *attr = NULL;
+
+       if (kp->arg == g_trigger_eh_str)
+               attr = &ufs_trigger_eh_attr;
+       else if (kp->arg == g_timeout_str)
+               attr = &ufs_timeout_attr;
+
+       if (WARN_ON_ONCE(!attr))
+               return -EINVAL;
+
+       if (!setup_fault_attr(attr, (char *)val))
+               return -EINVAL;
+
+       strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE);
+
+       return 0;
+}
+
+bool ufs_trigger_eh(void)
+{
+       return should_fail(&ufs_trigger_eh_attr, 1);
+}
+
+bool ufs_fail_completion(void)
+{
+       return should_fail(&ufs_timeout_attr, 1);
+}
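
Both knobs take the standard fault-injection quadruple
<interval>,<probability>,<space>,<times>. Assuming the parameters surface under
/sys/module/ufshcd_core/parameters/ (the exact module name depends on how
ufshcd-core is built), arming the error-handler trigger could look like this
illustrative sketch:

	/* Illustrative userspace sketch; the sysfs path is an assumption.
	 * "1,100,0,3" = check every call, 100% probability, at most 3 faults. */
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/module/ufshcd_core/parameters/trigger_eh",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "1,100,0,3", 9);
		close(fd);
		return 0;
	}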
diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/scsi/ufs/ufs-fault-injection.h
new file mode 100644 (file)
index 0000000..6d0cd8e
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _UFS_FAULT_INJECTION_H
+#define _UFS_FAULT_INJECTION_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+bool ufs_trigger_eh(void);
+bool ufs_fail_completion(void);
+#else
+static inline bool ufs_trigger_eh(void)
+{
+       return false;
+}
+
+static inline bool ufs_fail_completion(void)
+{
+       return false;
+}
+#endif
+
+#endif /* _UFS_FAULT_INJECTION_H */
index 5b147a4..6b706de 100644 (file)
@@ -572,11 +572,8 @@ static int ufs_hisi_remove(struct platform_device *pdev)
 }
 
 static const struct dev_pm_ops ufs_hisi_pm_ops = {
-       .suspend        = ufshcd_pltfrm_suspend,
-       .resume         = ufshcd_pltfrm_resume,
-       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
-       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
-       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
 };
index d2c2516..80b3545 100644 (file)
@@ -1140,11 +1140,8 @@ static int ufs_mtk_remove(struct platform_device *pdev)
 }
 
 static const struct dev_pm_ops ufs_mtk_pm_ops = {
-       .suspend         = ufshcd_pltfrm_suspend,
-       .resume          = ufshcd_pltfrm_resume,
-       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
-       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
-       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
 };
index 9b1d18d..9d9770f 100644 (file)
@@ -1546,11 +1546,8 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
 #endif
 
 static const struct dev_pm_ops ufs_qcom_pm_ops = {
-       .suspend        = ufshcd_pltfrm_suspend,
-       .resume         = ufshcd_pltfrm_resume,
-       .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
-       .runtime_resume  = ufshcd_pltfrm_runtime_resume,
-       .runtime_idle    = ufshcd_pltfrm_runtime_idle,
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
        .prepare         = ufshcd_suspend_prepare,
        .complete        = ufshcd_resume_complete,
 };
index 52bd807..5c405ff 100644 (file)
@@ -604,6 +604,8 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
 UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
 UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
 UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2);
+UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1);
 UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
 UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
 UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
@@ -636,6 +638,8 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
        &dev_attr_number_of_secure_wpa.attr,
        &dev_attr_psa_max_data_size.attr,
        &dev_attr_psa_state_timeout.attr,
+       &dev_attr_hpb_version.attr,
+       &dev_attr_hpb_control.attr,
        &dev_attr_ext_feature_sup.attr,
        &dev_attr_wb_presv_us_en.attr,
        &dev_attr_wb_type.attr,
@@ -709,6 +713,10 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
        _ENM4_MAX_NUM_UNITS, 4);
 UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
        _ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1);
+UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2);
 UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
 UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
 UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
@@ -746,6 +754,10 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
        &dev_attr_enh3_memory_capacity_adjustment_factor.attr,
        &dev_attr_enh4_memory_max_alloc_units.attr,
        &dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+       &dev_attr_hpb_region_size.attr,
+       &dev_attr_hpb_number_lu.attr,
+       &dev_attr_hpb_subregion_size.attr,
+       &dev_attr_hpb_max_active_regions.attr,
        &dev_attr_wb_max_alloc_units.attr,
        &dev_attr_wb_max_wb_luns.attr,
        &dev_attr_wb_buff_cap_adj.attr,
@@ -1006,6 +1018,7 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
 UFS_FLAG(wb_enable, _WB_EN);
 UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
 UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
+UFS_FLAG(hpb_enable, _HPB_EN);
 
 static struct attribute *ufs_sysfs_device_flags[] = {
        &dev_attr_device_init.attr,
@@ -1019,6 +1032,7 @@ static struct attribute *ufs_sysfs_device_flags[] = {
        &dev_attr_wb_enable.attr,
        &dev_attr_wb_flush_en.attr,
        &dev_attr_wb_flush_during_h8.attr,
+       &dev_attr_hpb_enable.attr,
        NULL,
 };
 
@@ -1065,6 +1079,7 @@ out:                                                                      \
 static DEVICE_ATTR_RO(_name)
 
 UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
+UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
 UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
 UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
 UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
@@ -1088,6 +1103,7 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
 
 static struct attribute *ufs_sysfs_attributes[] = {
        &dev_attr_boot_lun_enabled.attr,
+       &dev_attr_max_data_size_hpb_single_cmd.attr,
        &dev_attr_current_power_mode.attr,
        &dev_attr_active_icc_level.attr,
        &dev_attr_ooo_data_enabled.attr,
@@ -1147,6 +1163,7 @@ static DEVICE_ATTR_RO(_pname)
 #define UFS_UNIT_DESC_PARAM(_name, _uname, _size)                      \
        UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
 
+UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
 UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
 UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
 UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
@@ -1160,10 +1177,13 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
 UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
 UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
 UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2);
+UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2);
+UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2);
 UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
 
-
 static struct attribute *ufs_sysfs_unit_descriptor[] = {
+       &dev_attr_lu_enable.attr,
        &dev_attr_boot_lun_id.attr,
        &dev_attr_lun_write_protect.attr,
        &dev_attr_lun_queue_depth.attr,
@@ -1177,6 +1197,9 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
        &dev_attr_physical_memory_resourse_count.attr,
        &dev_attr_context_capabilities.attr,
        &dev_attr_large_unit_granularity.attr,
+       &dev_attr_hpb_lu_max_active_regions.attr,
+       &dev_attr_hpb_pinned_region_start_offset.attr,
+       &dev_attr_hpb_number_pinned_regions.attr,
        &dev_attr_wb_buf_alloc_units.attr,
        NULL,
 };
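
The new HPB descriptor fields follow the existing ufs-sysfs pattern: one
read-only file per descriptor parameter, parsed from the raw descriptor at the
given offset and size. Reading one back from userspace might look like the
following sketch; the sysfs path varies per platform and the device name here
is an assumption:

	/* Illustrative userspace sketch; the exact path is platform dependent. */
	#include <stdio.h>

	int main(void)
	{
		char buf[16];
		FILE *f = fopen("/sys/bus/platform/drivers/ufshcd/1d84000.ufshc/"
				"device_descriptor/hpb_version", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("bHPBVersion: %s", buf);
		fclose(f);
		return 0;
	}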
index cb80b96..8c6b38b 100644 (file)
@@ -122,12 +122,14 @@ enum flag_idn {
        QUERY_FLAG_IDN_WB_EN                            = 0x0E,
        QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN                 = 0x0F,
        QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8     = 0x10,
+       QUERY_FLAG_IDN_HPB_RESET                        = 0x11,
+       QUERY_FLAG_IDN_HPB_EN                           = 0x12,
 };
 
 /* Attribute idn for Query requests */
 enum attr_idn {
        QUERY_ATTR_IDN_BOOT_LU_EN               = 0x00,
-       QUERY_ATTR_IDN_RESERVED                 = 0x01,
+       QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD       = 0x01,
        QUERY_ATTR_IDN_POWER_MODE               = 0x02,
        QUERY_ATTR_IDN_ACTIVE_ICC_LVL           = 0x03,
        QUERY_ATTR_IDN_OOO_DATA_EN              = 0x04,
@@ -195,6 +197,9 @@ enum unit_desc_param {
        UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT        = 0x18,
        UNIT_DESC_PARAM_CTX_CAPABILITIES        = 0x20,
        UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1      = 0x22,
+       UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS  = 0x23,
+       UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF   = 0x25,
+       UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS        = 0x27,
        UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS      = 0x29,
 };
 
@@ -235,6 +240,8 @@ enum device_desc_param {
        DEVICE_DESC_PARAM_PSA_MAX_DATA          = 0x25,
        DEVICE_DESC_PARAM_PSA_TMT               = 0x29,
        DEVICE_DESC_PARAM_PRDCT_REV             = 0x2A,
+       DEVICE_DESC_PARAM_HPB_VER               = 0x40,
+       DEVICE_DESC_PARAM_HPB_CONTROL           = 0x42,
        DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP   = 0x4F,
        DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN   = 0x53,
        DEVICE_DESC_PARAM_WB_TYPE               = 0x54,
@@ -283,6 +290,10 @@ enum geometry_desc_param {
        GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS  = 0x3E,
        GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR   = 0x42,
        GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE    = 0x44,
+       GEOMETRY_DESC_PARAM_HPB_REGION_SIZE     = 0x48,
+       GEOMETRY_DESC_PARAM_HPB_NUMBER_LU       = 0x49,
+       GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE  = 0x4A,
+       GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
        GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS  = 0x4F,
        GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS      = 0x53,
        GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ     = 0x54,
@@ -327,8 +338,10 @@ enum {
 
 /* Possible values for dExtendedUFSFeaturesSupport */
 enum {
+       UFS_DEV_HPB_SUPPORT             = BIT(7),
        UFS_DEV_WRITE_BOOSTER_SUP       = BIT(8),
 };
+#define UFS_DEV_HPB_SUPPORT_VERSION            0x310
 
 #define POWER_DESC_MAX_ACTV_ICC_LVLS           16
 
@@ -466,6 +479,41 @@ struct utp_cmd_rsp {
        u8 sense_data[UFS_SENSE_SIZE];
 };
 
+struct ufshpb_active_field {
+       __be16 active_rgn;
+       __be16 active_srgn;
+};
+#define HPB_ACT_FIELD_SIZE 4
+
+/**
+ * struct utp_hpb_rsp - Response UPIU structure
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved1: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @desc_type: Descriptor type of sense data
+ * @additional_len: Additional length of sense data
+ * @hpb_op: HPB operation type
+ * @lun: LUN of response UPIU
+ * @active_rgn_cnt: Active region count
+ * @inactive_rgn_cnt: Inactive region count
+ * @hpb_active_field: HPB regions and subregions recommended for activation
+ * @hpb_inactive_field: HPB regions and subregions to be inactivated
+ */
+struct utp_hpb_rsp {
+       __be32 residual_transfer_count;
+       __be32 reserved1[4];
+       __be16 sense_data_len;
+       u8 desc_type;
+       u8 additional_len;
+       u8 hpb_op;
+       u8 lun;
+       u8 active_rgn_cnt;
+       u8 inactive_rgn_cnt;
+       struct ufshpb_active_field hpb_active_field[2];
+       __be16 hpb_inactive_field[2];
+};
+#define UTP_HPB_RSP_SIZE 40
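
The layout adds up: 4 (residual count) + 16 (reserved) + 2 + 6 x 1 + 2 x 4
(active fields) + 2 x 2 (inactive fields) = 40 bytes, with every member
naturally aligned so no padding is inserted. A compile-time check in this
style would pin the wire format down (illustrative, not part of the patch):

	/* Illustrative compile-time layout checks, not part of the patch. */
	static inline void ufshpb_check_rsp_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
		BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
	}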
+
 /**
  * struct utp_upiu_rsp - general upiu response structure
  * @header: UPIU header structure DW-0 to DW-2
@@ -476,6 +524,7 @@ struct utp_upiu_rsp {
        struct utp_upiu_header header;
        union {
                struct utp_cmd_rsp sr;
+               struct utp_hpb_rsp hr;
                struct utp_upiu_query qr;
        };
 };
@@ -544,6 +593,9 @@ struct ufs_dev_info {
        u16     wspecversion;
        u32     clk_gating_wait_us;
 
+       /* UFS HPB related flag */
+       bool    hpb_enabled;
+
        /* UFS WB related flags */
        bool    wb_enabled;
        bool    wb_buf_flush_enabled;
index 07f559a..35ec9ea 100644 (file)
@@ -116,4 +116,10 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM        (1 << 11)
 
+/*
+ * Some UFS devices require the L2P entry to be byte-swapped before it is
+ * sent to the device in an HPB READ command.
+ */
+#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
+
 #endif /* UFS_QUIRKS_H_ */
index e6c334b..b3bcc5c 100644 (file)
@@ -385,48 +385,6 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
-#ifdef CONFIG_PM_SLEEP
-/**
- * ufshcd_pci_suspend - suspend power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_suspend(struct device *dev)
-{
-       return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-
-/**
- * ufshcd_pci_resume - resume power management function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_resume(struct device *dev)
-{
-       return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-
-#endif /* !CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM
-static int ufshcd_pci_runtime_suspend(struct device *dev)
-{
-       return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_resume(struct device *dev)
-{
-       return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-static int ufshcd_pci_runtime_idle(struct device *dev)
-{
-       return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-#endif /* !CONFIG_PM */
-
 /**
  * ufshcd_pci_shutdown - main function to put the controller in reset state
  * @pdev: pointer to PCI device handle
@@ -510,10 +468,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-       SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
-                          ufshcd_pci_runtime_resume,
-                          ufshcd_pci_runtime_idle)
-       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+       SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
 #ifdef CONFIG_PM_SLEEP
        .prepare        = ufshcd_suspend_prepare,
        .complete       = ufshcd_resume_complete,
index 298e22e..8859c13 100644 (file)
@@ -170,53 +170,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_PM
-/**
- * ufshcd_pltfrm_suspend - suspend power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_suspend(struct device *dev)
-{
-       return ufshcd_system_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend);
-
-/**
- * ufshcd_pltfrm_resume - resume power management function
- * @dev: pointer to device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-int ufshcd_pltfrm_resume(struct device *dev)
-{
-       return ufshcd_system_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume);
-
-int ufshcd_pltfrm_runtime_suspend(struct device *dev)
-{
-       return ufshcd_runtime_suspend(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend);
-
-int ufshcd_pltfrm_runtime_resume(struct device *dev)
-{
-       return ufshcd_runtime_resume(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume);
-
-int ufshcd_pltfrm_runtime_idle(struct device *dev)
-{
-       return ufshcd_runtime_idle(dev_get_drvdata(dev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle);
-
-#endif /* CONFIG_PM */
-
 void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
 {
        ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
index 772a8e8..c33e28a 100644 (file)
@@ -33,22 +33,4 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
                       const struct ufs_hba_variant_ops *vops);
 void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
 
-#ifdef CONFIG_PM
-
-int ufshcd_pltfrm_suspend(struct device *dev);
-int ufshcd_pltfrm_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_suspend(struct device *dev);
-int ufshcd_pltfrm_runtime_resume(struct device *dev);
-int ufshcd_pltfrm_runtime_idle(struct device *dev);
-
-#else /* !CONFIG_PM */
-
-#define ufshcd_pltfrm_suspend  NULL
-#define ufshcd_pltfrm_resume   NULL
-#define ufshcd_pltfrm_runtime_suspend  NULL
-#define ufshcd_pltfrm_runtime_resume   NULL
-#define ufshcd_pltfrm_runtime_idle     NULL
-
-#endif /* CONFIG_PM */
-
 #endif /* UFSHCD_PLTFRM_H_ */
index 708b3b6..3841ab4 100644 (file)
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_driver.h>
+#include <scsi/scsi_transport.h>
+#include "../scsi_transport_api.h"
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
 #include "ufs-debugfs.h"
+#include "ufs-fault-injection.h"
 #include "ufs_bsg.h"
 #include "ufshcd-crypto.h"
+#include "ufshpb.h"
 #include <asm/unaligned.h>
-#include "../sd.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
@@ -128,15 +131,6 @@ enum {
        UFSHCD_CAN_QUEUE        = 32,
 };
 
-/* UFSHCD states */
-enum {
-       UFSHCD_STATE_RESET,
-       UFSHCD_STATE_ERROR,
-       UFSHCD_STATE_OPERATIONAL,
-       UFSHCD_STATE_EH_SCHEDULED_FATAL,
-       UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
-};
-
 /* UFSHCD error handling flags */
 enum {
        UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -205,7 +199,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 static struct ufs_dev_fix ufs_fixups[] = {
        /* UFS cards deviations table */
        UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+               UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
                UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
@@ -242,7 +237,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -253,11 +247,6 @@ static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 
-static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
-{
-       return tag >= 0 && tag < hba->nutrs;
-}
-
 static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 {
        if (!hba->is_irq_enabled) {
@@ -371,26 +360,24 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
                                     enum ufs_trace_str_t str_t)
 {
-       u64 lba = -1;
+       u64 lba;
        u8 opcode = 0, group_id = 0;
        u32 intr, doorbell;
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
        struct scsi_cmnd *cmd = lrbp->cmd;
+       struct request *rq = scsi_cmd_to_rq(cmd);
        int transfer_len = -1;
 
        if (!cmd)
                return;
 
-       if (!trace_ufshcd_command_enabled()) {
-               /* trace UPIU W/O tracing command */
-               ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
-               return;
-       }
-
        /* trace UPIU also */
        ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+       if (!trace_ufshcd_command_enabled())
+               return;
+
        opcode = cmd->cmnd[0];
-       lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+       lba = scsi_get_lba(cmd);
 
        if (opcode == READ_10 || opcode == WRITE_10) {
                /*
@@ -404,7 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
                /*
                 * The number of Bytes to be unmapped beginning with the lba.
                 */
-               transfer_len = blk_rq_bytes(cmd->request);
+               transfer_len = blk_rq_bytes(rq);
        }
 
        intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -757,16 +744,6 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
                ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
 }
 
-/**
- * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
- * @hba: per adapter instance
- * @tag: position of the bit to be cleared
- */
-static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
-{
-       clear_bit(tag, &hba->outstanding_reqs);
-}
-
 /**
  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
  * @reg: Register value of host controller status
@@ -2078,7 +2055,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
-               struct request *req = lrbp->cmd->request;
+               struct request *req = scsi_cmd_to_rq(lrbp->cmd);
                struct ufs_hba_monitor *m = &hba->monitor;
                ktime_t now, inc, lat;
 
@@ -2112,27 +2089,22 @@ static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
        struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+       unsigned long flags;
 
        lrbp->issue_time_stamp = ktime_get();
        lrbp->compl_time_stamp = ktime_set(0, 0);
-       ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
        ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
        ufshcd_clk_scaling_start_busy(hba);
        if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
                ufshcd_start_monitor(hba, lrbp);
-       if (ufshcd_has_utrlcnr(hba)) {
-               set_bit(task_tag, &hba->outstanding_reqs);
-               ufshcd_writel(hba, 1 << task_tag,
-                             REG_UTP_TRANSFER_REQ_DOOR_BELL);
-       } else {
-               unsigned long flags;
 
-               spin_lock_irqsave(hba->host->host_lock, flags);
-               set_bit(task_tag, &hba->outstanding_reqs);
-               ufshcd_writel(hba, 1 << task_tag,
-                             REG_UTP_TRANSFER_REQ_DOOR_BELL);
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
-       }
+       spin_lock_irqsave(&hba->outstanding_lock, flags);
+       if (hba->vops && hba->vops->setup_xfer_req)
+               hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
+       __set_bit(task_tag, &hba->outstanding_reqs);
+       ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
        /* Make sure that doorbell is committed immediately */
        wmb();
 }
@@ -2247,15 +2219,15 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
 }
 
 /**
- * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
+ * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the Unipro layer
  * @hba: per adapter instance
  * @uic_cmd: UIC command
- *
- * Mutex must be held.
  */
 static inline void
 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
+       lockdep_assert_held(&hba->uic_cmd_mutex);
+
        WARN_ON(hba->active_uic_cmd);
 
        hba->active_uic_cmd = uic_cmd;
@@ -2273,11 +2245,10 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 }
 
 /**
- * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
+ * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
  * @hba: per adapter instance
  * @uic_cmd: UIC command
  *
- * Must be called with mutex held.
  * Returns 0 only if success.
  */
 static int
@@ -2286,6 +2257,8 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
        int ret;
        unsigned long flags;
 
+       lockdep_assert_held(&hba->uic_cmd_mutex);
+
        if (wait_for_completion_timeout(&uic_cmd->done,
                                        msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
                ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
@@ -2315,14 +2288,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  * @uic_cmd: UIC command
  * @completion: initialize the completion only if this is set to true
  *
- * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
- * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
                      bool completion)
 {
+       lockdep_assert_held(&hba->uic_cmd_mutex);
+       lockdep_assert_held(hba->host->host_lock);
+
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
                        "Controller not ready to accept UIC commands\n");
@@ -2701,20 +2675,12 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
  */
 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
+       struct ufs_hba *hba = shost_priv(host);
+       int tag = scsi_cmd_to_rq(cmd)->tag;
        struct ufshcd_lrb *lrbp;
-       struct ufs_hba *hba;
-       int tag;
        int err = 0;
 
-       hba = shost_priv(host);
-
-       tag = cmd->request->tag;
-       if (!ufshcd_valid_tag(hba, tag)) {
-               dev_err(hba->dev,
-                       "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
-                       __func__, tag, cmd, cmd->request);
-               BUG();
-       }
+       WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
 
        if (!down_read_trylock(&hba->clk_scaling_lock))
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -2748,12 +2714,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                set_host_byte(cmd, DID_ERROR);
                cmd->scsi_done(cmd);
                goto out;
-       default:
-               dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
-                               __func__, hba->ufshcd_state);
-               set_host_byte(cmd, DID_BAD_TARGET);
-               cmd->scsi_done(cmd);
-               goto out;
        }
 
        hba->req_abort_count = 0;
@@ -2766,15 +2726,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
                (hba->clk_gating.state != CLKS_ON));
 
-       if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
-               if (hba->pm_op_in_progress)
-                       set_host_byte(cmd, DID_BAD_TARGET);
-               else
-                       err = SCSI_MLQUEUE_HOST_BUSY;
-               ufshcd_release(hba);
-               goto out;
-       }
-
        lrbp = &hba->lrb[tag];
        WARN_ON(lrbp->cmd);
        lrbp->cmd = cmd;
@@ -2784,10 +2735,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
 
-       ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
+       ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
 
        lrbp->req_abort_skip = false;
 
+       err = ufshpb_prep(hba, lrbp);
+       if (err == -EAGAIN) {
+               lrbp->cmd = NULL;
+               ufshcd_release(hba);
+               goto out;
+       }
+
        ufshcd_comp_scsi_upiu(hba, lrbp);
 
        err = ufshcd_map_sg(hba, lrbp);
@@ -2796,12 +2754,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                ufshcd_release(hba);
                goto out;
        }
-       /* Make sure descriptors are ready before ringing the doorbell */
-       wmb();
 
        ufshcd_send_command(hba, tag);
 out:
        up_read(&hba->clk_scaling_lock);
+
+       if (ufs_trigger_eh())
+               scsi_schedule_eh(hba->host);
+
        return err;
 }
 
@@ -2907,8 +2867,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
        time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
                        msecs_to_jiffies(max_timeout));
 
-       /* Make sure descriptors are ready before ringing the doorbell */
-       wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->dev_cmd.complete = NULL;
        if (likely(time_left)) {
@@ -2930,7 +2888,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
                 * we also need to clear the outstanding_request
                 * field in hba
                 */
-               ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
+               spin_lock_irqsave(&hba->outstanding_lock, flags);
+               __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
+               spin_unlock_irqrestore(&hba->outstanding_lock, flags);
        }
 
        return err;
@@ -2949,11 +2909,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                enum dev_cmd_type cmd_type, int timeout)
 {
        struct request_queue *q = hba->cmd_queue;
+       DECLARE_COMPLETION_ONSTACK(wait);
        struct request *req;
        struct ufshcd_lrb *lrbp;
        int err;
        int tag;
-       struct completion wait;
 
        down_read(&hba->clk_scaling_lock);
 
@@ -2968,17 +2928,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                goto out_unlock;
        }
        tag = req->tag;
-       WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+       WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
        /* Set the timeout such that the SCSI error handler is not activated. */
        req->timeout = msecs_to_jiffies(2 * timeout);
        blk_mq_start_request(req);
 
-       if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       init_completion(&wait);
        lrbp = &hba->lrb[tag];
        WARN_ON(lrbp->cmd);
        err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@@ -2988,8 +2942,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
        hba->dev_cmd.complete = &wait;
 
        ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-       /* Make sure descriptors are ready before ringing the doorbell */
-       wmb();
 
        ufshcd_send_command(hba, tag);
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -3194,7 +3146,7 @@ out_unlock:
  *
  * Returns 0 for success, non-zero in case of failure
 */
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
        enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
        u32 *attr_val)
 {
@@ -3419,9 +3371,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
 
        if (is_kmalloc) {
                /* Make sure we don't copy more data than available */
-               if (param_offset + param_size > buff_len)
-                       param_size = buff_len - param_offset;
-               memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+               if (param_offset >= buff_len)
+                       ret = -EINVAL;
+               else
+                       memcpy(param_read_buf, &desc_buf[param_offset],
+                              min_t(u32, param_size, buff_len - param_offset));
        }
 out:
        if (is_kmalloc)
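
The old clamp misbehaved when param_offset exceeded buff_len: buff_len -
param_offset went negative and was reinterpreted as a large unsigned length
for the memcpy(). Worked through the new logic: with buff_len = 64,
param_offset = 60 and param_size = 8 the copy is clamped to 4 bytes; with
param_offset = 70 the function now returns -EINVAL instead of copying at all.
A hedged sketch of the same clamp in isolation:

	/* Illustrative sketch of the corrected clamp, not part of the patch. */
	static int copy_desc_param(u8 *dst, const u8 *desc_buf, u32 buff_len,
				   u32 param_offset, u32 param_size)
	{
		if (param_offset >= buff_len)
			return -EINVAL;		/* e.g. offset 70 in a 64-byte buffer */
		memcpy(dst, desc_buf + param_offset,
		       min_t(u32, param_size, buff_len - param_offset));
		return 0;			/* offset 60, size 8 -> copies 4 bytes */
	}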
@@ -3965,6 +3919,35 @@ out:
 }
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+       lockdep_assert_held(hba->host->host_lock);
+
+       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+static void ufshcd_schedule_eh(struct ufs_hba *hba)
+{
+       bool schedule_eh = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       /* handle fatal errors only when link is not in error state */
+       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+                   ufshcd_is_saved_err_fatal(hba))
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+               else
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+               schedule_eh = true;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (schedule_eh)
+               scsi_schedule_eh(hba->host);
+}
+
 /**
  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  * state) and waits for it to take effect.
@@ -3983,14 +3966,14 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
  */
 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
-       struct completion uic_async_done;
+       DECLARE_COMPLETION_ONSTACK(uic_async_done);
        unsigned long flags;
+       bool schedule_eh = false;
        u8 status;
        int ret;
        bool reenable_intr = false;
 
        mutex_lock(&hba->uic_cmd_mutex);
-       init_completion(&uic_async_done);
        ufshcd_add_delay_before_dme_cmd(hba);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4055,10 +4038,14 @@ out:
                ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        if (ret) {
                ufshcd_set_link_broken(hba);
-               ufshcd_schedule_eh_work(hba);
+               schedule_eh = true;
        }
+
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (schedule_eh)
+               ufshcd_schedule_eh(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 
        return ret;
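
DECLARE_COMPLETION_ONSTACK() replaces the declare-then-init_completion() pair
here and in ufshcd_exec_dev_cmd(): it declares and initializes in a single
statement, and the _ONSTACK variant gives each on-stack instance its own
lockdep class. Minimal usage, illustrative only (my_job is hypothetical):

	/* Illustrative usage, not part of the patch. */
	struct my_job {
		struct work_struct work;
		struct completion *done;
		int status;
	};

	static int run_job_and_wait(struct workqueue_struct *wq, struct my_job *job)
	{
		DECLARE_COMPLETION_ONSTACK(done);	/* declared and initialized */

		job->done = &done;
		queue_work(wq, &job->work);	/* worker calls complete(job->done) */
		wait_for_completion(&done);
		return job->status;
	}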
@@ -4987,6 +4974,26 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
        return scsi_change_queue_depth(sdev, depth);
 }
 
+static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+       /* skip well-known LU */
+       if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+           !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+               return;
+
+       ufshpb_destroy_lu(hba, sdev);
+}
+
+static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+       /* skip well-known LU */
+       if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
+           !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
+               return;
+
+       ufshpb_init_hpb_lu(hba, sdev);
+}
+
 /**
  * ufshcd_slave_configure - adjust SCSI device configurations
  * @sdev: pointer to SCSI device
@@ -4996,6 +5003,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
        struct ufs_hba *hba = shost_priv(sdev->host);
        struct request_queue *q = sdev->request_queue;
 
+       ufshcd_hpb_configure(hba, sdev);
+
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
        if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
                blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
@@ -5020,15 +5029,37 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
 static void ufshcd_slave_destroy(struct scsi_device *sdev)
 {
        struct ufs_hba *hba;
+       unsigned long flags;
 
        hba = shost_priv(sdev->host);
+
+       ufshcd_hpb_destroy(hba, sdev);
+
        /* Drop the reference as it won't be needed anymore */
        if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
-               unsigned long flags;
-
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->sdev_ufs_device = NULL;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
+       } else if (hba->sdev_ufs_device) {
+               struct device *supplier = NULL;
+
+               /* Ensure UFS Device WLUN exists and does not disappear */
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               if (hba->sdev_ufs_device) {
+                       supplier = &hba->sdev_ufs_device->sdev_gendev;
+                       get_device(supplier);
+               }
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+               if (supplier) {
+                       /*
+                        * If a LUN fails to probe (e.g. absent BOOT WLUN), the
+                        * device will not have been registered but can still
+                        * have a device link holding a reference to the device.
+                        */
+                       device_link_remove(&sdev->sdev_gendev, supplier);
+                       put_device(supplier);
+               }
        }
 }
 
@@ -5124,6 +5155,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                            ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
                                /* Flushed in suspend */
                                schedule_work(&hba->eeh_work);
+
+                       if (scsi_status == SAM_STAT_GOOD)
+                               ufshpb_rsp_upiu(hba, lrbp);
                        break;
                case UPIU_TRANSACTION_REJECT_UPIU:
                        /* TODO: handle Reject UPIU Response */
@@ -5232,10 +5266,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 /**
  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
- * @completed_reqs: requests to complete
+ * @completed_reqs: bitmask that indicates which requests to complete
+ * @retry_requests: whether to ask the SCSI core to retry completed requests
  */
 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
-                                       unsigned long completed_reqs)
+                                       unsigned long completed_reqs,
+                                       bool retry_requests)
 {
        struct ufshcd_lrb *lrbp;
        struct scsi_cmnd *cmd;
@@ -5244,8 +5280,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
        bool update_scaling = false;
 
        for_each_set_bit(index, &completed_reqs, hba->nutrs) {
-               if (!test_and_clear_bit(index, &hba->outstanding_reqs))
-                       continue;
                lrbp = &hba->lrb[index];
                lrbp->compl_time_stamp = ktime_get();
                cmd = lrbp->cmd;
@@ -5253,7 +5287,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                        if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
                                ufshcd_update_monitor(hba, lrbp);
                        ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
-                       result = ufshcd_transfer_rsp_status(hba, lrbp);
+                       result = retry_requests ? DID_BUS_BUSY << 16 :
+                               ufshcd_transfer_rsp_status(hba, lrbp);
                        scsi_dma_unmap(cmd);
                        cmd->result = result;
                        /* Mark completed command as NULL in LRB */
@@ -5277,17 +5312,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 }
 
 /**
- * ufshcd_trc_handler - handle transfer requests completion
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
- * @use_utrlcnr: get completed requests from UTRLCNR
+ * @retry_requests: whether to ask the SCSI core to retry the completed requests
  *
  * Returns
  *  IRQ_HANDLED - If interrupt is valid
  *  IRQ_NONE    - If invalid interrupt
  */
-static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
+                                            bool retry_requests)
 {
-       unsigned long completed_reqs = 0;
+       unsigned long completed_reqs, flags;
+       u32 tr_doorbell;
 
        /* Resetting interrupt aggregation counters first and reading the
         * DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5300,27 +5337,21 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
            !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
                ufshcd_reset_intr_aggr(hba);
 
-       if (use_utrlcnr) {
-               u32 utrlcnr;
-
-               utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
-               if (utrlcnr) {
-                       ufshcd_writel(hba, utrlcnr,
-                                     REG_UTP_TRANSFER_REQ_LIST_COMPL);
-                       completed_reqs = utrlcnr;
-               }
-       } else {
-               unsigned long flags;
-               u32 tr_doorbell;
+       if (ufs_fail_completion())
+               return IRQ_HANDLED;
 
-               spin_lock_irqsave(hba->host->host_lock, flags);
-               tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-               completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
-       }
+       spin_lock_irqsave(&hba->outstanding_lock, flags);
+       tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+       WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+                 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+                 hba->outstanding_reqs);
+       hba->outstanding_reqs &= ~completed_reqs;
+       spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
        if (completed_reqs) {
-               __ufshcd_transfer_req_compl(hba, completed_reqs);
+               __ufshcd_transfer_req_compl(hba, completed_reqs,
+                                           retry_requests);
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
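The completion mask above is pure bit arithmetic: a slot still marked in outstanding_reqs whose doorbell bit the controller has cleared is done, and those bits are then dropped under outstanding_lock. A standalone sketch with hypothetical 8-slot masks:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 8-slot controller state. */
	unsigned long outstanding = 0xb6; /* 1011 0110: slots the driver owns */
	unsigned long tr_doorbell = 0x86; /* 1000 0110: slots still in flight */

	/* A bit set in outstanding but clear in the doorbell is complete. */
	unsigned long completed = ~tr_doorbell & outstanding;

	printf("completed   = 0x%02lx\n", completed);   /* 0x30: slots 4, 5 */

	/* The driver clears those bits while holding outstanding_lock. */
	outstanding &= ~completed;
	printf("outstanding = 0x%02lx\n", outstanding); /* 0x86 */
	return 0;
}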
@@ -5799,7 +5830,13 @@ out:
 /* Complete requests that have door-bell cleared */
 static void ufshcd_complete_requests(struct ufs_hba *hba)
 {
-       ufshcd_trc_handler(hba, false);
+       ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+       ufshcd_tmc_handler(hba);
+}
+
+static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
+{
+       ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
        ufshcd_tmc_handler(hba);
 }
 
@@ -5874,27 +5911,6 @@ out:
        return err_handling;
 }
 
-/* host lock must be held before calling this func */
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
-       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
-              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
-{
-       /* handle fatal errors only when link is not in error state */
-       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
-               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
-                   ufshcd_is_saved_err_fatal(hba))
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
-               else
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
-               queue_work(hba->eh_wq, &hba->eh_work);
-       }
-}
-
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
        down_write(&hba->clk_scaling_lock);
@@ -6028,11 +6044,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
 
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @work: pointer to work structure
+ * @host: SCSI host pointer
  */
-static void ufshcd_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct Scsi_Host *host)
 {
-       struct ufs_hba *hba;
+       struct ufs_hba *hba = shost_priv(host);
        unsigned long flags;
        bool err_xfer = false;
        bool err_tm = false;
@@ -6040,10 +6056,9 @@ static void ufshcd_err_handler(struct work_struct *work)
        int tag;
        bool needs_reset = false, needs_restore = false;
 
-       hba = container_of(work, struct ufs_hba, eh_work);
-
        down(&hba->host_sem);
        spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->host->host_eh_scheduled = 0;
        if (ufshcd_err_handling_should_stop(hba)) {
                if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
                        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6141,8 +6156,7 @@ static void ufshcd_err_handler(struct work_struct *work)
        }
 
 lock_skip_pending_xfer_clear:
-       /* Complete the requests that are cleared by s/w */
-       ufshcd_complete_requests(hba);
+       ufshcd_retry_aborted_requests(hba);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->silence_err_logs = false;
@@ -6357,7 +6371,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
                                         "host_regs: ");
                        ufshcd_print_pwr_info(hba);
                }
-               ufshcd_schedule_eh_work(hba);
                retval |= IRQ_HANDLED;
        }
        /*
@@ -6369,6 +6382,10 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
        hba->errors = 0;
        hba->uic_error = 0;
        spin_unlock(hba->host->host_lock);
+
+       if (queue_eh_work)
+               ufshcd_schedule_eh(hba);
+
        return retval;
 }
 
@@ -6440,7 +6457,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
                retval |= ufshcd_tmc_handler(hba);
 
        if (intr_status & UTP_TRANSFER_REQ_COMPL)
-               retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
+               retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
 
        return retval;
 }
@@ -6548,9 +6565,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        /* send command to the controller */
        __set_bit(task_tag, &hba->outstanding_tasks);
 
-       /* Make sure descriptors are ready before ringing the task doorbell */
-       wmb();
-
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
        /* Make sure that doorbell is committed immediately */
        wmb();
@@ -6663,11 +6677,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
                                        enum query_opcode desc_op)
 {
        struct request_queue *q = hba->cmd_queue;
+       DECLARE_COMPLETION_ONSTACK(wait);
        struct request *req;
        struct ufshcd_lrb *lrbp;
        int err = 0;
        int tag;
-       struct completion wait;
        u8 upiu_flags;
 
        down_read(&hba->clk_scaling_lock);
@@ -6678,14 +6692,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
                goto out_unlock;
        }
        tag = req->tag;
-       WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+       WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
 
        if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
                err = -EBUSY;
                goto out;
        }
 
-       init_completion(&wait);
        lrbp = &hba->lrb[tag];
        WARN_ON(lrbp->cmd);
        lrbp->cmd = NULL;
@@ -6723,8 +6736,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
        hba->dev_cmd.complete = &wait;
 
        ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-       /* Make sure descriptors are ready before ringing the doorbell */
-       wmb();
 
        ufshcd_send_command(hba, tag);
        /*
@@ -6865,7 +6876,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
                        err = ufshcd_clear_cmd(hba, pos);
                        if (err)
                                break;
-                       __ufshcd_transfer_req_compl(hba, pos);
+                       __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
                }
        }
 
@@ -6981,33 +6992,24 @@ out:
  */
 static int ufshcd_abort(struct scsi_cmnd *cmd)
 {
-       struct Scsi_Host *host;
-       struct ufs_hba *hba;
+       struct Scsi_Host *host = cmd->device->host;
+       struct ufs_hba *hba = shost_priv(host);
+       int tag = scsi_cmd_to_rq(cmd)->tag;
+       struct ufshcd_lrb *lrbp = &hba->lrb[tag];
        unsigned long flags;
-       unsigned int tag;
-       int err = 0;
-       struct ufshcd_lrb *lrbp;
+       int err = FAILED;
        u32 reg;
 
-       host = cmd->device->host;
-       hba = shost_priv(host);
-       tag = cmd->request->tag;
-       lrbp = &hba->lrb[tag];
-       if (!ufshcd_valid_tag(hba, tag)) {
-               dev_err(hba->dev,
-                       "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
-                       __func__, tag, cmd, cmd->request);
-               BUG();
-       }
+       WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
 
        ufshcd_hold(hba, false);
        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-       /* If command is already aborted/completed, return SUCCESS */
+       /* If command is already aborted/completed, return FAILED. */
        if (!(test_bit(tag, &hba->outstanding_reqs))) {
                dev_err(hba->dev,
                        "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
                        __func__, tag, hba->outstanding_reqs, reg);
-               goto out;
+               goto release;
        }
 
        /* Print Transfer Request of aborted task */
@@ -7036,7 +7038,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                dev_err(hba->dev,
                "%s: cmd was completed, but without a notifying intr, tag = %d",
                __func__, tag);
-               goto cleanup;
+               __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+               goto release;
        }
 
        /*
@@ -7045,40 +7048,39 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
         * will be to send LU reset which, again, is a spec violation.
         * To avoid these unnecessary/illegal steps, first we clean up
         * the lrb taken by this cmd and re-set it in outstanding_reqs,
-        * then queue the eh_work and bail.
+        * then queue the error handler and bail.
         */
        if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
                ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
-               __ufshcd_transfer_req_compl(hba, (1UL << tag));
-               set_bit(tag, &hba->outstanding_reqs);
+
                spin_lock_irqsave(host->host_lock, flags);
                hba->force_reset = true;
-               ufshcd_schedule_eh_work(hba);
                spin_unlock_irqrestore(host->host_lock, flags);
-               goto out;
+
+               ufshcd_schedule_eh(hba);
+
+               goto release;
        }
 
        /* Skip task abort in case previous aborts failed and report failure */
-       if (lrbp->req_abort_skip)
-               err = -EIO;
-       else
-               err = ufshcd_try_to_abort_task(hba, tag);
+       if (lrbp->req_abort_skip) {
+               dev_err(hba->dev, "%s: skipping abort\n", __func__);
+               ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
+               goto release;
+       }
 
-       if (!err) {
-cleanup:
-               __ufshcd_transfer_req_compl(hba, (1UL << tag));
-out:
-               err = SUCCESS;
-       } else {
+       err = ufshcd_try_to_abort_task(hba, tag);
+       if (err) {
                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
                ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
                err = FAILED;
+               goto release;
        }
 
-       /*
-        * This ufshcd_release() corresponds to the original scsi cmd that got
-        * aborted here (as we won't get any IRQ for it).
-        */
+       err = SUCCESS;
+
+release:
+       /* Matches the ufshcd_hold() call at the start of this function. */
        ufshcd_release(hba);
        return err;
 }
@@ -7101,9 +7103,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
         * Stop the host controller and complete the requests
         * cleared by h/w
         */
+       ufshpb_reset_host(hba);
        ufshcd_hba_stop(hba);
        hba->silence_err_logs = true;
-       ufshcd_complete_requests(hba);
+       ufshcd_retry_aborted_requests(hba);
        hba->silence_err_logs = false;
 
        /* scale up clocks to max frequency before full reinitialization */
@@ -7188,11 +7191,10 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->force_reset = true;
-       ufshcd_schedule_eh_work(hba);
        dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       flush_work(&hba->eh_work);
+       ufshcd_err_handler(hba->host);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -7499,6 +7501,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 {
        int err;
        u8 model_index;
+       u8 b_ufs_feature_sup;
        u8 *desc_buf;
        struct ufs_dev_info *dev_info = &hba->dev_info;
 
@@ -7526,9 +7529,26 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
        /* getting Specification Version in big endian format */
        dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
                                      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+       b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
 
        model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
+       if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
+           (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
+               bool hpb_en = false;
+
+               ufshpb_get_dev_info(hba, desc_buf);
+
+               if (!ufshpb_is_legacy(hba))
+                       err = ufshcd_query_flag_retry(hba,
+                                                     UPIU_QUERY_OPCODE_READ_FLAG,
+                                                     QUERY_FLAG_IDN_HPB_EN, 0,
+                                                     &hpb_en);
+
+               if (ufshpb_is_legacy(hba) || (!err && hpb_en))
+                       dev_info->hpb_enabled = true;
+       }
+
        err = ufshcd_read_string_desc(hba, model_index,
                                      &dev_info->model, SD_ASCII_STD);
        if (err < 0) {
@@ -7760,6 +7780,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
        else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
                hba->dev_info.max_lu_supported = 8;
 
+       if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+               GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
+               ufshpb_get_geo_info(hba, desc_buf);
+
 out:
        kfree(desc_buf);
        return err;
@@ -7902,6 +7926,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
        }
 
        ufs_bsg_probe(hba);
+       ufshpb_init(hba);
        scsi_scan_host(hba->host);
        pm_runtime_put_sync(hba->dev);
 
@@ -7909,8 +7934,61 @@ out:
        return ret;
 }
 
+static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
+{
+       if (error != BLK_STS_OK)
+               pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
+       kfree(rq->end_io_data);
+       blk_put_request(rq);
+}
+
 static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+       /*
+        * Some UFS devices clear unit attention condition only if the sense
+        * size used (UFS_SENSE_SIZE in this case) is non-zero.
+        */
+       static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, UFS_SENSE_SIZE, 0};
+       struct scsi_request *rq;
+       struct request *req;
+       char *buffer;
+       int ret;
+
+       buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
+                             /*flags=*/BLK_MQ_REQ_PM);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
+               goto out_free;
+       }
+
+       ret = blk_rq_map_kern(sdev->request_queue, req,
+                             buffer, UFS_SENSE_SIZE, GFP_NOIO);
+       if (ret)
+               goto out_put;
+
+       rq = scsi_req(req);
+       rq->cmd_len = ARRAY_SIZE(cmd);
+       memcpy(rq->cmd, cmd, rq->cmd_len);
+       rq->retries = 3;
+       req->timeout = 1 * HZ;
+       req->rq_flags |= RQF_PM | RQF_QUIET;
+       req->end_io_data = buffer;
+
+       blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true,
+                             ufshcd_request_sense_done);
+       return 0;
+
+out_put:
+       blk_put_request(req);
+out_free:
+       kfree(buffer);
+       return ret;
+}
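Ownership of the sense buffer travels with the request: end_io_data carries the pointer and the completion callback frees it, so the submitter never blocks. A standalone sketch of that fire-and-forget pattern with plain callbacks standing in for the block layer (fake_req, submit_sense and sense_done are illustrative names; 18 is UFS_SENSE_SIZE):

#include <stdio.h>
#include <stdlib.h>

struct fake_req {
	void (*end_io)(struct fake_req *rq, int error);
	void *end_io_data;	/* buffer ownership travels with the request */
};

static void sense_done(struct fake_req *rq, int error)
{
	if (error)
		fprintf(stderr, "REQUEST SENSE failed (%d)\n", error);
	free(rq->end_io_data);	/* the callback, not the submitter, frees it */
	free(rq);
}

static int submit_sense(void)
{
	struct fake_req *rq = malloc(sizeof(*rq));
	void *buffer = calloc(1, 18);	/* UFS_SENSE_SIZE */

	if (!rq || !buffer) {
		free(rq);
		free(buffer);
		return -1;
	}
	rq->end_io = sense_done;
	rq->end_io_data = buffer;
	rq->end_io(rq, 0);	/* a real queue would call this on completion */
	return 0;
}

int main(void)
{
	return submit_sense();
}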
 
 static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
 {
@@ -7938,7 +8016,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
        if (ret)
                goto out_err;
 
-       ret = ufshcd_send_request_sense(hba, sdp);
+       ret = ufshcd_request_sense_async(hba, sdp);
        scsi_device_put(sdp);
 out_err:
        if (ret)
@@ -7967,13 +8045,13 @@ out:
 }
 
 /**
- * ufshcd_probe_hba - probe hba to detect device and initialize
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
  * @hba: per-adapter instance
- * @async: asynchronous execution or not
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
  *
  * Execute link-startup and verify device initialization
  */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
 {
        int ret;
        unsigned long flags;
@@ -8005,7 +8083,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
         * Initialize UFS device parameters used by driver, these
         * parameters are associated with UFS descriptors.
         */
-       if (async) {
+       if (init_dev_params) {
                ret = ufshcd_device_params_init(hba);
                if (ret)
                        goto out;
@@ -8050,6 +8128,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
        /* Enable Auto-Hibernate if configured */
        ufshcd_auto_hibern8_enable(hba);
 
+       ufshpb_reset(hba);
 out:
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (ret)
@@ -8097,6 +8176,10 @@ out:
 static const struct attribute_group *ufshcd_driver_groups[] = {
        &ufs_sysfs_unit_descriptor_group,
        &ufs_sysfs_lun_attributes_group,
+#ifdef CONFIG_SCSI_UFS_HPB
+       &ufs_sysfs_hpb_stat_group,
+       &ufs_sysfs_hpb_param_group,
+#endif
        NULL,
 };
 
@@ -8521,8 +8604,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
        if (hba->is_powered) {
                ufshcd_exit_clk_scaling(hba);
                ufshcd_exit_clk_gating(hba);
-               if (hba->eh_wq)
-                       destroy_workqueue(hba->eh_wq);
                ufs_debugfs_hba_exit(hba);
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
@@ -8533,35 +8614,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
        }
 }
 
-static int
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
-{
-       unsigned char cmd[6] = {REQUEST_SENSE,
-                               0,
-                               0,
-                               0,
-                               UFS_SENSE_SIZE,
-                               0};
-       char *buffer;
-       int ret;
-
-       buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
-       if (!buffer) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
-                       UFS_SENSE_SIZE, NULL, NULL,
-                       msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
-       if (ret)
-               pr_err("%s: failed with err %d\n", __func__, ret);
-
-       kfree(buffer);
-out:
-       return ret;
-}
-
 /**
  * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
  *                          power mode
@@ -8739,6 +8791,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
                usleep_range(5000, 5100);
 }
 
+#ifdef CONFIG_PM
 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 {
        int ret = 0;
@@ -8766,6 +8819,7 @@ vcc_disable:
 out:
        return ret;
 }
+#endif /* CONFIG_PM */
 
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
 {
@@ -8798,6 +8852,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                req_link_state = UIC_LINK_OFF_STATE;
        }
 
+       ufshpb_suspend(hba);
+
        /*
         * If we can't transition into any of the low power modes
         * just gate the clocks.
@@ -8921,6 +8977,7 @@ out:
                ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
                hba->clk_gating.is_suspended = false;
                ufshcd_release(hba);
+               ufshpb_resume(hba);
        }
        hba->pm_op_in_progress = false;
        return ret;
@@ -8999,6 +9056,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
        /* Enable Auto-Hibernate if configured */
        ufshcd_auto_hibern8_enable(hba);
+
+       ufshpb_resume(hba);
        goto out;
 
 set_old_link_state:
@@ -9168,6 +9227,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
        return ret;
 }
 
+#ifdef CONFIG_PM
 /**
  * ufshcd_resume - helper function for resume operations
  * @hba: per adapter instance
@@ -9205,17 +9265,21 @@ out:
                ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
        return ret;
 }
+#endif /* CONFIG_PM */
 
+#ifdef CONFIG_PM_SLEEP
 /**
- * ufshcd_system_suspend - system suspend routine
- * @hba: per adapter instance
+ * ufshcd_system_suspend - system suspend callback
+ * @dev: Device associated with the UFS controller.
  *
- * Check the description of ufshcd_suspend() function for more details.
+ * Executed before putting the system into a sleep state in which the contents
+ * of main memory are preserved.
  *
  * Returns 0 for success and non-zero for failure
  */
-int ufshcd_system_suspend(struct ufs_hba *hba)
+int ufshcd_system_suspend(struct device *dev)
 {
+       struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret = 0;
        ktime_t start = ktime_get();
 
@@ -9232,16 +9296,19 @@ out:
 EXPORT_SYMBOL(ufshcd_system_suspend);
 
 /**
- * ufshcd_system_resume - system resume routine
- * @hba: per adapter instance
+ * ufshcd_system_resume - system resume callback
+ * @dev: Device associated with the UFS controller.
+ *
+ * Executed after waking the system up from a sleep state in which the contents
+ * of main memory were preserved.
  *
  * Returns 0 for success and non-zero for failure
  */
-
-int ufshcd_system_resume(struct ufs_hba *hba)
+int ufshcd_system_resume(struct device *dev)
 {
-       int ret = 0;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start = ktime_get();
+       int ret = 0;
 
        if (pm_runtime_suspended(hba->dev))
                goto out;
@@ -9256,17 +9323,20 @@ out:
        return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
+#endif /* CONFIG_PM_SLEEP */
 
+#ifdef CONFIG_PM
 /**
- * ufshcd_runtime_suspend - runtime suspend routine
- * @hba: per adapter instance
+ * ufshcd_runtime_suspend - runtime suspend callback
+ * @dev: Device associated with the UFS controller.
  *
  * Check the description of ufshcd_suspend() function for more details.
  *
  * Returns 0 for success and non-zero for failure
  */
-int ufshcd_runtime_suspend(struct ufs_hba *hba)
+int ufshcd_runtime_suspend(struct device *dev)
 {
+       struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;
        ktime_t start = ktime_get();
 
@@ -9281,7 +9351,7 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
 /**
  * ufshcd_runtime_resume - runtime resume routine
- * @hba: per adapter instance
+ * @dev: Device associated with the UFS controller.
  *
  * This function basically brings controller
  * to active state. Following operations are done in this function:
@@ -9289,8 +9359,9 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
  * 1. Turn on all the controller related clocks
  * 2. Turn ON VCC rail
  */
-int ufshcd_runtime_resume(struct ufs_hba *hba)
+int ufshcd_runtime_resume(struct device *dev)
 {
+       struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;
        ktime_t start = ktime_get();
 
@@ -9302,12 +9373,7 @@ int ufshcd_runtime_resume(struct ufs_hba *hba)
        return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
-
-int ufshcd_runtime_idle(struct ufs_hba *hba)
-{
-       return 0;
-}
-EXPORT_SYMBOL(ufshcd_runtime_idle);
+#endif /* CONFIG_PM */
 
 /**
  * ufshcd_shutdown - shutdown routine
@@ -9343,6 +9409,7 @@ void ufshcd_remove(struct ufs_hba *hba)
        if (hba->sdev_ufs_device)
                ufshcd_rpm_get_sync(hba);
        ufs_bsg_remove(hba);
+       ufshpb_remove(hba);
        ufs_sysfs_remove_nodes(hba->dev);
        blk_cleanup_queue(hba->tmf_queue);
        blk_mq_free_tag_set(&hba->tmf_tag_set);
@@ -9381,6 +9448,10 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
 }
 
+static struct scsi_transport_template ufshcd_transport_template = {
+       .eh_strategy_handler = ufshcd_err_handler,
+};
+
 /**
  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
@@ -9407,13 +9478,15 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
+       host->transportt = &ufshcd_transport_template;
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
-       *hba_handle = hba;
        hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
-
        INIT_LIST_HEAD(&hba->clk_list_head);
+       spin_lock_init(&hba->outstanding_lock);
+
+       *hba_handle = hba;
 
 out_error:
        return err;
@@ -9444,7 +9517,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
-       char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
        if (!mmio_base) {
                dev_err(hba->dev,
@@ -9498,17 +9570,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        hba->max_pwr_info.is_valid = false;
 
-       /* Initialize work queues */
-       snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
-                hba->host->host_no);
-       hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
-       if (!hba->eh_wq) {
-               dev_err(hba->dev, "%s: failed to create eh workqueue\n",
-                               __func__);
-               err = -ENOMEM;
-               goto out_disable;
-       }
-       INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
        sema_init(&hba->host_sem, 1);
@@ -9627,6 +9688,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        async_schedule(ufshcd_async_scan, hba);
        ufs_sysfs_add_nodes(hba->dev);
 
+       device_enable_async_suspend(dev);
        return 0;
 
 free_tmf_queue:
index 194755c..52ea6f3 100644 (file)
@@ -476,6 +476,27 @@ struct ufs_stats {
        struct ufs_event_hist event[UFS_EVT_CNT];
 };
 
+/**
+ * enum ufshcd_state - UFS host controller state
+ * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
+ *     processing.
+ * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
+ *     SCSI commands.
+ * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
+ *     SCSI commands may be submitted to the controller.
+ * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
+ *     newly submitted SCSI commands with error code DID_BAD_TARGET.
+ * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
+ *     failed. Fail all SCSI commands with error code DID_ERROR.
+ */
+enum ufshcd_state {
+       UFSHCD_STATE_RESET,
+       UFSHCD_STATE_OPERATIONAL,
+       UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
+       UFSHCD_STATE_EH_SCHEDULED_FATAL,
+       UFSHCD_STATE_ERROR,
+};
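The split between the two EH_SCHEDULED states follows the policy of the removed ufshcd_schedule_eh_work() earlier in this diff: a forced reset, a broken link or a saved fatal error escalates to the FATAL variant. A standalone sketch of that selection (next_state() is an illustrative name; the enum is a subset of the one above):

#include <stdbool.h>
#include <stdio.h>

enum ufshcd_state {
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
	UFSHCD_STATE_EH_SCHEDULED_FATAL,
	UFSHCD_STATE_ERROR,
};

/* Mirrors the removed ufshcd_schedule_eh_work() policy. */
static enum ufshcd_state next_state(bool force_reset, bool link_broken,
				    bool saved_err_fatal)
{
	if (force_reset || link_broken || saved_err_fatal)
		return UFSHCD_STATE_EH_SCHEDULED_FATAL;
	return UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
}

int main(void)
{
	printf("%d\n", next_state(true, false, false));  /* 2: FATAL */
	printf("%d\n", next_state(false, false, false)); /* 1: NON_FATAL */
	return 0;
}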
+
 enum ufshcd_quirks {
        /* Interrupt aggregation support is broken */
        UFSHCD_QUIRK_BROKEN_INTR_AGGR                   = 1 << 0,
@@ -641,6 +662,31 @@ struct ufs_hba_variant_params {
        u32 wb_flush_threshold;
 };
 
+#ifdef CONFIG_SCSI_UFS_HPB
+/**
+ * struct ufshpb_dev_info - UFSHPB device related info
+ * @num_lu: number of user logical units, used to check whether all LUs
+ *          finished initialization
+ * @rgn_size: device reported HPB region size
+ * @srgn_size: device reported HPB sub-region size
+ * @slave_conf_cnt: counter used to check that all LUs finished initialization
+ * @hpb_disabled: flag to check if HPB is disabled
+ * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
+ * @is_legacy: flag indicating HPB 1.0 (legacy) support
+ * @control_mode: either host or device
+ */
+struct ufshpb_dev_info {
+       int num_lu;
+       int rgn_size;
+       int srgn_size;
+       atomic_t slave_conf_cnt;
+       bool hpb_disabled;
+       u8 max_hpb_single_cmd;
+       bool is_legacy;
+       u8 control_mode;
+};
+#endif
+
 struct ufs_hba_monitor {
        unsigned long chunk_size;
 
@@ -674,6 +720,7 @@ struct ufs_hba_monitor {
  * @lrb: local reference block
  * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
  * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_lock: Protects @outstanding_reqs.
  * @outstanding_reqs: Bits representing outstanding transfer requests
  * @capabilities: UFS Controller Capabilities
  * @nutrs: Transfer Request Queue depth supported by controller
@@ -683,19 +730,17 @@ struct ufs_hba_monitor {
  * @priv: pointer to variant specific private data
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for uic command
+ * @uic_cmd_mutex: mutex for UIC command
  * @tmf_tag_set: TMF tag set.
  * @tmf_queue: Used to allocate TMF tags.
  * @pwr_done: completion for power mode change
- * @ufshcd_state: UFSHCD states
+ * @ufshcd_state: UFSHCD state
  * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
  * @is_powered: flag to check if HBA is powered
  * @shutting_down: flag to check if shutdown has been invoked
  * @host_sem: semaphore used to serialize concurrent contexts
- * @eh_wq: Workqueue that eh_work works on
- * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
  * @uic_error: UFS interconnect layer error status
@@ -760,6 +805,7 @@ struct ufs_hba {
        struct ufshcd_lrb *lrb;
 
        unsigned long outstanding_tasks;
+       spinlock_t outstanding_lock;
        unsigned long outstanding_reqs;
 
        u32 capabilities;
@@ -785,7 +831,7 @@ struct ufs_hba {
        struct mutex uic_cmd_mutex;
        struct completion *uic_async_done;
 
-       u32 ufshcd_state;
+       enum ufshcd_state ufshcd_state;
        u32 eh_flags;
        u32 intr_mask;
        u16 ee_ctrl_mask; /* Exception event mask */
@@ -797,8 +843,6 @@ struct ufs_hba {
        struct semaphore host_sem;
 
        /* Work Queues */
-       struct workqueue_struct *eh_wq;
-       struct work_struct eh_work;
        struct work_struct eeh_work;
 
        /* HBA Errors */
@@ -851,6 +895,10 @@ struct ufs_hba {
        struct request_queue    *bsg_queue;
        struct delayed_work rpm_dev_flush_recheck_work;
 
+#ifdef CONFIG_SCSI_UFS_HPB
+       struct ufshpb_dev_info ufshpb_dev;
+#endif
+
        struct ufs_hba_monitor  monitor;
 
 #ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -893,16 +941,8 @@ static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
 
 static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
 {
-/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
-#ifndef CONFIG_SCSI_UFS_DWC
-       if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
-           !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
-               return true;
-       else
-               return false;
-#else
-return true;
-#endif
+       return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
+               !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
 }
 
 static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
@@ -1009,11 +1049,14 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
        return 0;
 }
 
-extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
-extern int ufshcd_runtime_resume(struct ufs_hba *hba);
-extern int ufshcd_runtime_idle(struct ufs_hba *hba);
-extern int ufshcd_system_suspend(struct ufs_hba *hba);
-extern int ufshcd_system_resume(struct ufs_hba *hba);
+#ifdef CONFIG_PM
+extern int ufshcd_runtime_suspend(struct device *dev);
+extern int ufshcd_runtime_resume(struct device *dev);
+#endif
+#ifdef CONFIG_PM_SLEEP
+extern int ufshcd_system_suspend(struct device *dev);
+extern int ufshcd_system_resume(struct device *dev);
+#endif
 extern int ufshcd_shutdown(struct ufs_hba *hba);
 extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
                                      int agreed_gear,
@@ -1096,6 +1139,9 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
                           u8 param_offset,
                           u8 *param_read_buf,
                           u8 param_size);
+int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
+                           enum attr_idn idn, u8 index, u8 selector,
+                           u32 *attr_val);
 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
@@ -1160,11 +1206,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
        return ufshcd_readl(hba, REG_UFS_VERSION);
 }
 
-static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
-{
-       return (hba->ufs_version >= ufshci_version(3, 0));
-}
-
 static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
                        bool up, enum ufs_notify_change_status status)
 {
@@ -1226,18 +1267,6 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
        return -ENOTSUPP;
 }
 
-static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
-                                       bool is_scsi_cmd)
-{
-       if (hba->vops && hba->vops->setup_xfer_req) {
-               unsigned long flags;
-
-               spin_lock_irqsave(hba->host->host_lock, flags);
-               hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
-       }
-}
-
 static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
                                        int tag, u8 tm_function)
 {
index 5affb1f..de95be5 100644 (file)
@@ -39,7 +39,6 @@ enum {
        REG_UTP_TRANSFER_REQ_DOOR_BELL          = 0x58,
        REG_UTP_TRANSFER_REQ_LIST_CLEAR         = 0x5C,
        REG_UTP_TRANSFER_REQ_LIST_RUN_STOP      = 0x60,
-       REG_UTP_TRANSFER_REQ_LIST_COMPL         = 0x64,
        REG_UTP_TASK_REQ_LIST_BASE_L            = 0x70,
        REG_UTP_TASK_REQ_LIST_BASE_H            = 0x74,
        REG_UTP_TASK_REQ_DOOR_BELL              = 0x78,
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
new file mode 100644 (file)
index 0000000..02fb51a
--- /dev/null
@@ -0,0 +1,2933 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Yongmyung Lee <ymhungry.lee@samsung.com>
+ *     Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/async.h>
+
+#include "ufshcd.h"
+#include "ufshpb.h"
+#include "../sd.h"
+
+#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
+#define READ_TO_MS 1000
+#define READ_TO_EXPIRIES 100
+#define POLLING_INTERVAL_MS 200
+#define THROTTLE_MAP_REQ_DEFAULT 1
+
+/* memory management */
+static struct kmem_cache *ufshpb_mctx_cache;
+static mempool_t *ufshpb_mctx_pool;
+static mempool_t *ufshpb_page_pool;
+/* With 8-byte PPN entries mapping 4KB blocks, a 2MB cache covers a 1GB range. */
+static unsigned int ufshpb_host_map_kbytes = 2048;
+static int tot_active_srgn_pages;
+
+static struct workqueue_struct *ufshpb_wq;
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+                                     int srgn_idx);
+
+bool ufshpb_is_allowed(struct ufs_hba *hba)
+{
+       return !(hba->ufshpb_dev.hpb_disabled);
+}
+
+/* HPB version 1.0 is considered the legacy version. */
+bool ufshpb_is_legacy(struct ufs_hba *hba)
+{
+       return hba->ufshpb_dev.is_legacy;
+}
+
+static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
+{
+       return sdev->hostdata;
+}
+
+static int ufshpb_get_state(struct ufshpb_lu *hpb)
+{
+       return atomic_read(&hpb->hpb_state);
+}
+
+static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
+{
+       atomic_set(&hpb->hpb_state, state);
+}
+
+static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
+                               struct ufshpb_subregion *srgn)
+{
+       return rgn->rgn_state != HPB_RGN_INACTIVE &&
+               srgn->srgn_state == HPB_SRGN_VALID;
+}
+
+static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
+{
+       return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
+}
+
+static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
+{
+       return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
+              op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
+}
+
+static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
+{
+       return transfer_len <= hpb->pre_req_max_tr_len;
+}
+
+/*
+ * In this driver, the WRITE_BUFFER command supports transfer lengths from
+ * 36KB (len=9) up to 1MB (len=256) by default. The range of transfer_len
+ * can be changed through sysfs.
+ */
+static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
+{
+       return len > hpb->pre_req_min_tr_len &&
+              len <= hpb->pre_req_max_tr_len;
+}
+
+static bool ufshpb_is_general_lun(int lun)
+{
+       return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
+{
+       if (hpb->lu_pinned_end != PINNED_NOT_SET &&
+           rgn_idx >= hpb->lu_pinned_start &&
+           rgn_idx <= hpb->lu_pinned_end)
+               return true;
+
+       return false;
+}
+
+static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
+{
+       bool ret = false;
+       unsigned long flags;
+
+       if (ufshpb_get_state(hpb) != HPB_PRESENT)
+               return;
+
+       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
+               ret = true;
+       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+       if (ret)
+               queue_work(ufshpb_wq, &hpb->map_work);
+}
+
+static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
+                                   struct ufshcd_lrb *lrbp,
+                                   struct utp_hpb_rsp *rsp_field)
+{
+       /* Check HPB_UPDATE_ALERT */
+       if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
+             UPIU_HEADER_DWORD(0, 2, 0, 0)))
+               return false;
+
+       if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
+           rsp_field->desc_type != DEV_DES_TYPE ||
+           rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
+           rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
+           rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
+           rsp_field->hpb_op == HPB_RSP_NONE ||
+           (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
+            !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
+               return false;
+
+       if (!ufshpb_is_general_lun(rsp_field->lun)) {
+               dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
+                        lrbp->lun);
+               return false;
+       }
+
+       return true;
+}
+
+static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
+                              int srgn_offset, int cnt, bool set_dirty)
+{
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn, *prev_srgn = NULL;
+       int set_bit_len;
+       int bitmap_len;
+       unsigned long flags;
+
+next_srgn:
+       rgn = hpb->rgn_tbl + rgn_idx;
+       srgn = rgn->srgn_tbl + srgn_idx;
+
+       if (likely(!srgn->is_last))
+               bitmap_len = hpb->entries_per_srgn;
+       else
+               bitmap_len = hpb->last_srgn_entries;
+
+       if ((srgn_offset + cnt) > bitmap_len)
+               set_bit_len = bitmap_len - srgn_offset;
+       else
+               set_bit_len = cnt;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+               if (set_dirty) {
+                       if (srgn->srgn_state == HPB_SRGN_VALID)
+                               bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
+                                          set_bit_len);
+               } else if (hpb->is_hcm) {
+                       /* rewind the read timer for LRU regions */
+                       rgn->read_timeout = ktime_add_ms(ktime_get(),
+                                       rgn->hpb->params.read_timeout_ms);
+                       rgn->read_timeout_expiries =
+                               rgn->hpb->params.read_timeout_expiries;
+               }
+       }
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+       if (hpb->is_hcm && prev_srgn != srgn) {
+               bool activate = false;
+
+               spin_lock(&rgn->rgn_lock);
+               if (set_dirty) {
+                       rgn->reads -= srgn->reads;
+                       srgn->reads = 0;
+                       set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+               } else {
+                       srgn->reads++;
+                       rgn->reads++;
+                       if (srgn->reads == hpb->params.activation_thld)
+                               activate = true;
+               }
+               spin_unlock(&rgn->rgn_lock);
+
+               if (activate ||
+                   test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
+                       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+                       ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
+                       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+                       dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+                               "activate region %d-%d\n", rgn_idx, srgn_idx);
+               }
+
+               prev_srgn = srgn;
+       }
+
+       srgn_offset = 0;
+       if (++srgn_idx == hpb->srgns_per_rgn) {
+               srgn_idx = 0;
+               rgn_idx++;
+       }
+
+       cnt -= set_bit_len;
+       if (cnt > 0)
+               goto next_srgn;
+}
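The goto loop above clamps each step to the entries remaining in the current subregion bitmap before rolling into the next subregion or region. A standalone sketch of just that clamping, with hypothetical geometry (4 subregions per region, 16 entries each):

#include <stdio.h>

int main(void)
{
	/* Hypothetical geometry: 4 subregions per region, 16 entries each. */
	const int entries_per_srgn = 16, srgns_per_rgn = 4;
	int rgn_idx = 0, srgn_idx = 2, srgn_offset = 12;
	int cnt = 25;	/* entries touched by the request */

	while (cnt > 0) {
		/* Clamp to what is left in this subregion's bitmap. */
		int set_bit_len = cnt;

		if (srgn_offset + cnt > entries_per_srgn)
			set_bit_len = entries_per_srgn - srgn_offset;

		printf("rgn %d srgn %d: offset %d len %d\n",
		       rgn_idx, srgn_idx, srgn_offset, set_bit_len);

		srgn_offset = 0;
		if (++srgn_idx == srgns_per_rgn) { /* roll into next region */
			srgn_idx = 0;
			rgn_idx++;
		}
		cnt -= set_bit_len;
	}
	return 0;
}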
+
+static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
+                                 int srgn_idx, int srgn_offset, int cnt)
+{
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+       int bitmap_len;
+       int bit_len;
+
+next_srgn:
+       rgn = hpb->rgn_tbl + rgn_idx;
+       srgn = rgn->srgn_tbl + srgn_idx;
+
+       if (likely(!srgn->is_last))
+               bitmap_len = hpb->entries_per_srgn;
+       else
+               bitmap_len = hpb->last_srgn_entries;
+
+       if (!ufshpb_is_valid_srgn(rgn, srgn))
+               return true;
+
+       /*
+        * If the region state is active, mctx must be allocated.
+        * If it is not, the region was either evicted or the
+        * mctx allocation failed.
+        */
+       if (unlikely(!srgn->mctx)) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "no mctx in region %d subregion %d.\n",
+                       srgn->rgn_idx, srgn->srgn_idx);
+               return true;
+       }
+
+       if ((srgn_offset + cnt) > bitmap_len)
+               bit_len = bitmap_len - srgn_offset;
+       else
+               bit_len = cnt;
+
+       if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
+                         srgn_offset) < bit_len + srgn_offset)
+               return true;
+
+       srgn_offset = 0;
+       if (++srgn_idx == hpb->srgns_per_rgn) {
+               srgn_idx = 0;
+               rgn_idx++;
+       }
+
+       cnt -= bit_len;
+       if (cnt > 0)
+               goto next_srgn;
+
+       return false;
+}
+
+static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
+{
+       return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+}
+
+static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
+                                    struct ufshpb_map_ctx *mctx, int pos,
+                                    int len, __be64 *ppn_buf)
+{
+       struct page *page;
+       int index, offset;
+       int copied;
+
+       index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
+       offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
+
+       if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
+               copied = len;
+       else
+               copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
+
+       page = mctx->m_page[index];
+       if (unlikely(!page)) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "error. cannot find page in mctx\n");
+               return -ENOMEM;
+       }
+
+       memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
+              copied * HPB_ENTRY_SIZE);
+
+       return copied;
+}
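The PPN table is an array of 8-byte entries spread across pages, so a linear entry position splits into a page index plus an in-page offset, and a copy is clamped at the page boundary. A worked standalone sketch (4KB pages, hence 512 entries per page; pos and len are hypothetical):

#include <stdio.h>

#define PAGE_SIZE        4096
#define HPB_ENTRY_SIZE   8
#define ENTRIES_PER_PAGE (PAGE_SIZE / HPB_ENTRY_SIZE)	/* 512 */

int main(void)
{
	int pos = 1000, len = 40;	/* hypothetical entry position/count */

	int index  = pos / ENTRIES_PER_PAGE;	/* which m_page[] slot */
	int offset = pos % ENTRIES_PER_PAGE;	/* entry within that page */

	/* Copies never cross a page: clamp exactly as the driver does. */
	int copied = (offset + len <= ENTRIES_PER_PAGE)
			? len : ENTRIES_PER_PAGE - offset;

	/* page 1, offset 488, copied 24 of 40 */
	printf("page %d offset %d copied %d of %d\n", index, offset, copied, len);
	return 0;
}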
+
+static void
+ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
+                       int *srgn_idx, int *offset)
+{
+       int rgn_offset;
+
+       *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
+       rgn_offset = lpn & hpb->entries_per_rgn_mask;
+       *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
+       *offset = rgn_offset & hpb->entries_per_srgn_mask;
+}
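Because regions and subregions hold a power-of-two number of entries, the LPN decomposes with shifts and masks. A worked standalone sketch with hypothetical geometry (4096 entries per region, 512 per subregion):

#include <stdio.h>

int main(void)
{
	/* Hypothetical geometry: 4096 entries/region, 512 entries/subregion. */
	const unsigned long rgn_shift = 12, rgn_mask = 0xfff;
	const unsigned long srgn_shift = 9, srgn_mask = 0x1ff;
	unsigned long lpn = 0x12345;

	unsigned long rgn_idx = lpn >> rgn_shift;
	unsigned long rgn_offset = lpn & rgn_mask;
	unsigned long srgn_idx = rgn_offset >> srgn_shift;
	unsigned long offset = rgn_offset & srgn_mask;

	/* 0x12345 -> region 0x12, subregion 0x1, entry 0x145 */
	printf("rgn %#lx srgn %#lx offset %#lx\n", rgn_idx, srgn_idx, offset);
	return 0;
}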
+
+static void
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
+                           struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
+                           u8 transfer_len, int read_id)
+{
+       unsigned char *cdb = lrbp->cmd->cmnd;
+       __be64 ppn_tmp = ppn;
+
+       cdb[0] = UFSHPB_READ;
+
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
+               ppn_tmp = swab64(ppn);
+
+       /* ppn value is stored as big-endian in the host memory */
+       memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
+       cdb[14] = transfer_len;
+       cdb[15] = read_id;
+
+       lrbp->cmd->cmd_len = UFS_CDB_SIZE;
+}
+
+static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
+                                           unsigned long lpn, unsigned int len,
+                                           int read_id)
+{
+       cdb[0] = UFSHPB_WRITE_BUFFER;
+       cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
+
+       put_unaligned_be32(lpn, &cdb[2]);
+       cdb[6] = read_id;
+       put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
+
+       cdb[9] = 0x00;  /* Control = 0x00 */
+}
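The prefetch CDB is ten bytes with the LPN and the byte length stored big-endian. A standalone sketch of the packing, with plain shifts in place of put_unaligned_be* and the opcode/buffer-ID values assumed for illustration:

#include <stdio.h>
#include <stdint.h>

/* Values assumed here for the sketch. */
#define UFSHPB_WRITE_BUFFER             0xfa
#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02
#define HPB_ENTRY_SIZE                  8

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8; p[1] = v;
}

int main(void)
{
	uint8_t cdb[10] = {0};
	uint32_t lpn = 0x1000;	/* hypothetical */
	uint16_t len = 32;	/* entries to prefetch */
	int read_id = 5;

	cdb[0] = UFSHPB_WRITE_BUFFER;
	cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
	put_be32(lpn, &cdb[2]);
	cdb[6] = read_id;
	put_be16(len * HPB_ENTRY_SIZE, &cdb[7]);	/* length in bytes */
	cdb[9] = 0x00;	/* Control */

	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);	/* fa 02 00 00 10 00 05 01 00 00 */
	printf("\n");
	return 0;
}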
+
+static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
+{
+       struct ufshpb_req *pre_req;
+
+       if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
+               dev_info(&hpb->sdev_ufs_lu->sdev_dev,
+                        "pre_req throttle. inflight %d throttle %d",
+                        hpb->num_inflight_pre_req, hpb->throttle_pre_req);
+               return NULL;
+       }
+
+       pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
+                                          struct ufshpb_req, list_req);
+       if (!pre_req) {
+               dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
+               return NULL;
+       }
+
+       list_del_init(&pre_req->list_req);
+       hpb->num_inflight_pre_req++;
+
+       return pre_req;
+}
+
+static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
+                                     struct ufshpb_req *pre_req)
+{
+       pre_req->req = NULL;
+       bio_reset(pre_req->bio);
+       list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+       hpb->num_inflight_pre_req--;
+}
+
+static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
+{
+       struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
+       struct ufshpb_lu *hpb = pre_req->hpb;
+       unsigned long flags;
+
+       if (error) {
+               struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+               struct scsi_sense_hdr sshdr;
+
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
+               scsi_command_normalize_sense(cmd, &sshdr);
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "code %x sense_key %x asc %x ascq %x",
+                       sshdr.response_code,
+                       sshdr.sense_key, sshdr.asc, sshdr.ascq);
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "byte4 %x byte5 %x byte6 %x additional_len %x",
+                       sshdr.byte4, sshdr.byte5,
+                       sshdr.byte6, sshdr.additional_length);
+       }
+
+       blk_mq_free_request(req);
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       ufshpb_put_pre_req(pre_req->hpb, pre_req);
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+}
+
+static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
+{
+       struct ufshpb_lu *hpb = pre_req->hpb;
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+       __be64 *addr;
+       int offset = 0;
+       int copied;
+       unsigned long lpn = pre_req->wb.lpn;
+       int rgn_idx, srgn_idx, srgn_offset;
+       unsigned long flags;
+
+       addr = page_address(page);
+       ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+next_offset:
+       rgn = hpb->rgn_tbl + rgn_idx;
+       srgn = rgn->srgn_tbl + srgn_idx;
+
+       if (!ufshpb_is_valid_srgn(rgn, srgn))
+               goto mctx_error;
+
+       if (!srgn->mctx)
+               goto mctx_error;
+
+       copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
+                                          pre_req->wb.len - offset,
+                                          &addr[offset]);
+
+       if (copied < 0)
+               goto mctx_error;
+
+       offset += copied;
+       srgn_offset += copied;
+
+       if (srgn_offset == hpb->entries_per_srgn) {
+               srgn_offset = 0;
+
+               if (++srgn_idx == hpb->srgns_per_rgn) {
+                       srgn_idx = 0;
+                       rgn_idx++;
+               }
+       }
+
+       if (offset < pre_req->wb.len)
+               goto next_offset;
+
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       return 0;
+mctx_error:
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       return -ENOMEM;
+}
+
+static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
+                                      struct request_queue *q,
+                                      struct ufshpb_req *pre_req)
+{
+       struct page *page = pre_req->wb.m_page;
+       struct bio *bio = pre_req->bio;
+       int entries_bytes, ret;
+
+       if (!page)
+               return -ENOMEM;
+
+       if (ufshpb_prep_entry(pre_req, page))
+               return -ENOMEM;
+
+       entries_bytes = pre_req->wb.len * sizeof(__be64);
+
+       ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
+       if (ret != entries_bytes) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "bio_add_pc_page fail: %d", ret);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
+{
+       if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
+               hpb->cur_read_id = 1;
+       return hpb->cur_read_id;
+}
+
+static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
+                                 struct ufshpb_req *pre_req, int read_id)
+{
+       struct scsi_device *sdev = cmd->device;
+       struct request_queue *q = sdev->request_queue;
+       struct request *req;
+       struct scsi_request *rq;
+       struct bio *bio = pre_req->bio;
+
+       pre_req->hpb = hpb;
+       pre_req->wb.lpn = sectors_to_logical(cmd->device,
+                                            blk_rq_pos(scsi_cmd_to_rq(cmd)));
+       pre_req->wb.len = sectors_to_logical(cmd->device,
+                                            blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+       if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
+               return -ENOMEM;
+
+       req = pre_req->req;
+
+       /* 1. request setup */
+       blk_rq_append_bio(req, bio);
+       req->rq_disk = NULL;
+       req->end_io_data = (void *)pre_req;
+       req->end_io = ufshpb_pre_req_compl_fn;
+
+       /* 2. scsi_request setup */
+       rq = scsi_req(req);
+       rq->retries = 1;
+
+       ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
+                                read_id);
+       rq->cmd_len = scsi_command_size(rq->cmd);
+
+       if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
+               return -EAGAIN;
+
+       hpb->stats.pre_req_cnt++;
+
+       return 0;
+}
+
+static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
+                               int *read_id)
+{
+       struct ufshpb_req *pre_req;
+       struct request *req = NULL;
+       unsigned long flags;
+       int _read_id;
+       int ret = 0;
+
+       req = blk_get_request(cmd->device->request_queue,
+                             REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
+       if (IS_ERR(req))
+               return -EAGAIN;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       pre_req = ufshpb_get_pre_req(hpb);
+       if (!pre_req) {
+               ret = -EAGAIN;
+               goto unlock_out;
+       }
+       _read_id = ufshpb_get_read_id(hpb);
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+       pre_req->req = req;
+
+       ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
+       if (ret)
+               goto free_pre_req;
+
+       *read_id = _read_id;
+
+       return ret;
+free_pre_req:
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       ufshpb_put_pre_req(hpb, pre_req);
+unlock_out:
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       blk_put_request(req);
+       return ret;
+}
+
+/*
+ * This function will set up HPB read command using host-side L2P map data.
+ */
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+       struct ufshpb_lu *hpb;
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+       struct scsi_cmnd *cmd = lrbp->cmd;
+       u32 lpn;
+       __be64 ppn;
+       unsigned long flags;
+       int transfer_len, rgn_idx, srgn_idx, srgn_offset;
+       int read_id = 0;
+       int err = 0;
+
+       hpb = ufshpb_get_hpb_data(cmd->device);
+       if (!hpb)
+               return -ENODEV;
+
+       if (ufshpb_get_state(hpb) == HPB_INIT)
+               return -ENODEV;
+
+       if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+               dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+                          "%s: ufshpb state is not PRESENT", __func__);
+               return -ENODEV;
+       }
+
+       if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
+           (!ufshpb_is_write_or_discard(cmd) &&
+            !ufshpb_is_read_cmd(cmd)))
+               return 0;
+
+       transfer_len = sectors_to_logical(cmd->device,
+                                         blk_rq_sectors(scsi_cmd_to_rq(cmd)));
+       if (unlikely(!transfer_len))
+               return 0;
+
+       lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
+       ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+       rgn = hpb->rgn_tbl + rgn_idx;
+       srgn = rgn->srgn_tbl + srgn_idx;
+
+       /* If command type is WRITE or DISCARD, set bitmap as dirty */
+       if (ufshpb_is_write_or_discard(cmd)) {
+               ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+                                  transfer_len, true);
+               return 0;
+       }
+
+       if (!ufshpb_is_supported_chunk(hpb, transfer_len))
+               return 0;
+
+       WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
+
+       if (hpb->is_hcm) {
+               /*
+                * In host control mode, reads are the main source for
+                * activation trials.
+                */
+               ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
+                                  transfer_len, false);
+
+               /* keep those counters normalized */
+               if (rgn->reads > hpb->entries_per_srgn)
+                       schedule_work(&hpb->ufshpb_normalization_work);
+       }
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+                                  transfer_len)) {
+               hpb->stats.miss_cnt++;
+               spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+               return 0;
+       }
+
+       err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       if (unlikely(err < 0)) {
+               /*
+                * The region state is active, but its PPN table has not
+                * been allocated. A PPN table must always be allocated
+                * while a region is in the active state.
+                */
+               dev_err(hba->dev, "get ppn failed. err %d\n", err);
+               return err;
+       }
+       if (!ufshpb_is_legacy(hba) &&
+           ufshpb_is_required_wb(hpb, transfer_len)) {
+               err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
+               if (err) {
+                       unsigned long timeout;
+
+                       timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
+                                 hpb->params.requeue_timeout_ms);
+
+                       if (time_before(jiffies, timeout))
+                               return -EAGAIN;
+
+                       hpb->stats.miss_cnt++;
+                       return 0;
+               }
+       }
+
+       ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
+                                   read_id);
+
+       hpb->stats.hit_cnt++;
+       return 0;
+}
+
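+/*
+ * Allocate an ufshpb_req and its backing block-layer request.
+ * Non-atomic callers retry the NOWAIT allocation up to
+ * HPB_MAP_REQ_RETRIES times, sleeping ~3ms between attempts; atomic
+ * callers must not sleep and therefore fail immediately.
+ */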
+static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
+                                        int rgn_idx, enum req_opf dir,
+                                        bool atomic)
+{
+       struct ufshpb_req *rq;
+       struct request *req;
+       int retries = HPB_MAP_REQ_RETRIES;
+
+       rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
+       if (!rq)
+               return NULL;
+
+retry:
+       req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
+                             BLK_MQ_REQ_NOWAIT);
+
+       if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
+               usleep_range(3000, 3100);
+               goto retry;
+       }
+
+       if (IS_ERR(req))
+               goto free_rq;
+
+       rq->hpb = hpb;
+       rq->req = req;
+       rq->rb.rgn_idx = rgn_idx;
+
+       return rq;
+
+free_rq:
+       kmem_cache_free(hpb->map_req_cache, rq);
+       return NULL;
+}
+
+static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
+{
+       blk_put_request(rq->req);
+       kmem_cache_free(hpb->map_req_cache, rq);
+}
+
+static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
+                                            struct ufshpb_subregion *srgn)
+{
+       struct ufshpb_req *map_req;
+       struct bio *bio;
+       unsigned long flags;
+
+       if (hpb->is_hcm &&
+           hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
+               dev_info(&hpb->sdev_ufs_lu->sdev_dev,
+                        "map_req throttle. inflight %d throttle %d",
+                        hpb->num_inflight_map_req,
+                        hpb->params.inflight_map_req);
+               return NULL;
+       }
+
+       map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
+       if (!map_req)
+               return NULL;
+
+       bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+       if (!bio) {
+               ufshpb_put_req(hpb, map_req);
+               return NULL;
+       }
+
+       map_req->bio = bio;
+
+       map_req->rb.srgn_idx = srgn->srgn_idx;
+       map_req->rb.mctx = srgn->mctx;
+
+       spin_lock_irqsave(&hpb->param_lock, flags);
+       hpb->num_inflight_map_req++;
+       spin_unlock_irqrestore(&hpb->param_lock, flags);
+
+       return map_req;
+}
+
+static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
+                              struct ufshpb_req *map_req)
+{
+       unsigned long flags;
+
+       bio_put(map_req->bio);
+       ufshpb_put_req(hpb, map_req);
+
+       spin_lock_irqsave(&hpb->param_lock, flags);
+       hpb->num_inflight_map_req--;
+       spin_unlock_irqrestore(&hpb->param_lock, flags);
+}
+
+static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
+                                    struct ufshpb_subregion *srgn)
+{
+       struct ufshpb_region *rgn;
+       u32 num_entries = hpb->entries_per_srgn;
+
+       if (!srgn->mctx) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "no mctx in region %d subregion %d.\n",
+                       srgn->rgn_idx, srgn->srgn_idx);
+               return -1;
+       }
+
+       if (unlikely(srgn->is_last))
+               num_entries = hpb->last_srgn_entries;
+
+       bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
+
+       rgn = hpb->rgn_tbl + srgn->rgn_idx;
+       clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
+
+       return 0;
+}
+
+static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
+                                     int srgn_idx)
+{
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+
+       rgn = hpb->rgn_tbl + rgn_idx;
+       srgn = rgn->srgn_tbl + srgn_idx;
+
+       list_del_init(&rgn->list_inact_rgn);
+
+       if (list_empty(&srgn->list_act_srgn))
+               list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+
+       hpb->stats.rb_active_cnt++;
+}
+
+static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
+{
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+       int srgn_idx;
+
+       rgn = hpb->rgn_tbl + rgn_idx;
+
+       for_each_sub_region(rgn, srgn_idx, srgn)
+               list_del_init(&srgn->list_act_srgn);
+
+       if (list_empty(&rgn->list_inact_rgn))
+               list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
+
+       hpb->stats.rb_inactive_cnt++;
+}
+
+static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
+                                     struct ufshpb_subregion *srgn)
+{
+       struct ufshpb_region *rgn;
+
+       /*
+        * If the subregion has no mctx after the HPB_READ_BUFFER I/O has
+        * completed, the region the subregion belongs to was evicted in
+        * the meantime. A region must not be evicted while its map I/O
+        * is still in progress.
+        */
+       if (!srgn->mctx) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "no mctx in region %d subregion %d.\n",
+                       srgn->rgn_idx, srgn->srgn_idx);
+               srgn->srgn_state = HPB_SRGN_INVALID;
+               return;
+       }
+
+       rgn = hpb->rgn_tbl + srgn->rgn_idx;
+
+       if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "region %d subregion %d evicted\n",
+                       srgn->rgn_idx, srgn->srgn_idx);
+               srgn->srgn_state = HPB_SRGN_INVALID;
+               return;
+       }
+       srgn->srgn_state = HPB_SRGN_VALID;
+}
+
+static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+{
+       struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
+
+       ufshpb_put_req(umap_req->hpb, umap_req);
+}
+
+static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+{
+       struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
+       struct ufshpb_lu *hpb = map_req->hpb;
+       struct ufshpb_subregion *srgn;
+       unsigned long flags;
+
+       srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
+               map_req->rb.srgn_idx;
+
+       ufshpb_clear_dirty_bitmap(hpb, srgn);
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       ufshpb_activate_subregion(hpb, srgn);
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+       ufshpb_put_map_req(map_req->hpb, map_req);
+}
+
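+/*
+ * Build the HPB WRITE BUFFER CDB used to inactivate ("unmap") either a
+ * single region (rgn != NULL, region index in bytes 2-3) or all active
+ * regions at once (rgn == NULL).
+ */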
+static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
+{
+       cdb[0] = UFSHPB_WRITE_BUFFER;
+       cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
+                         UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
+       if (rgn)
+               put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
+       cdb[9] = 0x00;
+}
+
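+/*
+ * Build the HPB READ BUFFER CDB that fetches one subregion's L2P map:
+ * region index in bytes 2-3, subregion index in bytes 4-5, allocation
+ * length in bytes 6-8, all big-endian.
+ */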
+static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
+                                   int srgn_idx, int srgn_mem_size)
+{
+       cdb[0] = UFSHPB_READ_BUFFER;
+       cdb[1] = UFSHPB_READ_BUFFER_ID;
+
+       put_unaligned_be16(rgn_idx, &cdb[2]);
+       put_unaligned_be16(srgn_idx, &cdb[4]);
+       put_unaligned_be24(srgn_mem_size, &cdb[6]);
+
+       cdb[9] = 0x00;
+}
+
+static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
+                                  struct ufshpb_req *umap_req,
+                                  struct ufshpb_region *rgn)
+{
+       struct request *req;
+       struct scsi_request *rq;
+
+       req = umap_req->req;
+       req->timeout = 0;
+       req->end_io_data = (void *)umap_req;
+       rq = scsi_req(req);
+       ufshpb_set_unmap_cmd(rq->cmd, rgn);
+       rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
+
+       blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
+
+       hpb->stats.umap_req_cnt++;
+}
+
+static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
+                                 struct ufshpb_req *map_req, bool last)
+{
+       struct request_queue *q;
+       struct request *req;
+       struct scsi_request *rq;
+       int mem_size = hpb->srgn_mem_size;
+       int ret = 0;
+       int i;
+
+       q = hpb->sdev_ufs_lu->request_queue;
+       for (i = 0; i < hpb->pages_per_srgn; i++) {
+               ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
+                                     PAGE_SIZE, 0);
+               if (ret != PAGE_SIZE) {
+                       dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                                  "bio_add_pc_page fail %d - %d\n",
+                                  map_req->rb.rgn_idx, map_req->rb.srgn_idx);
+                       return ret;
+               }
+       }
+
+       req = map_req->req;
+
+       blk_rq_append_bio(req, map_req->bio);
+
+       req->end_io_data = map_req;
+
+       rq = scsi_req(req);
+
+       if (unlikely(last))
+               mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
+
+       ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
+                               map_req->rb.srgn_idx, mem_size);
+       rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
+
+       blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
+
+       hpb->stats.map_req_cnt++;
+       return 0;
+}
+
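+/*
+ * Allocate the map context for one subregion: pages_per_srgn pages to
+ * hold the L2P entries plus a dirty bitmap with one bit per entry.
+ * The context and its pages come from dedicated mempools; the
+ * page-pointer array comes from a per-LU kmem cache.
+ */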
+static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
+                                                bool last)
+{
+       struct ufshpb_map_ctx *mctx;
+       u32 num_entries = hpb->entries_per_srgn;
+       int i, j;
+
+       mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
+       if (!mctx)
+               return NULL;
+
+       mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
+       if (!mctx->m_page)
+               goto release_mctx;
+
+       if (unlikely(last))
+               num_entries = hpb->last_srgn_entries;
+
+       mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
+       if (!mctx->ppn_dirty)
+               goto release_m_page;
+
+       for (i = 0; i < hpb->pages_per_srgn; i++) {
+               mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
+               if (!mctx->m_page[i]) {
+                       for (j = 0; j < i; j++)
+                               mempool_free(mctx->m_page[j], ufshpb_page_pool);
+                       goto release_ppn_dirty;
+               }
+               clear_page(page_address(mctx->m_page[i]));
+       }
+
+       return mctx;
+
+release_ppn_dirty:
+       bitmap_free(mctx->ppn_dirty);
+release_m_page:
+       kmem_cache_free(hpb->m_page_cache, mctx->m_page);
+release_mctx:
+       mempool_free(mctx, ufshpb_mctx_pool);
+       return NULL;
+}
+
+static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
+                              struct ufshpb_map_ctx *mctx)
+{
+       int i;
+
+       for (i = 0; i < hpb->pages_per_srgn; i++)
+               mempool_free(mctx->m_page[i], ufshpb_page_pool);
+
+       bitmap_free(mctx->ppn_dirty);
+       kmem_cache_free(hpb->m_page_cache, mctx->m_page);
+       mempool_free(mctx, ufshpb_mctx_pool);
+}
+
+static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
+                                         struct ufshpb_region *rgn)
+{
+       struct ufshpb_subregion *srgn;
+       int srgn_idx;
+
+       for_each_sub_region(rgn, srgn_idx, srgn)
+               if (srgn->srgn_state == HPB_SRGN_ISSUED)
+                       return -EPERM;
+
+       return 0;
+}
+
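+/*
+ * Delayed work (host control mode): scan the LRU list for active
+ * regions whose read_timeout has expired, re-arm clean regions that
+ * still have expiries left, and queue dirty or exhausted regions for
+ * inactivation. The work re-schedules itself every
+ * timeout_polling_interval_ms.
+ */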
+static void ufshpb_read_to_handler(struct work_struct *work)
+{
+       struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+                                            ufshpb_read_to_work.work);
+       struct victim_select_info *lru_info = &hpb->lru_info;
+       struct ufshpb_region *rgn, *next_rgn;
+       unsigned long flags;
+       unsigned int poll;
+       LIST_HEAD(expired_list);
+
+       if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
+               return;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+       list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
+                                list_lru_rgn) {
+               bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
+
+               if (timedout) {
+                       rgn->read_timeout_expiries--;
+                       if (is_rgn_dirty(rgn) ||
+                           rgn->read_timeout_expiries == 0)
+                               list_add(&rgn->list_expired_rgn, &expired_list);
+                       else
+                               rgn->read_timeout = ktime_add_ms(ktime_get(),
+                                               hpb->params.read_timeout_ms);
+               }
+       }
+
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+       list_for_each_entry_safe(rgn, next_rgn, &expired_list,
+                                list_expired_rgn) {
+               list_del_init(&rgn->list_expired_rgn);
+               spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+               ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+               spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+       }
+
+       ufshpb_kick_map_work(hpb);
+
+       clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
+
+       poll = hpb->params.timeout_polling_interval_ms;
+       schedule_delayed_work(&hpb->ufshpb_read_to_work,
+                             msecs_to_jiffies(poll));
+}
+
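+/*
+ * Activate a region: add it at the most-recently-used end of the LRU
+ * list and, in host control mode, arm its read timeout and expiry
+ * budget.
+ */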
+static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
+                               struct ufshpb_region *rgn)
+{
+       rgn->rgn_state = HPB_RGN_ACTIVE;
+       list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
+       atomic_inc(&lru_info->active_cnt);
+       if (rgn->hpb->is_hcm) {
+               rgn->read_timeout =
+                       ktime_add_ms(ktime_get(),
+                                    rgn->hpb->params.read_timeout_ms);
+               rgn->read_timeout_expiries =
+                       rgn->hpb->params.read_timeout_expiries;
+       }
+}
+
+static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
+                               struct ufshpb_region *rgn)
+{
+       list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
+}
+
+static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
+{
+       struct victim_select_info *lru_info = &hpb->lru_info;
+       struct ufshpb_region *rgn, *victim_rgn = NULL;
+
+       list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
+               if (ufshpb_check_srgns_issue_state(hpb, rgn))
+                       continue;
+
+               /*
+                * In host control mode, only pick a victim whose read
+                * count has dropped below the eviction exit threshold.
+                */
+               if (hpb->is_hcm &&
+                   rgn->reads > hpb->params.eviction_thld_exit)
+                       continue;
+
+               victim_rgn = rgn;
+               break;
+       }
+
+       return victim_rgn;
+}
+
+static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
+                                   struct ufshpb_region *rgn)
+{
+       list_del_init(&rgn->list_lru_rgn);
+       rgn->rgn_state = HPB_RGN_INACTIVE;
+       atomic_dec(&lru_info->active_cnt);
+}
+
+static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
+                                         struct ufshpb_subregion *srgn)
+{
+       if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+               ufshpb_put_map_ctx(hpb, srgn->mctx);
+               srgn->srgn_state = HPB_SRGN_UNUSED;
+               srgn->mctx = NULL;
+       }
+}
+
+static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
+                                struct ufshpb_region *rgn,
+                                bool atomic)
+{
+       struct ufshpb_req *umap_req;
+       int rgn_idx = rgn ? rgn->rgn_idx : 0;
+
+       umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
+       if (!umap_req)
+               return -ENOMEM;
+
+       ufshpb_execute_umap_req(hpb, umap_req, rgn);
+
+       return 0;
+}
+
+static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
+                                       struct ufshpb_region *rgn)
+{
+       return ufshpb_issue_umap_req(hpb, rgn, true);
+}
+
+static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
+{
+       return ufshpb_issue_umap_req(hpb, NULL, false);
+}
+
+static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
+                                struct ufshpb_region *rgn)
+{
+       struct victim_select_info *lru_info;
+       struct ufshpb_subregion *srgn;
+       int srgn_idx;
+
+       lru_info = &hpb->lru_info;
+
+       dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
+
+       ufshpb_cleanup_lru_info(lru_info, rgn);
+
+       for_each_sub_region(rgn, srgn_idx, srgn)
+               ufshpb_purge_active_subregion(hpb, srgn);
+}
+
+static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       if (rgn->rgn_state == HPB_RGN_PINNED) {
+               dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+                        "pinned region cannot drop-out. region %d\n",
+                        rgn->rgn_idx);
+               goto out;
+       }
+
+       if (!list_empty(&rgn->list_lru_rgn)) {
+               if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+
+               if (hpb->is_hcm) {
+                       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+                       ret = ufshpb_issue_umap_single_req(hpb, rgn);
+                       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+                       if (ret)
+                               goto out;
+               }
+
+               __ufshpb_evict_region(hpb, rgn);
+       }
+out:
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       return ret;
+}
+
+static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
+                               struct ufshpb_region *rgn,
+                               struct ufshpb_subregion *srgn)
+{
+       struct ufshpb_req *map_req;
+       unsigned long flags;
+       int ret;
+       int err = -EAGAIN;
+       bool alloc_required = false;
+       enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+       if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+               dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+                          "%s: ufshpb state is not PRESENT\n", __func__);
+               goto unlock_out;
+       }
+
+       if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
+           (srgn->srgn_state == HPB_SRGN_INVALID)) {
+               err = 0;
+               goto unlock_out;
+       }
+
+       if (srgn->srgn_state == HPB_SRGN_UNUSED)
+               alloc_required = true;
+
+       /*
+        * If the subregion is already in the ISSUED state, a device-side
+        * event (e.g. GC or wear-leveling) has produced another map-load
+        * response while an HPB_READ_BUFFER is still in flight. In that
+        * case the next HPB_READ_BUFFER is issued again after the current
+        * one finishes, so that the latest map data is obtained.
+        */
+       if (srgn->srgn_state == HPB_SRGN_ISSUED)
+               goto unlock_out;
+
+       srgn->srgn_state = HPB_SRGN_ISSUED;
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+       if (alloc_required) {
+               srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
+               if (!srgn->mctx) {
+                       dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                           "get map_ctx failed. region %d - %d\n",
+                           rgn->rgn_idx, srgn->srgn_idx);
+                       state = HPB_SRGN_UNUSED;
+                       goto change_srgn_state;
+               }
+       }
+
+       map_req = ufshpb_get_map_req(hpb, srgn);
+       if (!map_req)
+               goto change_srgn_state;
+
+       ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
+       if (ret) {
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                          "%s: issue map_req failed: %d, region %d - %d\n",
+                          __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
+               goto free_map_req;
+       }
+       return 0;
+
+free_map_req:
+       ufshpb_put_map_req(hpb, map_req);
+change_srgn_state:
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       srgn->srgn_state = state;
+unlock_out:
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       return err;
+}
+
+static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
+{
+       struct ufshpb_region *victim_rgn = NULL;
+       struct victim_select_info *lru_info = &hpb->lru_info;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+       /*
+        * If the region is already on the LRU list it is already in the
+        * active state, so just move it to the most-recently-used end of
+        * the list.
+        */
+       if (!list_empty(&rgn->list_lru_rgn)) {
+               ufshpb_hit_lru_info(lru_info, rgn);
+               goto out;
+       }
+
+       if (rgn->rgn_state == HPB_RGN_INACTIVE) {
+               if (atomic_read(&lru_info->active_cnt) ==
+                   lru_info->max_lru_active_cnt) {
+                       /*
+                        * If the maximum number of active regions is
+                        * reached, evict the least recently used region.
+                        * This can happen when the device responds to
+                        * eviction information late. Evicting the LRU
+                        * region is safe, because the device can detect
+                        * the eviction from the host no longer issuing
+                        * HPB_READ for it.
+                        *
+                        * In host control mode, additionally verify that
+                        * the entering region has enough reads to justify
+                        * the eviction.
+                        */
+                       if (hpb->is_hcm &&
+                           rgn->reads < hpb->params.eviction_thld_enter) {
+                               ret = -EACCES;
+                               goto out;
+                       }
+
+                       victim_rgn = ufshpb_victim_lru_info(hpb);
+                       if (!victim_rgn) {
+                               dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+                                   "cannot get victim region %s\n",
+                                   hpb->is_hcm ? "" : "error");
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+
+                       dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+                               "LRU full (%d), choose victim %d\n",
+                               atomic_read(&lru_info->active_cnt),
+                               victim_rgn->rgn_idx);
+
+                       if (hpb->is_hcm) {
+                               spin_unlock_irqrestore(&hpb->rgn_state_lock,
+                                                      flags);
+                               ret = ufshpb_issue_umap_single_req(hpb,
+                                                               victim_rgn);
+                               spin_lock_irqsave(&hpb->rgn_state_lock,
+                                                 flags);
+                               if (ret)
+                                       goto out;
+                       }
+
+                       __ufshpb_evict_region(hpb, victim_rgn);
+               }
+
+               /*
+                * Once a region is added to the LRU list, its subregions
+                * are guaranteed to be assigned an mctx. If that
+                * allocation fails, the mctx is requested again without
+                * the region being re-added to the LRU list.
+                */
+               ufshpb_add_lru_info(lru_info, rgn);
+       }
+out:
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+       return ret;
+}
+
+static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
+                                        struct utp_hpb_rsp *rsp_field)
+{
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+       int i, rgn_i, srgn_i;
+
+       BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
+       /*
+        * If the same region is reported as both active and inactive,
+        * the region is inactivated. The device can detect that the
+        * region was inactivated and will then respond with the proper
+        * active region information.
+        */
+       for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
+               rgn_i =
+                       be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
+               srgn_i =
+                       be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
+
+               rgn = hpb->rgn_tbl + rgn_i;
+               if (hpb->is_hcm &&
+                   (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
+                       /*
+                        * In host control mode, subregion activation
+                        * recommendations are only honoured for active
+                        * regions. Recommendations for dirty regions are
+                        * ignored as well; the host makes its own
+                        * decisions about those.
+                        */
+                       continue;
+               }
+
+               dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+                       "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
+
+               spin_lock(&hpb->rsp_list_lock);
+               ufshpb_update_active_info(hpb, rgn_i, srgn_i);
+               spin_unlock(&hpb->rsp_list_lock);
+
+               srgn = rgn->srgn_tbl + srgn_i;
+
+               /* blocking HPB_READ */
+               spin_lock(&hpb->rgn_state_lock);
+               if (srgn->srgn_state == HPB_SRGN_VALID)
+                       srgn->srgn_state = HPB_SRGN_INVALID;
+               spin_unlock(&hpb->rgn_state_lock);
+       }
+
+       if (hpb->is_hcm) {
+               /*
+                * In host control mode the device is not allowed to
+                * inactivate regions.
+                */
+               goto out;
+       }
+
+       for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
+               rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
+               dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
+                       "inactivate(%d) region %d\n", i, rgn_i);
+
+               spin_lock(&hpb->rsp_list_lock);
+               ufshpb_update_inactive_info(hpb, rgn_i);
+               spin_unlock(&hpb->rsp_list_lock);
+
+               rgn = hpb->rgn_tbl + rgn_i;
+
+               spin_lock(&hpb->rgn_state_lock);
+               if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+                       for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
+                               srgn = rgn->srgn_tbl + srgn_i;
+                               if (srgn->srgn_state == HPB_SRGN_VALID)
+                                       srgn->srgn_state = HPB_SRGN_INVALID;
+                       }
+               }
+               spin_unlock(&hpb->rgn_state_lock);
+
+       }
+
+out:
+       dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
+               rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
+
+       if (ufshpb_get_state(hpb) == HPB_PRESENT)
+               queue_work(ufshpb_wq, &hpb->map_work);
+}
+
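+/*
+ * The device reported that it lost its HPB state (HPB_RSP_DEV_RESET):
+ * flag every active region so host-mode logic knows its map data needs
+ * to be refreshed.
+ */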
+static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
+{
+       struct victim_select_info *lru_info = &hpb->lru_info;
+       struct ufshpb_region *rgn;
+       unsigned long flags;
+
+       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+       list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
+               set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
+
+       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+}
+
+/*
+ * This function parses the recommended active subregion information in the
+ * sense data field of a response UPIU with SAM_STAT_GOOD status.
+ */
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
+       struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
+       int data_seg_len;
+
+       if (unlikely(lrbp->lun != rsp_field->lun)) {
+               struct scsi_device *sdev;
+               bool found = false;
+
+               __shost_for_each_device(sdev, hba->host) {
+                       hpb = ufshpb_get_hpb_data(sdev);
+
+                       if (!hpb)
+                               continue;
+
+                       if (rsp_field->lun == hpb->lun) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (!found)
+                       return;
+       }
+
+       if (!hpb)
+               return;
+
+       if (ufshpb_get_state(hpb) == HPB_INIT)
+               return;
+
+       if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+           (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
+               dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+                          "%s: ufshpb state is not PRESENT/SUSPEND\n",
+                          __func__);
+               return;
+       }
+
+       data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
+               & MASK_RSP_UPIU_DATA_SEG_LEN;
+
+       /* To flush the remaining rsp_list entries, queue the map_work task */
+       if (!data_seg_len) {
+               if (!ufshpb_is_general_lun(hpb->lun))
+                       return;
+
+               ufshpb_kick_map_work(hpb);
+               return;
+       }
+
+       BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
+
+       if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
+               return;
+
+       hpb->stats.rb_noti_cnt++;
+
+       switch (rsp_field->hpb_op) {
+       case HPB_RSP_REQ_REGION_UPDATE:
+               if (data_seg_len != DEV_DATA_SEG_LEN)
+                       dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+                                "%s: data seg length is not same.\n",
+                                __func__);
+               ufshpb_rsp_req_region_update(hpb, rsp_field);
+               break;
+       case HPB_RSP_DEV_RESET:
+               dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
+                        "UFS device lost HPB information during PM.\n");
+
+               if (hpb->is_hcm) {
+                       struct scsi_device *sdev;
+
+                       __shost_for_each_device(sdev, hba->host) {
+                               struct ufshpb_lu *h = sdev->hostdata;
+
+                               if (h)
+                                       ufshpb_dev_reset_handler(h);
+                       }
+               }
+
+               break;
+       default:
+               dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+                          "hpb_op is not available: %d\n",
+                          rsp_field->hpb_op);
+               break;
+       }
+}
+
+static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
+                                  struct ufshpb_region *rgn,
+                                  struct ufshpb_subregion *srgn)
+{
+       if (!list_empty(&rgn->list_inact_rgn))
+               return;
+
+       if (!list_empty(&srgn->list_act_srgn)) {
+               list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+               return;
+       }
+
+       list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+}
+
+static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
+                                         struct ufshpb_region *rgn,
+                                         struct list_head *pending_list)
+{
+       struct ufshpb_subregion *srgn;
+       int srgn_idx;
+
+       if (!list_empty(&rgn->list_inact_rgn))
+               return;
+
+       for_each_sub_region(rgn, srgn_idx, srgn)
+               if (!list_empty(&srgn->list_act_srgn))
+                       return;
+
+       list_add_tail(&rgn->list_inact_rgn, pending_list);
+}
+
+static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
+{
+       struct ufshpb_region *rgn;
+       struct ufshpb_subregion *srgn;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
+                                               struct ufshpb_subregion,
+                                               list_act_srgn))) {
+               if (ufshpb_get_state(hpb) == HPB_SUSPEND)
+                       break;
+
+               list_del_init(&srgn->list_act_srgn);
+               spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+               rgn = hpb->rgn_tbl + srgn->rgn_idx;
+               ret = ufshpb_add_region(hpb, rgn);
+               if (ret)
+                       goto active_failed;
+
+               ret = ufshpb_issue_map_req(hpb, rgn, srgn);
+               if (ret) {
+                       dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                           "issue map_req failed. ret %d, region %d - %d\n",
+                           ret, rgn->rgn_idx, srgn->srgn_idx);
+                       goto active_failed;
+               }
+               spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       }
+       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+       return;
+
+active_failed:
+       dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
+                  rgn->rgn_idx, srgn->srgn_idx);
+       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       ufshpb_add_active_list(hpb, rgn, srgn);
+       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
+{
+       struct ufshpb_region *rgn;
+       unsigned long flags;
+       int ret;
+       LIST_HEAD(pending_list);
+
+       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
+                                              struct ufshpb_region,
+                                              list_inact_rgn))) {
+               if (ufshpb_get_state(hpb) == HPB_SUSPEND)
+                       break;
+
+               list_del_init(&rgn->list_inact_rgn);
+               spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+
+               ret = ufshpb_evict_region(hpb, rgn);
+               if (ret) {
+                       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+                       ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
+                       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+               }
+
+               spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       }
+
+       list_splice(&pending_list, &hpb->lh_inact_rgn);
+       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
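+/*
+ * Normalization work (host control mode): decay every subregion's read
+ * counter by the configured factor (reads >>= factor) and rebuild the
+ * per-region sums. Active regions whose counters decayed to zero are
+ * queued for inactivation.
+ */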
+static void ufshpb_normalization_work_handler(struct work_struct *work)
+{
+       struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+                                            ufshpb_normalization_work);
+       int rgn_idx;
+       u8 factor = hpb->params.normalization_factor;
+
+       for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+               struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
+               int srgn_idx;
+
+               spin_lock(&rgn->rgn_lock);
+               rgn->reads = 0;
+               for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
+                       struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
+
+                       srgn->reads >>= factor;
+                       rgn->reads += srgn->reads;
+               }
+               spin_unlock(&rgn->rgn_lock);
+
+               if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
+                       continue;
+
+               /* if region is active but has no reads - inactivate it */
+               spin_lock(&hpb->rsp_list_lock);
+               ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+               spin_unlock(&hpb->rsp_list_lock);
+       }
+}
+
+static void ufshpb_map_work_handler(struct work_struct *work)
+{
+       struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
+
+       if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+               dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+                          "%s: ufshpb state is not PRESENT\n", __func__);
+               return;
+       }
+
+       ufshpb_run_inactive_region_list(hpb);
+       ufshpb_run_active_subregion_list(hpb);
+}
+
+/*
+ * This function does not need to hold any locks (rgn_state_lock,
+ * rsp_list_lock, etc.) because it is only called during initialization.
+ */
+static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
+                                           struct ufshpb_lu *hpb,
+                                           struct ufshpb_region *rgn)
+{
+       struct ufshpb_subregion *srgn;
+       int srgn_idx, i;
+       int err = 0;
+
+       for_each_sub_region(rgn, srgn_idx, srgn) {
+               srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
+               srgn->srgn_state = HPB_SRGN_INVALID;
+               if (!srgn->mctx) {
+                       err = -ENOMEM;
+                       dev_err(hba->dev,
+                               "alloc mctx for pinned region failed\n");
+                       goto release;
+               }
+
+               list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
+       }
+
+       rgn->rgn_state = HPB_RGN_PINNED;
+       return 0;
+
+release:
+       for (i = 0; i < srgn_idx; i++) {
+               srgn = rgn->srgn_tbl + i;
+               ufshpb_put_map_ctx(hpb, srgn->mctx);
+       }
+       return err;
+}
+
+static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
+                                     struct ufshpb_region *rgn, bool last)
+{
+       int srgn_idx;
+       struct ufshpb_subregion *srgn;
+
+       for_each_sub_region(rgn, srgn_idx, srgn) {
+               INIT_LIST_HEAD(&srgn->list_act_srgn);
+
+               srgn->rgn_idx = rgn->rgn_idx;
+               srgn->srgn_idx = srgn_idx;
+               srgn->srgn_state = HPB_SRGN_UNUSED;
+       }
+
+       if (unlikely(last && hpb->last_srgn_entries))
+               srgn->is_last = true;
+}
+
+static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
+                                     struct ufshpb_region *rgn, int srgn_cnt)
+{
+       rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
+                                GFP_KERNEL);
+       if (!rgn->srgn_tbl)
+               return -ENOMEM;
+
+       rgn->srgn_cnt = srgn_cnt;
+       return 0;
+}
+
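+/*
+ * Derive the per-LU HPB geometry from the device/LU descriptors. As a
+ * worked example with this driver's constants (HPB_RGN_SIZE_UNIT = 512,
+ * HPB_ENTRY_BLOCK_SIZE = 4 KiB, HPB_ENTRY_SIZE = 8 bytes), a 16 MiB
+ * region needs 16 MiB / 4 KiB * 8 B = 32 KiB of map memory per region.
+ */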
+static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
+                                    struct ufshpb_lu *hpb,
+                                    struct ufshpb_dev_info *hpb_dev_info,
+                                    struct ufshpb_lu_info *hpb_lu_info)
+{
+       u32 entries_per_rgn;
+       u64 rgn_mem_size, tmp;
+
+       /* for pre_req */
+       hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
+
+       if (ufshpb_is_legacy(hba))
+               hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
+       else
+               hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
+
+       hpb->cur_read_id = 0;
+
+       hpb->lu_pinned_start = hpb_lu_info->pinned_start;
+       hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
+               (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
+               : PINNED_NOT_SET;
+       hpb->lru_info.max_lru_active_cnt =
+               hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
+
+       rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
+                       * HPB_ENTRY_SIZE;
+       do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
+       hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
+               * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
+
+       tmp = rgn_mem_size;
+       do_div(tmp, HPB_ENTRY_SIZE);
+       entries_per_rgn = (u32)tmp;
+       hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
+       hpb->entries_per_rgn_mask = entries_per_rgn - 1;
+
+       hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
+       hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
+       hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
+
+       tmp = rgn_mem_size;
+       do_div(tmp, hpb->srgn_mem_size);
+       hpb->srgns_per_rgn = (int)tmp;
+
+       hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
+                               entries_per_rgn);
+       hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
+                               (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
+       hpb->last_srgn_entries = hpb_lu_info->num_blocks
+                                % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
+
+       hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
+
+       if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
+               hpb->is_hcm = true;
+}
+
+static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+       struct ufshpb_region *rgn_table, *rgn;
+       int rgn_idx, i;
+       int ret = 0;
+
+       rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
+                           GFP_KERNEL);
+       if (!rgn_table)
+               return -ENOMEM;
+
+       for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+               int srgn_cnt = hpb->srgns_per_rgn;
+               bool last_srgn = false;
+
+               rgn = rgn_table + rgn_idx;
+               rgn->rgn_idx = rgn_idx;
+
+               spin_lock_init(&rgn->rgn_lock);
+
+               INIT_LIST_HEAD(&rgn->list_inact_rgn);
+               INIT_LIST_HEAD(&rgn->list_lru_rgn);
+               INIT_LIST_HEAD(&rgn->list_expired_rgn);
+
+               if (rgn_idx == hpb->rgns_per_lu - 1) {
+                       srgn_cnt = ((hpb->srgns_per_lu - 1) %
+                                   hpb->srgns_per_rgn) + 1;
+                       last_srgn = true;
+               }
+
+               ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
+               if (ret)
+                       goto release_srgn_table;
+               ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
+
+               if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
+                       ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
+                       if (ret)
+                               goto release_srgn_table;
+               } else {
+                       rgn->rgn_state = HPB_RGN_INACTIVE;
+               }
+
+               rgn->rgn_flags = 0;
+               rgn->hpb = hpb;
+       }
+
+       hpb->rgn_tbl = rgn_table;
+
+       return 0;
+
+release_srgn_table:
+       for (i = 0; i <= rgn_idx; i++)
+               kvfree(rgn_table[i].srgn_tbl);
+
+       kvfree(rgn_table);
+       return ret;
+}
+
+static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
+                                        struct ufshpb_region *rgn)
+{
+       int srgn_idx;
+       struct ufshpb_subregion *srgn;
+
+       for_each_sub_region(rgn, srgn_idx, srgn)
+               if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+                       srgn->srgn_state = HPB_SRGN_UNUSED;
+                       ufshpb_put_map_ctx(hpb, srgn->mctx);
+               }
+}
+
+static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
+{
+       int rgn_idx;
+
+       for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
+               struct ufshpb_region *rgn;
+
+               rgn = hpb->rgn_tbl + rgn_idx;
+               if (rgn->rgn_state != HPB_RGN_INACTIVE) {
+                       rgn->rgn_state = HPB_RGN_INACTIVE;
+
+                       ufshpb_destroy_subregion_tbl(hpb, rgn);
+               }
+
+               kvfree(rgn->srgn_tbl);
+       }
+
+       kvfree(hpb->rgn_tbl);
+}
+
+/* SYSFS functions: HPB statistics */
+#define ufshpb_sysfs_attr_show_func(__name)                            \
+static ssize_t __name##_show(struct device *dev,                       \
+       struct device_attribute *attr, char *buf)                       \
+{                                                                      \
+       struct scsi_device *sdev = to_scsi_device(dev);                 \
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
+                                                                       \
+       if (!hpb)                                                       \
+               return -ENODEV;                                         \
+                                                                       \
+       return sysfs_emit(buf, "%llu\n", hpb->stats.__name);            \
+}                                                                      \
+\
+static DEVICE_ATTR_RO(__name)
+
+ufshpb_sysfs_attr_show_func(hit_cnt);
+ufshpb_sysfs_attr_show_func(miss_cnt);
+ufshpb_sysfs_attr_show_func(rb_noti_cnt);
+ufshpb_sysfs_attr_show_func(rb_active_cnt);
+ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
+ufshpb_sysfs_attr_show_func(map_req_cnt);
+ufshpb_sysfs_attr_show_func(umap_req_cnt);
+
+static struct attribute *hpb_dev_stat_attrs[] = {
+       &dev_attr_hit_cnt.attr,
+       &dev_attr_miss_cnt.attr,
+       &dev_attr_rb_noti_cnt.attr,
+       &dev_attr_rb_active_cnt.attr,
+       &dev_attr_rb_inactive_cnt.attr,
+       &dev_attr_map_req_cnt.attr,
+       &dev_attr_umap_req_cnt.attr,
+       NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_stat_group = {
+       .name = "hpb_stats",
+       .attrs = hpb_dev_stat_attrs,
+};
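+
+/*
+ * Usage sketch (path is illustrative; the group is attached to the
+ * LU's scsi_device sysfs node):
+ *   $ cat /sys/class/scsi_device/<h:c:t:l>/device/hpb_stats/hit_cnt
+ */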
+
+/* SYSFS functions: HPB parameters */
+#define ufshpb_sysfs_param_show_func(__name)                           \
+static ssize_t __name##_show(struct device *dev,                       \
+       struct device_attribute *attr, char *buf)                       \
+{                                                                      \
+       struct scsi_device *sdev = to_scsi_device(dev);                 \
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);              \
+                                                                       \
+       if (!hpb)                                                       \
+               return -ENODEV;                                         \
+                                                                       \
+       return sysfs_emit(buf, "%d\n", hpb->params.__name);             \
+}
+
+ufshpb_sysfs_param_show_func(requeue_timeout_ms);
+static ssize_t
+requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < 0)
+               return -EINVAL;
+
+       hpb->params.requeue_timeout_ms = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(requeue_timeout_ms);
+
+ufshpb_sysfs_param_show_func(activation_thld);
+static ssize_t
+activation_thld_store(struct device *dev, struct device_attribute *attr,
+                     const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val <= 0)
+               return -EINVAL;
+
+       hpb->params.activation_thld = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(activation_thld);
+
+ufshpb_sysfs_param_show_func(normalization_factor);
+static ssize_t
+normalization_factor_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
+               return -EINVAL;
+
+       hpb->params.normalization_factor = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(normalization_factor);
+
+ufshpb_sysfs_param_show_func(eviction_thld_enter);
+static ssize_t
+eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val <= hpb->params.eviction_thld_exit)
+               return -EINVAL;
+
+       hpb->params.eviction_thld_enter = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(eviction_thld_enter);
+
+ufshpb_sysfs_param_show_func(eviction_thld_exit);
+static ssize_t
+eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val <= hpb->params.activation_thld)
+               return -EINVAL;
+
+       hpb->params.eviction_thld_exit = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(eviction_thld_exit);
+
+ufshpb_sysfs_param_show_func(read_timeout_ms);
+static ssize_t
+read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
+                     const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       /* read_timeout must be much larger than timeout_polling_interval */
+       if (val < hpb->params.timeout_polling_interval_ms * 2)
+               return -EINVAL;
+
+       hpb->params.read_timeout_ms = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(read_timeout_ms);
+
+ufshpb_sysfs_param_show_func(read_timeout_expiries);
+static ssize_t
+read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val <= 0)
+               return -EINVAL;
+
+       hpb->params.read_timeout_expiries = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(read_timeout_expiries);
+
+ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
+static ssize_t
+timeout_polling_interval_ms_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       /* timeout_polling_interval must be much smaller than read_timeout */
+       if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
+               return -EINVAL;
+
+       hpb->params.timeout_polling_interval_ms = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(timeout_polling_interval_ms);
+
+ufshpb_sysfs_param_show_func(inflight_map_req);
+static ssize_t inflight_map_req_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+       int val;
+
+       if (!hpb)
+               return -ENODEV;
+
+       if (!hpb->is_hcm)
+               return -EOPNOTSUPP;
+
+       if (kstrtouint(buf, 0, &val))
+               return -EINVAL;
+
+       if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
+               return -EINVAL;
+
+       hpb->params.inflight_map_req = val;
+
+       return count;
+}
+static DEVICE_ATTR_RW(inflight_map_req);
+
+static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
+{
+       hpb->params.activation_thld = ACTIVATION_THRESHOLD;
+       hpb->params.normalization_factor = 1;
+       hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
+       hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
+       hpb->params.read_timeout_ms = READ_TO_MS;
+       hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
+       hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
+       hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
+}
+
+static struct attribute *hpb_dev_param_attrs[] = {
+       &dev_attr_requeue_timeout_ms.attr,
+       &dev_attr_activation_thld.attr,
+       &dev_attr_normalization_factor.attr,
+       &dev_attr_eviction_thld_enter.attr,
+       &dev_attr_eviction_thld_exit.attr,
+       &dev_attr_read_timeout_ms.attr,
+       &dev_attr_read_timeout_expiries.attr,
+       &dev_attr_timeout_polling_interval_ms.attr,
+       &dev_attr_inflight_map_req.attr,
+       NULL,
+};
+
+struct attribute_group ufs_sysfs_hpb_param_group = {
+       .name = "hpb_params",
+       .attrs = hpb_dev_param_attrs,
+};
+
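+/*
+ * Pre-allocate a pool of pre_req entries (half the LU queue depth),
+ * each with a single-page bio and a zeroed write-buffer page, so that
+ * issuing a pre-request does not allocate memory in the I/O path.
+ */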
+static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
+{
+       struct ufshpb_req *pre_req = NULL, *t;
+       int qd = hpb->sdev_ufs_lu->queue_depth / 2;
+       int i;
+
+       INIT_LIST_HEAD(&hpb->lh_pre_req_free);
+
+       hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
+       hpb->throttle_pre_req = qd;
+       hpb->num_inflight_pre_req = 0;
+
+       if (!hpb->pre_req)
+               goto release_mem;
+
+       for (i = 0; i < qd; i++) {
+               pre_req = hpb->pre_req + i;
+               INIT_LIST_HEAD(&pre_req->list_req);
+               pre_req->req = NULL;
+
+               pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+               if (!pre_req->bio)
+                       goto release_mem;
+
+               pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!pre_req->wb.m_page) {
+                       bio_put(pre_req->bio);
+                       goto release_mem;
+               }
+
+               list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
+       }
+
+       return 0;
+release_mem:
+       list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
+               list_del_init(&pre_req->list_req);
+               bio_put(pre_req->bio);
+               __free_page(pre_req->wb.m_page);
+       }
+
+       kfree(hpb->pre_req);
+       return -ENOMEM;
+}
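+
+/*
+ * Unwind note: the release_mem path only walks lh_pre_req_free, so it frees
+ * just the entries whose bio and page were both allocated; the entry that
+ * failed mid-loop either never allocated anything or already dropped its
+ * bio before the goto.
+ */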
+
+static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
+{
+       struct ufshpb_req *pre_req = NULL;
+       int i;
+
+       for (i = 0; i < hpb->throttle_pre_req; i++) {
+               pre_req = hpb->pre_req + i;
+               bio_put(hpb->pre_req[i].bio);
+               if (pre_req->wb.m_page)
+                       __free_page(hpb->pre_req[i].wb.m_page);
+               list_del_init(&pre_req->list_req);
+       }
+
+       kfree(hpb->pre_req);
+}
+
+static void ufshpb_stat_init(struct ufshpb_lu *hpb)
+{
+       hpb->stats.hit_cnt = 0;
+       hpb->stats.miss_cnt = 0;
+       hpb->stats.rb_noti_cnt = 0;
+       hpb->stats.rb_active_cnt = 0;
+       hpb->stats.rb_inactive_cnt = 0;
+       hpb->stats.map_req_cnt = 0;
+       hpb->stats.umap_req_cnt = 0;
+}
+
+static void ufshpb_param_init(struct ufshpb_lu *hpb)
+{
+       hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
+       if (hpb->is_hcm)
+               ufshpb_hcm_param_init(hpb);
+}
+
+static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
+{
+       int ret;
+
+       spin_lock_init(&hpb->rgn_state_lock);
+       spin_lock_init(&hpb->rsp_list_lock);
+       spin_lock_init(&hpb->param_lock);
+
+       INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
+       INIT_LIST_HEAD(&hpb->lh_act_srgn);
+       INIT_LIST_HEAD(&hpb->lh_inact_rgn);
+       INIT_LIST_HEAD(&hpb->list_hpb_lu);
+
+       INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
+       if (hpb->is_hcm) {
+               INIT_WORK(&hpb->ufshpb_normalization_work,
+                         ufshpb_normalization_work_handler);
+               INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
+                                 ufshpb_read_to_handler);
+       }
+
+       hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
+                         sizeof(struct ufshpb_req), 0, 0, NULL);
+       if (!hpb->map_req_cache) {
+               dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
+                       hpb->lun);
+               return -ENOMEM;
+       }
+
+       hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
+                         sizeof(struct page *) * hpb->pages_per_srgn,
+                         0, 0, NULL);
+       if (!hpb->m_page_cache) {
+               dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
+                       hpb->lun);
+               ret = -ENOMEM;
+               goto release_req_cache;
+       }
+
+       ret = ufshpb_pre_req_mempool_init(hpb);
+       if (ret) {
+               dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
+                       hpb->lun);
+               goto release_m_page_cache;
+       }
+
+       ret = ufshpb_alloc_region_tbl(hba, hpb);
+       if (ret)
+               goto release_pre_req_mempool;
+
+       ufshpb_stat_init(hpb);
+       ufshpb_param_init(hpb);
+
+       if (hpb->is_hcm) {
+               unsigned int poll;
+
+               poll = hpb->params.timeout_polling_interval_ms;
+               schedule_delayed_work(&hpb->ufshpb_read_to_work,
+                                     msecs_to_jiffies(poll));
+       }
+
+       return 0;
+
+release_pre_req_mempool:
+       ufshpb_pre_req_mempool_destroy(hpb);
+release_m_page_cache:
+       kmem_cache_destroy(hpb->m_page_cache);
+release_req_cache:
+       kmem_cache_destroy(hpb->map_req_cache);
+       return ret;
+}
+
+static struct ufshpb_lu *
+ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
+                   struct ufshpb_dev_info *hpb_dev_info,
+                   struct ufshpb_lu_info *hpb_lu_info)
+{
+       struct ufshpb_lu *hpb;
+       int ret;
+
+       hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
+       if (!hpb)
+               return NULL;
+
+       hpb->lun = sdev->lun;
+       hpb->sdev_ufs_lu = sdev;
+
+       ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
+
+       ret = ufshpb_lu_hpb_init(hba, hpb);
+       if (ret) {
+               dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
+               goto release_hpb;
+       }
+
+       sdev->hostdata = hpb;
+       return hpb;
+
+release_hpb:
+       kfree(hpb);
+       return NULL;
+}
+
+static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
+{
+       struct ufshpb_region *rgn, *next_rgn;
+       struct ufshpb_subregion *srgn, *next_srgn;
+       unsigned long flags;
+
+       /*
+        * If a device reset occurred, the remaining HPB region information
+        * may be stale. Discard the HPB response lists left over from before
+        * the reset so that no unnecessary work is done on them.
+        */
+       spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+       list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
+                                list_inact_rgn)
+               list_del_init(&rgn->list_inact_rgn);
+
+       list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
+                                list_act_srgn)
+               list_del_init(&srgn->list_act_srgn);
+       spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+}
+
+static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
+{
+       if (hpb->is_hcm) {
+               cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
+               cancel_work_sync(&hpb->ufshpb_normalization_work);
+       }
+       cancel_work_sync(&hpb->map_work);
+}
+
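+/*
+ * Poll until the device clears fHpbReset. The return value is the residual
+ * flag: false means the device completed its HPB reset within
+ * HPB_RESET_REQ_RETRIES polls; true means it did not (or the flag could not
+ * be read).
+ */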
+static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
+{
+       int err = 0;
+       bool flag_res = true;
+       int try;
+
+       /* wait for the device to complete HPB reset query */
+       for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+               dev_dbg(hba->dev,
+                       "%s start flag reset polling %d times\n",
+                       __func__, try);
+
+               /* Poll fHpbReset flag to be cleared */
+               err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+                               QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
+
+               if (err) {
+                       dev_err(hba->dev,
+                               "%s reading fHpbReset flag failed with error %d\n",
+                               __func__, err);
+                       return flag_res;
+               }
+
+               if (!flag_res)
+                       goto out;
+
+               usleep_range(1000, 1100);
+       }
+       if (flag_res) {
+               dev_err(hba->dev,
+                       "%s fHpbReset was not cleared by the device\n",
+                       __func__);
+       }
+out:
+       return flag_res;
+}
+
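+/*
+ * ufshpb_reset_host() and ufshpb_reset() work as a pair: reset_host moves
+ * each PRESENT LU to HPB_RESET and quiesces its workers before the host is
+ * reset, and ufshpb_reset() flips those LUs back to HPB_PRESENT once the
+ * reset has completed.
+ */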
+void ufshpb_reset(struct ufs_hba *hba)
+{
+       struct ufshpb_lu *hpb;
+       struct scsi_device *sdev;
+
+       shost_for_each_device(sdev, hba->host) {
+               hpb = ufshpb_get_hpb_data(sdev);
+               if (!hpb)
+                       continue;
+
+               if (ufshpb_get_state(hpb) != HPB_RESET)
+                       continue;
+
+               ufshpb_set_state(hpb, HPB_PRESENT);
+       }
+}
+
+void ufshpb_reset_host(struct ufs_hba *hba)
+{
+       struct ufshpb_lu *hpb;
+       struct scsi_device *sdev;
+
+       shost_for_each_device(sdev, hba->host) {
+               hpb = ufshpb_get_hpb_data(sdev);
+               if (!hpb)
+                       continue;
+
+               if (ufshpb_get_state(hpb) != HPB_PRESENT)
+                       continue;
+               ufshpb_set_state(hpb, HPB_RESET);
+               ufshpb_cancel_jobs(hpb);
+               ufshpb_discard_rsp_lists(hpb);
+       }
+}
+
+void ufshpb_suspend(struct ufs_hba *hba)
+{
+       struct ufshpb_lu *hpb;
+       struct scsi_device *sdev;
+
+       shost_for_each_device(sdev, hba->host) {
+               hpb = ufshpb_get_hpb_data(sdev);
+               if (!hpb)
+                       continue;
+
+               if (ufshpb_get_state(hpb) != HPB_PRESENT)
+                       continue;
+               ufshpb_set_state(hpb, HPB_SUSPEND);
+               ufshpb_cancel_jobs(hpb);
+       }
+}
+
+void ufshpb_resume(struct ufs_hba *hba)
+{
+       struct ufshpb_lu *hpb;
+       struct scsi_device *sdev;
+
+       shost_for_each_device(sdev, hba->host) {
+               hpb = ufshpb_get_hpb_data(sdev);
+               if (!hpb)
+                       continue;
+
+               if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
+                   (ufshpb_get_state(hpb) != HPB_SUSPEND))
+                       continue;
+               ufshpb_set_state(hpb, HPB_PRESENT);
+               ufshpb_kick_map_work(hpb);
+               if (hpb->is_hcm) {
+                       unsigned int poll =
+                               hpb->params.timeout_polling_interval_ms;
+
+                       schedule_delayed_work(&hpb->ufshpb_read_to_work,
+                               msecs_to_jiffies(poll));
+               }
+       }
+}
+
+static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
+                             struct ufshpb_lu_info *hpb_lu_info)
+{
+       u16 max_active_rgns;
+       u8 lu_enable;
+       int size;
+       int ret;
+       char desc_buf[QUERY_DESC_MAX_SIZE];
+
+       ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
+
+       pm_runtime_get_sync(hba->dev);
+       ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+                                           QUERY_DESC_IDN_UNIT, lun, 0,
+                                           desc_buf, &size);
+       pm_runtime_put_sync(hba->dev);
+
+       if (ret) {
+               dev_err(hba->dev,
+                       "%s: idn: %d lun: %d  query request failed",
+                       __func__, QUERY_DESC_IDN_UNIT, lun);
+               return ret;
+       }
+
+       lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
+       if (lu_enable != LU_ENABLED_HPB_FUNC)
+               return -ENODEV;
+
+       max_active_rgns = get_unaligned_be16(
+                       desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
+       if (!max_active_rgns) {
+               dev_err(hba->dev,
+                       "lun %d wrong number of max active regions\n", lun);
+               return -ENODEV;
+       }
+
+       hpb_lu_info->num_blocks = get_unaligned_be64(
+                       desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
+       hpb_lu_info->pinned_start = get_unaligned_be16(
+                       desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
+       hpb_lu_info->num_pinned = get_unaligned_be16(
+                       desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
+       hpb_lu_info->max_active_rgns = max_active_rgns;
+
+       return 0;
+}
+
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+       struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
+
+       if (!hpb)
+               return;
+
+       ufshpb_set_state(hpb, HPB_FAILED);
+
+       sdev = hpb->sdev_ufs_lu;
+       sdev->hostdata = NULL;
+
+       ufshpb_cancel_jobs(hpb);
+
+       ufshpb_pre_req_mempool_destroy(hpb);
+       ufshpb_destroy_region_tbl(hpb);
+
+       kmem_cache_destroy(hpb->map_req_cache);
+       kmem_cache_destroy(hpb->m_page_cache);
+
+       list_del_init(&hpb->list_hpb_lu);
+
+       kfree(hpb);
+}
+
+static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
+{
+       int pool_size;
+       struct ufshpb_lu *hpb;
+       struct scsi_device *sdev;
+       bool init_success;
+
+       if (tot_active_srgn_pages == 0) {
+               ufshpb_remove(hba);
+               return;
+       }
+
+       init_success = !ufshpb_check_hpb_reset_query(hba);
+
+       pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+       if (pool_size > tot_active_srgn_pages) {
+               mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
+               mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
+       }
+
+       shost_for_each_device(sdev, hba->host) {
+               hpb = ufshpb_get_hpb_data(sdev);
+               if (!hpb)
+                       continue;
+
+               if (init_success) {
+                       ufshpb_set_state(hpb, HPB_PRESENT);
+                       if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
+                               queue_work(ufshpb_wq, &hpb->map_work);
+                       if (!hpb->is_hcm)
+                               ufshpb_issue_umap_all_req(hpb);
+               } else {
+                       dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
+                       ufshpb_destroy_lu(hba, sdev);
+               }
+       }
+
+       if (!init_success)
+               ufshpb_remove(hba);
+}
+
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+       struct ufshpb_lu *hpb;
+       int ret;
+       struct ufshpb_lu_info hpb_lu_info = { 0 };
+       int lun = sdev->lun;
+
+       if (lun >= hba->dev_info.max_lu_supported)
+               goto out;
+
+       ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
+       if (ret)
+               goto out;
+
+       hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
+                                 &hpb_lu_info);
+       if (!hpb)
+               goto out;
+
+       tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
+                       hpb->srgns_per_rgn * hpb->pages_per_srgn;
+
+out:
+       /* All LUs are initialized */
+       if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
+               ufshpb_hpb_lu_prepared(hba);
+}
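+
+/*
+ * Note: ufshpb_init() primes slave_conf_cnt with the device's LU count;
+ * each scsi_device configuration path above decrements it, and the last LU
+ * through triggers ufshpb_hpb_lu_prepared() for the whole host.
+ */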
+
+static int ufshpb_init_mem_wq(struct ufs_hba *hba)
+{
+       int ret;
+       unsigned int pool_size;
+
+       ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
+                                       sizeof(struct ufshpb_map_ctx),
+                                       0, 0, NULL);
+       if (!ufshpb_mctx_cache) {
+               dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
+               return -ENOMEM;
+       }
+
+       pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
+       dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
+              __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
+
+       ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
+                                                   ufshpb_mctx_cache);
+       if (!ufshpb_mctx_pool) {
+               dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
+               ret = -ENOMEM;
+               goto release_mctx_cache;
+       }
+
+       ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
+       if (!ufshpb_page_pool) {
+               dev_err(hba->dev, "ufshpb: cannot init page pool\n");
+               ret = -ENOMEM;
+               goto release_mctx_pool;
+       }
+
+       ufshpb_wq = alloc_workqueue("ufshpb-wq",
+                                       WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+       if (!ufshpb_wq) {
+               dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
+               ret = -ENOMEM;
+               goto release_page_pool;
+       }
+
+       return 0;
+
+release_page_pool:
+       mempool_destroy(ufshpb_page_pool);
+release_mctx_pool:
+       mempool_destroy(ufshpb_mctx_pool);
+release_mctx_cache:
+       kmem_cache_destroy(ufshpb_mctx_cache);
+       return ret;
+}
+
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
+{
+       struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
+       int max_active_rgns = 0;
+       int hpb_num_lu;
+
+       hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
+       if (hpb_num_lu == 0) {
+               dev_err(hba->dev, "No HPB LU supported\n");
+               hpb_info->hpb_disabled = true;
+               return;
+       }
+
+       hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
+       hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
+       max_active_rgns = get_unaligned_be16(geo_buf +
+                         GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
+
+       if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
+           max_active_rgns == 0) {
+               dev_err(hba->dev, "No HPB supported device\n");
+               hpb_info->hpb_disabled = true;
+               return;
+       }
+}
+
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
+{
+       struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+       int version, ret;
+       u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
+
+       hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
+
+       version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
+       if ((version != HPB_SUPPORT_VERSION) &&
+           (version != HPB_SUPPORT_LEGACY_VERSION)) {
+               dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
+                       __func__, version);
+               hpb_dev_info->hpb_disabled = true;
+               return;
+       }
+
+       if (version == HPB_SUPPORT_LEGACY_VERSION)
+               hpb_dev_info->is_legacy = true;
+
+       pm_runtime_get_sync(hba->dev);
+       ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+               QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
+       pm_runtime_put_sync(hba->dev);
+
+       if (ret)
+               dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
+                       __func__);
+       hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
+
+       /*
+        * Get the number of user logical units so we can tell when every
+        * scsi_device has finished initialization.
+        */
+       hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+}
+
+void ufshpb_init(struct ufs_hba *hba)
+{
+       struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
+       int try;
+       int ret;
+
+       if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
+               return;
+
+       if (ufshpb_init_mem_wq(hba)) {
+               hpb_dev_info->hpb_disabled = true;
+               return;
+       }
+
+       atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
+       tot_active_srgn_pages = 0;
+       /* issue HPB reset query */
+       for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
+               ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+                                       QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
+               if (!ret)
+                       break;
+       }
+}
+
+void ufshpb_remove(struct ufs_hba *hba)
+{
+       mempool_destroy(ufshpb_page_pool);
+       mempool_destroy(ufshpb_mctx_pool);
+       kmem_cache_destroy(ufshpb_mctx_cache);
+
+       destroy_workqueue(ufshpb_wq);
+}
+
+module_param(ufshpb_host_map_kbytes, uint, 0644);
+MODULE_PARM_DESC(ufshpb_host_map_kbytes,
+       "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
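+
+/*
+ * Illustrative: module_param(..., 0644) also exposes the pool budget at
+ * runtime via /sys/module/<module>/parameters/ufshpb_host_map_kbytes (the
+ * module name depends on how ufshpb.o is linked, e.g. into ufshcd-core).
+ * A new value only affects pools sized after the write, since the mempools
+ * are created in ufshpb_init_mem_wq() and resized in ufshpb_hpb_lu_prepared().
+ */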
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
new file mode 100644
index 0000000..a79e073
--- /dev/null
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Universal Flash Storage Host Performance Booster
+ *
+ * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Yongmyung Lee <ymhungry.lee@samsung.com>
+ *     Jinyoung Choi <j-young.choi@samsung.com>
+ */
+
+#ifndef _UFSHPB_H_
+#define _UFSHPB_H_
+
+/* hpb response UPIU macro */
+#define HPB_RSP_NONE                           0x0
+#define HPB_RSP_REQ_REGION_UPDATE              0x1
+#define HPB_RSP_DEV_RESET                      0x2
+#define MAX_ACTIVE_NUM                         2
+#define MAX_INACTIVE_NUM                       2
+#define DEV_DATA_SEG_LEN                       0x14
+#define DEV_SENSE_SEG_LEN                      0x12
+#define DEV_DES_TYPE                           0x80
+#define DEV_ADDITIONAL_LEN                     0x10
+
+/* hpb map & entries macro */
+#define HPB_RGN_SIZE_UNIT                      512
+#define HPB_ENTRY_BLOCK_SIZE                   4096
+#define HPB_ENTRY_SIZE                         0x8
+#define PINNED_NOT_SET                         U32_MAX
+
+/* hpb support chunk size */
+#define HPB_LEGACY_CHUNK_HIGH                  1
+#define HPB_MULTI_CHUNK_LOW                    7
+#define HPB_MULTI_CHUNK_HIGH                   255
+
+/* hpb vendor defined opcode */
+#define UFSHPB_READ                            0xF8
+#define UFSHPB_READ_BUFFER                     0xF9
+#define UFSHPB_READ_BUFFER_ID                  0x01
+#define UFSHPB_WRITE_BUFFER                    0xFA
+#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID    0x01
+#define UFSHPB_WRITE_BUFFER_PREFETCH_ID                0x02
+#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID       0x03
+#define HPB_WRITE_BUFFER_CMD_LENGTH            10
+#define MAX_HPB_READ_ID                                0x7F
+#define HPB_READ_BUFFER_CMD_LENGTH             10
+#define LU_ENABLED_HPB_FUNC                    0x02
+
+#define HPB_RESET_REQ_RETRIES                  10
+#define HPB_MAP_REQ_RETRIES                    5
+#define HPB_REQUEUE_TIME_MS                    0
+
+#define HPB_SUPPORT_VERSION                    0x200
+#define HPB_SUPPORT_LEGACY_VERSION             0x100
+
+enum UFSHPB_MODE {
+       HPB_HOST_CONTROL,
+       HPB_DEVICE_CONTROL,
+};
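+
+/*
+ * In HPB_DEVICE_CONTROL mode the device chooses which regions to
+ * (in)activate and reports them in the response UPIU; in HPB_HOST_CONTROL
+ * mode the host makes those decisions from its own read statistics
+ * (hpb->is_hcm).
+ */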
+
+enum UFSHPB_STATE {
+       HPB_INIT = 0,
+       HPB_PRESENT = 1,
+       HPB_SUSPEND,
+       HPB_FAILED,
+       HPB_RESET,
+};
+
+enum HPB_RGN_STATE {
+       HPB_RGN_INACTIVE,
+       HPB_RGN_ACTIVE,
+       /* pinned regions are always active */
+       HPB_RGN_PINNED,
+};
+
+enum HPB_SRGN_STATE {
+       HPB_SRGN_UNUSED,
+       HPB_SRGN_INVALID,
+       HPB_SRGN_VALID,
+       HPB_SRGN_ISSUED,
+};
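+
+/*
+ * Rough subregion lifecycle as driven by ufshpb.c: UNUSED until a map
+ * context is attached, ISSUED while a map (READ BUFFER) request is in
+ * flight, VALID once the L2P entries are cached, and INVALID when a write
+ * dirties them or the device asks for re-activation.
+ */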
+
+/**
+ * struct ufshpb_lu_info - UFSHPB logical unit related info
+ * @num_blocks: the number of logical blocks
+ * @pinned_start: the start region number of the pinned area
+ * @num_pinned: the number of pinned regions
+ * @max_active_rgns: maximum number of active regions
+ */
+struct ufshpb_lu_info {
+       int num_blocks;
+       int pinned_start;
+       int num_pinned;
+       int max_active_rgns;
+};
+
+struct ufshpb_map_ctx {
+       struct page **m_page;
+       unsigned long *ppn_dirty;
+};
+
+struct ufshpb_subregion {
+       struct ufshpb_map_ctx *mctx;
+       enum HPB_SRGN_STATE srgn_state;
+       int rgn_idx;
+       int srgn_idx;
+       bool is_last;
+
+       /* subregion reads - for host mode */
+       unsigned int reads;
+
+       /* below information is used by rsp_list */
+       struct list_head list_act_srgn;
+};
+
+struct ufshpb_region {
+       struct ufshpb_lu *hpb;
+       struct ufshpb_subregion *srgn_tbl;
+       enum HPB_RGN_STATE rgn_state;
+       int rgn_idx;
+       int srgn_cnt;
+
+       /* below information is used by rsp_list */
+       struct list_head list_inact_rgn;
+
+       /* below information is used by lru */
+       struct list_head list_lru_rgn;
+       unsigned long rgn_flags;
+#define RGN_FLAG_DIRTY 0
+#define RGN_FLAG_UPDATE 1
+
+       /* region reads - for host mode */
+       spinlock_t rgn_lock;
+       unsigned int reads;
+       /* region "cold" timer - for host mode */
+       ktime_t read_timeout;
+       unsigned int read_timeout_expiries;
+       struct list_head list_expired_rgn;
+};
+
+#define for_each_sub_region(rgn, i, srgn)                              \
+       for ((i) = 0;                                                   \
+            ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
+            (i)++)
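+
+/*
+ * Usage sketch (rgn is a struct ufshpb_region * obtained elsewhere):
+ *
+ *        struct ufshpb_subregion *srgn;
+ *        int i;
+ *
+ *        for_each_sub_region(rgn, i, srgn)
+ *                srgn->srgn_state = HPB_SRGN_INVALID;
+ */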
+
+/**
+ * struct ufshpb_req - HPB related request structure (write/read buffer)
+ * @req: block layer request structure
+ * @bio: bio for this request
+ * @hpb: ufshpb_lu structure this request belongs to
+ * @list_req: ufshpb_req mempool list
+ * @mctx: L2P map information
+ * @rgn_idx: target region index
+ * @srgn_idx: target sub-region index
+ * @lun: target logical unit number
+ * @m_page: L2P map information data for pre-request
+ * @len: length of host-side cached L2P map in m_page
+ * @lpn: start LPN of L2P map in m_page
+ */
+struct ufshpb_req {
+       struct request *req;
+       struct bio *bio;
+       struct ufshpb_lu *hpb;
+       struct list_head list_req;
+       union {
+               struct {
+                       struct ufshpb_map_ctx *mctx;
+                       unsigned int rgn_idx;
+                       unsigned int srgn_idx;
+                       unsigned int lun;
+               } rb;
+               struct {
+                       struct page *m_page;
+                       unsigned int len;
+                       unsigned long lpn;
+               } wb;
+       };
+};
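+
+/*
+ * The rb/wb union mirrors the two flavours of HPB request: "rb" holds the
+ * map context for an HPB READ BUFFER (map load), while "wb" holds the page
+ * of L2P entries sent with an HPB WRITE BUFFER pre-request.
+ */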
+
+struct victim_select_info {
+       struct list_head lh_lru_rgn; /* LRU list of regions */
+       int max_lru_active_cnt; /* supported hpb #region - pinned #region */
+       atomic_t active_cnt;
+};
+
+/**
+ * ufshpb_params - ufs hpb parameters
+ * @requeue_timeout_ms - requeue threshold of wb command (0x2)
+ * @activation_thld - min reads [IOs] to activate/update a region
+ * @normalization_factor - shift right the region's reads
+ * @eviction_thld_enter - min reads [IOs] for the entering region in eviction
+ * @eviction_thld_exit - max reads [IOs] for the exiting region in eviction
+ * @read_timeout_ms - timeout [ms] from the last read IO to the region
+ * @read_timeout_expiries - number of allowed read timeout expiries
+ * @timeout_polling_interval_ms - interval [ms] at which timeouts are checked
+ * @inflight_map_req - number of inflight map requests
+ */
+struct ufshpb_params {
+       unsigned int requeue_timeout_ms;
+       unsigned int activation_thld;
+       unsigned int normalization_factor;
+       unsigned int eviction_thld_enter;
+       unsigned int eviction_thld_exit;
+       unsigned int read_timeout_ms;
+       unsigned int read_timeout_expiries;
+       unsigned int timeout_polling_interval_ms;
+       unsigned int inflight_map_req;
+};
+
+struct ufshpb_stats {
+       u64 hit_cnt;
+       u64 miss_cnt;
+       u64 rb_noti_cnt;
+       u64 rb_active_cnt;
+       u64 rb_inactive_cnt;
+       u64 map_req_cnt;
+       u64 pre_req_cnt;
+       u64 umap_req_cnt;
+};
+
+struct ufshpb_lu {
+       int lun;
+       struct scsi_device *sdev_ufs_lu;
+
+       spinlock_t rgn_state_lock; /* for protect rgn/srgn state */
+       struct ufshpb_region *rgn_tbl;
+
+       atomic_t hpb_state;
+
+       spinlock_t rsp_list_lock;
+       struct list_head lh_act_srgn; /* hold rsp_list_lock */
+       struct list_head lh_inact_rgn; /* hold rsp_list_lock */
+
+       /* pre request information */
+       struct ufshpb_req *pre_req;
+       int num_inflight_pre_req;
+       int throttle_pre_req;
+       int num_inflight_map_req; /* hold param_lock */
+       spinlock_t param_lock;
+
+       struct list_head lh_pre_req_free;
+       int cur_read_id;
+       int pre_req_min_tr_len;
+       int pre_req_max_tr_len;
+
+       /* cached L2P map management worker */
+       struct work_struct map_work;
+
+       /* for selecting victim */
+       struct victim_select_info lru_info;
+       struct work_struct ufshpb_normalization_work;
+       struct delayed_work ufshpb_read_to_work;
+       unsigned long work_data_bits;
+#define TIMEOUT_WORK_RUNNING 0
+
+       /* pinned region information */
+       u32 lu_pinned_start;
+       u32 lu_pinned_end;
+
+       /* HPB related configuration */
+       u32 rgns_per_lu;
+       u32 srgns_per_lu;
+       u32 last_srgn_entries;
+       int srgns_per_rgn;
+       u32 srgn_mem_size;
+       u32 entries_per_rgn_mask;
+       u32 entries_per_rgn_shift;
+       u32 entries_per_srgn;
+       u32 entries_per_srgn_mask;
+       u32 entries_per_srgn_shift;
+       u32 pages_per_srgn;
+
+       bool is_hcm;
+
+       struct ufshpb_stats stats;
+       struct ufshpb_params params;
+
+       struct kmem_cache *map_req_cache;
+       struct kmem_cache *m_page_cache;
+
+       struct list_head list_hpb_lu;
+};
+
+struct ufs_hba;
+struct ufshcd_lrb;
+
+#ifndef CONFIG_SCSI_UFS_HPB
+static inline int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
+static inline void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
+static inline void ufshpb_resume(struct ufs_hba *hba) {}
+static inline void ufshpb_suspend(struct ufs_hba *hba) {}
+static inline void ufshpb_reset(struct ufs_hba *hba) {}
+static inline void ufshpb_reset_host(struct ufs_hba *hba) {}
+static inline void ufshpb_init(struct ufs_hba *hba) {}
+static inline void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
+static inline void ufshpb_remove(struct ufs_hba *hba) {}
+static inline bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
+static inline void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
+static inline void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
+static inline bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
+#else
+int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+void ufshpb_resume(struct ufs_hba *hba);
+void ufshpb_suspend(struct ufs_hba *hba);
+void ufshpb_reset(struct ufs_hba *hba);
+void ufshpb_reset_host(struct ufs_hba *hba);
+void ufshpb_init(struct ufs_hba *hba);
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_remove(struct ufs_hba *hba);
+bool ufshpb_is_allowed(struct ufs_hba *hba);
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
+bool ufshpb_is_legacy(struct ufs_hba *hba);
+extern struct attribute_group ufs_sysfs_hpb_stat_group;
+extern struct attribute_group ufs_sysfs_hpb_param_group;
+#endif
+
+#endif /* End of Header */
index b0deaf4..c25ce8f 100644
@@ -519,7 +519,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
                                    struct virtio_scsi_cmd_req_pi *cmd_pi,
                                    struct scsi_cmnd *sc)
 {
-       struct request *rq = sc->request;
+       struct request *rq = scsi_cmd_to_rq(sc);
        struct blk_integrity *bi;
 
        virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
@@ -543,7 +543,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
 static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
                                                  struct scsi_cmnd *sc)
 {
-       u32 tag = blk_mq_unique_tag(sc->request);
+       u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
        u16 hwq = blk_mq_unique_tag_to_hwq(tag);
 
        return &vscsi->req_vqs[hwq];
index edc8a13..6f10a43 100644
@@ -466,14 +466,16 @@ static int wd719x_abort(struct scsi_cmnd *cmd)
        unsigned long flags;
        struct wd719x_scb *scb = scsi_cmd_priv(cmd);
        struct wd719x *wd = shost_priv(cmd->device->host);
+       struct device *dev = &wd->pdev->dev;
 
-       dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
+       dev_info(dev, "abort command, tag: %x\n", scsi_cmd_to_rq(cmd)->tag);
 
-       action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT;
+       action = WD719X_CMD_ABORT;
 
        spin_lock_irqsave(wd->sh->host_lock, flags);
        result = wd719x_direct_cmd(wd, action, cmd->device->id,
-                                  cmd->device->lun, cmd->tag, scb->phys, 0);
+                                  cmd->device->lun, scsi_cmd_to_rq(cmd)->tag,
+                                  scb->phys, 0);
        wd719x_finish_cmd(scb, DID_ABORT);
        spin_unlock_irqrestore(wd->sh->host_lock, flags);
        if (result)
index ec9d399..0204e31 100644
@@ -212,7 +212,7 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
        memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
 
        ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
-       ring_req->timeout_per_command = sc->request->timeout / HZ;
+       ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;
 
        for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
                ring_req->seg[i] = shadow->seg[i];
index c163b14..72171ea 100644
@@ -5,7 +5,7 @@ menuconfig TARGET_CORE
        depends on BLOCK
        select CONFIGFS_FS
        select CRC_T10DIF
-       select BLK_SCSI_REQUEST
+       select SCSI_COMMON
        select SGL_ALLOC
        default n
        help
index b044999..072afd0 100644
@@ -234,7 +234,7 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        struct cxgbit_device *cdev = csk->com.cdev;
        struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
        struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
-       int ret = -EINVAL;
+       int ret;
 
        if ((!ccmd->setup_ddp) ||
            (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
index cbb2118..52db28d 100644
@@ -183,7 +183,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
 
        memset(tl_cmd, 0, sizeof(*tl_cmd));
        tl_cmd->sc = sc;
-       tl_cmd->sc_cmd_tag = sc->request->tag;
+       tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
 
        tcm_loop_target_queue_cmd(tl_cmd);
        return 0;
@@ -241,7 +241,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 {
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
-       int ret = FAILED;
+       int ret;
 
        /*
         * Locate the tcm_loop_hba_t pointer
@@ -249,7 +249,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
        ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
-                                sc->request->tag, TMR_ABORT_TASK);
+                                scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
 
@@ -261,7 +261,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 {
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
-       int ret = FAILED;
+       int ret;
 
        /*
         * Locate the tcm_loop_hba_t pointer
index 4d3ceee..b9f9fb5 100644
@@ -1389,8 +1389,8 @@ static void sbp_sense_mangle(struct sbp_target_request *req)
                (sense[0] & 0x80) |             /* valid */
                ((sense[2] & 0xe0) >> 1) |      /* mark, eom, ili */
                (sense[2] & 0x0f);              /* sense_key */
-       status[2] = se_cmd->scsi_asc;           /* sense_code */
-       status[3] = se_cmd->scsi_ascq;          /* sense_qualifier */
+       status[2] = 0;                          /* XXX sense_code */
+       status[3] = 0;                          /* XXX sense_qualifier */
 
        /* information */
        status[4] = sense[3];
index 3bb9213..cb1de1e 100644
@@ -428,22 +428,6 @@ out:
        return rc;
 }
 
-static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
-{
-       /*
-        * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-        * The ALUA additional sense code qualifier (ASCQ) is determined
-        * by the ALUA primary or secondary access state..
-        */
-       pr_debug("[%s]: ALUA TG Port not available, "
-               "SenseKey: NOT_READY, ASC/ASCQ: "
-               "0x04/0x%02x\n",
-               cmd->se_tfo->fabric_name, alua_ascq);
-
-       cmd->scsi_asc = 0x04;
-       cmd->scsi_ascq = alua_ascq;
-}
-
 static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
@@ -458,9 +442,9 @@ static inline void core_alua_state_nonoptimized(
        cmd->alua_nonop_delay = nonop_delay_msecs;
 }
 
-static inline int core_alua_state_lba_dependent(
+static inline sense_reason_t core_alua_state_lba_dependent(
        struct se_cmd *cmd,
-       struct t10_alua_tg_pt_gp *tg_pt_gp)
+       u16 tg_pt_gp_id)
 {
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;
@@ -506,23 +490,19 @@ static inline int core_alua_state_lba_dependent(
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
-                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-                       return 1;
+                       return TCM_ALUA_TG_PT_UNAVAILABLE;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
                                    lba_map_mem_list) {
-                       if (map_mem->lba_map_mem_alua_pg_id !=
-                           tg_pt_gp->tg_pt_gp_id)
+                       if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
                                continue;
                        switch(map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
-                               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-                               return 1;
+                               return TCM_ALUA_TG_PT_STANDBY;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
-                               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-                               return 1;
+                               return TCM_ALUA_TG_PT_UNAVAILABLE;
                        default:
                                break;
                        }
@@ -532,7 +512,7 @@ static inline int core_alua_state_lba_dependent(
        return 0;
 }
 
-static inline int core_alua_state_standby(
+static inline sense_reason_t core_alua_state_standby(
        struct se_cmd *cmd,
        unsigned char *cdb)
 {
@@ -556,24 +536,21 @@ static inline int core_alua_state_standby(
                case SAI_READ_CAPACITY_16:
                        return 0;
                default:
-                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-                       return 1;
+                       return TCM_ALUA_TG_PT_STANDBY;
                }
        case MAINTENANCE_IN:
                switch (cdb[1] & 0x1f) {
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
-                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-                       return 1;
+                       return TCM_ALUA_TG_PT_STANDBY;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
-                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-                       return 1;
+                       return TCM_ALUA_TG_PT_STANDBY;
                }
        case REQUEST_SENSE:
        case PERSISTENT_RESERVE_IN:
@@ -582,14 +559,13 @@ static inline int core_alua_state_standby(
        case WRITE_BUFFER:
                return 0;
        default:
-               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-               return 1;
+               return TCM_ALUA_TG_PT_STANDBY;
        }
 
        return 0;
 }
 
-static inline int core_alua_state_unavailable(
+static inline sense_reason_t core_alua_state_unavailable(
        struct se_cmd *cmd,
        unsigned char *cdb)
 {
@@ -606,30 +582,27 @@ static inline int core_alua_state_unavailable(
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
-                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-                       return 1;
+                       return TCM_ALUA_TG_PT_UNAVAILABLE;
                }
        case MAINTENANCE_OUT:
                switch (cdb[1]) {
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
-                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-                       return 1;
+                       return TCM_ALUA_TG_PT_UNAVAILABLE;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
-               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-               return 1;
+               return TCM_ALUA_TG_PT_UNAVAILABLE;
        }
 
        return 0;
 }
 
-static inline int core_alua_state_transition(
+static inline sense_reason_t core_alua_state_transition(
        struct se_cmd *cmd,
        unsigned char *cdb)
 {
@@ -646,16 +619,14 @@ static inline int core_alua_state_transition(
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
-                       set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
-                       return 1;
+                       return TCM_ALUA_STATE_TRANSITION;
                }
        case REQUEST_SENSE:
        case READ_BUFFER:
        case WRITE_BUFFER:
                return 0;
        default:
-               set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
-               return 1;
+               return TCM_ALUA_STATE_TRANSITION;
        }
 
        return 0;
@@ -674,6 +645,8 @@ target_alua_state_check(struct se_cmd *cmd)
        struct se_lun *lun = cmd->se_lun;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        int out_alua_state, nonop_delay_msecs;
+       u16 tg_pt_gp_id;
+       sense_reason_t rc = TCM_NO_SENSE;
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
@@ -687,8 +660,7 @@ target_alua_state_check(struct se_cmd *cmd)
        if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
-               set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
-               return TCM_CHECK_CONDITION_NOT_READY;
+               return TCM_ALUA_OFFLINE;
        }
 
        if (!lun->lun_tg_pt_gp)
@@ -698,8 +670,8 @@ target_alua_state_check(struct se_cmd *cmd)
        tg_pt_gp = lun->lun_tg_pt_gp;
        out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+       tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
 
-       // XXX: keeps using tg_pt_gp witout reference after unlock
        spin_unlock(&lun->lun_tg_pt_gp_lock);
        /*
         * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
@@ -715,20 +687,16 @@ target_alua_state_check(struct se_cmd *cmd)
                core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
-               if (core_alua_state_standby(cmd, cdb))
-                       return TCM_CHECK_CONDITION_NOT_READY;
+               rc = core_alua_state_standby(cmd, cdb);
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
-               if (core_alua_state_unavailable(cmd, cdb))
-                       return TCM_CHECK_CONDITION_NOT_READY;
+               rc = core_alua_state_unavailable(cmd, cdb);
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
-               if (core_alua_state_transition(cmd, cdb))
-                       return TCM_CHECK_CONDITION_NOT_READY;
+               rc = core_alua_state_transition(cmd, cdb);
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
-               if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
-                       return TCM_CHECK_CONDITION_NOT_READY;
+               rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
@@ -738,10 +706,16 @@ target_alua_state_check(struct se_cmd *cmd)
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
-               return TCM_INVALID_CDB_FIELD;
+               rc = TCM_INVALID_CDB_FIELD;
        }
 
-       return 0;
+       if (rc && rc != TCM_INVALID_CDB_FIELD) {
+               pr_debug("[%s]: ALUA TG Port not available, "
+                       "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
+                       cmd->se_tfo->fabric_name, rc);
+       }
+
+       return rc;
 }
 
 /*
index 44d9d02..4069a1e 100644
@@ -83,7 +83,7 @@ static int iblock_configure_device(struct se_device *dev)
        struct blk_integrity *bi;
        fmode_t mode;
        unsigned int max_write_zeroes_sectors;
-       int ret = -ENOMEM;
+       int ret;
 
        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
index 2629d2e..75ef52f 100644
@@ -620,17 +620,17 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
                        buf = transport_kmap_data_sg(cmd);
                        if (!buf) {
                                ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
-                       }
-
-                       if (cdb[0] == MODE_SENSE_10) {
-                               if (!(buf[3] & 0x80))
-                                       buf[3] |= 0x80;
                        } else {
-                               if (!(buf[2] & 0x80))
-                                       buf[2] |= 0x80;
-                       }
+                               if (cdb[0] == MODE_SENSE_10) {
+                                       if (!(buf[3] & 0x80))
+                                               buf[3] |= 0x80;
+                               } else {
+                                       if (!(buf[2] & 0x80))
+                                               buf[2] |= 0x80;
+                               }
 
-                       transport_kunmap_data_sg(cmd);
+                               transport_kunmap_data_sg(cmd);
+                       }
                }
        }
 after_mode_sense:
index 26ceabe..14c6f2b 100644
@@ -736,8 +736,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd,
-                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+       transport_generic_request_failure(cmd, cmd->sense_reason);
 }
 
 /*
@@ -855,7 +854,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 }
 
 /* May be called from interrupt context so must not sleep. */
-void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
+                                   sense_reason_t sense_reason)
 {
        struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
        int success, cpu;
@@ -865,6 +865,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
                return;
 
        cmd->scsi_status = scsi_status;
+       cmd->sense_reason = sense_reason;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        switch (cmd->scsi_status) {
@@ -893,6 +894,14 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
        queue_work_on(cpu, target_completion_wq, &cmd->work);
 }
+EXPORT_SYMBOL(target_complete_cmd_with_sense);
+
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+{
+       target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
+                             TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
+                             TCM_NO_SENSE);
+}
 EXPORT_SYMBOL(target_complete_cmd);
 
 void target_set_cmd_data_length(struct se_cmd *cmd, int length)
@@ -2003,7 +2012,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_ADDRESS_OUT_OF_RANGE:
        case TCM_CHECK_CONDITION_ABORT_CMD:
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
-       case TCM_CHECK_CONDITION_NOT_READY:
        case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
@@ -2013,6 +2021,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_TOO_MANY_SEGMENT_DESCS:
        case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
        case TCM_INVALID_FIELD_IN_COMMAND_IU:
+       case TCM_ALUA_TG_PT_STANDBY:
+       case TCM_ALUA_TG_PT_UNAVAILABLE:
+       case TCM_ALUA_STATE_TRANSITION:
+       case TCM_ALUA_OFFLINE:
                break;
        case TCM_OUT_OF_RESOURCES:
                cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -3277,9 +3289,6 @@ static const struct sense_detail sense_detail_table[] = {
        [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
                .key = UNIT_ATTENTION,
        },
-       [TCM_CHECK_CONDITION_NOT_READY] = {
-               .key = NOT_READY,
-       },
        [TCM_MISCOMPARE_VERIFY] = {
                .key = MISCOMPARE,
                .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
@@ -3340,6 +3349,26 @@ static const struct sense_detail sense_detail_table[] = {
                .asc = 0x0e,
                .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
        },
+       [TCM_ALUA_TG_PT_STANDBY] = {
+               .key = NOT_READY,
+               .asc = 0x04,
+               .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+       },
+       [TCM_ALUA_TG_PT_UNAVAILABLE] = {
+               .key = NOT_READY,
+               .asc = 0x04,
+               .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+       },
+       [TCM_ALUA_STATE_TRANSITION] = {
+               .key = NOT_READY,
+               .asc = 0x04,
+               .ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+       },
+       [TCM_ALUA_OFFLINE] = {
+               .key = NOT_READY,
+               .asc = 0x04,
+               .ascq = ASCQ_04H_ALUA_OFFLINE,
+       },
 };
 
 /**
@@ -3374,11 +3403,8 @@ static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
                        cmd->scsi_status = SAM_STAT_BUSY;
                        return;
                }
-       } else if (sd->asc == 0) {
-               WARN_ON_ONCE(cmd->scsi_asc == 0);
-               asc = cmd->scsi_asc;
-               ascq = cmd->scsi_ascq;
        } else {
+               WARN_ON_ONCE(sd->asc == 0);
                asc = sd->asc;
                ascq = sd->ascq;
        }
index fbb6ffa..9f552f4 100644
@@ -191,6 +191,7 @@ struct tcmu_cmd {
        unsigned long deadline;
 
 #define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_KEEP_BUF 1
        unsigned long flags;
 };
 
@@ -1315,11 +1316,13 @@ unlock:
        mutex_unlock(&udev->cmdr_lock);
 }
 
-static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
+static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
+                                  struct tcmu_cmd_entry *entry, bool keep_buf)
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
        bool read_len_valid = false;
+       bool ret = true;
        uint32_t read_len;
 
        /*
@@ -1330,6 +1333,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                WARN_ON_ONCE(se_cmd);
                goto out;
        }
+       if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+               pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
+                      entry->hdr.cmd_id);
+               set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+               ret = false;
+               goto out;
+       }
 
        list_del_init(&cmd->queue_entry);
 
@@ -1379,8 +1389,22 @@ done:
                target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
 
 out:
-       tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
-       tcmu_free_cmd(cmd);
+       if (!keep_buf) {
+               tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+               tcmu_free_cmd(cmd);
+       } else {
+               /*
+                * Keep this command after completion, since userspace still
+                * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
+                * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
+                * a second completion later.
+                * Userspace can free the buffer later by writing the cmd_id
+                * to new action attribute free_kept_buf.
+                */
+               clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+               set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
+       }
+       return ret;
 }
 
 static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
@@ -1432,6 +1456,7 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
        while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
 
                struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
+               bool keep_buf;
 
                /*
                 * Flush max. up to end of cmd ring since current entry might
@@ -1453,7 +1478,11 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
                }
                WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
 
-               cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
+               keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
+               if (keep_buf)
+                       cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
+               else
+                       cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
                if (!cmd) {
                        pr_err("cmd_id %u not found, ring is broken\n",
                               entry->hdr.cmd_id);
@@ -1461,7 +1490,8 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
                        return false;
                }
 
-               tcmu_handle_completion(cmd, entry);
+               if (!tcmu_handle_completion(cmd, entry, keep_buf))
+                       break;
 
                UPDATE_HEAD(udev->cmdr_last_cleaned,
                            tcmu_hdr_get_len(entry->hdr.len_op),
@@ -1619,7 +1649,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
 
 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 {
-       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+       if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
+           test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
                kmem_cache_free(tcmu_cmd_cache, cmd);
                return 0;
        }
@@ -1903,6 +1934,38 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
        struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+       struct tcmu_cmd *cmd;
+       unsigned long i;
+       bool freed = false;
+
+       mutex_lock(&udev->cmdr_lock);
+
+       xa_for_each(&udev->commands, i, cmd) {
+               /*
+                * Cmds with KEEP_BUF set are no longer on the ring, but
+                * userspace still holds the data buffer. If userspace closes
+                * the device, implicitly free these cmds and buffers: after a
+                * new open, userspace can no longer find the cmd in the ring
+                * and so would never release the buffer by writing its cmd_id
+                * to the free_kept_buf action attribute.
+                */
+               if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
+                       continue;
+               pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
+                        cmd->cmd_id, udev->name);
+               freed = true;
+
+               xa_erase(&udev->commands, i);
+               tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+               tcmu_free_cmd(cmd);
+       }
+       /*
+        * We only freed data space, not ring space. Therefore we don't call
+        * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+        */
+       if (freed && list_empty(&udev->tmr_queue))
+               run_qfull_queue(udev, false);
+
+       mutex_unlock(&udev->cmdr_lock);
 
        clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
 
@@ -2147,7 +2210,8 @@ static int tcmu_configure_device(struct se_device *dev)
        mb->version = TCMU_MAILBOX_VERSION;
        mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
                    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
-                   TCMU_MAILBOX_FLAG_CAP_TMR;
+                   TCMU_MAILBOX_FLAG_CAP_TMR |
+                   TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
        mb->cmdr_off = CMDR_OFF;
        mb->cmdr_size = udev->cmdr_size;
 
@@ -2279,12 +2343,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
        mutex_lock(&udev->cmdr_lock);
 
        xa_for_each(&udev->commands, i, cmd) {
-               pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
-                         cmd->cmd_id, udev->name,
-                         test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
+               pr_debug("removing cmd %u on dev %s from ring %s\n",
+                        cmd->cmd_id, udev->name,
+                        test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
+                        "(is expired)" :
+                        (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
+                        "(is keep buffer)" : ""));
 
                xa_erase(&udev->commands, i);
-               if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+               if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
+                   !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
                        WARN_ON(!cmd->se_cmd);
                        list_del_init(&cmd->queue_entry);
                        cmd->se_cmd->priv = NULL;
@@ -2933,6 +3001,65 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
 }
 CONFIGFS_ATTR_WO(tcmu_, reset_ring);
 
+static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
+                                       size_t count)
+{
+       struct se_device *se_dev = container_of(to_config_group(item),
+                                               struct se_device,
+                                               dev_action_group);
+       struct tcmu_dev *udev = TCMU_DEV(se_dev);
+       struct tcmu_cmd *cmd;
+       u16 cmd_id;
+       int ret;
+
+       if (!target_dev_configured(&udev->se_dev)) {
+               pr_err("Device is not configured.\n");
+               return -EINVAL;
+       }
+
+       ret = kstrtou16(page, 0, &cmd_id);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&udev->cmdr_lock);
+
+       {
+               XA_STATE(xas, &udev->commands, cmd_id);
+
+               xas_lock(&xas);
+               cmd = xas_load(&xas);
+               if (!cmd) {
+                       pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
+                       count = -EINVAL;
+                       xas_unlock(&xas);
+                       goto out_unlock;
+               }
+               if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+                       pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
+                              cmd_id);
+                       count = -EINVAL;
+                       xas_unlock(&xas);
+                       goto out_unlock;
+               }
+               xas_store(&xas, NULL);
+               xas_unlock(&xas);
+       }
+
+       tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+       tcmu_free_cmd(cmd);
+       /*
+        * We only freed data space, not ring space. Therefore we don't call
+        * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
+        */
+       if (list_empty(&udev->tmr_queue))
+               run_qfull_queue(udev, false);
+
+out_unlock:
+       mutex_unlock(&udev->cmdr_lock);
+       return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
+
 static struct configfs_attribute *tcmu_attrib_attrs[] = {
        &tcmu_attr_cmd_time_out,
        &tcmu_attr_qfull_time_out,
@@ -2951,6 +3078,7 @@ static struct configfs_attribute **tcmu_attrs;
 static struct configfs_attribute *tcmu_action_attrs[] = {
        &tcmu_attr_block_dev,
        &tcmu_attr_reset_ring,
+       &tcmu_attr_free_kept_buf,
        NULL,
 };
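
For illustration, a minimal userspace sketch of the release side of this interface, assuming a device created under user_0 and the default configfs mount point; the helper name and path handling are hypothetical:

/* Hypothetical helper: after consuming the data of a command that was
 * completed with TCMU_UFLAG_KEEP_BUF, release the kept buffer by writing
 * its cmd_id to the free_kept_buf action attribute. */
#include <stdio.h>

static int tcmu_release_kept_buf(const char *dev_name, unsigned short cmd_id)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/config/target/core/user_0/%s/action/free_kept_buf",
                 dev_name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%hu\n", cmd_id);
        return fclose(f);
}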
 
index 0f13193..d4fe7cb 100644 (file)
@@ -674,12 +674,16 @@ static void target_xcopy_do_work(struct work_struct *work)
        unsigned int max_sectors;
        int rc = 0;
        unsigned short nolb, max_nolb, copied_nolb = 0;
+       sense_reason_t sense_rc;
 
-       if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
+       sense_rc = target_parse_xcopy_cmd(xop);
+       if (sense_rc != TCM_NO_SENSE)
                goto err_free;
 
-       if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
+       if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) {
+               sense_rc = TCM_INVALID_PARAMETER_LIST;
                goto err_free;
+       }
 
        src_dev = xop->src_dev;
        dst_dev = xop->dst_dev;
@@ -762,20 +766,20 @@ static void target_xcopy_do_work(struct work_struct *work)
        return;
 
 out:
+       /*
+        * The XCOPY command was aborted after some data was transferred.
+        * Terminate command with CHECK CONDITION status, with the sense key
+        * set to COPY ABORTED.
+        */
+       sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
        xcopy_pt_undepend_remotedev(xop);
        target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
 
 err_free:
        kfree(xop);
-       /*
-        * Don't override an error scsi status if it has already been set
-        */
-       if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
-               pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
-                       " CHECK_CONDITION -> sending response\n", rc);
-               ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
-       }
-       target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
+       pr_warn_ratelimited("target_xcopy_do_work: rc: %d, sense: %u, XCOPY operation failed\n",
+                          rc, sense_rc);
+       target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
 }
 
 /*
index f4304ce..4c5a0a4 100644 (file)
@@ -551,7 +551,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
        /* Did this command access the last sector? */
        sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
                        (srb->cmnd[4] << 8) | (srb->cmnd[5]);
-       disk = srb->request->rq_disk;
+       disk = scsi_cmd_to_rq(srb)->rq_disk;
        if (!disk)
                goto done;
        sdkp = scsi_disk(disk);
index f229172..6e9ea4e 100644 (file)
@@ -109,7 +109,7 @@ config NFSD_SCSILAYOUT
        depends on NFSD_V4 && BLOCK
        select NFSD_PNFS
        select EXPORTFS_BLOCK_OPS
-       select BLK_SCSI_REQUEST
+       select SCSI_COMMON
        help
          This option enables support for exporting pNFS SCSI layouts
          in the kernel's NFS server. The pNFS SCSI layout enables NFS
index c9cb124..12b9dbc 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/bio.h>
 #include <linux/stringify.h>
 #include <linux/gfp.h>
-#include <linux/bsg.h>
 #include <linux/smp.h>
 #include <linux/rcupdate.h>
 #include <linux/percpu-refcount.h>
 #include <linux/sbitmap.h>
 
 struct module;
-struct scsi_ioctl_command;
-
 struct request_queue;
 struct elevator_queue;
 struct blk_trace;
 struct request;
 struct sg_io_hdr;
-struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
 struct pr_ops;
@@ -274,9 +270,6 @@ enum blk_queue_state {
 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 
-#define BLK_SCSI_MAX_CMDS      (256)
-#define BLK_SCSI_CMD_PER_LONG  (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
 /*
  * Zoned block device models (zoned limit).
  *
@@ -505,11 +498,6 @@ struct request_queue {
        unsigned int            max_active_zones;
 #endif /* CONFIG_BLK_DEV_ZONED */
 
-       /*
-        * sg stuff
-        */
-       unsigned int            sg_timeout;
-       unsigned int            sg_reserved_size;
        int                     node;
        struct mutex            debugfs_mutex;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
@@ -536,10 +524,6 @@ struct request_queue {
 
        int                     mq_freeze_depth;
 
-#if defined(CONFIG_BLK_DEV_BSG)
-       struct bsg_class_device bsg_dev;
-#endif
-
 #ifdef CONFIG_BLK_DEV_THROTTLING
        /* Throttle data */
        struct throtl_data *td;
@@ -885,16 +869,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_queue_split(struct bio **);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
-                             unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
-                         unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
-                        struct scsi_ioctl_command __user *);
-extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
-extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
-
 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
 extern void blk_queue_exit(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
@@ -1347,8 +1321,6 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                                    gfp_mask, 0);
 }
 
-extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
-
 static inline bool bdev_is_partition(struct block_device *bdev)
 {
        return bdev->bd_partno;
@@ -1377,6 +1349,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
        return q->limits.max_sectors;
 }
 
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+       return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
 {
        return q->limits.max_hw_sectors;
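
A hedged example of how a passthrough path might use the new helper; the surrounding check is illustrative, not taken from this series:

/* Cap a user-supplied transfer at what the queue allows; the helper
 * converts the queue's sector limit to bytes, clamped below INT_MAX. */
if (hdr->dxfer_len > queue_max_bytes(q))
        return -EINVAL;
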
index 960988d..6b21132 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/blkdev.h>
 #include <scsi/scsi_request.h>
 
+struct bsg_job;
 struct request;
 struct device;
 struct scatterlist;
index dac37b6..1ac81c8 100644 (file)
@@ -4,36 +4,16 @@
 
 #include <uapi/linux/bsg.h>
 
-struct request;
+struct bsg_device;
+struct device;
+struct request_queue;
 
-#ifdef CONFIG_BLK_DEV_BSG
-struct bsg_ops {
-       int     (*check_proto)(struct sg_io_v4 *hdr);
-       int     (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
-                               fmode_t mode);
-       int     (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
-       void    (*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+               fmode_t mode, unsigned int timeout);
 
-struct bsg_class_device {
-       struct device *class_dev;
-       int minor;
-       struct request_queue *queue;
-       const struct bsg_ops *ops;
-};
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+               struct device *parent, const char *name,
+               bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
 
-int bsg_register_queue(struct request_queue *q, struct device *parent,
-               const char *name, const struct bsg_ops *ops);
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
-void bsg_unregister_queue(struct request_queue *q);
-#else
-static inline int bsg_scsi_register_queue(struct request_queue *q,
-               struct device *parent)
-{
-       return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif /* CONFIG_BLK_DEV_BSG */
 #endif /* _LINUX_BSG_H */
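
A minimal sketch of the reworked registration flow, assuming a hypothetical transport handler my_sg_io() implementing bsg_sg_io_fn (error handling abbreviated):

static int my_sg_io(struct request_queue *q, struct sg_io_v4 *hdr,
                    fmode_t mode, unsigned int timeout)
{
        /* translate hdr into a transport-specific request here */
        return -EOPNOTSUPP;
}

/* during device setup: */
struct bsg_device *bd;

bd = bsg_register_queue(q, dev, "my_transport", my_sg_io);
if (IS_ERR(bd))
        return PTR_ERR(bd);
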
index f48d0a3..c4fef00 100644 (file)
@@ -86,11 +86,13 @@ struct cdrom_device_ops {
        /* play stuff */
        int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
 
-/* driver specifications */
-       const int capability;   /* capability flags */
        /* handle uniform packets for scsi type devices (scsi,atapi) */
        int (*generic_packet) (struct cdrom_device_info *,
                               struct packet_command *);
+       int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+                              u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+       const int capability;   /* capability flags */
 };
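
For orientation, a hypothetical driver wiring the new hook; the implementation body is a placeholder for a raw-frame CDDA read:

static int my_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
                            u32 lba, u32 nframes, u8 *last_sense)
{
        /* issue a raw READ CD for nframes frames at lba, copy to ubuf */
        return -ENOSYS;
}

static const struct cdrom_device_ops my_cdrom_ops = {
        /* ... audio_ioctl, generic_packet, etc. ... */
        .read_cdda_bpc = my_read_cdda_bpc,
        .capability    = CDC_DRIVE_STATUS,      /* illustrative */
};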
 
 int cdrom_multisession(struct cdrom_device_info *cdi,
index 779a59f..eaf04c9 100644 (file)
@@ -111,9 +111,6 @@ struct scsi_cmnd {
                                   reconnects.   Probably == sector
                                   size */
 
-       struct request *request;        /* The command we are
-                                          working on */
-
        unsigned char *sense_buffer;
                                /* obtained by REQUEST SENSE when
                                 * CHECK CONDITION is received on original
@@ -142,10 +139,15 @@ struct scsi_cmnd {
        int flags;              /* Command flags */
        unsigned long state;    /* Command completion state */
 
-       unsigned char tag;      /* SCSI-II queued command tag */
        unsigned int extra_len; /* length of alignment and padding */
 };
 
+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+       return blk_mq_rq_from_pdu(scmd);
+}
+
 /*
  * Return the driver private allocation behind the command.
  * Only works if cmd_size is set in the host template.
@@ -158,7 +160,9 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
 /* make sure not to use it with passthrough commands */
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
-       return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+       struct request *rq = scsi_cmd_to_rq(cmd);
+
+       return *(struct scsi_driver **)rq->rq_disk->private_data;
 }
 
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
@@ -220,6 +224,25 @@ static inline int scsi_sg_copy_to_buffer(struct scsi_cmnd *cmd,
                                 buf, buflen);
 }
 
+static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
+{
+       return blk_rq_pos(scsi_cmd_to_rq(scmd));
+}
+
+static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+{
+       unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+
+       return blk_rq_pos(scsi_cmd_to_rq(scmd)) >> shift;
+}
+
+static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
+{
+       unsigned int shift = ilog2(scmd->device->sector_size);
+
+       return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
+}
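
A worked example of the unit conversions these helpers perform, with illustrative values:

/* Assume a 4096-byte logical block size (so the scsi_get_lba() shift is
 * ilog2(4096) - SECTOR_SHIFT == 3) and a request at 512-byte sector 80
 * carrying 32768 bytes of data:
 *   scsi_get_sector()          -> 80               (512-byte units)
 *   scsi_get_lba()             -> 80 >> 3    == 10 (logical blocks)
 *   scsi_logical_block_count() -> 32768 >> 12 == 8 (logical blocks)
 */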
+
 /*
  * The operations below are hints that tell the controller driver how
  * to handle I/Os with DIF or similar types of protection information.
@@ -282,9 +305,11 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
        return scmd->prot_type;
 }
 
-static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
+static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
 {
-       return blk_rq_pos(scmd->request);
+       struct request *rq = scsi_cmd_to_rq(scmd);
+
+       return t10_pi_ref_tag(rq);
 }
 
 static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
index ac6ab16..09a17f6 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/atomic.h>
 #include <linux/sbitmap.h>
 
+struct bsg_device;
 struct device;
 struct request_queue;
 struct scsi_cmnd;
@@ -205,6 +206,7 @@ struct scsi_device {
        unsigned unmap_limit_for_ws:1;  /* Use the UNMAP limit for WRITE SAME */
        unsigned rpm_autosuspend:1;     /* Enable runtime autosuspend at device
                                         * creation time */
+       unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
 
        bool offline_already;           /* Device offline message logged */
 
@@ -234,6 +236,10 @@ struct scsi_device {
        size_t                  dma_drain_len;
        void                    *dma_drain_buf;
 
+       unsigned int            sg_timeout;
+       unsigned int            sg_reserved_size;
+
+       struct bsg_device       *bsg_dev;
        unsigned char           access_state;
        struct mutex            state_mutex;
        enum scsi_device_state sdev_state;
@@ -265,13 +271,15 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
 __printf(3, 4) void
 scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
 
-#define scmd_dbg(scmd, fmt, a...)                                         \
-       do {                                                               \
-               if ((scmd)->request->rq_disk)                              \
-                       sdev_dbg((scmd)->device, "[%s] " fmt,              \
-                                (scmd)->request->rq_disk->disk_name, ##a);\
-               else                                                       \
-                       sdev_dbg((scmd)->device, fmt, ##a);                \
+#define scmd_dbg(scmd, fmt, a...)                                      \
+       do {                                                            \
+               struct request *__rq = scsi_cmd_to_rq((scmd));          \
+                                                                       \
+               if (__rq->rq_disk)                                      \
+                       sdev_dbg((scmd)->device, "[%s] " fmt,           \
+                                __rq->rq_disk->disk_name, ##a);        \
+               else                                                    \
+                       sdev_dbg((scmd)->device, fmt, ##a);             \
        } while (0)
 
 enum scsi_target_state {
index 3fdb322..5d14ada 100644 (file)
@@ -28,7 +28,8 @@
 #define BLIST_LARGELUN         ((__force blist_flags_t)(1ULL << 9))
 /* override additional length field */
 #define BLIST_INQUIRY_36       ((__force blist_flags_t)(1ULL << 10))
-#define __BLIST_UNUSED_11      ((__force blist_flags_t)(1ULL << 11))
+/* ignore MEDIA CHANGE unit attention after resuming from runtime suspend */
+#define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11))
 /* do not do automatic start on add */
 #define BLIST_NOSTARTONADD     ((__force blist_flags_t)(1ULL << 12))
 #define __BLIST_UNUSED_13      ((__force blist_flags_t)(1ULL << 13))
@@ -73,8 +74,7 @@
 #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
                               (__force blist_flags_t) \
                               ((__force __u64)__BLIST_LAST_USED - 1ULL)))
-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
-                            __BLIST_UNUSED_13 | \
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
                             __BLIST_UNUSED_14 | \
                             __BLIST_UNUSED_15 | \
                             __BLIST_UNUSED_16 | \
index b465799..d2cb9ae 100644 (file)
@@ -18,7 +18,9 @@
 
 #ifdef __KERNEL__
 
+struct gendisk;
 struct scsi_device;
+struct sg_io_hdr;
 
 /*
  * Structures used for scsi_ioctl et al.
@@ -43,8 +45,11 @@ typedef struct scsi_fctargaddress {
 
 int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
                int cmd, bool ndelay);
-extern int scsi_ioctl(struct scsi_device *, int, void __user *);
-extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
+               int cmd, void __user *arg);
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
+bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode);
 
 #endif /* __KERNEL__ */
 #endif /* _SCSI_IOCTL_H */
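
A sketch of a caller adapted to the consolidated entry point, where the disk and open mode are now passed explicitly; the device lookup is hypothetical:

static int my_bdev_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long arg)
{
        struct scsi_device *sdev = my_bdev_to_sdev(bdev); /* hypothetical */

        return scsi_ioctl(sdev, bdev->bd_disk, mode, cmd,
                          (void __user *)arg);
}
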
index b06f28c..9129b23 100644 (file)
@@ -28,6 +28,4 @@ static inline void scsi_req_free_cmd(struct scsi_request *req)
                kfree(req->cmd);
 }
 
-void scsi_req_init(struct scsi_request *req);
-
 #endif /* _SCSI_SCSI_REQUEST_H */
index 1f78b09..675f3a1 100644 (file)
@@ -75,6 +75,7 @@ void  target_backend_unregister(const struct target_backend_ops *);
 
 void   target_complete_cmd(struct se_cmd *, u8);
 void   target_set_cmd_data_length(struct se_cmd *, int);
+void   target_complete_cmd_with_sense(struct se_cmd *, u8, sense_reason_t);
 void   target_complete_cmd_with_length(struct se_cmd *, u8, int);
 
 void   transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
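
A minimal sketch of using the new completion helper from a backend error path; the sense value here is only an example:

/* Terminate with CHECK CONDITION and an explicit sense_reason_t instead
 * of open-coding ASC/ASCQ in the se_cmd. */
target_complete_cmd_with_sense(cmd, SAM_STAT_CHECK_CONDITION,
                               TCM_INVALID_PARAMETER_LIST);
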
index 85c16c2..fb11c76 100644 (file)
@@ -171,7 +171,7 @@ enum tcm_sense_reason_table {
        TCM_WRITE_PROTECTED                     = R(0x0c),
        TCM_CHECK_CONDITION_ABORT_CMD           = R(0x0d),
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = R(0x0e),
-       TCM_CHECK_CONDITION_NOT_READY           = R(0x0f),
+
        TCM_RESERVATION_CONFLICT                = R(0x10),
        TCM_ADDRESS_OUT_OF_RANGE                = R(0x11),
        TCM_OUT_OF_RESOURCES                    = R(0x12),
@@ -188,6 +188,10 @@ enum tcm_sense_reason_table {
        TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
        TCM_LUN_BUSY                            = R(0x1e),
        TCM_INVALID_FIELD_IN_COMMAND_IU         = R(0x1f),
+       TCM_ALUA_TG_PT_STANDBY                  = R(0x20),
+       TCM_ALUA_TG_PT_UNAVAILABLE              = R(0x21),
+       TCM_ALUA_STATE_TRANSITION               = R(0x22),
+       TCM_ALUA_OFFLINE                        = R(0x23),
 #undef R
 };
 
@@ -453,10 +457,10 @@ enum target_core_dif_check {
 #define TCM_ACA_TAG    0x24
 
 struct se_cmd {
+       /* Used for fail with specific sense codes */
+       sense_reason_t          sense_reason;
        /* SAM response code being sent to initiator */
        u8                      scsi_status;
-       u8                      scsi_asc;
-       u8                      scsi_ascq;
        u16                     scsi_sense_length;
        unsigned                unknown_data_length:1;
        bool                    state_active:1;
index 95b1597..27ace51 100644 (file)
@@ -46,6 +46,7 @@
 #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
 #define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
 #define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
+#define TCMU_MAILBOX_FLAG_CAP_KEEP_BUF (1 << 3) /* Keep buf after cmd completion */
 
 struct tcmu_mailbox {
        __u16 version;
@@ -75,6 +76,7 @@ struct tcmu_cmd_entry_hdr {
        __u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP 0x1
 #define TCMU_UFLAG_READ_LEN   0x2
+#define TCMU_UFLAG_KEEP_BUF   0x4
        __u8 uflags;
 
 } __packed;
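
On the userspace side, a hedged sketch of completing a command while keeping its buffer mapped; mb and entry point into the mmap'd ring, and ring bookkeeping is elided:

/* Only set TCMU_UFLAG_KEEP_BUF if the kernel advertised the capability. */
if (mb->flags & TCMU_MAILBOX_FLAG_CAP_KEEP_BUF)
        entry->hdr.uflags |= TCMU_UFLAG_KEEP_BUF;
entry->rsp.scsi_status = 0;     /* GOOD; the buffer is released later by
                                 * writing cmd_id to free_kept_buf */
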
index 91d4be9..c9812c5 100644 (file)
@@ -41,6 +41,7 @@ enum fc_els_cmd {
        ELS_REC =       0x13,   /* read exchange concise */
        ELS_SRR =       0x14,   /* sequence retransmission request */
        ELS_FPIN =      0x16,   /* Fabric Performance Impact Notification */
+       ELS_EDC =       0x17,   /* Exchange Diagnostic Capabilities */
        ELS_RDP =       0x18,   /* Read Diagnostic Parameters */
        ELS_RDF =       0x19,   /* Register Diagnostic Functions */
        ELS_PRLI =      0x20,   /* process login */
@@ -111,6 +112,7 @@ enum fc_els_cmd {
        [ELS_REC] =     "REC",                  \
        [ELS_SRR] =     "SRR",                  \
        [ELS_FPIN] =    "FPIN",                 \
+       [ELS_EDC] =     "EDC",                  \
        [ELS_RDP] =     "RDP",                  \
        [ELS_RDF] =     "RDF",                  \
        [ELS_PRLI] =    "PRLI",                 \
@@ -218,6 +220,10 @@ enum fc_els_rjt_explan {
 enum fc_ls_tlv_dtag {
        ELS_DTAG_LS_REQ_INFO =          0x00000001,
                /* Link Service Request Information Descriptor */
+       ELS_DTAG_LNK_FAULT_CAP =        0x0001000D,
+               /* Link Fault Capability Descriptor */
+       ELS_DTAG_CG_SIGNAL_CAP =        0x0001000F,
+               /* Congestion Signaling Capability Descriptor */
        ELS_DTAG_LNK_INTEGRITY =        0x00020001,
                /* Link Integrity Notification Descriptor */
        ELS_DTAG_DELIVERY =             0x00020002,
@@ -236,6 +242,8 @@ enum fc_ls_tlv_dtag {
  */
 #define FC_LS_TLV_DTAG_INIT {                                        \
        { ELS_DTAG_LS_REQ_INFO,         "Link Service Request Information" }, \
+       { ELS_DTAG_LNK_FAULT_CAP,       "Link Fault Capability" },            \
+       { ELS_DTAG_CG_SIGNAL_CAP,       "Congestion Signaling Capability" },  \
        { ELS_DTAG_LNK_INTEGRITY,       "Link Integrity Notification" },      \
        { ELS_DTAG_DELIVERY,            "Delivery Notification Present" },    \
        { ELS_DTAG_PEER_CONGEST,        "Peer Congestion Notification" },     \
@@ -1144,4 +1152,102 @@ struct fc_els_rdf_resp {
 };
 
 
+/*
+ * Diagnostic Capability Descriptors for EDC ELS
+ */
+
+/*
+ * Diagnostic: Link Fault Capability Descriptor
+ */
+struct fc_diag_lnkflt_desc {
+       __be32          desc_tag;       /* Descriptor Tag (0x0001000D) */
+       __be32          desc_len;       /* Length of Descriptor (in bytes).
+                                        * Size of descriptor excluding
+                                        * desc_tag and desc_len fields.
+                                        * 12 bytes
+                                        */
+       __be32          degrade_activate_threshold;
+       __be32          degrade_deactivate_threshold;
+       __be32          fec_degrade_interval;
+};
+
+enum fc_edc_cg_signal_cap_types {
+       /* Note: Capability: bits 31:4 Rsvd; bits 3:0 are capabilities */
+       EDC_CG_SIG_NOTSUPPORTED =       0x00, /* neither supported */
+       EDC_CG_SIG_WARN_ONLY =          0x01,
+       EDC_CG_SIG_WARN_ALARM =         0x02, /* both supported */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_EDC_CG_SIGNAL_CAP_TYPES_INIT {                              \
+       { EDC_CG_SIG_NOTSUPPORTED,      "Signaling Not Supported" },    \
+       { EDC_CG_SIG_WARN_ONLY,         "Warning Signal" },             \
+       { EDC_CG_SIG_WARN_ALARM,        "Warning and Alarm Signals" },  \
+}
+
+enum fc_diag_cg_sig_freq_types {
+       EDC_CG_SIGFREQ_CNT_MIN =        1,      /* Min Frequency Count */
+       EDC_CG_SIGFREQ_CNT_MAX =        999,    /* Max Frequency Count */
+
+       EDC_CG_SIGFREQ_SEC =            0x1,    /* Units: seconds */
+       EDC_CG_SIGFREQ_MSEC =           0x2,    /* Units: milliseconds */
+};
+
+struct fc_diag_cg_sig_freq {
+       __be16          count;          /* Time between signals
+                                        * note: upper 6 bits rsvd
+                                        */
+       __be16          units;          /* Time unit for count
+                                        * note: upper 12 bits rsvd
+                                        */
+};
+
+/*
+ * Diagnostic: Congestion Signaling Capability Descriptor
+ */
+struct fc_diag_cg_sig_desc {
+       __be32          desc_tag;       /* Descriptor Tag (0x0001000F) */
+       __be32          desc_len;       /* Length of Descriptor (in bytes).
+                                        * Size of descriptor excluding
+                                        * desc_tag and desc_len fields.
+                                        * 16 bytes
+                                        */
+       __be32                          xmt_signal_capability;
+       struct fc_diag_cg_sig_freq      xmt_signal_frequency;
+       __be32                          rcv_signal_capability;
+       struct fc_diag_cg_sig_freq      rcv_signal_frequency;
+};
+
+/*
+ * ELS_EDC - Exchange Diagnostic Capabilities
+ */
+struct fc_els_edc {
+       __u8            edc_cmd;        /* command (0x17) */
+       __u8            edc_zero[3];    /* specified as zero - part of cmd */
+       __be32          desc_len;       /* Length of Descriptor List (in bytes).
+                                        * Size of ELS excluding edc_cmd,
+                                        * edc_zero and desc_len fields.
+                                        */
+       struct fc_tlv_desc      desc[0];
+                                       /* Diagnostic Descriptor list */
+};
+
+/*
+ * ELS EDC LS_ACC Response.
+ */
+struct fc_els_edc_resp {
+       struct fc_els_ls_acc    acc_hdr;
+       __be32                  desc_list_len;  /* Length of response (in
+                                                * bytes). Excludes acc_hdr
+                                                * and desc_list_len fields.
+                                                */
+       struct fc_els_lsri_desc lsri;
+       struct fc_tlv_desc      desc[0];
+                                   /* Supported Diagnostic Descriptor list */
+};
+
+
 #endif /* _FC_ELS_H_ */
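
To show how the new descriptors are consumed, a sketch walking an EDC payload's TLV list; fc_tlv_next_desc() and FC_TLV_DESC_HDR_SZ are the existing TLV helpers in this header, but the loop itself is illustrative:

struct fc_els_edc *edc = payload;
struct fc_tlv_desc *tlv = &edc->desc[0];
u32 bytes = be32_to_cpu(edc->desc_len);

while (bytes >= FC_TLV_DESC_HDR_SZ) {
        u32 sz = FC_TLV_DESC_SZ_FROM_LENGTH(tlv);

        if (sz > bytes)
                break;          /* malformed descriptor */

        switch (be32_to_cpu(tlv->desc_tag)) {
        case ELS_DTAG_LNK_FAULT_CAP:
                /* decode struct fc_diag_lnkflt_desc */
                break;
        case ELS_DTAG_CG_SIGNAL_CAP:
                /* decode struct fc_diag_cg_sig_desc */
                break;
        }
        bytes -= sz;
        tlv = fc_tlv_next_desc(tlv);
}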