crypto: qat - Move driver to drivers/crypto/intel/qat
author     Tom Zanussi <tom.zanussi@linux.intel.com>
Tue, 28 Mar 2023 15:39:51 +0000 (10:39 -0500)
committer  Herbert Xu <herbert@gondor.apana.org.au>
Thu, 6 Apr 2023 08:41:28 +0000 (16:41 +0800)
With the growing number of Intel crypto drivers, it makes sense to
group them all into a single drivers/crypto/intel/ directory.

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
217 files changed:
MAINTAINERS
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/intel/Kconfig
drivers/crypto/intel/Makefile
drivers/crypto/intel/qat/Kconfig [new file with mode: 0644]
drivers/crypto/intel/qat/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_4xxx/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62x/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_accel_engine.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_admin.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_aer.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg_common.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_cfg_user.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_common_drv.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_config.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_config.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_init.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_isr.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_sriov.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_sysfs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport_debug.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_transport_internal.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_vf_isr.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hal.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hw.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_algs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_algs_send.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_algs_send.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_bl.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_bl.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_comp_algs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_comp_req.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_compression.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_compression.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_crypto.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_crypto.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_hal.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/qat_uclo.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/Makefile [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c [new file with mode: 0644]
drivers/crypto/qat/Kconfig [deleted file]
drivers/crypto/qat/Makefile [deleted file]
drivers/crypto/qat/qat_4xxx/Makefile [deleted file]
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c [deleted file]
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h [deleted file]
drivers/crypto/qat/qat_4xxx/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c3xxx/Makefile [deleted file]
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c [deleted file]
drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h [deleted file]
drivers/crypto/qat/qat_c3xxx/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c3xxxvf/Makefile [deleted file]
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c [deleted file]
drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h [deleted file]
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c62x/Makefile [deleted file]
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c [deleted file]
drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h [deleted file]
drivers/crypto/qat/qat_c62x/adf_drv.c [deleted file]
drivers/crypto/qat/qat_c62xvf/Makefile [deleted file]
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c [deleted file]
drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h [deleted file]
drivers/crypto/qat/qat_c62xvf/adf_drv.c [deleted file]
drivers/crypto/qat/qat_common/Makefile [deleted file]
drivers/crypto/qat/qat_common/adf_accel_devices.h [deleted file]
drivers/crypto/qat/qat_common/adf_accel_engine.c [deleted file]
drivers/crypto/qat/qat_common/adf_admin.c [deleted file]
drivers/crypto/qat/qat_common/adf_aer.c [deleted file]
drivers/crypto/qat/qat_common/adf_cfg.c [deleted file]
drivers/crypto/qat/qat_common/adf_cfg.h [deleted file]
drivers/crypto/qat/qat_common/adf_cfg_common.h [deleted file]
drivers/crypto/qat/qat_common/adf_cfg_strings.h [deleted file]
drivers/crypto/qat/qat_common/adf_cfg_user.h [deleted file]
drivers/crypto/qat/qat_common/adf_common_drv.h [deleted file]
drivers/crypto/qat/qat_common/adf_ctl_drv.c [deleted file]
drivers/crypto/qat/qat_common/adf_dev_mgr.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_config.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_config.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_dc.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_dc.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_hw_data.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_hw_data.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_pfvf.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen2_pfvf.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_dc.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_dc.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_hw_data.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_hw_data.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pfvf.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pfvf.h [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pm.c [deleted file]
drivers/crypto/qat/qat_common/adf_gen4_pm.h [deleted file]
drivers/crypto/qat/qat_common/adf_hw_arbiter.c [deleted file]
drivers/crypto/qat/qat_common/adf_init.c [deleted file]
drivers/crypto/qat/qat_common/adf_isr.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_msg.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_utils.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_utils.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c [deleted file]
drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h [deleted file]
drivers/crypto/qat/qat_common/adf_sriov.c [deleted file]
drivers/crypto/qat/qat_common/adf_sysfs.c [deleted file]
drivers/crypto/qat/qat_common/adf_transport.c [deleted file]
drivers/crypto/qat/qat_common/adf_transport.h [deleted file]
drivers/crypto/qat/qat_common/adf_transport_access_macros.h [deleted file]
drivers/crypto/qat/qat_common/adf_transport_debug.c [deleted file]
drivers/crypto/qat/qat_common/adf_transport_internal.h [deleted file]
drivers/crypto/qat/qat_common/adf_vf_isr.c [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_comp.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_la.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_fw_pke.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hal.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hw.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h [deleted file]
drivers/crypto/qat/qat_common/icp_qat_uclo.h [deleted file]
drivers/crypto/qat/qat_common/qat_algs.c [deleted file]
drivers/crypto/qat/qat_common/qat_algs_send.c [deleted file]
drivers/crypto/qat/qat_common/qat_algs_send.h [deleted file]
drivers/crypto/qat/qat_common/qat_asym_algs.c [deleted file]
drivers/crypto/qat/qat_common/qat_bl.c [deleted file]
drivers/crypto/qat/qat_common/qat_bl.h [deleted file]
drivers/crypto/qat/qat_common/qat_comp_algs.c [deleted file]
drivers/crypto/qat/qat_common/qat_comp_req.h [deleted file]
drivers/crypto/qat/qat_common/qat_compression.c [deleted file]
drivers/crypto/qat/qat_common/qat_compression.h [deleted file]
drivers/crypto/qat/qat_common/qat_crypto.c [deleted file]
drivers/crypto/qat/qat_common/qat_crypto.h [deleted file]
drivers/crypto/qat/qat_common/qat_hal.c [deleted file]
drivers/crypto/qat/qat_common/qat_uclo.c [deleted file]
drivers/crypto/qat/qat_dh895xcc/Makefile [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h [deleted file]
drivers/crypto/qat/qat_dh895xcc/adf_drv.c [deleted file]
drivers/crypto/qat/qat_dh895xccvf/Makefile [deleted file]
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c [deleted file]
drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h [deleted file]
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c [deleted file]

diff --git a/MAINTAINERS b/MAINTAINERS
index 32490c1..45ee4e6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17018,7 +17018,7 @@ QAT DRIVER
 M:     Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 L:     qat-linux@intel.com
 S:     Supported
-F:     drivers/crypto/qat/
+F:     drivers/crypto/intel/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
 M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9c9e5a5..9c440cd 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -487,7 +487,6 @@ config CRYPTO_DEV_MXS_DCP
          To compile this driver as a module, choose M here: the module
          will be called mxs-dcp.
 
-source "drivers/crypto/qat/Kconfig"
 source "drivers/crypto/cavium/cpt/Kconfig"
 source "drivers/crypto/cavium/nitrox/Kconfig"
 source "drivers/crypto/marvell/Kconfig"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 17f3237..51d3670 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
-obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
index 420580b..3d90c87 100644
--- a/drivers/crypto/intel/Kconfig
+++ b/drivers/crypto/intel/Kconfig
@@ -2,3 +2,4 @@
 
 source "drivers/crypto/intel/keembay/Kconfig"
 source "drivers/crypto/intel/ixp4xx/Kconfig"
+source "drivers/crypto/intel/qat/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
index 1cc4b6d..b3d0352 100644
--- a/drivers/crypto/intel/Makefile
+++ b/drivers/crypto/intel/Makefile
@@ -2,3 +2,4 @@
 
 obj-y += keembay/
 obj-y += ixp4xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
new file mode 100644
index 0000000..1220cc8
--- /dev/null
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_QAT
+       tristate
+       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       select CRYPTO_AKCIPHER
+       select CRYPTO_DH
+       select CRYPTO_HMAC
+       select CRYPTO_RSA
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select CRYPTO_LIB_AES
+       select FW_LOADER
+       select CRC8
+
+config CRYPTO_DEV_QAT_DH895xCC
+       tristate "Support for Intel(R) DH895xCC"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_C3XXX
+       tristate "Support for Intel(R) C3XXX"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c3xxx.
+
+config CRYPTO_DEV_QAT_C62X
+       tristate "Support for Intel(R) C62X"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c62x.
+
+config CRYPTO_DEV_QAT_4XXX
+       tristate "Support for Intel(R) QAT_4XXX"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) QuickAssist Technology QAT_4xxx
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_4xxx.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+       tristate "Support for Intel(R) DH895xCC Virtual Function"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xccvf.
+
+config CRYPTO_DEV_QAT_C3XXXVF
+       tristate "Support for Intel(R) C3XXX Virtual Function"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c3xxxvf.
+
+config CRYPTO_DEV_QAT_C62XVF
+       tristate "Support for Intel(R) C62X Virtual Function"
+       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+       select PCI_IOV
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
+         Virtual Function for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_c62xvf.
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
new file mode 100644
index 0000000..258c8a6
--- /dev/null
+++ b/drivers/crypto/intel/qat/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
+obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
new file mode 100644
index 0000000..ff9c8b5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
+qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
new file mode 100644
index 0000000..7324b86
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 - 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
+#include "adf_4xxx_hw_data.h"
+#include "icp_qat_hw.h"
+
+struct adf_fw_config {
+       u32 ae_mask;
+       char *obj_name;
+};
+
+static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
+       {0xF0, ADF_4XXX_SYM_OBJ},
+       {0xF, ADF_4XXX_ASYM_OBJ},
+       {0x100, ADF_4XXX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
+       {0xF0, ADF_4XXX_DC_OBJ},
+       {0xF, ADF_4XXX_DC_OBJ},
+       {0x100, ADF_4XXX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_402xx_fw_cy_config[] = {
+       {0xF0, ADF_402XX_SYM_OBJ},
+       {0xF, ADF_402XX_ASYM_OBJ},
+       {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
+static struct adf_fw_config adf_402xx_fw_dc_config[] = {
+       {0xF0, ADF_402XX_DC_OBJ},
+       {0xF, ADF_402XX_DC_OBJ},
+       {0x100, ADF_402XX_ADMIN_OBJ},
+};
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
+       0x5555555, 0x5555555, 0x5555555, 0x5555555,
+       0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
+       0x0
+};
+
+static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
+       0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+       0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
+       0x0
+};
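
/*
 * Illustrative sketch (paraphrased, not the verbatim qat_common code):
 * adf_init_arb() consumes these tables through the get_arb_mapping()
 * hook installed below, writing one 32-bit worker-to-service word per
 * enabled accel engine, roughly:
 *
 *	const u32 *cfg = hw_data->get_arb_mapping(accel_dev);
 *	unsigned long ae_mask = hw_data->ae_mask;
 *	int i;
 *
 *	for_each_set_bit(i, &ae_mask, hw_data->num_engines)
 *		WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, cfg[i]);
 */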
+
+static struct adf_hw_device_class adf_4xxx_class = {
+       .name = ADF_4XXX_DEVICE_NAME,
+       .type = DEV_4XXX,
+       .instances = 0,
+};
+
+enum dev_services {
+       SVC_CY = 0,
+       SVC_DC,
+};
+
+static const char *const dev_cfg_services[] = {
+       [SVC_CY] = ADF_CFG_CY,
+       [SVC_DC] = ADF_CFG_DC,
+};
+
+static int get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       int ret;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       ADF_SERVICES_ENABLED " param not found\n");
+               return ret;
+       }
+
+       ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
+                          services);
+       if (ret < 0)
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+                       services);
+
+       return ret;
+}
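
/*
 * For reference, match_string() from <linux/string.h> returns the index
 * of the matching array entry or -EINVAL, so the SVC_CY/SVC_DC enum
 * values double as indices into dev_cfg_services[]. Hypothetical values
 * (assuming ADF_CFG_DC expands to "dc"):
 *
 *	ServicesEnabled = "dc"  -> get_service_enabled() returns SVC_DC
 *	ServicesEnabled = "foo" -> returns -EINVAL after logging the
 *				   "Invalid value" error above
 */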
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 me_disable = self->fuses;
+
+       return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       if (!self || !self->ae_mask)
+               return 0;
+
+       return hweight32(self->ae_mask);
+}
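
/*
 * Worked example with a hypothetical fuse value: fuses = 0x0C0 marks
 * engines 6 and 7 as disabled, so get_ae_mask() yields
 * ~0x0C0 & 0x1FF = 0x13F and get_num_aes() reports hweight32(0x13F) = 7
 * of the 9 engines covered by ADF_4XXX_ACCELENGINES_MASK.
 */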
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_4XXX_SRAM_BAR;
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *csr;
+       int i;
+
+       csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+       for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
+               ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
+}
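
/*
 * A hypothetical variant (not part of this patch) showing the sharing
 * the comment above allows: steer every ring interrupt to MSI-X entry 0
 * and leave the final VF2PF/error source on its own entry.
 */
static void set_msix_shared_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
	for (i = 0; i < ADF_4XXX_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), 0);
	ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), 1);
}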
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+       u32 capabilities_cy, capabilities_dc;
+       u32 fusectl1;
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
+
+       capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+                         ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+                         ICP_ACCEL_CAPABILITIES_CIPHER |
+                         ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+                         ICP_ACCEL_CAPABILITIES_SHA3 |
+                         ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+                         ICP_ACCEL_CAPABILITIES_HKDF |
+                         ICP_ACCEL_CAPABILITIES_ECEDMONT |
+                         ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+                         ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+                         ICP_ACCEL_CAPABILITIES_AES_V2;
+
+       /* A set bit in fusectl1 means the feature is OFF in this SKU */
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+       }
+
+       capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+                         ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+                         ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+                         ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+       }
+
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return capabilities_cy;
+       case SVC_DC:
+               return capabilities_dc;
+       }
+
+       return 0;
+}
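
/*
 * Hypothetical SKU example: with fusectl1 = ICP_ACCEL_4XXX_MASK_PKE_SLICE
 * (BIT(2), defined in adf_4xxx_hw_data.h below), the asymmetric-crypto
 * and ECEDMONT bits are stripped from capabilities_cy while the
 * symmetric and compression capabilities survive, so a "cy" service
 * configuration still reports the remaining cipher/auth features.
 */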
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_1;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return thrd_to_arb_map_cy;
+       case SVC_DC:
+               return thrd_to_arb_map_dc;
+       }
+
+       return NULL;
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+       arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
+       arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
+       arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+       admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
+       admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
+       admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+       struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
+       void __iomem *csr = misc_bar->virt_addr;
+
+       /* Enable all in errsou3 except VFLR notification on host */
+       ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
+}
+
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *addr;
+
+       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+       /* Enable bundle interrupts */
+       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
+       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+       /* Enable misc interrupts */
+       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *addr;
+       u32 status;
+       u32 csr;
+       int ret;
+
+       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+       /* Temporarily mask PM interrupt */
+       csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+       csr |= ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
+
+       /* Set DRV_ACTIVE bit to power up the device */
+       ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
+
+       /* Poll status register to make sure the device is powered up */
+       ret = read_poll_timeout(ADF_CSR_RD, status,
+                               status & ADF_GEN4_PM_INIT_STATE,
+                               ADF_GEN4_PM_POLL_DELAY_US,
+                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+                               ADF_GEN4_PM_STATUS);
+       if (ret)
+               dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+       return ret;
+}
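
/*
 * For reference, read_poll_timeout() from <linux/iopoll.h> expands to
 * roughly the following loop (simplified sketch; the real macro also
 * performs one final read after the deadline before giving up):
 *
 *	ktime_t deadline = ktime_add_us(ktime_get(),
 *					ADF_GEN4_PM_POLL_TIMEOUT_US);
 *
 *	for (;;) {
 *		usleep_range(ADF_GEN4_PM_POLL_DELAY_US / 2,
 *			     ADF_GEN4_PM_POLL_DELAY_US);
 *		status = ADF_CSR_RD(addr, ADF_GEN4_PM_STATUS);
 *		if (status & ADF_GEN4_PM_INIT_STATE)
 *			return 0;
 *		if (ktime_after(ktime_get(), deadline))
 *			return -ETIMEDOUT;
 *	}
 *
 * The trailing "true" argument requests a sleep before the first read,
 * giving the device time to react to DRV_ACTIVE.
 */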
+
+static u32 uof_get_num_objs(void)
+{
+       BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
+                        ARRAY_SIZE(adf_4xxx_fw_dc_config),
+                        "Size mismatch between adf_4xxx_fw_*_config arrays");
+
+       return ARRAY_SIZE(adf_4xxx_fw_cy_config);
+}
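
/*
 * BUILD_BUG_ON_MSG() trips at compile time, so extending
 * adf_4xxx_fw_cy_config[] without also extending the DC table (or vice
 * versa) breaks the build here instead of letting the uof_get_* helpers
 * below index past the end of the shorter array at run time.
 */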
+
+static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return adf_4xxx_fw_cy_config[obj_num].obj_name;
+       case SVC_DC:
+               return adf_4xxx_fw_dc_config[obj_num].obj_name;
+       }
+
+       return NULL;
+}
+
+static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return adf_402xx_fw_cy_config[obj_num].obj_name;
+       case SVC_DC:
+               return adf_402xx_fw_dc_config[obj_num].obj_name;
+       }
+
+       return NULL;
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+       switch (get_service_enabled(accel_dev)) {
+       case SVC_CY:
+               return adf_4xxx_fw_cy_config[obj_num].ae_mask;
+       case SVC_DC:
+               return adf_4xxx_fw_dc_config[obj_num].ae_mask;
+       }
+
+       return 0;
+}
+
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
+{
+       hw_data->dev_class = &adf_4xxx_class;
+       hw_data->instance_id = adf_4xxx_class.instances++;
+       hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
+       hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
+       hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
+       hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
+       hw_data->num_logical_accel = 1;
+       hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_arb_info = get_arb_info;
+       hw_data->get_admin_info = get_admin_info;
+       hw_data->get_accel_cap = get_accel_cap;
+       hw_data->get_sku = get_sku;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_enable_ints;
+       hw_data->init_device = adf_init_device;
+       hw_data->reset_device = adf_reset_flr;
+       hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+       switch (dev_id) {
+       case ADF_402XX_PCI_DEVICE_ID:
+               hw_data->fw_name = ADF_402XX_FW;
+               hw_data->fw_mmp_name = ADF_402XX_MMP;
+               hw_data->uof_get_name = uof_get_name_402xx;
+               break;
+
+       default:
+               hw_data->fw_name = ADF_4XXX_FW;
+               hw_data->fw_mmp_name = ADF_4XXX_MMP;
+               hw_data->uof_get_name = uof_get_name_4xxx;
+       }
+       hw_data->uof_get_num_objs = uof_get_num_objs;
+       hw_data->uof_get_ae_mask = uof_get_ae_mask;
+       hw_data->set_msix_rttable = set_msix_default_rttable;
+       hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+       hw_data->enable_pm = adf_gen4_enable_pm;
+       hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+       hw_data->dev_config = adf_gen4_dev_config;
+
+       adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen4_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
new file mode 100644
index 0000000..085e259
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_4XXX_HW_DATA_H_
+#define ADF_4XXX_HW_DATA_H_
+
+#include <adf_accel_devices.h>
+
+/* PCIe configuration space */
+#define ADF_4XXX_SRAM_BAR              0
+#define ADF_4XXX_PMISC_BAR             1
+#define ADF_4XXX_ETR_BAR               2
+#define ADF_4XXX_RX_RINGS_OFFSET       1
+#define ADF_4XXX_TX_RINGS_MASK         0x1
+#define ADF_4XXX_MAX_ACCELERATORS      1
+#define ADF_4XXX_MAX_ACCELENGINES      9
+#define ADF_4XXX_BAR_MASK              (BIT(0) | BIT(2) | BIT(4))
+
+/* Physical function fuses */
+#define ADF_4XXX_FUSECTL0_OFFSET       (0x2C8)
+#define ADF_4XXX_FUSECTL1_OFFSET       (0x2CC)
+#define ADF_4XXX_FUSECTL2_OFFSET       (0x2D0)
+#define ADF_4XXX_FUSECTL3_OFFSET       (0x2D4)
+#define ADF_4XXX_FUSECTL4_OFFSET       (0x2D8)
+#define ADF_4XXX_FUSECTL5_OFFSET       (0x2DC)
+
+#define ADF_4XXX_ACCELERATORS_MASK     (0x1)
+#define ADF_4XXX_ACCELENGINES_MASK     (0x1FF)
+#define ADF_4XXX_ADMIN_AE_MASK         (0x100)
+
+#define ADF_4XXX_ETR_MAX_BANKS         64
+
+/* MSIX interrupt */
+#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET      (0x41A040)
+#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET      (0x41A044)
+#define ADF_4XXX_SMIAPF_MASK_OFFSET            (0x41A084)
+#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i)                (0x409000 + ((i) * 0x04))
+
+/* Bank and ring configuration */
+#define ADF_4XXX_NUM_RINGS_PER_BANK    2
+#define ADF_4XXX_NUM_BANKS_PER_VF      4
+
+/* Arbiter configuration */
+#define ADF_4XXX_ARB_CONFIG                    (BIT(31) | BIT(6) | BIT(0))
+#define ADF_4XXX_ARB_OFFSET                    (0x0)
+#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET      (0x400)
+
+/* Admin Interface Reg Offset */
+#define ADF_4XXX_ADMINMSGUR_OFFSET     (0x500574)
+#define ADF_4XXX_ADMINMSGLR_OFFSET     (0x500578)
+#define ADF_4XXX_MAILBOX_BASE_OFFSET   (0x600970)
+
+/* Firmware Binaries */
+#define ADF_4XXX_FW            "qat_4xxx.bin"
+#define ADF_4XXX_MMP           "qat_4xxx_mmp.bin"
+#define ADF_4XXX_SYM_OBJ       "qat_4xxx_sym.bin"
+#define ADF_4XXX_DC_OBJ                "qat_4xxx_dc.bin"
+#define ADF_4XXX_ASYM_OBJ      "qat_4xxx_asym.bin"
+#define ADF_4XXX_ADMIN_OBJ     "qat_4xxx_admin.bin"
+/* Firmware for 402XXX */
+#define ADF_402XX_FW           "qat_402xx.bin"
+#define ADF_402XX_MMP          "qat_402xx_mmp.bin"
+#define ADF_402XX_SYM_OBJ      "qat_402xx_sym.bin"
+#define ADF_402XX_DC_OBJ       "qat_402xx_dc.bin"
+#define ADF_402XX_ASYM_OBJ     "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ    "qat_402xx_admin.bin"
+
+/* qat_4xxx fuse bits are different from old GENs, redefine them */
+enum icp_qat_4xxx_slice_mask {
+       ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
+       ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
+       ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
+       ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
+       ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
+       ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
+       ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
+};
+
+void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
+void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
new file mode 100644
index 0000000..ceb8732
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+
+#include "adf_4xxx_hw_data.h"
+#include "qat_compression.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
+       { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
+       { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+enum configs {
+       DEV_CFG_CY = 0,
+       DEV_CFG_DC,
+};
+
+static const char * const services_operations[] = {
+       ADF_CFG_CY,
+       ADF_CFG_DC,
+};
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       if (accel_dev->hw_device) {
+               adf_clean_hw_data_4xxx(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+       const char *config;
+       int ret;
+
+       config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+       ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+       if (ret)
+               return ret;
+
+       /* Default configuration is crypto only for even devices
+        * and compression for odd devices
+        */
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                         ADF_SERVICES_ENABLED, config,
+                                         ADF_STR);
+       if (ret)
+               return ret;
+
+       return 0;
+}
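
/*
 * Net effect of the default policy (illustrative): accel_id 0, 2, 4, ...
 * come up with ServicesEnabled = "cy" and accel_id 1, 3, 5, ... with
 * "dc". The value can be changed through the sysfs interface added in
 * adf_sysfs.c while the device is down, after which adf_gen4_dev_config()
 * below regenerates the matching instance configuration.
 */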
+
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long bank, val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_crypto(accel_dev))
+               instances = min(cpus, banks / 2);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               bank = i * 2;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &bank, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               bank += 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &bank, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+                        i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               val = 128;
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       val = 0;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+       return ret;
+}
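
/*
 * Illustrative rendering of what the loop above emits for instance 0
 * (hypothetical spellings; the exact key strings come from
 * adf_cfg_strings.h):
 *
 *	Cy0BankAsymNumber = 0	Cy0RingAsymTx = 0	Cy0RingAsymRx = 1
 *	Cy0BankSymNumber  = 1	Cy0RingSymTx  = 0	Cy0RingSymRx  = 1
 *
 * plus core affinity, the 128/512 ring sizes and the Accelerator0
 * interrupt-coalescing timer.
 */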
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_compression(accel_dev))
+               instances = min(cpus, banks);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 1;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       val = 0;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+       return ret;
+}
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       int ret;
+
+       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+       if (ret)
+               goto err;
+
+       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+       if (ret)
+               goto err;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+       if (ret)
+               goto err;
+
+       ret = sysfs_match_string(services_operations, services);
+       if (ret < 0)
+               goto err;
+
+       switch (ret) {
+       case DEV_CFG_CY:
+               ret = adf_crypto_dev_config(accel_dev);
+               break;
+       case DEV_CFG_DC:
+               ret = adf_comp_dev_config(accel_dev);
+               break;
+       }
+
+       if (ret)
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+       return ret;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       struct adf_bar *bar;
+       int ret;
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /*
+                * If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow.
+                */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /*
+        * Add accel device to accel table
+        * This should be called before adf_cleanup_accel is called
+        */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and initialise device hardware meta-data structure */
+       hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
+
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           (~hw_data->ae_mask & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found.\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* Enable PCI device */
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't enable PCI device.\n");
+               goto out_err;
+       }
+
+       /* Set DMA identifier */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration.\n");
+               goto out_err;
+       }
+
+       ret = adf_cfg_dev_init(accel_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+               goto out_err;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+       if (!hw_data->accel_capabilities_mask) {
+               dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       /* Find and map all the device's BARS */
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
+
+       ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to map pci regions.\n");
+               goto out_err;
+       }
+
+       i = 0;
+       for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
+               bar = &accel_pci_dev->pci_bars[i++];
+               bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+       }
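
	/*
	 * ADF_4XXX_BAR_MASK is BIT(0) | BIT(2) | BIT(4), presumably
	 * because each region is a 64-bit BAR occupying two consecutive
	 * slots; the loop above therefore packs PCI BARs 0/2/4 into
	 * pci_bars[0..2], lining up with the ADF_4XXX_{SRAM,PMISC,ETR}_BAR
	 * indices 0, 1 and 2.
	 */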
+
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state.\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       ret = adf_sysfs_init(accel_dev);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+}
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_4XXX_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_4XXX_FW);
+MODULE_FIRMWARE(ADF_4XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
new file mode 100644
index 0000000..92ef416
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
+qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
new file mode 100644
index 0000000..4756436
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_c3xxx_hw_data.h"
+#include "icp_qat_hw.h"
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11222AAA, 0x12222AAA,
+       0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c3xxx_class = {
+       .name = ADF_C3XXX_DEVICE_NAME,
+       .type = DEV_C3XXX,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       u32 accel;
+
+       accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
+       accel &= ADF_C3XXX_ACCELERATORS_MASK;
+
+       return accel;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       unsigned long disabled;
+       u32 ae_disable;
+       int accel;
+
+       /* If an accel is disabled, then disable the corresponding two AEs */
+       disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
+       ae_disable = BIT(1) | BIT(0);
+       for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
+               straps |= ae_disable << (accel << 1);
+
+       return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXX_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXX_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXX_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int aes = self->get_num_aes(self);
+
+       if (aes == 6)
+               return DEV_SKU_4;
+
+       return DEV_SKU_UNKNOWN;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       return thrd_to_arb_map;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+                             ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
+                             ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c3xxx_class;
+       hw_data->instance_id = c3xxx_class.instances++;
+       hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
+       hw_data->get_num_accels = adf_gen2_get_num_accels;
+       hw_data->get_num_aes = adf_gen2_get_num_aes;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_admin_info = adf_gen2_get_admin_info;
+       hw_data->get_arb_info = adf_gen2_get_arb_info;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_C3XXX_FW;
+       hw_data->fw_mmp_name = ADF_C3XXX_MMP;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->configure_iov_threads = configure_iov_threads;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_gen2_enable_ints;
+       hw_data->reset_device = adf_reset_flr;
+       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->dev_config = adf_gen2_dev_config;
+
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
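The two mask helpers above fold soft straps and fuses together: a bit set in either register disables the corresponding unit, and disabling an accelerator takes its two attached engines with it. A worked example as a comment, with hypothetical register values (not from real silicon):

	/*
	 * Suppose straps = 0 and fuses = 0x00020000 (accelerator 1 fused off).
	 *
	 * get_accel_mask():
	 *   ~(0x00020000 | 0x0) >> 16            = 0xFFFD
	 *   0xFFFD & ADF_C3XXX_ACCELERATORS_MASK = 0x5   -> accels 0 and 2 on
	 *
	 * get_ae_mask():
	 *   disabled = ~0x5 & 0x7                = 0x2   -> accel 1 is off
	 *   straps  |= 0x3 << (1 << 1)           = 0xC   -> masks AEs 2 and 3
	 *   ~(0x00020000 | 0xC) & 0x3F           = 0x33  -> AEs 0, 1, 4, 5 on
	 */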
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
new file mode 100644
index 0000000..336a06f
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_C3XXX_HW_DATA_H_
+#define ADF_C3XXX_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C3XXX_PMISC_BAR 0
+#define ADF_C3XXX_ETR_BAR 1
+#define ADF_C3XXX_SRAM_BAR 0
+#define ADF_C3XXX_MAX_ACCELERATORS 3
+#define ADF_C3XXX_MAX_ACCELENGINES 6
+#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
+#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
+#define ADF_C3XXX_ETR_MAX_BANKS 16
+#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
+
+/* AE to function mapping */
+#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
+#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
+
+/* Firmware Binary */
+#define ADF_C3XXX_FW "qat_c3xxx.bin"
+#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
+
+void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
new file mode 100644
index 0000000..bb4dca7
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxx_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C3XXX_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+                       adf_clean_hw_data_c3xxx(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory,
+                * there is no point in using it, since remote memory
+                * transactions will be very slow. */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table.
+        * This should be done before adf_cleanup_accel() is called. */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c3xxx(accel_dev->hw_device);
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+       pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
+                             &hw_data->straps);
+
+       /* Get Accelerator and Accelerator Engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set the DMA mask */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+       /* Find and map all the device's BARs */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err_free_reg;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C3XXX_FW);
+MODULE_FIRMWARE(ADF_C3XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
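Unlike the devres-based 4xxx probe, this driver maps BARs manually: pci_select_bars() yields a bitmask of the memory BARs that exist, and for_each_set_bit() walks it. The bound is ADF_PCI_MAX_BARS * 2 because a 64-bit BAR consumes two consecutive BAR registers, so three usable regions can be spread across up to six BAR numbers. A condensed sketch of the idiom (the 'bars' output table is illustrative, not the driver's structure):

	static int sketch_map_bars(struct pci_dev *pdev, void __iomem **bars)
	{
		unsigned long bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
		unsigned int bar_nr;
		int i = 0;

		/* 64-bit BARs occupy two slots, hence the doubled scan bound */
		for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
			bars[i] = pci_iomap(pdev, bar_nr, 0);	/* 0 = whole BAR */
			if (!bars[i])
				return -ENOMEM;
			i++;
		}
		return 0;
	}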
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
new file mode 100644
index 0000000..b6d7682
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
+qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
new file mode 100644
index 0000000..84d9486
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static struct adf_hw_device_class c3xxxiov_class = {
+       .name = ADF_C3XXXVF_DEVICE_NAME,
+       .type = DEV_C3XXXVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C3XXXIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c3xxxiov_class;
+       hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->dev_class->instances++;
+       hw_data->dev_config = adf_gen2_dev_config;
+       adf_devmgr_update_class_index(hw_data);
+       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+       adf_devmgr_update_class_index(hw_data);
+}
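The adf_vf_int_noop()/adf_vf_void_noop() stubs above fill in PF-only operations (admin comms, arbiter setup, error correction) so the shared start-up code can call every hw_data hook unconditionally rather than branching on is_vf at each step. The shape of that pattern, sketched with a hypothetical function name:

	/* Common code drives the ops table without VF/PF special cases */
	static int sketch_start(struct adf_accel_dev *accel_dev)
	{
		struct adf_hw_device_data *hw = accel_dev->hw_device;
		int ret;

		/* On a VF these resolve to the stubs above and succeed */
		ret = hw->init_admin_comms(accel_dev);
		if (ret)
			return ret;
		ret = hw->init_arb(accel_dev);
		if (ret)
			return ret;
		hw->enable_error_correction(accel_dev);
		return 0;
	}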
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
new file mode 100644
index 0000000..6b4bf18
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_C3XXXVF_HW_DATA_H_
+#define ADF_C3XXXVF_HW_DATA_H_
+
+#define ADF_C3XXXIOV_PMISC_BAR 1
+#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
+#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
+#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
+#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
+#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
+#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
+#define ADF_C3XXXIOV_ETR_BAR 0
+#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
+#endif
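Reading the ring constants together: the VF's single bank holds 16 rings, ADF_C3XXXIOV_TX_RINGS_MASK (0xFF) marks rings 0-7 as request (TX) rings, and ADF_C3XXXIOV_RX_RINGS_OFFSET (8) places each response (RX) ring eight slots above its request ring, matching the tx_rx_gap assignment in the hw_data init above. A small sketch of that pairing (the helper is hypothetical):

	#include <linux/bits.h>
	#include <linux/errno.h>

	/* Map a request ring to its response ring under the 8/8 split */
	static int sketch_rx_ring(u32 tx_ring)
	{
		if (!(ADF_C3XXXIOV_TX_RINGS_MASK & BIT(tx_ring)))
			return -EINVAL;		/* not a TX ring */

		return tx_ring + ADF_C3XXXIOV_RX_RINGS_OFFSET;
	}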
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
new file mode 100644
index 0000000..e8cc10f
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c3xxxvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C3XXXVF_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+                       adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
+
+       /* Get Accelerator and Accelerator Engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set the DMA mask */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARs */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.msg_received);
+
+       ret = adf_dev_up(accel_dev, false);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_flush_vf_wq(accel_dev);
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
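Relative to the PF probe, the VF variant above registers neither err_handler nor sriov_configure, resolves its parent PF through pdev->physfn when adding itself to the device-manager table, and initializes accel_dev->vf.msg_received, the completion the VF ISR signals when a PF response arrives. The underlying completion idiom, in a generic sketch (names are illustrative, not the driver's):

	#include <linux/completion.h>

	struct sketch_vf {
		struct completion msg_received;
	};

	/* Requester: send, then block until the IRQ handler completes us */
	static int sketch_send_and_wait(struct sketch_vf *vf)
	{
		reinit_completion(&vf->msg_received);
		/* ... write the VF2PF message to hardware here ... */
		if (!wait_for_completion_timeout(&vf->msg_received, HZ))
			return -ETIMEDOUT;
		return 0;
	}

	/* IRQ side: wake the waiter once the response has been latched */
	static void sketch_isr(struct sketch_vf *vf)
	{
		complete(&vf->msg_received);
	}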
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
new file mode 100644
index 0000000..d581f7c
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
+qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
new file mode 100644
index 0000000..e142707
--- /dev/null
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_c62x_hw_data.h"
+#include "icp_qat_hw.h"
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
+       0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
+};
+
+static struct adf_hw_device_class c62x_class = {
+       .name = ADF_C62X_DEVICE_NAME,
+       .type = DEV_C62X,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       u32 accel;
+
+       accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
+       accel &= ADF_C62X_ACCELERATORS_MASK;
+
+       return accel;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 straps = self->straps;
+       u32 fuses = self->fuses;
+       unsigned long disabled;
+       u32 ae_disable;
+       int accel;
+
+       /* If an accel is disabled, then disable the corresponding two AEs */
+       disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
+       ae_disable = BIT(1) | BIT(0);
+       for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
+               straps |= ae_disable << (accel << 1);
+
+       return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62X_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62X_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62X_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int aes = self->get_num_aes(self);
+
+       if (aes == 8)
+               return DEV_SKU_2;
+       else if (aes == 10)
+               return DEV_SKU_4;
+
+       return DEV_SKU_UNKNOWN;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       return thrd_to_arb_map;
+}
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+                             ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
+                             ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c62x_class;
+       hw_data->instance_id = c62x_class.instances++;
+       hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
+       hw_data->get_num_accels = adf_gen2_get_num_accels;
+       hw_data->get_num_aes = adf_gen2_get_num_aes;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_admin_info = adf_gen2_get_admin_info;
+       hw_data->get_arb_info = adf_gen2_get_arb_info;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_C62X_FW;
+       hw_data->fw_mmp_name = ADF_C62X_MMP;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->configure_iov_threads = configure_iov_threads;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_gen2_enable_ints;
+       hw_data->reset_device = adf_reset_flr;
+       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->dev_config = adf_gen2_dev_config;
+
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
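get_sku() above keys the SKU purely off the enabled-engine count, which adf_gen2_get_num_aes() presumably derives from the population count of ae_mask: a c62x part with all ten AEs reports DEV_SKU_4, one fused down to eight reports DEV_SKU_2, and anything else is unknown. A hedged sketch of that derivation (the real helper lives in qat_common):

	#include <linux/bitops.h>

	/* e.g. ae_mask 0x3FF -> 10 engines -> DEV_SKU_4 */
	static enum dev_sku_info sketch_c62x_sku(u32 ae_mask)
	{
		switch (hweight32(ae_mask)) {
		case 8:
			return DEV_SKU_2;
		case 10:
			return DEV_SKU_4;
		default:
			return DEV_SKU_UNKNOWN;
		}
	}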
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
new file mode 100644
index 0000000..008c0a3
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_C62X_HW_DATA_H_
+#define ADF_C62X_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_C62X_SRAM_BAR 0
+#define ADF_C62X_PMISC_BAR 1
+#define ADF_C62X_ETR_BAR 2
+#define ADF_C62X_MAX_ACCELERATORS 5
+#define ADF_C62X_MAX_ACCELENGINES 10
+#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
+#define ADF_C62X_ACCELERATORS_MASK 0x1F
+#define ADF_C62X_ACCELENGINES_MASK 0x3FF
+#define ADF_C62X_ETR_MAX_BANKS 16
+#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
+
+/* AE to function mapping */
+#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
+#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
+
+/* Firmware Binary */
+#define ADF_C62X_FW "qat_c62x.bin"
+#define ADF_C62X_MMP "qat_c62x_mmp.bin"
+
+void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
new file mode 100644
index 0000000..ca18ae1
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62x_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C62X_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C62X:
+                       adf_clean_hw_data_c62x(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C62X:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /* If the accelerator is connected to a node with no memory,
+                * there is no point in using it, since remote memory
+                * transactions will be very slow. */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table.
+        * This should be done before adf_cleanup_accel() is called. */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c62x(accel_dev->hw_device);
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+       pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
+                             &hw_data->straps);
+
+       /* Get Accelerator and Accelerator Engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set the DMA mask */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+       /* Find and map all the device's BARs */
+       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err_free_reg;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_C62X_FW);
+MODULE_FIRMWARE(ADF_C62X_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
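One c62x-specific detail in the probe above: the pci_bars[] index starts at 1 when bit 31 of the fuse register (ADF_DEVICE_FUSECTL_MASK) is set. The apparent intent — an assumption on the editor's part, as the code carries no comment — is that such parts expose no SRAM BAR, so slot 0 is left empty and PMISC/ETR still land at their fixed indices (1 and 2). Sketched as a helper:

	/* Hedged reading: skip pci_bars[0] when the SRAM BAR is fused away */
	static int sketch_first_bar_slot(u32 fuses)
	{
		return (fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
	}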
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
new file mode 100644
index 0000000..446c3d6
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
+qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
new file mode 100644
index 0000000..751d7aa
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_c62xvf_hw_data.h"
+
+static struct adf_hw_device_class c62xiov_class = {
+       .name = ADF_C62XVF_DEVICE_NAME,
+       .type = DEV_C62XVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_C62XIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &c62xiov_class;
+       hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->dev_class->instances++;
+       hw_data->dev_config = adf_gen2_dev_config;
+       adf_devmgr_update_class_index(hw_data);
+       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+       adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.h
new file mode 100644
index 0000000..a1a62c0
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_C62XVF_HW_DATA_H_
+#define ADF_C62XVF_HW_DATA_H_
+
+#define ADF_C62XIOV_PMISC_BAR 1
+#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
+#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
+#define ADF_C62XIOV_MAX_ACCELERATORS 1
+#define ADF_C62XIOV_MAX_ACCELENGINES 1
+#define ADF_C62XIOV_RX_RINGS_OFFSET 8
+#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
+#define ADF_C62XIOV_ETR_BAR 0
+#define ADF_C62XIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
new file mode 100644
index 0000000..3756630
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_c62xvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_C62XVF_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+                       adf_clean_hw_data_c62xiov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_c62xiov(accel_dev->hw_device);
+
+       /* Get Accelerator and Accelerator Engine masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Set the DMA mask */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARs */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.msg_received);
+
+       ret = adf_dev_up(accel_dev, false);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_flush_vf_wq(accel_dev);
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
new file mode 100644
index 0000000..1fb8d50
--- /dev/null
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+       adf_isr.o \
+       adf_ctl_drv.o \
+       adf_dev_mgr.o \
+       adf_init.o \
+       adf_accel_engine.o \
+       adf_aer.o \
+       adf_transport.o \
+       adf_admin.o \
+       adf_hw_arbiter.o \
+       adf_sysfs.o \
+       adf_gen2_hw_data.o \
+       adf_gen2_config.o \
+       adf_gen4_hw_data.o \
+       adf_gen4_pm.o \
+       adf_gen2_dc.o \
+       adf_gen4_dc.o \
+       qat_crypto.o \
+       qat_compression.o \
+       qat_comp_algs.o \
+       qat_algs.o \
+       qat_asym_algs.o \
+       qat_algs_send.o \
+       qat_uclo.o \
+       qat_hal.o \
+       qat_bl.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
+                              adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
+                              adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
+                              adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
new file mode 100644
index 0000000..bd19e64
--- /dev/null
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include "adf_cfg_common.h"
+#include "adf_pfvf_msg.h"
+
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
+#define ADF_C3XXX_DEVICE_NAME "c3xxx"
+#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
+#define ADF_4XXX_DEVICE_NAME "4xxx"
+#define ADF_4XXX_PCI_DEVICE_ID 0x4940
+#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
+#define ADF_401XX_PCI_DEVICE_ID 0x4942
+#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
+#define ADF_DEVICE_FUSECTL_OFFSET 0x40
+#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+       ADF_ACCEL_CAPABILITIES_NULL = 0,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+       ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+       ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+       ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+       ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+       ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
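+
+/*
+ * The capability enumerators above are individual bit flags rather
+ * than sequential values; they are meant to be OR-ed together into a
+ * mask such as the accel_capabilities_mask field of
+ * struct adf_hw_device_data below.
+ */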
+
+struct adf_bar {
+       resource_size_t base_addr;
+       void __iomem *virt_addr;
+       resource_size_t size;
+};
+
+struct adf_irq {
+       bool enabled;
+       char name[ADF_MAX_MSIX_VECTOR_NAME];
+};
+
+struct adf_accel_msix {
+       struct adf_irq *irqs;
+       u32 num_entries;
+};
+
+struct adf_accel_pci {
+       struct pci_dev *pci_dev;
+       struct adf_accel_msix msix_entries;
+       struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+       u8 revid;
+       u8 sku;
+};
+
+enum dev_state {
+       DEV_DOWN = 0,
+       DEV_UP
+};
+
+enum dev_sku_info {
+       DEV_SKU_1 = 0,
+       DEV_SKU_2,
+       DEV_SKU_3,
+       DEV_SKU_4,
+       DEV_SKU_VF,
+       DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+       switch (info) {
+       case DEV_SKU_1:
+               return "SKU1";
+       case DEV_SKU_2:
+               return "SKU2";
+       case DEV_SKU_3:
+               return "SKU3";
+       case DEV_SKU_4:
+               return "SKU4";
+       case DEV_SKU_VF:
+               return "SKUVF";
+       case DEV_SKU_UNKNOWN:
+       default:
+               break;
+       }
+       return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+       const char *name;
+       const enum adf_device_type type;
+       u32 instances;
+};
+
+struct arb_info {
+       u32 arb_cfg;
+       u32 arb_offset;
+       u32 wt2sam_offset;
+};
+
+struct admin_info {
+       u32 admin_msg_ur;
+       u32 admin_msg_lr;
+       u32 mailbox_offset;
+};
+
+struct adf_hw_csr_ops {
+       u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
+       u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
+                                 u32 ring);
+       void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
+                                   u32 ring, u32 value);
+       u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
+                                 u32 ring);
+       void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
+                                   u32 ring, u32 value);
+       u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
+       void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
+                                     u32 ring, u32 value);
+       void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
+                                   u32 ring, dma_addr_t addr);
+       void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
+                                  u32 value);
+       void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+       void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
+                                    u32 value);
+       void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
+                                     u32 value);
+       void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
+                                          u32 bank, u32 value);
+       void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
+                                         u32 value);
+};
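+
+/*
+ * The CSR ops above abstract the generation-specific transport ring
+ * register layout; per the qat_common Makefile, adf_gen2_hw_data.c
+ * and adf_gen4_hw_data.c provide the concrete implementations, and
+ * callers are expected to go through the GET_CSR_OPS() accessor
+ * defined below rather than touching registers directly.
+ */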
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_pfvf_ops {
+       int (*enable_comms)(struct adf_accel_dev *accel_dev);
+       u32 (*get_pf2vf_offset)(u32 i);
+       u32 (*get_vf2pf_offset)(u32 i);
+       void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
+       void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
+       u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
+       int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                       u32 pfvf_offset, struct mutex *csr_lock);
+       struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
+                                       u32 pfvf_offset, u8 compat_ver);
+};
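+
+/*
+ * Sketch of a PFVF exchange, inferred from the ops above: the sender
+ * computes the peer's CSR window with get_pf2vf_offset() or
+ * get_vf2pf_offset() and calls send_msg() under the supplied CSR
+ * mutex; the receiving side, typically from its interrupt handling
+ * path, calls recv_msg() to decode the incoming struct pfvf_message.
+ */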
+
+struct adf_dc_ops {
+       void (*build_deflate_ctx)(void *ctx);
+};
+
+struct adf_hw_device_data {
+       struct adf_hw_device_class *dev_class;
+       u32 (*get_accel_mask)(struct adf_hw_device_data *self);
+       u32 (*get_ae_mask)(struct adf_hw_device_data *self);
+       u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
+       u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
+       u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
+       u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
+       u32 (*get_num_aes)(struct adf_hw_device_data *self);
+       u32 (*get_num_accels)(struct adf_hw_device_data *self);
+       void (*get_arb_info)(struct arb_info *arb_csrs_info);
+       void (*get_admin_info)(struct admin_info *admin_csrs_info);
+       enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+       int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+       void (*free_irq)(struct adf_accel_dev *accel_dev);
+       void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+       int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+       void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+       int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+       int (*init_arb)(struct adf_accel_dev *accel_dev);
+       void (*exit_arb)(struct adf_accel_dev *accel_dev);
+       const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
+       int (*init_device)(struct adf_accel_dev *accel_dev);
+       int (*enable_pm)(struct adf_accel_dev *accel_dev);
+       bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
+       void (*disable_iov)(struct adf_accel_dev *accel_dev);
+       void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
+                                     bool enable);
+       void (*enable_ints)(struct adf_accel_dev *accel_dev);
+       void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
+       int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+       void (*reset_device)(struct adf_accel_dev *accel_dev);
+       void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
+       char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
+       u32 (*uof_get_num_objs)(void);
+       u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+       int (*dev_config)(struct adf_accel_dev *accel_dev);
+       struct adf_pfvf_ops pfvf_ops;
+       struct adf_hw_csr_ops csr_ops;
+       struct adf_dc_ops dc_ops;
+       const char *fw_name;
+       const char *fw_mmp_name;
+       u32 fuses;
+       u32 straps;
+       u32 accel_capabilities_mask;
+       u32 extended_dc_capabilities;
+       u32 clock_frequency;
+       u32 instance_id;
+       u16 accel_mask;
+       u32 ae_mask;
+       u32 admin_ae_mask;
+       u16 tx_rings_mask;
+       u16 ring_to_svc_map;
+       u8 tx_rx_gap;
+       u8 num_banks;
+       u16 num_banks_per_vf;
+       u8 num_rings_per_bank;
+       u8 num_accel;
+       u8 num_logical_accel;
+       u8 num_engines;
+};
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+       __raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
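+
+/*
+ * Note: __raw_writel()/__raw_readl() perform no byte swapping and
+ * imply no memory barriers, so these macros assume little-endian
+ * device registers and leave any required ordering to the caller.
+ */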
+
+#define ADF_CFG_NUM_SERVICES   4
+#define ADF_SRV_TYPE_BIT_LEN   3
+#define ADF_SRV_TYPE_MASK      0x7
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_NUM_RINGS_PER_BANK(accel_dev) \
+       GET_HW_DATA(accel_dev)->num_rings_per_bank
+#define GET_SRV_TYPE(accel_dev, idx) \
+       (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
+       & ADF_SRV_TYPE_MASK)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
+#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
+#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
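+
+/*
+ * Worked example for GET_SRV_TYPE() (map value chosen purely for
+ * illustration): with ADF_SRV_TYPE_BIT_LEN = 3, ring_to_svc_map packs
+ * one 3-bit service ID per slot, so a map of 0x23 decodes to
+ * GET_SRV_TYPE(accel_dev, 0) == 3 (SYM) and
+ * GET_SRV_TYPE(accel_dev, 1) == 4 (ASYM), using the service IDs from
+ * enum adf_cfg_service_type.
+ */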
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+       struct icp_qat_fw_loader_handle *fw_loader;
+       const struct firmware *uof_fw;
+       const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+       struct adf_accel_dev *accel_dev;
+       struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+       struct ratelimit_state vf2pf_ratelimit;
+       u32 vf_nr;
+       bool init;
+       u8 vf_compat_ver;
+};
+
+struct adf_dc_data {
+       u8 *ovf_buff;
+       size_t ovf_buff_sz;
+       dma_addr_t ovf_buff_p;
+};
+
+struct adf_accel_dev {
+       struct adf_etr_data *transport;
+       struct adf_hw_device_data *hw_device;
+       struct adf_cfg_device_data *cfg;
+       struct adf_fw_loader_data *fw_loader;
+       struct adf_admin_comms *admin;
+       struct adf_dc_data *dc_data;
+       struct list_head crypto_list;
+       struct list_head compression_list;
+       unsigned long status;
+       atomic_t ref_count;
+       struct dentry *debugfs_dir;
+       struct list_head list;
+       struct module *owner;
+       struct adf_accel_pci accel_pci_dev;
+       union {
+               struct {
+                       /* protects VF2PF interrupts access */
+                       spinlock_t vf2pf_ints_lock;
+                       /* vf_info is non-zero when SR-IOV is initialized */
+                       struct adf_accel_vf_info *vf_info;
+               } pf;
+               struct {
+                       bool irq_enabled;
+                       char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
+                       struct tasklet_struct pf2vf_bh_tasklet;
+                       struct mutex vf2pf_lock; /* protect CSR access */
+                       struct completion msg_received;
+                       struct pfvf_message response; /* temp field holding pf2vf response */
+                       u8 pf_compat_ver;
+               } vf;
+       };
+       struct mutex state_lock; /* protect state of the device */
+       bool is_vf;
+       u32 accel_id;
+};
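+
+/*
+ * The pf/vf union above is discriminated by the is_vf flag: a PF
+ * driver tracks its virtual functions through the vf_info array,
+ * while a VF driver uses the tasklet/completion pair to wait for
+ * PF2VF responses.
+ */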
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
new file mode 100644 (file)
index 0000000..4ce2b66
--- /dev/null
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
+                                u32 fw_size)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_loader_handle *loader;
+       char *obj_name;
+       u32 num_objs;
+       u32 ae_mask;
+       int i;
+
+       loader = loader_data->fw_loader;
+       num_objs = hw_device->uof_get_num_objs();
+
+       for (i = 0; i < num_objs; i++) {
+               obj_name = hw_device->uof_get_name(accel_dev, i);
+               ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
+               if (!obj_name || !ae_mask) {
+                       dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
+                       goto out_err;
+               }
+
+               if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Invalid mask for UOF image\n");
+                       goto out_err;
+               }
+               if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to map UOF firmware\n");
+                       goto out_err;
+               }
+               if (qat_uclo_wr_all_uimage(loader)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to load UOF firmware\n");
+                       goto out_err;
+               }
+               qat_uclo_del_obj(loader);
+       }
+
+       return 0;
+
+out_err:
+       adf_ae_fw_release(accel_dev);
+       return -EFAULT;
+}
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       void *fw_addr, *mmp_addr;
+       u32 fw_size, mmp_size;
+
+       if (!hw_device->fw_name)
+               return 0;
+
+       if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+                       hw_device->fw_mmp_name);
+               return -EFAULT;
+       }
+       if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
+                       hw_device->fw_name);
+               goto out_err;
+       }
+
+       fw_size = loader_data->uof_fw->size;
+       fw_addr = (void *)loader_data->uof_fw->data;
+       mmp_size = loader_data->mmp_fw->size;
+       mmp_addr = (void *)loader_data->mmp_fw->data;
+
+       if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
+               goto out_err;
+       }
+
+       if (hw_device->uof_get_num_objs)
+               return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);
+
+       if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
+               goto out_err;
+       }
+       if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
+               goto out_err;
+       }
+       return 0;
+
+out_err:
+       adf_ae_fw_release(accel_dev);
+       return -EFAULT;
+}
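+
+/*
+ * Load order, as implemented above: the MMP image is always written
+ * first; devices that provide uof_get_num_objs() then load one UOF
+ * object per AE group via adf_ae_fw_load_images(), while older
+ * devices map and write a single UOF object.
+ */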
+
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return;
+
+       qat_uclo_del_obj(loader_data->fw_loader);
+       qat_hal_deinit(loader_data->fw_loader);
+       release_firmware(loader_data->uof_fw);
+       release_firmware(loader_data->mmp_fw);
+       loader_data->uof_fw = NULL;
+       loader_data->mmp_fw = NULL;
+       loader_data->fw_loader = NULL;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 ae_ctr;
+
+       if (!hw_data->fw_name)
+               return 0;
+
+       ae_ctr = qat_hal_start(loader_data->fw_loader);
+       dev_info(&GET_DEV(accel_dev),
+                "qat_dev%d started %d acceleration engines\n",
+                accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+       if (!hw_data->fw_name)
+               return 0;
+
+       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+               if (hw_data->ae_mask & (1 << ae)) {
+                       qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+                       ae_ctr++;
+               }
+       }
+       dev_info(&GET_DEV(accel_dev),
+                "qat_dev%d stopped %d acceleration engines\n",
+                accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+       qat_hal_reset(loader_data->fw_loader);
+       if (qat_hal_clr_reset(loader_data->fw_loader))
+               return -EFAULT;
+
+       return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return 0;
+
+       loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+       if (!loader_data)
+               return -ENOMEM;
+
+       accel_dev->fw_loader = loader_data;
+       if (qat_hal_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       if (adf_ae_reset(accel_dev, 0)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
+               qat_hal_deinit(loader_data->fw_loader);
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+       if (!hw_device->fw_name)
+               return 0;
+
+       qat_hal_deinit(loader_data->fw_loader);
+       kfree(accel_dev->fw_loader);
+       accel_dev->fw_loader = NULL;
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
new file mode 100644 (file)
index 0000000..3b6184c
--- /dev/null
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+#define ADF_CONST_TABLE_SIZE 1024
+#define ADF_ADMIN_POLL_DELAY_US 20
+#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+
+static const u8 const_tab[1024] __aligned(1024) = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
+0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+       dma_addr_t phy_addr;
+       dma_addr_t const_tbl_addr;
+       void *virt_addr;
+       void *virt_tbl_addr;
+       void __iomem *mailbox_addr;
+       struct mutex lock;      /* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+                                 void *in, void *out)
+{
+       int ret;
+       u32 status;
+       struct adf_admin_comms *admin = accel_dev->admin;
+       int offset = ae * ADF_ADMINMSG_LEN * 2;
+       void __iomem *mailbox = admin->mailbox_addr;
+       int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
+       struct icp_qat_fw_init_admin_req *request = in;
+
+       mutex_lock(&admin->lock);
+
+       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+               mutex_unlock(&admin->lock);
+               return -EAGAIN;
+       }
+
+       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+       ADF_CSR_WR(mailbox, mb_offset, 1);
+
+       ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
+                               ADF_ADMIN_POLL_DELAY_US,
+                               ADF_ADMIN_POLL_TIMEOUT_US, true,
+                               mailbox, mb_offset);
+       if (ret < 0) {
+               /* Response timeout */
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send admin msg %d to accelerator %d\n",
+                       request->cmd_id, ae);
+       } else {
+               /* The firmware has responded; copy the response data
+                * into the "out" parameter.
+                */
+               memcpy(out, admin->virt_addr + offset +
+                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+       }
+
+       mutex_unlock(&admin->lock);
+       return ret;
+}
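+
+/*
+ * Mailbox handshake, as implemented above: the request is copied into
+ * the per-AE message slot, the mailbox CSR is set to 1 to hand the
+ * message to firmware, and the CSR is then polled until firmware
+ * clears it back to 0; the response is read from the slot immediately
+ * after the request (offset + ADF_ADMINMSG_LEN).
+ */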
+
+static int adf_send_admin(struct adf_accel_dev *accel_dev,
+                         struct icp_qat_fw_init_admin_req *req,
+                         struct icp_qat_fw_init_admin_resp *resp,
+                         const unsigned long ae_mask)
+{
+       u32 ae;
+
+       for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
+               if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
+                   resp->status)
+                       return -EFAULT;
+
+       return 0;
+}
+
+static int adf_init_ae(struct adf_accel_dev *accel_dev)
+{
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 ae_mask = hw_device->ae_mask;
+
+       memset(&req, 0, sizeof(req));
+       memset(&resp, 0, sizeof(resp));
+       req.cmd_id = ICP_QAT_FW_INIT_AE;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
+{
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
+
+       memset(&req, 0, sizeof(req));
+       memset(&resp, 0, sizeof(resp));
+       req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
+
+       req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
+       req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
+                                  u32 *capabilities)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_resp resp;
+       struct icp_qat_fw_init_admin_req req;
+       unsigned long ae_mask;
+       unsigned long ae;
+       int ret;
+
+       /* Target only the service (non-admin) acceleration engines */
+       ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
+
+       memset(&req, 0, sizeof(req));
+       memset(&resp, 0, sizeof(resp));
+       req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
+
+       *capabilities = 0;
+       for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+               ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
+               if (ret)
+                       return ret;
+
+               *capabilities |= resp.extended_features;
+       }
+
+       return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+       u32 dc_capabilities = 0;
+       int ret;
+
+       ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
+               return ret;
+       }
+       accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+
+       ret = adf_set_fw_constants(accel_dev);
+       if (ret)
+               return ret;
+
+       return adf_init_ae(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+/**
+ * adf_init_admin_pm() - Function sends PM init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ * @idle_delay: QAT HW idle time before power gating is initiated.
+ *             000 - 64us
+ *             001 - 128us
+ *             010 - 256us
+ *             011 - 512us
+ *             100 - 1ms
+ *             101 - 2ms
+ *             110 - 4ms
+ *             111 - 8ms
+ *
+ * Function sends to the FW the admin init message for the PM state
+ * configuration.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_resp resp = {0};
+       struct icp_qat_fw_init_admin_req req = {0};
+       u32 ae_mask = hw_data->admin_ae_mask;
+
+       if (!accel_dev->admin) {
+               dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+               return -EFAULT;
+       }
+
+       req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
+       req.idle_filter = idle_delay;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       struct admin_info admin_csrs_info;
+       u32 mailbox_offset, adminmsg_u, adminmsg_l;
+       void __iomem *mailbox;
+       u64 reg_val;
+
+       admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+                            dev_to_node(&GET_DEV(accel_dev)));
+       if (!admin)
+               return -ENOMEM;
+       admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                             &admin->phy_addr, GFP_KERNEL);
+       if (!admin->virt_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+               kfree(admin);
+               return -ENOMEM;
+       }
+
+       admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                                 PAGE_SIZE,
+                                                 &admin->const_tbl_addr,
+                                                 GFP_KERNEL);
+       if (!admin->virt_tbl_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+               kfree(admin);
+               return -ENOMEM;
+       }
+
+       memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
+       hw_data->get_admin_info(&admin_csrs_info);
+
+       mailbox_offset = admin_csrs_info.mailbox_offset;
+       mailbox = pmisc_addr + mailbox_offset;
+       adminmsg_u = admin_csrs_info.admin_msg_ur;
+       adminmsg_l = admin_csrs_info.admin_msg_lr;
+
+       reg_val = (u64)admin->phy_addr;
+       ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
+       ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
+
+       mutex_init(&admin->lock);
+       admin->mailbox_addr = mailbox;
+       accel_dev->admin = admin;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+
+       if (!admin)
+               return;
+
+       if (admin->virt_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+       if (admin->virt_tbl_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_tbl_addr, admin->const_tbl_addr);
+
+       mutex_destroy(&admin->lock);
+       kfree(admin);
+       accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
new file mode 100644 (file)
index 0000000..04af32a
--- /dev/null
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+                                          pci_channel_state_t state)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
+       if (!accel_dev) {
+               dev_err(&pdev->dev, "Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       if (state == pci_channel_io_perm_failure) {
+               dev_err(&pdev->dev, "Can't recover from device error\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+       int mode;
+       struct adf_accel_dev *accel_dev;
+       struct completion compl;
+       struct work_struct reset_work;
+};
+
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       struct pci_dev *parent = pdev->bus->self;
+       u16 bridge_ctl = 0;
+
+       if (!parent)
+               parent = pdev;
+
+       if (!pci_wait_for_pending_transaction(pdev))
+               dev_info(&GET_DEV(accel_dev),
+                        "Transaction still in progress. Proceeding\n");
+
+       dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
+       pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+       bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+       bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+       pcie_flr(accel_to_pci_dev(accel_dev));
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       if (hw_device->reset_device) {
+               dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+                        accel_dev->accel_id);
+               hw_device->reset_device(accel_dev);
+               pci_restore_state(pdev);
+               pci_save_state(pdev);
+       }
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+       struct adf_reset_dev_data *reset_data =
+                 container_of(work, struct adf_reset_dev_data, reset_work);
+       struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+       adf_dev_restarting_notify(accel_dev);
+       if (adf_dev_restart(accel_dev)) {
+               /* The device hung and we can't restart it, so stop here */
+               dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+               kfree(reset_data);
+               WARN(1, "QAT: device restart failed. Device is unusable\n");
+               return;
+       }
+       adf_dev_restarted_notify(accel_dev);
+       clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+       /* The dev is back alive. Notify the caller if in sync mode */
+       if (reset_data->mode == ADF_DEV_RESET_SYNC)
+               complete(&reset_data->compl);
+       else
+               kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+                                     enum adf_dev_reset_mode mode)
+{
+       struct adf_reset_dev_data *reset_data;
+
+       if (!adf_dev_started(accel_dev) ||
+           test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               return 0;
+
+       set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+       reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+       if (!reset_data)
+               return -ENOMEM;
+       reset_data->accel_dev = accel_dev;
+       init_completion(&reset_data->compl);
+       reset_data->mode = mode;
+       INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+       queue_work(device_reset_wq, &reset_data->reset_work);
+
+       /* If in sync mode wait for the result */
+       if (mode == ADF_DEV_RESET_SYNC) {
+               int ret = 0;
+               /* Maximum device reset time is 10 seconds */
+               unsigned long wait_jiffies = msecs_to_jiffies(10000);
+               unsigned long timeout = wait_for_completion_timeout(
+                                  &reset_data->compl, wait_jiffies);
+               if (!timeout) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Reset device timeout expired\n");
+                       ret = -EFAULT;
+               }
+               kfree(reset_data);
+               return ret;
+       }
+       return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+       dev_info(&pdev->dev, "Acceleration driver reset completed\n");
+       dev_info(&pdev->dev, "Device is up and running\n");
+}
+
+const struct pci_error_handlers adf_err_handler = {
+       .error_detected = adf_error_detected,
+       .slot_reset = adf_slot_reset,
+       .resume = adf_resume,
+};
+EXPORT_SYMBOL_GPL(adf_err_handler);
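+
+/*
+ * Recovery flow: the PCI core calls .error_detected first and, unless
+ * the channel has permanently failed, is asked for a reset; it then
+ * calls .slot_reset, which schedules a synchronous device restart,
+ * and .resume only logs that recovery completed.
+ */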
+
+int adf_init_aer(void)
+{
+       device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+                                         WQ_MEM_RECLAIM, 0);
+       return !device_reset_wq ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+       if (device_reset_wq)
+               destroy_workqueue(device_reset_wq);
+       device_reset_wq = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.c b/drivers/crypto/intel/qat/qat_common/adf_cfg.c
new file mode 100644 (file)
index 0000000..1931e5b
--- /dev/null
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       mutex_lock(&qat_cfg_read_lock);
+       return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+       struct list_head *list;
+       struct adf_cfg_section *sec =
+                               list_entry(v, struct adf_cfg_section, list);
+
+       seq_printf(sfile, "[%s]\n", sec->name);
+       list_for_each(list, &sec->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+       }
+       return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+       .start = qat_dev_cfg_start,
+       .next = qat_dev_cfg_next,
+       .stop = qat_dev_cfg_stop,
+       .show = qat_dev_cfg_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
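+
+/*
+ * DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg) generates qat_dev_cfg_fops from
+ * the seq_operations above; adf_cfg_dev_add() below hands it to
+ * debugfs_create_file() to expose the per-device config table.
+ */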
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data;
+
+       dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+       if (!dev_cfg_data)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+       init_rwsem(&dev_cfg_data->lock);
+       accel_dev->cfg = dev_cfg_data;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+                                                 accel_dev->debugfs_dir,
+                                                 dev_cfg_data,
+                                                 &qat_dev_cfg_fops);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+       clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       if (!dev_cfg_data)
+               return;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+       debugfs_remove(dev_cfg_data->debug);
+       kfree(dev_cfg_data);
+       accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+                              struct adf_cfg_section *sec)
+{
+       list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
+{
+       struct list_head *head = &sec->param_head;
+       struct list_head *list_ptr, *tmp;
+
+       list_for_each_prev_safe(list_ptr, tmp, head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list_ptr, struct adf_cfg_key_val, list);
+
+               if (strncmp(ptr->key, key, sizeof(ptr->key)))
+                       continue;
+
+               list_del(list_ptr);
+               kfree(ptr);
+               break;
+       }
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+       struct list_head *list_ptr, *tmp;
+
+       list_for_each_prev_safe(list_ptr, tmp, head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list_ptr, struct adf_cfg_key_val, list);
+               list_del(list_ptr);
+               kfree(ptr);
+       }
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+       struct adf_cfg_section *ptr;
+       struct list_head *list, *tmp;
+
+       list_for_each_prev_safe(list, tmp, head) {
+               ptr = list_entry(list, struct adf_cfg_section, list);
+               adf_cfg_keyval_del_all(&ptr->param_head);
+               list_del(list);
+               kfree(ptr);
+       }
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+                                                     const char *key)
+{
+       struct list_head *list;
+
+       list_for_each(list, &s->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               if (!strcmp(ptr->key, key))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+                                               const char *sec_name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct list_head *list;
+
+       list_for_each(list, &cfg->sec_list) {
+               struct adf_cfg_section *ptr =
+                       list_entry(list, struct adf_cfg_section, list);
+               if (!strcmp(ptr->name, sec_name))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+                              const char *sec_name,
+                              const char *key_name,
+                              char *val)
+{
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+       struct adf_cfg_key_val *keyval = NULL;
+
+       if (sec)
+               keyval = adf_cfg_key_value_find(sec, key_name);
+       if (keyval) {
+               memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+               return 0;
+       }
+       return -ENODATA;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds configuration key - value entry in the appropriate section
+ * in the given acceleration device. If the key exists already, the value
+ * is updated.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_key_val *key_val;
+       struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+                                                          section_name);
+       char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       if (!section)
+               return -EFAULT;
+
+       key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+       if (!key_val)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&key_val->list);
+       strscpy(key_val->key, key, sizeof(key_val->key));
+
+       if (type == ADF_DEC) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "%ld", (*((long *)val)));
+       } else if (type == ADF_STR) {
+               strscpy(key_val->val, (char *)val, sizeof(key_val->val));
+       } else if (type == ADF_HEX) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "0x%lx", (unsigned long)val);
+       } else {
+               dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
+               kfree(key_val);
+               return -EINVAL;
+       }
+       key_val->type = type;
+
+       /* Add the key-value pair according to the following policy:
+        * 1. if the key doesn't exist, add it;
+        * 2. if the key already exists with a different value then update it
+        *    to the new value (the key is deleted and the newly created
+        *    key_val containing the new value is added to the database);
+        * 3. if the key exists with the same value, then return without doing
+        *    anything (the newly created key_val is freed).
+        */
+       if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
+               if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
+                       adf_cfg_keyval_remove(key, section);
+               } else {
+                       kfree(key_val);
+                       return 0;
+               }
+       }
+
+       down_write(&cfg->lock);
+       adf_cfg_keyval_add(key_val, section);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
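+
+/*
+ * Illustrative usage sketch (the section/key names come from
+ * adf_cfg_strings.h; the value is hypothetical):
+ *
+ *     long val = 2;
+ *
+ *     adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ *     adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ *                                 ADF_NUM_CY, &val, ADF_DEC);
+ */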
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds configuration section where key - value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+       if (sec)
+               return 0;
+
+       sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+       if (!sec)
+               return -ENOMEM;
+
+       strscpy(sec->name, name, sizeof(sec->name));
+       INIT_LIST_HEAD(&sec->param_head);
+       down_write(&cfg->lock);
+       list_add_tail(&sec->list, &cfg->sec_list);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name,
+                           char *value)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       int ret;
+
+       down_read(&cfg->lock);
+       ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+       up_read(&cfg->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg.h b/drivers/crypto/intel/qat/qat_common/adf_cfg.h
new file mode 100644 (file)
index 0000000..376cde6
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       enum adf_cfg_val_type type;
+       struct list_head list;
+};
+
+struct adf_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       struct list_head list;
+       struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+       struct list_head sec_list;
+       struct dentry *debug;
+       struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
new file mode 100644 (file)
index 0000000..6e5de1d
--- /dev/null
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES (32 * 32)
+#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
+
+#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
+#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
+#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
+#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
+enum adf_cfg_service_type {
+       UNUSED = 0,
+       CRYPTO,
+       COMP,
+       SYM,
+       ASYM,
+       USED
+};
+
+enum adf_cfg_val_type {
+       ADF_DEC,
+       ADF_HEX,
+       ADF_STR
+};
+
+enum adf_device_type {
+       DEV_UNKNOWN = 0,
+       DEV_DH895XCC,
+       DEV_DH895XCCVF,
+       DEV_C62X,
+       DEV_C62XVF,
+       DEV_C3XXX,
+       DEV_C3XXXVF,
+       DEV_4XXX,
+};
+
+struct adf_dev_status_info {
+       enum adf_device_type type;
+       __u32 accel_id;
+       __u32 instance_id;
+       __u8 num_ae;
+       __u8 num_accel;
+       __u8 num_logical_accel;
+       __u8 banks_per_accel;
+       __u8 state;
+       __u8 bus;
+       __u8 dev;
+       __u8 fun;
+       char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
+#endif
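
For reference, the ioctls above are reachable from userspace through the character device that adf_ctl_drv.c (later in this patch) registers, typically surfaced by udev as /dev/qat_adf_ctl. A hedged userspace sketch, assuming this header is reachable by the build:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "adf_cfg_common.h"	/* ioctl numbers defined above */

int main(void)
{
	__s32 num_devices = 0;
	int fd = open("/dev/qat_adf_ctl", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The handler copies the device count back into this buffer */
	if (!ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices))
		printf("%d QAT device(s)\n", num_devices);
	close(fd);
	return 0;
}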
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
new file mode 100644 (file)
index 0000000..5d8c3bd
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_SYM_BANK_NUM "BankSymNumber"
+#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
+#define ADF_RING_DC_BANK_NUM "BankDcNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_CFG_DC "dc"
+#define ADF_CFG_CY "sym;asym"
+#define ADF_SERVICES_ENABLED "ServicesEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
+#endif
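
The *_FORMAT macros above build per-bank key names through C string-literal concatenation; for example, ADF_ETRMGR_COALESCE_TIMER_FORMAT expands to "Bank%dInterruptCoalescingTimerNs". A minimal sketch of how the driver materializes such a key:

char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];

/* Expands the concatenated literal and fills in the bank number */
snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, 0);
/* key now contains "Bank0InterruptCoalescingTimerNs" */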
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_user.h
new file mode 100644 (file)
index 0000000..421f4fb
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       union {
+               struct adf_user_cfg_key_val *next;
+               __u64 padding3;
+       };
+       enum adf_cfg_val_type type;
+} __packed;
+
+struct adf_user_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       union {
+               struct adf_user_cfg_key_val *params;
+               __u64 padding1;
+       };
+       union {
+               struct adf_user_cfg_section *next;
+               __u64 padding3;
+       };
+} __packed;
+
+struct adf_user_cfg_ctl_data {
+       union {
+               struct adf_user_cfg_section *config_section;
+               __u64 padding;
+       };
+       __u8 device_id;
+} __packed;
+#endif
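
The pointer/__u64 unions and the __packed attribute keep these structures the same size and layout for 32-bit and 64-bit userspace, so the ioctl ABI does not depend on pointer width. A hedged userspace sketch of building the singly linked config list (key, value and device id are placeholders):

struct adf_user_cfg_key_val kv = {
	.key = "NumberCyInstances",	/* ADF_NUM_CY */
	.val = "2",
	.type = ADF_STR,
};

struct adf_user_cfg_section sec = {
	.name = "KERNEL",		/* ADF_KERNEL_SEC */
	.params = &kv,			/* single-entry list, kv.next == NULL */
};

struct adf_user_cfg_ctl_data cfg = {
	.config_section = &sec,
	.device_id = 0,
};

/* Passed to ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &cfg) */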
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
new file mode 100644 (file)
index 0000000..db79759
--- /dev/null
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_MAJOR_VERSION      0
+#define ADF_MINOR_VERSION      6
+#define ADF_BUILD_VERSION      0
+#define ADF_DRV_VERSION                __stringify(ADF_MAJOR_VERSION) "." \
+                               __stringify(ADF_MINOR_VERSION) "." \
+                               __stringify(ADF_BUILD_VERSION)
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_PF_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+       ADF_DEV_RESET_ASYNC = 0,
+       ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+       ADF_EVENT_INIT = 0,
+       ADF_EVENT_START,
+       ADF_EVENT_STOP,
+       ADF_EVENT_SHUTDOWN,
+       ADF_EVENT_RESTARTING,
+       ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+       int (*event_hld)(struct adf_accel_dev *accel_dev,
+                        enum adf_event event);
+       unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
+       unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
+       char *name;
+       struct list_head list;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+int adf_dev_restart(struct adf_accel_dev *accel_dev);
+
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(u32 id);
+void adf_devmgr_get_num_dev(u32 *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+extern const struct pci_error_handlers adf_err_handler;
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
+void adf_dev_restore(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
+int qat_algs_register(void);
+void qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
+
+struct qat_compression_instance *qat_compression_get_instance_node(int node);
+void qat_compression_put_instance(struct qat_compression_instance *inst);
+int qat_compression_register(void);
+int qat_compression_unregister(void);
+int qat_comp_algs_register(void);
+void qat_comp_algs_unregister(void);
+void qat_comp_alg_callback(void *resp);
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask);
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+                           unsigned int ae);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, u64 *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                    unsigned int uword_addr, unsigned int words_num,
+                    unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned long ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned long ctx_mask,
+                   unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                 unsigned char ae, unsigned short lm_addr, unsigned int value);
+void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char mode);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
+                      int mem_size);
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+                    void *addr_ptr, u32 mem_size, char *obj_name);
+int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
+                            unsigned int cfg_ae_mask);
+int adf_init_misc_wq(void);
+void adf_exit_misc_wq(void);
+bool adf_misc_wq_queue_work(struct work_struct *work);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
+#else
+#define adf_sriov_configure NULL
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_init_pf_wq(void)
+{
+       return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
+
+static inline int adf_init_vf_wq(void)
+{
+       return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
+#endif
+
+static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *pmisc;
+
+       pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+
+       return pmisc->virt_addr;
+}
+
+#endif
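
adf_get_pmisc_base() at the end of this header is the usual entry point for CSR access in the hardware-generation helpers. A minimal sketch, with a hypothetical register offset and function name:

static u32 example_read_misc_csr(struct adf_accel_dev *accel_dev, u32 offset)
{
	void __iomem *pmisc = adf_get_pmisc_base(accel_dev);

	/* ADF_CSR_RD/ADF_CSR_WR come from adf_accel_devices.h */
	return ADF_CSR_RD(pmisc, offset);
}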
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
new file mode 100644 (file)
index 0000000..88c41d6
--- /dev/null
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = adf_ctl_ioctl,
+       .compat_ioctl = compat_ptr_ioctl,
+};
+
+struct adf_ctl_drv_info {
+       unsigned int major;
+       struct cdev drv_cdev;
+       struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adf_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+       device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
+       cdev_del(&adf_ctl_drv.drv_cdev);
+       class_destroy(adf_ctl_drv.drv_class);
+       unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+       dev_t dev_id;
+       struct device *drv_device;
+
+       if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+               pr_err("QAT: unable to allocate chrdev region\n");
+               return -EFAULT;
+       }
+
+       adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+       if (IS_ERR(adf_ctl_drv.drv_class)) {
+               pr_err("QAT: class_create failed for adf_ctl\n");
+               goto err_chrdev_unreg;
+       }
+       adf_ctl_drv.major = MAJOR(dev_id);
+       cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
+       if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
+               pr_err("QAT: cdev add failed\n");
+               goto err_class_destr;
+       }
+
+       drv_device = device_create(adf_ctl_drv.drv_class, NULL,
+                                  MKDEV(adf_ctl_drv.major, 0),
+                                  NULL, DEVICE_NAME);
+       if (IS_ERR(drv_device)) {
+               pr_err("QAT: failed to create device\n");
+               goto err_cdev_del;
+       }
+       return 0;
+err_cdev_del:
+       cdev_del(&adf_ctl_drv.drv_cdev);
+err_class_destr:
+       class_destroy(adf_ctl_drv.drv_class);
+err_chrdev_unreg:
+       unregister_chrdev_region(dev_id, 1);
+       return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+                                  unsigned long arg)
+{
+       struct adf_user_cfg_ctl_data *cfg_data;
+
+       cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+       if (!cfg_data)
+               return -ENOMEM;
+
+       /* Initialize device id to NO DEVICE as 0 is a valid device id */
+       cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+       if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+               pr_err("QAT: failed to copy from user cfg_data.\n");
+               kfree(cfg_data);
+               return -EIO;
+       }
+
+       *ctl_data = cfg_data;
+       return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+                                 const char *section,
+                                 const struct adf_user_cfg_key_val *key_val)
+{
+       if (key_val->type == ADF_HEX) {
+               long *ptr = (long *)key_val->val;
+               long val = *ptr;
+
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, (void *)val,
+                                               key_val->type)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to add hex keyvalue.\n");
+                       return -EFAULT;
+               }
+       } else {
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, key_val->val,
+                                               key_val->type)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to add keyvalue.\n");
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+                                  struct adf_user_cfg_ctl_data *ctl_data)
+{
+       struct adf_user_cfg_key_val key_val;
+       struct adf_user_cfg_key_val *params_head;
+       struct adf_user_cfg_section section, *section_head;
+       int i, j;
+
+       section_head = ctl_data->config_section;
+
+       for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
+               if (copy_from_user(&section, (void __user *)section_head,
+                                  sizeof(*section_head))) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to copy section info\n");
+                       goto out_err;
+               }
+
+               if (adf_cfg_section_add(accel_dev, section.name)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "failed to add section.\n");
+                       goto out_err;
+               }
+
+               params_head = section.params;
+
+               for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
+                       if (copy_from_user(&key_val, (void __user *)params_head,
+                                          sizeof(key_val))) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Failed to copy keyvalue.\n");
+                               goto out_err;
+                       }
+                       if (adf_add_key_value_data(accel_dev, section.name,
+                                                  &key_val)) {
+                               goto out_err;
+                       }
+                       params_head = key_val.next;
+               }
+               section_head = section.next;
+       }
+       return 0;
+out_err:
+       adf_cfg_del_all(accel_dev);
+       return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+       struct adf_accel_dev *dev;
+
+       list_for_each_entry(dev, adf_devmgr_get_head(), list) {
+               if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+                               dev_info(&GET_DEV(dev),
+                                        "device qat_dev%d is busy\n",
+                                        dev->accel_id);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void adf_ctl_stop_devices(u32 id)
+{
+       struct adf_accel_dev *accel_dev;
+
+       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (!adf_dev_started(accel_dev))
+                               continue;
+
+                       /* First stop all VFs */
+                       if (!accel_dev->is_vf)
+                               continue;
+
+                       adf_dev_down(accel_dev, false);
+               }
+       }
+
+       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (!adf_dev_started(accel_dev))
+                               continue;
+
+                       adf_dev_down(accel_dev, false);
+               }
+       }
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+                                 unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       if (adf_devmgr_verify_id(ctl_data->device_id)) {
+               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+       if (ret)
+               goto out;
+
+       if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+               pr_info("QAT: Stopping all acceleration devices.\n");
+       else
+               pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+                       ctl_data->device_id);
+
+       adf_ctl_stop_devices(ctl_data->device_id);
+
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+                                  unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       ret = -ENODEV;
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev)
+               goto out;
+
+       dev_info(&GET_DEV(accel_dev),
+                "Starting acceleration device qat_dev%d.\n",
+                ctl_data->device_id);
+
+       ret = adf_dev_up(accel_dev, false);
+
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+                       ctl_data->device_id);
+               adf_dev_down(accel_dev, false);
+       }
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+                                        unsigned long arg)
+{
+       u32 num_devices = 0;
+
+       adf_devmgr_get_num_dev(&num_devices);
+       if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       struct adf_hw_device_data *hw_data;
+       struct adf_dev_status_info dev_info;
+       struct adf_accel_dev *accel_dev;
+
+       if (copy_from_user(&dev_info, (void __user *)arg,
+                          sizeof(struct adf_dev_status_info))) {
+               pr_err("QAT: failed to copy from user.\n");
+               return -EFAULT;
+       }
+
+       accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+       if (!accel_dev)
+               return -ENODEV;
+
+       hw_data = accel_dev->hw_device;
+       dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+       dev_info.num_ae = hw_data->get_num_aes(hw_data);
+       dev_info.num_accel = hw_data->get_num_accels(hw_data);
+       dev_info.num_logical_accel = hw_data->num_logical_accel;
+       dev_info.banks_per_accel = hw_data->num_banks
+                                       / hw_data->num_logical_accel;
+       strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+       dev_info.instance_id = hw_data->instance_id;
+       dev_info.type = hw_data->dev_class->type;
+       dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+       dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+       dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+       if (copy_to_user((void __user *)arg, &dev_info,
+                        sizeof(struct adf_dev_status_info))) {
+               dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+       int ret;
+
+       if (mutex_lock_interruptible(&adf_ctl_lock))
+               return -EFAULT;
+
+       switch (cmd) {
+       case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+               ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+               break;
+
+       case IOCTL_STOP_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+               break;
+
+       case IOCTL_START_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+               break;
+
+       case IOCTL_GET_NUM_DEVICES:
+               ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+               break;
+
+       case IOCTL_STATUS_ACCEL_DEV:
+               ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+               break;
+       default:
+               pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
+               ret = -EFAULT;
+               break;
+       }
+       mutex_unlock(&adf_ctl_lock);
+       return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+       if (adf_chr_drv_create())
+               goto err_chr_dev;
+
+       if (adf_init_misc_wq())
+               goto err_misc_wq;
+
+       if (adf_init_aer())
+               goto err_aer;
+
+       if (adf_init_pf_wq())
+               goto err_pf_wq;
+
+       if (adf_init_vf_wq())
+               goto err_vf_wq;
+
+       if (qat_crypto_register())
+               goto err_crypto_register;
+
+       if (qat_compression_register())
+               goto err_compression_register;
+
+       return 0;
+
+err_compression_register:
+       qat_crypto_unregister();
+err_crypto_register:
+       adf_exit_vf_wq();
+err_vf_wq:
+       adf_exit_pf_wq();
+err_pf_wq:
+       adf_exit_aer();
+err_aer:
+       adf_exit_misc_wq();
+err_misc_wq:
+       adf_chr_drv_destroy();
+err_chr_dev:
+       mutex_destroy(&adf_ctl_lock);
+       return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+       adf_chr_drv_destroy();
+       adf_exit_misc_wq();
+       adf_exit_aer();
+       adf_exit_vf_wq();
+       adf_exit_pf_wq();
+       qat_crypto_unregister();
+       qat_compression_unregister();
+       adf_clean_vf_map(false);
+       mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS_CRYPTO("intel_qat");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_INTERNAL);
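
To make the control flow above concrete: a hedged userspace sketch of starting device 0 through this driver, assuming the device has already been configured. The start handler consumes only device_id, so no config list is attached:

struct adf_user_cfg_ctl_data cfg = { .device_id = 0 };
int fd = open("/dev/qat_adf_ctl", O_RDONLY);

if (fd >= 0) {
	if (ioctl(fd, IOCTL_START_ACCEL_DEV, &cfg))
		perror("IOCTL_START_ACCEL_DEV");
	close(fd);
}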
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
new file mode 100644 (file)
index 0000000..86ee36f
--- /dev/null
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
+static DEFINE_MUTEX(table_lock);
+static u32 num_devices;
+static u8 id_map[ADF_MAX_DEVICES];
+
+struct vf_id_map {
+       u32 bdf;
+       u32 id;
+       u32 fake_id;
+       bool attached;
+       struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
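+       /* Equivalent to 8 * (slot - 1) + func: a flat per-bus VF index,
+        * assuming eight virtual functions per PCI slot.
+        */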
+       return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+               PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+               (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+       return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+       struct list_head *itr;
+
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+
+               if (ptr->bdf == bdf)
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+       struct list_head *itr;
+
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+               if (ptr->fake_id == fake)
+                       return ptr->id;
+       }
+       return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ * @vf: flag indicating whether mappings are cleaned
+ *     for VFs only or for both VFs and PFs
+ *
+ * Function cleans the internal id mappings for virtual functions.
+ */
+void adf_clean_vf_map(bool vf)
+{
+       struct vf_id_map *map;
+       struct list_head *ptr, *tmp;
+
+       mutex_lock(&table_lock);
+       list_for_each_safe(ptr, tmp, &vfs_table) {
+               map = list_entry(ptr, struct vf_id_map, list);
+               if (map->bdf != -1) {
+                       id_map[map->id] = 0;
+                       num_devices--;
+               }
+
+               if (vf && map->bdf == -1)
+                       continue;
+
+               list_del(ptr);
+               kfree(map);
+       }
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+       struct adf_hw_device_class *class = hw_data->dev_class;
+       struct list_head *itr;
+       int i = 0;
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->hw_device->dev_class == class)
+                       ptr->hw_device->instance_id = i++;
+
+               if (i == class->instances)
+                       break;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
+static unsigned int adf_find_free_id(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ADF_MAX_DEVICES; i++) {
+               if (!id_map[i]) {
+                       id_map[i] = 1;
+                       return i;
+               }
+       }
+       return ADF_MAX_DEVICES + 1;
+}
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:                Corresponding PF if the accel_dev is a VF
+ *
+ * Function adds acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf)
+{
+       struct list_head *itr;
+       int ret = 0;
+
+       if (num_devices == ADF_MAX_DEVICES) {
+               dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
+                       ADF_MAX_DEVICES);
+               return -EFAULT;
+       }
+
+       mutex_lock(&table_lock);
+       atomic_set(&accel_dev->ref_count, 0);
+
+       /* PF on host, or VF on guest (no PF partner device) */
+       if (!accel_dev->is_vf || !pf) {
+               struct vf_id_map *map;
+
+               list_for_each(itr, &accel_table) {
+                       struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+                       if (ptr == accel_dev) {
+                               ret = -EEXIST;
+                               goto unlock;
+                       }
+               }
+
+               list_add_tail(&accel_dev->list, &accel_table);
+               accel_dev->accel_id = adf_find_free_id();
+               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               num_devices++;
+               map = kzalloc(sizeof(*map), GFP_KERNEL);
+               if (!map) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               map->bdf = ~0;
+               map->id = accel_dev->accel_id;
+               map->fake_id = map->id;
+               map->attached = true;
+               list_add_tail(&map->list, &vfs_table);
+       } else if (accel_dev->is_vf && pf) {
+               /* VF on host */
+               struct vf_id_map *map;
+
+               map = adf_find_vf(adf_get_vf_num(accel_dev));
+               if (map) {
+                       struct vf_id_map *next;
+
+                       accel_dev->accel_id = map->id;
+                       list_add_tail(&accel_dev->list, &accel_table);
+                       map->fake_id++;
+                       map->attached = true;
+                       next = list_next_entry(map, list);
+                       while (next && &next->list != &vfs_table) {
+                               next->fake_id++;
+                               next = list_next_entry(next, list);
+                       }
+
+                       ret = 0;
+                       goto unlock;
+               }
+
+               map = kzalloc(sizeof(*map), GFP_KERNEL);
+               if (!map) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               accel_dev->accel_id = adf_find_free_id();
+               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
+                       kfree(map);
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               num_devices++;
+               list_add_tail(&accel_dev->list, &accel_table);
+               map->bdf = adf_get_vf_num(accel_dev);
+               map->id = accel_dev->accel_id;
+               map->fake_id = map->id;
+               map->attached = true;
+               list_add_tail(&map->list, &vfs_table);
+       }
+       mutex_init(&accel_dev->state_lock);
+unlock:
+       mutex_unlock(&table_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
+
+struct list_head *adf_devmgr_get_head(void)
+{
+       return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ * @pf:                Corresponding PF if the accel_dev is a VF
+ *
+ * Function removes acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+                      struct adf_accel_dev *pf)
+{
+       mutex_lock(&table_lock);
+       /* PF on host, or VF on guest (no PF partner device) */
+       if (!accel_dev->is_vf || !pf) {
+               id_map[accel_dev->accel_id] = 0;
+               num_devices--;
+       } else if (accel_dev->is_vf && pf) {
+               struct vf_id_map *map, *next;
+
+               map = adf_find_vf(adf_get_vf_num(accel_dev));
+               if (!map) {
+                       dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+                       goto unlock;
+               }
+               map->fake_id--;
+               map->attached = false;
+               next = list_next_entry(map, list);
+               while (next && &next->list != &vfs_table) {
+                       next->fake_id--;
+                       next = list_next_entry(next, list);
+               }
+       }
+unlock:
+       mutex_destroy(&accel_dev->state_lock);
+       list_del(&accel_dev->list);
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+       struct adf_accel_dev *dev = NULL;
+
+       if (!list_empty(&accel_table))
+               dev = list_first_entry(&accel_table, struct adf_accel_dev,
+                                      list);
+       return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to PCI device.
+ *
+ * Function returns acceleration device associated with the given PCI device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
+{
+       struct list_head *itr;
+       int real_id;
+
+       mutex_lock(&table_lock);
+       real_id = adf_get_vf_real_id(id);
+       if (real_id < 0)
+               goto unlock;
+
+       id = real_id;
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+               if (ptr->accel_id == id) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+unlock:
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+
+int adf_devmgr_verify_id(u32 id)
+{
+       if (id == ADF_CFG_ALL_DEVICES)
+               return 0;
+
+       if (adf_devmgr_get_dev_by_id(id))
+               return 0;
+
+       return -ENODEV;
+}
+
+static int adf_get_num_detached_vfs(void)
+{
+       struct list_head *itr;
+       int vfs = 0;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &vfs_table) {
+               struct vf_id_map *ptr =
+                       list_entry(itr, struct vf_id_map, list);
+               if (ptr->bdf != ~0 && !ptr->attached)
+                       vfs++;
+       }
+       mutex_unlock(&table_lock);
+       return vfs;
+}
+
+void adf_devmgr_get_num_dev(u32 *num)
+{
+       *num = num_devices - adf_get_num_detached_vfs();
+}
+
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+       return atomic_read(&accel_dev->ref_count) != 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
+
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount; on the first reference taken while
+ * the device is in use, also take a reference on the owning module.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when the module refcount could not
+ * be taken.
+ */
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+               if (!try_module_get(accel_dev->owner))
+                       return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_get);
+
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount; when the last reference is dropped,
+ * also drop the reference on the owning module.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+               module_put(accel_dev->owner);
+}
+EXPORT_SYMBOL_GPL(adf_dev_put);
+
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
+
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
+EXPORT_SYMBOL_GPL(adf_dev_started);
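
A minimal sketch of the get/put discipline the refcount helpers above expect from device-specific drivers (do_work() is a hypothetical stand-in):

static int example_use_device(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Fails with -EFAULT if the owning module is unloading */
	ret = adf_dev_get(accel_dev);
	if (ret)
		return ret;

	ret = do_work(accel_dev);	/* hypothetical */

	adf_dev_put(accel_dev);
	return ret;
}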
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
new file mode 100644 (file)
index 0000000..eeb30da
--- /dev/null
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_config.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "qat_compression.h"
+#include "adf_transport_access_macros.h"
+
+static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_crypto(accel_dev))
+               instances = min(cpus, banks);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+                        i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               val = 128;
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 2;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 8;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 10;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+                                         &val, ADF_DEC);
+       if (ret)
+               goto err;
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+       return ret;
+}
+
+static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int banks = GET_MAX_BANKS(accel_dev);
+       int cpus = num_online_cpus();
+       unsigned long val;
+       int instances;
+       int ret;
+       int i;
+
+       if (adf_hw_dev_has_compression(accel_dev))
+               instances = min(cpus, banks);
+       else
+               instances = 0;
+
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 6;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+
+               val = 14;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                                 key, &val, ADF_DEC);
+               if (ret)
+                       goto err;
+       }
+
+       val = i;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+       return ret;
+}
+
+/**
+ * adf_gen2_dev_config() - create dev config required to create instances
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates device configuration required to create instances
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+       if (ret)
+               goto err;
+
+       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+       if (ret)
+               goto err;
+
+       ret = adf_gen2_crypto_dev_config(accel_dev);
+       if (ret)
+               goto err;
+
+       ret = adf_gen2_comp_dev_config(accel_dev);
+       if (ret)
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       return ret;
+
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
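
As a worked example of what the two helpers above produce: for a device configured with one crypto and one compression instance, the KERNEL section ends up with keys such as Cy0BankAsymNumber = 0, Cy0BankSymNumber = 0, Cy0CoreAffinity = 0, Cy0NumConcurrentAsymRequests = 128, Cy0NumConcurrentSymRequests = 512, Cy0RingAsymTx = 0, Cy0RingSymTx = 2, Cy0RingAsymRx = 8, Cy0RingSymRx = 10, NumberCyInstances = 1, Dc0BankDcNumber = 0, Dc0NumConcurrentRequests = 512, Dc0RingTx = 6, Dc0RingRx = 14 and NumberDcInstances = 1, plus a Bank0InterruptCoalescingTimerNs entry in the Accelerator0 section.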
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.h
new file mode 100644 (file)
index 0000000..4bf9da2
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_CONFIG_H_
+#define ADF_GEN2_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
new file mode 100644 (file)
index 0000000..47261b1
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_gen2_dc.h"
+#include "icp_qat_fw_comp.h"
+
+static void qat_comp_build_deflate_ctx(void *ctx)
+{
+       struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+       struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+
+       memset(req_tmpl, 0, sizeof(*req_tmpl));
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       header->serv_specif_flags =
+               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+                                           ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+       cd_pars->u.sl.comp_slice_cfg_word[0] =
+               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
+                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+       req_pars->req_par_flags =
+               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+                                                     ICP_QAT_FW_COMP_EOP,
+                                                     ICP_QAT_FW_COMP_BFINAL,
+                                                     ICP_QAT_FW_COMP_CNV,
+                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
+                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
+                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
+                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
+
+       /* Fill second half of the template for decompression */
+       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+       req_tmpl++;
+       header = &req_tmpl->comn_hdr;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+       cd_pars = &req_tmpl->cd_pars;
+       cd_pars->u.sl.comp_slice_cfg_word[0] =
+               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
+                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+       dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
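
Note that the builder above writes two templates back to back (the compression template, then its decompression twin at req_tmpl + 1), so the destination buffer must hold both. A minimal allocation sketch, assuming a kernel context with slab available:

/* Room for the compress template plus the decompress template */
void *ctx = kzalloc(2 * sizeof(struct icp_qat_fw_comp_req), GFP_KERNEL);

if (ctx)
	qat_comp_build_deflate_ctx(ctx);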
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
new file mode 100644 (file)
index 0000000..6eae023
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN2_DC_H
+#define ADF_GEN2_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
new file mode 100644 (file)
index 0000000..d188454
--- /dev/null
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include "adf_common_drv.h"
+#include "adf_gen2_hw_data.h"
+#include "icp_qat_hw.h"
+#include <linux/pci.h>
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
+{
+       if (!self || !self->accel_mask)
+               return 0;
+
+       return hweight16(self->accel_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
+
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
+{
+       if (!self || !self->ae_mask)
+               return 0;
+
+       return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
+
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned long accel_mask = hw_data->accel_mask;
+       unsigned long ae_mask = hw_data->ae_mask;
+       unsigned int val, i;
+
+       /* Enable Accel Engine error detection & correction */
+       for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
+               val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
+               val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+       }
+
+       /* Enable shared memory error detection & correction */
+       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
+               val |= ADF_GEN2_ERRSSMSH_EN;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
+               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
+               val |= ADF_GEN2_ERRSSMSH_EN;
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
+
+void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
+                          int num_a_regs, int num_b_regs)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 reg;
+       int i;
+
+       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
+       for (i = 0; i < num_a_regs; i++) {
+               reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
+               if (enable)
+                       reg |= AE2FUNCTION_MAP_VALID;
+               else
+                       reg &= ~AE2FUNCTION_MAP_VALID;
+               WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
+       }
+
+       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
+       for (i = 0; i < num_b_regs; i++) {
+               reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
+               if (enable)
+                       reg |= AE2FUNCTION_MAP_VALID;
+               else
+                       reg &= ~AE2FUNCTION_MAP_VALID;
+               WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
+
+void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
+{
+       admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
+       admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
+       admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
+
+void adf_gen2_get_arb_info(struct arb_info *arb_info)
+{
+       arb_info->arb_cfg = ADF_ARB_CONFIG;
+       arb_info->arb_offset = ADF_ARB_OFFSET;
+       arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
+
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *addr = adf_get_pmisc_base(accel_dev);
+       u32 val;
+
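+       /* When SR-IOV is enabled (vf_info set) the PF leaves all bundle
+        * interrupts masked; otherwise enable one bit per bank.
+        */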
+       val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
+
+       /* Enable bundle and misc interrupts */
+       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
+       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
+
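+/* The helpers below are thin wrappers around the gen2 ring CSR layout
+ * macros, exposed through the generation-agnostic adf_hw_csr_ops table
+ * filled in by adf_gen2_init_hw_csr_ops().
+ */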
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+       return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+       return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
+                                 u32 ring, u32 value)
+{
+       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               dma_addr_t addr)
+{
+       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
+                                u32 value)
+{
+       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+                                 u32 value)
+{
+       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+                                      u32 value)
+{
+       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+                                     u32 value)
+{
+       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+       csr_ops->read_csr_ring_head = read_csr_ring_head;
+       csr_ops->write_csr_ring_head = write_csr_ring_head;
+       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+       csr_ops->read_csr_e_stat = read_csr_e_stat;
+       csr_ops->write_csr_ring_config = write_csr_ring_config;
+       csr_ops->write_csr_ring_base = write_csr_ring_base;
+       csr_ops->write_csr_int_flag = write_csr_int_flag;
+       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
+
+u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+       u32 straps = hw_data->straps;
+       u32 fuses = hw_data->fuses;
+       u32 legfuses;
+       u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+                          ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+                          ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+                          ICP_ACCEL_CAPABILITIES_CIPHER |
+                          ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
+
+       /* A set bit in legfuses means the feature is OFF in this SKU */
+       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
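+       /* PKE and compression may additionally be power gated via soft
+        * straps or fuses.
+        */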
+       if ((straps | fuses) & ADF_POWERGATE_PKE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+
+       if ((straps | fuses) & ADF_POWERGATE_DC)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       return capabilities;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+       u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+       unsigned long accel_mask = hw_data->accel_mask;
+       u32 i = 0;
+
+       /* Configure the watchdog timers for each enabled accelerator */
+       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+               /* Enable WDT for sym and dc */
+               ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+               /* Enable WDT for pke */
+               ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+       }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
new file mode 100644 (file)
index 0000000..e4bc075
--- /dev/null
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2020 Intel Corporation */
+#ifndef ADF_GEN2_HW_DATA_H_
+#define ADF_GEN2_HW_DATA_H_
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+
+/* Transport access */
+#define ADF_BANK_INT_SRC_SEL_MASK_0    0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X    0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG       0x000
+#define ADF_RING_CSR_RING_LBASE                0x040
+#define ADF_RING_CSR_RING_UBASE                0x080
+#define ADF_RING_CSR_RING_HEAD         0x0C0
+#define ADF_RING_CSR_RING_TAIL         0x100
+#define ADF_RING_CSR_E_STAT            0x14C
+#define ADF_RING_CSR_INT_FLAG          0x170
+#define ADF_RING_CSR_INT_SRCSEL                0x174
+#define ADF_RING_CSR_INT_SRCSEL_2      0x178
+#define ADF_RING_CSR_INT_COL_EN                0x17C
+#define ADF_RING_CSR_INT_COL_CTL       0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_BUNDLE_SIZE           0x1000
+#define ADF_GEN2_RX_RINGS_OFFSET       8
+#define ADF_GEN2_TX_RINGS_MASK         0xFF
+
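+/* The ring base CSR holds the 64-byte-aligned base address shifted right
+ * by 6, with the low 'size' bits cleared, i.e. the base must be aligned
+ * to the ring size.
+ */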
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+       u32 l_base = 0, u_base = 0; \
+       l_base = (u32)((value) & 0xFFFFFFFF); \
+       u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_FLAG, value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_COL_CTL, \
+                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
+                  ADF_RING_CSR_INT_FLAG_AND_COL, value)
+
+/* AE to function map */
+#define AE2FUNCTION_MAP_A_OFFSET       (0x3A400 + 0x190)
+#define AE2FUNCTION_MAP_B_OFFSET       (0x3A400 + 0x310)
+#define AE2FUNCTION_MAP_REG_SIZE       4
+#define AE2FUNCTION_MAP_VALID          BIT(7)
+
+#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
+       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index))
+#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
+       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
+#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
+       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index))
+#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
+       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
+                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
+
+/* Admin Interface Offsets */
+#define ADF_ADMINMSGUR_OFFSET  (0x3A000 + 0x574)
+#define ADF_ADMINMSGLR_OFFSET  (0x3A000 + 0x578)
+#define ADF_MAILBOX_BASE_OFFSET        0x20970
+
+/* Arbiter configuration */
+#define ADF_ARB_OFFSET                 0x30000
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET   0x180
+#define ADF_ARB_CONFIG                 (BIT(31) | BIT(6) | BIT(0))
+#define ADF_ARB_REG_SLOT               0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET    0x19C
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+       (ADF_ARB_REG_SLOT * (index)), value)
+
+/* Power gating */
+#define ADF_POWERGATE_DC               BIT(23)
+#define ADF_POWERGATE_PKE              BIT(24)
+
+/* Default ring mapping */
+#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
+       (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+        CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+        UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+          COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but these
+ * values correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE      0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x2000000
+#define ADF_SSMWDT_OFFSET              0x54
+#define ADF_SSMWDTPKE_OFFSET           0x58
+#define ADF_SSMWDT(i)          (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i)       (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
+/* Error detection and correction */
+#define ADF_GEN2_AE_CTX_ENABLES(i)     ((i) * 0x1000 + 0x20818)
+#define ADF_GEN2_AE_MISC_CONTROL(i)    ((i) * 0x1000 + 0x20960)
+#define ADF_GEN2_ENABLE_AE_ECC_ERR     BIT(28)
+#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR     (BIT(24) | BIT(12))
+#define ADF_GEN2_UERRSSMSH(i)          ((i) * 0x4000 + 0x18)
+#define ADF_GEN2_CERRSSMSH(i)          ((i) * 0x4000 + 0x10)
+#define ADF_GEN2_ERRSSMSH_EN           BIT(3)
+
+/* Interrupts */
+#define ADF_GEN2_SMIAPF0_MASK_OFFSET    (0x3A000 + 0x28)
+#define ADF_GEN2_SMIAPF1_MASK_OFFSET    (0x3A000 + 0x30)
+#define ADF_GEN2_SMIA1_MASK             0x1
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
+void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
+                          int num_a_regs, int num_b_regs);
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
+void adf_gen2_get_arb_info(struct arb_info *arb_info);
+void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
+u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.c
new file mode 100644 (file)
index 0000000..70ef119
--- /dev/null
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen2_pfvf.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_vf_proto.h"
+#include "adf_pfvf_utils.h"
+
+/* VF2PF interrupts */
+#define ADF_GEN2_VF_MSK                        0xFFFF
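+/* The 16 VF2PF interrupt bits live in bits 24:9 of ERRSOU3/ERRMSK3; these
+ * macros translate between that layout and a plain vf_mask[15:0].
+ */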
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask)        (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
+
+#define ADF_GEN2_PF_PF2VF_OFFSET(i)    (0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_GEN2_VF_PF2VF_OFFSET       0x200
+
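+/* Arbitrary pattern written into the remote half of the shared CSR to mark
+ * it as in use; bit 0 is left out of the mask as it carries the PFVF
+ * interrupt bit.
+ */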
+#define ADF_GEN2_CSR_IN_USE            0x6AC2
+#define ADF_GEN2_CSR_IN_USE_MASK       0xFFFE
+
+enum gen2_csr_pos {
+       ADF_GEN2_CSR_PF2VF_OFFSET       =  0,
+       ADF_GEN2_CSR_VF2PF_OFFSET       = 16,
+};
+
+#define ADF_PFVF_GEN2_MSGTYPE_SHIFT    2
+#define ADF_PFVF_GEN2_MSGTYPE_MASK     0x0F
+#define ADF_PFVF_GEN2_MSGDATA_SHIFT    6
+#define ADF_PFVF_GEN2_MSGDATA_MASK     0x3FF
+
+static const struct pfvf_csr_format csr_gen2_fmt = {
+       { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
+       { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
+};
+
+#define ADF_PFVF_MSG_RETRY_DELAY       5
+#define ADF_PFVF_MSG_MAX_RETRIES       3
+
+static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
+{
+       return ADF_GEN2_PF_PF2VF_OFFSET(i);
+}
+
+static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
+{
+       return ADF_GEN2_VF_PF2VF_OFFSET;
+}
+
+static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+       if (vf_mask & ADF_GEN2_VF_MSK) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                         & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+       }
+}
+
+static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       /* Disable the VF2PF interrupts for all VFs, 0 through 15 */
+       u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                 | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+}
+
+static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 sources, disabled, pending;
+       u32 errsou3, errmsk3;
+
+       /* Get the interrupt sources triggered by VFs */
+       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+       sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+       if (!sources)
+               return 0;
+
+       /* Get the already disabled interrupts */
+       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+       disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+       pending = sources & ~disabled;
+       if (!pending)
+               return 0;
+
+       /* Due to HW limitations, when disabling the interrupts, we can't
+        * just disable the requested sources, as this would lead to missed
+        * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
+        * To work around it, disable all and re-enable only the sources that
+        * did not trigger and were not already disabled. Re-enabling will
+        * trigger a new interrupt for the sources that have changed in the
+        * meantime, if any.
+        */
+       errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+       errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+
+       /* Return the sources of the (new) interrupt(s) */
+       return pending;
+}
+
+static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
+{
+       return ADF_PFVF_INT << offset;
+}
+
+static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
+{
+       return (csr_msg & 0xFFFF) << offset;
+}
+
+static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
+{
+       return (csr_val >> offset) & 0xFFFF;
+}
+
+static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
+{
+       return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
+}
+
+static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+       *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
+}
+
+static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
+{
+       *msg |= (ADF_GEN2_CSR_IN_USE << offset);
+}
+
+static bool is_legacy_user_pfvf_message(u32 msg)
+{
+       return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
+}
+
+static bool is_pf2vf_notification(u8 msg_type)
+{
+       switch (msg_type) {
+       case ADF_PF2VF_MSGTYPE_RESTARTING:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool is_vf2pf_notification(u8 msg_type)
+{
+       switch (msg_type) {
+       case ADF_VF2PF_MSGTYPE_INIT:
+       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+               return true;
+       default:
+               return false;
+       }
+}
+
+struct pfvf_gen2_params {
+       u32 pfvf_offset;
+       struct mutex *csr_lock; /* lock preventing concurrent access of CSR */
+       enum gen2_csr_pos local_offset;
+       enum gen2_csr_pos remote_offset;
+       bool (*is_notification_message)(u8 msg_type);
+       u8 compat_ver;
+};
+
+static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
+                             struct pfvf_message msg,
+                             struct pfvf_gen2_params *params)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       enum gen2_csr_pos remote_offset = params->remote_offset;
+       enum gen2_csr_pos local_offset = params->local_offset;
+       unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
+       struct mutex *lock = params->csr_lock;
+       u32 pfvf_offset = params->pfvf_offset;
+       u32 int_bit;
+       u32 csr_val;
+       u32 csr_msg;
+       int ret;
+
+       /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
+        * allows us to build and read messages as if they were all 0 based.
+        * However, send and receive share a single 32-bit register,
+        * so we need to shift and/or mask the message half before decoding
+        * it and after encoding it. Which one to shift depends on the
+        * direction.
+        */
+
+       int_bit = gen2_csr_get_int_bit(local_offset);
+
+       csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
+       if (unlikely(!csr_msg))
+               return -EINVAL;
+
+       /* Prepare for CSR format, shifting the wire message in place and
+        * setting the in use pattern
+        */
+       csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
+       gen2_csr_set_in_use(&csr_msg, remote_offset);
+
+       mutex_lock(lock);
+
+start:
+       /* Check if the PFVF CSR is in use by remote function */
+       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+       if (gen2_csr_is_in_use(csr_val, local_offset)) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "PFVF CSR in use by remote function\n");
+               goto retry;
+       }
+
+       /* Attempt to get ownership of the PFVF CSR */
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
+
+       /* Wait for the remote function to confirm receipt of the message */
+       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
+                               ADF_PFVF_MSG_ACK_DELAY_US,
+                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+                               true, pmisc_addr, pfvf_offset);
+       if (unlikely(ret < 0)) {
+               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+               csr_val &= ~int_bit;
+       }
+
+       /* For fire-and-forget notifications, the receiver does not clear
+        * the in-use pattern. This is used to detect collisions.
+        */
+       if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
+               /* Collision must have overwritten the message */
+               dev_err(&GET_DEV(accel_dev),
+                       "Collision on notification - PFVF CSR overwritten by remote function\n");
+               goto retry;
+       }
+
+       /* If the far side did not clear the in-use pattern it is either
+        * 1) Notification - message left intact to detect collision
+        * 2) Older protocol (compatibility version < 3) on the far side
+        *    where the sender is responsible for clearing the in-use
+        *    pattern after the receiver has acknowledged receipt.
+        * In either case, clear the in-use pattern now.
+        */
+       if (gen2_csr_is_in_use(csr_val, remote_offset)) {
+               gen2_csr_clear_in_use(&csr_val, remote_offset);
+               ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+       }
+
+out:
+       mutex_unlock(lock);
+       return ret;
+
+retry:
+       if (--retries) {
+               msleep(ADF_PFVF_MSG_RETRY_DELAY);
+               goto start;
+       } else {
+               ret = -EBUSY;
+               goto out;
+       }
+}
+
+static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
+                                             struct pfvf_gen2_params *params)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       enum gen2_csr_pos remote_offset = params->remote_offset;
+       enum gen2_csr_pos local_offset = params->local_offset;
+       u32 pfvf_offset = params->pfvf_offset;
+       struct pfvf_message msg = { 0 };
+       u32 int_bit;
+       u32 csr_val;
+       u16 csr_msg;
+
+       int_bit = gen2_csr_get_int_bit(local_offset);
+
+       /* Read message */
+       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+       if (!(csr_val & int_bit)) {
+               dev_info(&GET_DEV(accel_dev),
+                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+               return msg;
+       }
+
+       /* Extract the message from the CSR */
+       csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
+
+       /* Ignore legacy non-system (non-kernel) messages */
+       if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Ignored non-system message (0x%.8x);\n", csr_val);
+               /* Because this must be a legacy message, the sender is
+                * responsible for clearing the in-use pattern, so don't
+                * clear it here.
+                */
+               return msg;
+       }
+
+       /* Return the pfvf_message format */
+       msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
+
+       /* The in-use pattern is not cleared for notifications (so that
+        * it can be used for collision detection) or for older implementations.
+        */
+       if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
+           !params->is_notification_message(msg.type))
+               gen2_csr_clear_in_use(&csr_val, remote_offset);
+
+       /* To ACK, clear the INT bit */
+       csr_val &= ~int_bit;
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
+
+       return msg;
+}
+
+static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                              u32 pfvf_offset, struct mutex *csr_lock)
+{
+       struct pfvf_gen2_params params = {
+               .csr_lock = csr_lock,
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .is_notification_message = is_pf2vf_notification,
+       };
+
+       return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                              u32 pfvf_offset, struct mutex *csr_lock)
+{
+       struct pfvf_gen2_params params = {
+               .csr_lock = csr_lock,
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .is_notification_message = is_vf2pf_notification,
+       };
+
+       return adf_gen2_pfvf_send(accel_dev, msg, &params);
+}
+
+static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
+                                              u32 pfvf_offset, u8 compat_ver)
+{
+       struct pfvf_gen2_params params = {
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .is_notification_message = is_pf2vf_notification,
+               .compat_ver = compat_ver,
+       };
+
+       return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
+                                              u32 pfvf_offset, u8 compat_ver)
+{
+       struct pfvf_gen2_params params = {
+               .pfvf_offset = pfvf_offset,
+               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
+               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
+               .is_notification_message = is_vf2pf_notification,
+               .compat_ver = compat_ver,
+       };
+
+       return adf_gen2_pfvf_recv(accel_dev, &params);
+}
+
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
+       pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
+       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
+       pfvf_ops->send_msg = adf_gen2_pf2vf_send;
+       pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
+
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
+       pfvf_ops->send_msg = adf_gen2_vf2pf_send;
+       pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
new file mode 100644 (file)
index 0000000..a716545
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN2_PFVF_H
+#define ADF_GEN2_PFVF_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+
+#if defined(CONFIG_PCI_IOV)
+void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+
+static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN2_PFVF_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
new file mode 100644 (file)
index 0000000..5859238
--- /dev/null
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
+#include "adf_gen4_dc.h"
+
+static void qat_comp_build_deflate(void *ctx)
+{
+       struct icp_qat_fw_comp_req *req_tmpl =
+                               (struct icp_qat_fw_comp_req *)ctx;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+       struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
+       struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
+       struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
+       u32 upper_val;
+       u32 lower_val;
+
+       memset(req_tmpl, 0, sizeof(*req_tmpl));
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       header->serv_specif_flags =
+               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
+                                           ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+       hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+       hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+       hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+       hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+       hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+       hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+       hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+       hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+       upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+       lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+       cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
+       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
+       req_pars->req_par_flags =
+               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
+                                                     ICP_QAT_FW_COMP_EOP,
+                                                     ICP_QAT_FW_COMP_BFINAL,
+                                                     ICP_QAT_FW_COMP_CNV,
+                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
+                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
+                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
+                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
+
+       /* Fill second half of the template for decompression */
+       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
+       req_tmpl++;
+       header = &req_tmpl->comn_hdr;
+       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+       cd_pars = &req_tmpl->cd_pars;
+
+       hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+       lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+       cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+       dc_ops->build_deflate_ctx = qat_comp_build_deflate;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
new file mode 100644 (file)
index 0000000..0b1a677
--- /dev/null
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_DC_H
+#define ADF_GEN4_DC_H
+
+#include "adf_accel_devices.h"
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+
+#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
new file mode 100644 (file)
index 0000000..3148a62
--- /dev/null
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+
+static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+{
+       return BUILD_RING_BASE_ADDR(addr, size);
+}
+
+static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
+}
+
+static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               u32 value)
+{
+       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
+}
+
+static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
+{
+       return READ_CSR_E_STAT(csr_base_addr, bank);
+}
+
+static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                                 u32 value)
+{
+       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
+}
+
+static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
+                               dma_addr_t addr)
+{
+       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
+}
+
+static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
+                              u32 value)
+{
+       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+{
+       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+}
+
+static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
+                                 u32 value)
+{
+       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
+}
+
+static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
+                                      u32 value)
+{
+       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
+}
+
+static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
+                                     u32 value)
+{
+       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
+       csr_ops->read_csr_ring_head = read_csr_ring_head;
+       csr_ops->write_csr_ring_head = write_csr_ring_head;
+       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
+       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
+       csr_ops->read_csr_e_stat = read_csr_e_stat;
+       csr_ops->write_csr_ring_config = write_csr_ring_config;
+       csr_ops->write_csr_ring_base = write_csr_ring_base;
+       csr_ops->write_csr_int_flag = write_csr_int_flag;
+       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
+       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+                                              u32 *lower)
+{
+       *lower = lower_32_bits(value);
+       *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+       u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+       u32 ssm_wdt_pke_high = 0;
+       u32 ssm_wdt_pke_low = 0;
+       u32 ssm_wdt_high = 0;
+       u32 ssm_wdt_low = 0;
+
+       /* Split the 64-bit WDT timer values into 32-bit halves for the
+        * MMIO writes to the 32-bit CSRs.
+        */
+       adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+       adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+                                   &ssm_wdt_pke_low);
+
+       /* Enable WDT for sym and dc */
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+       /* Enable WDT for pke */
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+
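+/* No-op enable_comms implementation, used as the pfvf_ops stub when
+ * CONFIG_PCI_IOV is not enabled.
+ */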
+int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+       u32 status;
+       int ret;
+
+       /* Write 1 to BIT(0) of the rpresetctl register to trigger the
+        * ring pair reset. The rpresetctl registers have no RW fields,
+        * so there is no need to preserve the other bits; just write
+        * directly.
+        */
+       ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+                  ADF_WQM_CSR_RPRESETCTL_RESET);
+
+       /* Read rpresetsts register and wait for rp reset to complete */
+       ret = read_poll_timeout(ADF_CSR_RD, status,
+                               status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+                               ADF_RPRESET_POLL_DELAY_US,
+                               ADF_RPRESET_POLL_TIMEOUT_US, true,
+                               csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+       if (!ret) {
+               /* When rp reset is done, clear rpresetsts */
+               ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+                          ADF_WQM_CSR_RPRESETSTS_STATUS);
+       }
+
+       return ret;
+}
+
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+       void __iomem *csr;
+       int ret;
+
+       if (bank_number >= hw_data->num_banks)
+               return -EINVAL;
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "ring pair reset for bank:%d\n", bank_number);
+
+       csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+       ret = reset_ring_pair(csr, bank_number);
+       if (ret)
+               dev_err(&GET_DEV(accel_dev),
+                       "ring pair reset failed (timeout)\n");
+       else
+               dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
new file mode 100644 (file)
index 0000000..4fb4b3d
--- /dev/null
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2020 Intel Corporation */
+#ifndef ADF_GEN4_HW_CSR_DATA_H_
+#define ADF_GEN4_HW_CSR_DATA_H_
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+
+/* Transport access */
+#define ADF_BANK_INT_SRC_SEL_MASK      0x44UL
+#define ADF_RING_CSR_RING_CONFIG       0x1000
+#define ADF_RING_CSR_RING_LBASE                0x1040
+#define ADF_RING_CSR_RING_UBASE                0x1080
+#define ADF_RING_CSR_RING_HEAD         0x0C0
+#define ADF_RING_CSR_RING_TAIL         0x100
+#define ADF_RING_CSR_E_STAT            0x14C
+#define ADF_RING_CSR_INT_FLAG          0x170
+#define ADF_RING_CSR_INT_SRCSEL                0x174
+#define ADF_RING_CSR_INT_COL_CTL       0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_CSR_INT_COL_EN                0x17C
+#define ADF_RING_CSR_ADDR_OFFSET       0x100000
+#define ADF_RING_BUNDLE_SIZE           0x2000
+
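+/* Unlike gen2, the gen4 ring base CSR takes the full byte address, aligned
+ * down to the ring size (the low 'size' + 6 bits are cleared).
+ */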
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value)  \
+do { \
+       void __iomem *_csr_base_addr = csr_base_addr; \
+       u32 _bank = bank;                                               \
+       u32 _ring = ring;                                               \
+       dma_addr_t _value = value;                                      \
+       u32 l_base = 0, u_base = 0;                                     \
+       l_base = lower_32_bits(_value);                                 \
+       u_base = upper_32_bits(_value);                                 \
+       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
+                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
+                  ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base);   \
+       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
+                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
+                  ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base);   \
+} while (0)
+
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_FLAG, (value))
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_COL_EN, (value))
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_COL_CTL, \
+                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_INT_FLAG_AND_COL, (value))
+
+/* Arbiter configuration */
+#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
+
+#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+                  ADF_RING_BUNDLE_SIZE * (bank) + \
+                  ADF_RING_CSR_RING_SRV_ARB_EN, (value))
+
+/* Default ring mapping */
+#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
+       (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
+         SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
+        ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
+         SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
+
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but these
+ * values correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE      0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x8000000
+#define ADF_SSMWDTL_OFFSET             0x54
+#define ADF_SSMWDTH_OFFSET             0x5C
+#define ADF_SSMWDTPKEL_OFFSET          0x58
+#define ADF_SSMWDTPKEH_OFFSET          0x60
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US    (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US      20
+#define ADF_WQM_CSR_RPRESETCTL_RESET   BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank)   (0x6000 + ((bank) << 3))
+#define ADF_WQM_CSR_RPRESETSTS_STATUS  BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank)   (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Error source registers */
+#define ADF_GEN4_ERRSOU0       (0x41A200)
+#define ADF_GEN4_ERRSOU1       (0x41A204)
+#define ADF_GEN4_ERRSOU2       (0x41A208)
+#define ADF_GEN4_ERRSOU3       (0x41A20C)
+
+/* Error source mask registers */
+#define ADF_GEN4_ERRMSK0       (0x41A210)
+#define ADF_GEN4_ERRMSK1       (0x41A214)
+#define ADF_GEN4_ERRMSK2       (0x41A218)
+#define ADF_GEN4_ERRMSK3       (0x41A21C)
+
+#define ADF_GEN4_VFLNOTIFY     BIT(7)
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
new file mode 100644 (file)
index 0000000..8e8efe9
--- /dev/null
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+#define ADF_4XXX_PF2VM_OFFSET(i)       (0x40B010 + ((i) * 0x20))
+#define ADF_4XXX_VM2PF_OFFSET(i)       (0x40B014 + ((i) * 0x20))
+
+/* VF2PF interrupt source registers */
+#define ADF_4XXX_VM2PF_SOU             0x41A180
+#define ADF_4XXX_VM2PF_MSK             0x41A1C0
+#define ADF_GEN4_VF_MSK                        0xFFFF
+
+#define ADF_PFVF_GEN4_MSGTYPE_SHIFT    2
+#define ADF_PFVF_GEN4_MSGTYPE_MASK     0x3F
+#define ADF_PFVF_GEN4_MSGDATA_SHIFT    8
+#define ADF_PFVF_GEN4_MSGDATA_MASK     0xFFFFFF
+
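+/* Unlike gen2, each direction has its own 32-bit CSR, so a full register
+ * carries a single message: type in bits 7:2, data in bits 31:8.
+ */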
+static const struct pfvf_csr_format csr_gen4_fmt = {
+       { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
+       { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
+};
+
+static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
+{
+       return ADF_4XXX_PF2VM_OFFSET(i);
+}
+
+static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
+{
+       return ADF_4XXX_VM2PF_OFFSET(i);
+}
+
+static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+       u32 val;
+
+       val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
+}
+
+static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+}
+
+static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 sources, disabled, pending;
+
+       /* Get the interrupt sources triggered by VFs */
+       sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
+       if (!sources)
+               return 0;
+
+       /* Get the already disabled interrupts */
+       disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
+
+       pending = sources & ~disabled;
+       if (!pending)
+               return 0;
+
+       /* Due to HW limitations, when disabling the interrupts, we can't
+        * just disable the requested sources, as this would lead to missed
+        * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
+        * To work around it, disable all and re-enable only the sources that
+        * did not trigger and were not already disabled. Re-enabling will
+        * trigger a new interrupt for the sources that have changed in the
+        * meantime, if any.
+        */
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
+
+       /* Return the sources of the (new) interrupt(s) */
+       return pending;
+}
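
The disable-all-then-restore trick above reduces to simple mask arithmetic. Below is a user-space sketch of that arithmetic; the sample SOU/MSK values are illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

/* Model of the workaround: given the triggered sources (VM2PF_SOU) and the
 * current mask (VM2PF_MSK, where 1 = disabled), compute what is newly
 * pending and what the final mask must be. */
static uint32_t disable_pending(uint32_t sou, uint32_t msk, uint32_t *new_msk)
{
	uint32_t pending = sou & ~msk;	/* triggered and not already masked */

	*new_msk = msk | sou;		/* keep old disables, mask new sources */
	return pending;
}

int main(void)
{
	uint32_t new_msk;
	/* VF0 and VF3 raised interrupts; VF3 was already masked out */
	uint32_t pending = disable_pending(0x9, 0x8, &new_msk);

	printf("pending=0x%x new_msk=0x%x\n", pending, new_msk); /* 0x1 0x9 */
	return 0;
}
```
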
+
+static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
+                             struct pfvf_message msg, u32 pfvf_offset,
+                             struct mutex *csr_lock)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 csr_val;
+       int ret;
+
+       csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
+       if (unlikely(!csr_val))
+               return -EINVAL;
+
+       mutex_lock(csr_lock);
+
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
+
+       /* Wait for confirmation from remote that it received the message */
+       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
+                               ADF_PFVF_MSG_ACK_DELAY_US,
+                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
+                               true, pmisc_addr, pfvf_offset);
+       if (ret < 0)
+               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+
+       mutex_unlock(csr_lock);
+       return ret;
+}
+
+static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
+                                             u32 pfvf_offset, u8 compat_ver)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       struct pfvf_message msg = { 0 };
+       u32 csr_val;
+
+       /* Read message from the CSR */
+       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
+       if (!(csr_val & ADF_PFVF_INT)) {
+               dev_info(&GET_DEV(accel_dev),
+                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
+               return msg;
+       }
+
+       /* We can now acknowledge the message reception by clearing the
+        * interrupt bit
+        */
+       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
+
+       /* Return the pfvf_message format */
+       return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
+}
+
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
+       pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
+       pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
+       pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
+       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
+       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
+       pfvf_ops->send_msg = adf_gen4_pfvf_send;
+       pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
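
For orientation, here is a hedged sketch of how an initializer like adf_gen4_init_pf_pfvf_ops() is consumed: the generation helper fills an ops table once, and all callers go through the pointers. The struct below is a minimal stand-in, not the real adf_pfvf_ops layout:

```c
#include <stdio.h>

/* Minimal stand-in so the sketch compiles outside the kernel; the real
 * struct adf_pfvf_ops carries many more callbacks. */
struct adf_pfvf_ops {
	unsigned int (*get_pf2vf_offset)(unsigned int vf_nr);
	unsigned int (*get_vf2pf_offset)(unsigned int vf_nr);
};

/* Per-VF CSR stride of 0x20, as in the ADF_4XXX_*_OFFSET() macros */
static unsigned int pf2vf_offset(unsigned int i) { return 0x40B010 + i * 0x20; }
static unsigned int vf2pf_offset(unsigned int i) { return 0x40B014 + i * 0x20; }

/* Analogue of adf_gen4_init_pf_pfvf_ops(): fill the table once */
static void init_pf_pfvf_ops(struct adf_pfvf_ops *ops)
{
	ops->get_pf2vf_offset = pf2vf_offset;
	ops->get_vf2pf_offset = vf2pf_offset;
}

int main(void)
{
	struct adf_pfvf_ops ops;

	init_pf_pfvf_ops(&ops);
	printf("VF2: PF2VM=0x%x VM2PF=0x%x\n",
	       ops.get_pf2vf_offset(2), ops.get_vf2pf_offset(2));
	return 0;
}
```
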
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
new file mode 100644 (file)
index 0000000..17d1b77
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_GEN4_PFVF_H
+#define ADF_GEN4_PFVF_H
+
+#include "adf_accel_devices.h"
+
+#ifdef CONFIG_PCI_IOV
+void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+#else
+static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
+}
+#endif
+
+#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
new file mode 100644 (file)
index 0000000..7037c08
--- /dev/null
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "adf_cfg_strings.h"
+#include "icp_qat_fw_init_admin.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_cfg.h"
+
+enum qat_pm_host_msg {
+       PM_NO_CHANGE = 0,
+       PM_SET_MIN,
+};
+
+struct adf_gen4_pm_data {
+       struct work_struct pm_irq_work;
+       struct adf_accel_dev *accel_dev;
+       u32 pm_int_sts;
+};
+
+static int send_host_msg(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       u32 msg;
+
+       msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+       if (msg & ADF_GEN4_PM_MSG_PENDING)
+               return -EBUSY;
+
+       /* Send HOST_MSG */
+       msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
+       msg |= ADF_GEN4_PM_MSG_PENDING;
+       ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
+
+       /* Poll status register to make sure the HOST_MSG has been processed */
+       return read_poll_timeout(ADF_CSR_RD, msg,
+                               !(msg & ADF_GEN4_PM_MSG_PENDING),
+                               ADF_GEN4_PM_MSG_POLL_DELAY_US,
+                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
+                               ADF_GEN4_PM_HOST_MSG);
+}
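
The HOST_MSG word built by send_host_msg() is a FIELD_PREP() of the payload plus the pending bit. A standalone sketch with the masks open-coded; field_prep() here is a simplified stand-in for the kernel macro:

```c
#include <stdint.h>
#include <stdio.h>

#define PM_MSG_PENDING		(1u << 0)	/* ADF_GEN4_PM_MSG_PENDING */
#define PM_MSG_PAYLOAD_MASK	0x1FFFFFFEu	/* GENMASK(28, 1) */
#define PM_SET_MIN		1u

/* Simplified FIELD_PREP(): shift the value up to the mask's lowest set bit */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	/* The same word send_host_msg() writes to ADF_GEN4_PM_HOST_MSG */
	uint32_t msg = field_prep(PM_MSG_PAYLOAD_MASK, PM_SET_MIN) |
		       PM_MSG_PENDING;

	printf("HOST_MSG = 0x%08x\n", msg);	/* 0x00000003 */
	return 0;
}
```
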
+
+static void pm_bh_handler(struct work_struct *work)
+{
+       struct adf_gen4_pm_data *pm_data =
+               container_of(work, struct adf_gen4_pm_data, pm_irq_work);
+       struct adf_accel_dev *accel_dev = pm_data->accel_dev;
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       u32 pm_int_sts = pm_data->pm_int_sts;
+       u32 val;
+
+       /* PM Idle interrupt */
+       if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+               /* Issue host message to FW */
+               if (send_host_msg(accel_dev))
+                       dev_warn_ratelimited(&GET_DEV(accel_dev),
+                                            "Failed to send host msg to FW\n");
+       }
+
+       /* Clear interrupt status */
+       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
+
+       /* Reenable PM interrupt */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       val &= ~ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+       kfree(pm_data);
+}
+
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       struct adf_gen4_pm_data *pm_data = NULL;
+       u32 errsou2;
+       u32 errmsk2;
+       u32 val;
+
+       /* Only handle the interrupt triggered by PM */
+       errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       if (errmsk2 & ADF_GEN4_PM_SOU)
+               return false;
+
+       errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
+       if (!(errsou2 & ADF_GEN4_PM_SOU))
+               return false;
+
+       /* Disable interrupt */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       val |= ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+
+       pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
+       if (!pm_data)
+               return false;
+
+       pm_data->pm_int_sts = val;
+       pm_data->accel_dev = accel_dev;
+
+       INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
+       adf_misc_wq_queue_work(&pm_data->pm_irq_work);
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+       int ret;
+       u32 val;
+
+       ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
+       if (ret)
+               return ret;
+
+       /* Enable default PM interrupts: IDLE, THROTTLE */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+       val |= ADF_GEN4_PM_INT_EN_DEFAULT;
+
+       /* Clear interrupt status */
+       val |= ADF_GEN4_PM_INT_STS_MASK;
+       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
+
+       /* Unmask PM Interrupt */
+       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
+       val &= ~ADF_GEN4_PM_SOU;
+       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
new file mode 100644 (file)
index 0000000..f8f8a9e
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef ADF_GEN4_PM_H
+#define ADF_GEN4_PM_H
+
+#include "adf_accel_devices.h"
+
+/* Power management registers */
+#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
+
+/* Power management */
+#define ADF_GEN4_PM_POLL_DELAY_US      20
+#define ADF_GEN4_PM_POLL_TIMEOUT_US    USEC_PER_SEC
+#define ADF_GEN4_PM_MSG_POLL_DELAY_US  (10 * USEC_PER_MSEC)
+#define ADF_GEN4_PM_STATUS             (0x50A00C)
+#define ADF_GEN4_PM_INTERRUPT          (0x50A028)
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN4_PM_SOU                        BIT(18)
+
+#define ADF_GEN4_PM_IDLE_INT_EN                BIT(18)
+#define ADF_GEN4_PM_THROTTLE_INT_EN    BIT(19)
+#define ADF_GEN4_PM_DRV_ACTIVE         BIT(20)
+#define ADF_GEN4_PM_INIT_STATE         BIT(21)
+#define ADF_GEN4_PM_INT_EN_DEFAULT     (ADF_GEN4_PM_IDLE_INT_EN | \
+                                       ADF_GEN4_PM_THROTTLE_INT_EN)
+
+#define ADF_GEN4_PM_THR_STS    BIT(0)
+#define ADF_GEN4_PM_IDLE_STS   BIT(1)
+#define ADF_GEN4_PM_FW_INT_STS BIT(2)
+#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
+                                ADF_GEN4_PM_IDLE_STS | \
+                                ADF_GEN4_PM_FW_INT_STS)
+
+#define ADF_GEN4_PM_MSG_PENDING                        BIT(0)
+#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK       GENMASK(28, 1)
+
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER                (0x0)
+#define ADF_GEN4_PM_MAX_IDLE_FILTER            (0x7)
+
+int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
+bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
new file mode 100644 (file)
index 0000000..da69566
--- /dev/null
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REG_SIZE 0x4
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
+       ADF_CSR_WR(csr_addr, (arb_offset) + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
+       ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
+       (ADF_ARB_REG_SIZE * (index)), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+       unsigned long ae_mask = hw_data->ae_mask;
+       u32 arb_off, wt_off, arb_cfg;
+       const u32 *thd_2_arb_cfg;
+       struct arb_info info;
+       int arb, i;
+
+       hw_data->get_arb_info(&info);
+       arb_cfg = info.arb_cfg;
+       arb_off = info.arb_offset;
+       wt_off = info.wt2sam_offset;
+
+       /* Service arb configured for 32-byte responses and
+        * ring flow control check enabled. */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
+
+       /* Map worker threads to service arbiters */
+       thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
+
+       for_each_set_bit(i, &ae_mask, hw_data->num_engines)
+               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+       struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       u32 tx_ring_mask = hw_data->tx_rings_mask;
+       u32 shift = hw_data->tx_rx_gap;
+       u32 arben, arben_tx, arben_rx;
+       u32 rx_ring_mask;
+
+       /*
+        * Enable arbitration on a ring only if the TX half of the ring mask
+        * matches the RX part. This results in writes to CSR on both TX and
+        * RX update - only one is necessary, but both are done for
+        * simplicity.
+        */
+       rx_ring_mask = tx_ring_mask << shift;
+       arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
+       arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
+       arben = arben_tx & arben_rx;
+
+       csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
+                                          ring->bank->bank_number, arben);
+}
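
The arbitration-enable mask computed above is easy to verify with concrete numbers; the bank geometry used below (16 rings, tx_rx_gap of 8) is only an example:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example geometry: 8 TX rings in the low half of a 16-ring bank,
	 * with RX partners tx_rx_gap (8) positions above them */
	uint32_t tx_ring_mask = 0x00FF;
	uint32_t shift = 8;			/* hw_data->tx_rx_gap */
	uint32_t ring_mask = 0x0101;		/* ring 0 TX + ring 8 RX up */

	uint32_t rx_ring_mask = tx_ring_mask << shift;
	uint32_t arben_tx = (ring_mask & tx_ring_mask) >> 0;
	uint32_t arben_rx = (ring_mask & rx_ring_mask) >> shift;

	/* Arbitration is enabled only where both halves of a pair are up */
	printf("arben = 0x%x\n", arben_tx & arben_rx);	/* 0x1 */
	return 0;
}
```
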
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       u32 arb_off, wt_off;
+       struct arb_info info;
+       void __iomem *csr;
+       unsigned int i;
+
+       hw_data->get_arb_info(&info);
+       arb_off = info.arb_offset;
+       wt_off = info.wt2sam_offset;
+
+       if (!accel_dev->transport)
+               return;
+
+       csr = accel_dev->transport->banks[0].csr_addr;
+
+       /* Reset arbiter configuration */
+       for (i = 0; i < ADF_ARB_NUM; i++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
+
+       /* Unmap worker threads to service arbiters */
+       for (i = 0; i < hw_data->num_engines; i++)
+               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
+
+       /* Disable arbitration on all rings */
+       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+               csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
new file mode 100644 (file)
index 0000000..0985f64
--- /dev/null
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_add(&service->list, &service_table);
+       mutex_unlock(&service_lock);
+}
+
+int adf_service_register(struct service_hndl *service)
+{
+       memset(service->init_status, 0, sizeof(service->init_status));
+       memset(service->start_status, 0, sizeof(service->start_status));
+       adf_service_add(service);
+       return 0;
+}
+
+static void adf_service_remove(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_del(&service->list);
+       mutex_unlock(&service_lock);
+}
+
+int adf_service_unregister(struct service_hndl *service)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
+               if (service->init_status[i] || service->start_status[i]) {
+                       pr_err("QAT: Could not remove active service\n");
+                       return -EFAULT;
+               }
+       }
+       adf_service_remove(service);
+       return 0;
+}
+
+/**
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_dev_init(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int ret;
+
+       if (!hw_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to init device - hw_data not set\n");
+               return -EFAULT;
+       }
+
+       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+           !accel_dev->is_vf) {
+               dev_err(&GET_DEV(accel_dev), "Device not configured\n");
+               return -EFAULT;
+       }
+
+       if (adf_init_etr_data(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
+               return -EFAULT;
+       }
+
+       if (hw_data->init_device && hw_data->init_device(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
+               return -EFAULT;
+       }
+
+       if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
+               return -EFAULT;
+       }
+
+       if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+               return -EFAULT;
+       }
+
+       if (adf_ae_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to initialise Acceleration Engine\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+       if (adf_ae_fw_load(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to load acceleration FW\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+       if (hw_data->alloc_irq(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+       hw_data->enable_ints(accel_dev);
+       hw_data->enable_error_correction(accel_dev);
+
+       ret = hw_data->pfvf_ops.enable_comms(accel_dev);
+       if (ret)
+               return ret;
+
+       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
+           accel_dev->is_vf) {
+               if (qat_crypto_vf_dev_config(accel_dev))
+                       return -EFAULT;
+       }
+
+       /*
+        * Subservice initialisation is divided into two stages: init and start.
+        * This is to facilitate any ordering dependencies between services
+        * prior to starting any of the accelerators.
+        */
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to initialise service %s\n",
+                               service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, service->init_status);
+       }
+
+       return 0;
+}
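
A minimal sketch of a subservice participating in this two-stage model follows; the types are stripped-down stand-ins for the kernel's service_hndl and event enum:

```c
#include <stdio.h>

enum adf_event { ADF_EVENT_INIT, ADF_EVENT_START, ADF_EVENT_STOP,
		 ADF_EVENT_SHUTDOWN };

struct adf_accel_dev;			/* opaque in this sketch */

/* Stand-in: the real service_hndl also has status bitmaps and a list head */
struct service_hndl {
	int (*event_hld)(struct adf_accel_dev *dev, enum adf_event ev);
	const char *name;
};

/* A service allocates in INIT and only goes live in START, so ordering
 * dependencies can be resolved before any accelerator starts */
static int my_svc_event(struct adf_accel_dev *dev, enum adf_event ev)
{
	(void)dev;
	switch (ev) {
	case ADF_EVENT_INIT:	 puts("alloc per-device state"); return 0;
	case ADF_EVENT_START:	 puts("go live");		 return 0;
	case ADF_EVENT_STOP:	 puts("quiesce");		 return 0;
	case ADF_EVENT_SHUTDOWN: puts("free per-device state");	 return 0;
	}
	return -1;
}

int main(void)
{
	struct service_hndl svc = { .event_hld = my_svc_event, .name = "my_svc" };

	/* adf_dev_init()/adf_dev_start() walk the table: INIT first, then START */
	svc.event_hld(NULL, ADF_EVENT_INIT);
	svc.event_hld(NULL, ADF_EVENT_START);
	return 0;
}
```
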
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+       if (adf_ae_start(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+       if (hw_data->send_admin_init(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+               return -EFAULT;
+       }
+
+       /* Set ssm watch dog timer */
+       if (hw_data->set_ssm_wdtimer)
+               hw_data->set_ssm_wdtimer(accel_dev);
+
+       /* Enable Power Management */
+       if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
+               return -EFAULT;
+       }
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to start service %s\n",
+                               service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, service->start_status);
+       }
+
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (!list_empty(&accel_dev->crypto_list) &&
+           (qat_algs_register() || qat_asym_algs_register())) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to register crypto algs\n");
+               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+               return -EFAULT;
+       }
+
+       if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to register compression algs\n");
+               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+static void adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       bool wait = false;
+       int ret;
+
+       if (!adf_dev_started(accel_dev) &&
+           !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+               return;
+
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (!list_empty(&accel_dev->crypto_list)) {
+               qat_algs_unregister();
+               qat_asym_algs_unregister();
+       }
+
+       if (!list_empty(&accel_dev->compression_list))
+               qat_comp_algs_unregister();
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!test_bit(accel_dev->accel_id, service->start_status))
+                       continue;
+               ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+               if (!ret) {
+                       clear_bit(accel_dev->accel_id, service->start_status);
+               } else if (ret == -EAGAIN) {
+                       wait = true;
+                       clear_bit(accel_dev->accel_id, service->start_status);
+               }
+       }
+
+       if (wait)
+               msleep(100);
+
+       if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
+               if (adf_ae_stop(accel_dev))
+                       dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
+               else
+                       clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+       }
+}
+
+/**
+ * adf_dev_shutdown() - shutdown acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       if (!hw_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "QAT: Failed to shutdown device - hw_data not set\n");
+               return;
+       }
+
+       if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+               adf_ae_fw_release(accel_dev);
+               clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+       }
+
+       if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+               if (adf_ae_shutdown(accel_dev))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to shutdown Accel Engine\n");
+               else
+                       clear_bit(ADF_STATUS_AE_INITIALISED,
+                                 &accel_dev->status);
+       }
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!test_bit(accel_dev->accel_id, service->init_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to shutdown service %s\n",
+                               service->name);
+               else
+                       clear_bit(accel_dev->accel_id, service->init_status);
+       }
+
+       hw_data->disable_iov(accel_dev);
+
+       if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+               hw_data->free_irq(accel_dev);
+               clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+       }
+
+       /* Delete configuration only if not restarting */
+       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               adf_cfg_del_all(accel_dev);
+
+       if (hw_data->exit_arb)
+               hw_data->exit_arb(accel_dev);
+
+       if (hw_data->exit_admin_comms)
+               hw_data->exit_admin_comms(accel_dev);
+
+       adf_cleanup_etr_data(accel_dev);
+       adf_dev_restore(accel_dev);
+}
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to restart service %s.\n",
+                               service->name);
+       }
+       return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to restart service %s.\n",
+                               service->name);
+       }
+       return 0;
+}
+
+static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       int ret;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+
+       adf_dev_stop(accel_dev);
+       adf_dev_shutdown(accel_dev);
+
+       if (!ret) {
+               ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+               if (ret)
+                       return ret;
+
+               ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                                 ADF_SERVICES_ENABLED,
+                                                 services, ADF_STR);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+       int ret = 0;
+
+       if (!accel_dev)
+               return -EINVAL;
+
+       mutex_lock(&accel_dev->state_lock);
+
+       if (!adf_dev_started(accel_dev)) {
+               dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+                        accel_dev->accel_id);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (reconfig) {
+               ret = adf_dev_shutdown_cache_cfg(accel_dev);
+               goto out;
+       }
+
+       adf_dev_stop(accel_dev);
+       adf_dev_shutdown(accel_dev);
+
+out:
+       mutex_unlock(&accel_dev->state_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+       int ret = 0;
+
+       if (!accel_dev)
+               return -EINVAL;
+
+       mutex_lock(&accel_dev->state_lock);
+
+       if (adf_dev_started(accel_dev)) {
+               dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+                        accel_dev->accel_id);
+               ret = -EALREADY;
+               goto out;
+       }
+
+       if (config && GET_HW_DATA(accel_dev)->dev_config) {
+               ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+               if (unlikely(ret))
+                       goto out;
+       }
+
+       ret = adf_dev_init(accel_dev);
+       if (unlikely(ret))
+               goto out;
+
+       ret = adf_dev_start(accel_dev);
+
+out:
+       mutex_unlock(&accel_dev->state_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
+
+int adf_dev_restart(struct adf_accel_dev *accel_dev)
+{
+       int ret = 0;
+
+       if (!accel_dev)
+               return -EFAULT;
+
+       adf_dev_down(accel_dev, false);
+
+       ret = adf_dev_up(accel_dev, false);
+       /* If the device is already up, return success */
+       if (ret == -EALREADY)
+               return 0;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_restart);
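
Taken together, adf_dev_up(), adf_dev_down() and adf_dev_restart() form a small state machine. A toy user-space model, assuming only the STARTED bit matters, shows why restart treats -EALREADY as success:

```c
#include <errno.h>
#include <stdio.h>

/* *started mimics the ADF_STATUS_STARTED bit guarded by state_lock */
static int dev_down(int *started)
{
	if (!*started)
		return -EINVAL;		/* already down */
	*started = 0;
	return 0;
}

static int dev_up(int *started)
{
	if (*started)
		return -EALREADY;	/* already up */
	*started = 1;
	return 0;
}

static int dev_restart(int *started)
{
	int ret;

	dev_down(started);		/* best effort, result ignored */
	ret = dev_up(started);
	return ret == -EALREADY ? 0 : ret; /* already up counts as success */
}

int main(void)
{
	int started = 1;

	printf("restart: %d (0 = ok)\n", dev_restart(&started));
	return 0;
}
```
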
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
new file mode 100644 (file)
index 0000000..ad9e135
--- /dev/null
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+#define ADF_MAX_NUM_VFS        32
+static struct workqueue_struct *adf_misc_wq;
+
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 msix_num_entries = hw_data->num_banks + 1;
+       int ret;
+
+       if (hw_data->set_msix_rttable)
+               hw_data->set_msix_rttable(accel_dev);
+
+       ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+                                   msix_num_entries, PCI_IRQ_MSIX);
+       if (unlikely(ret < 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to allocate %d MSI-X vectors\n",
+                       msix_num_entries);
+               return ret;
+       }
+       return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+       pci_free_irq_vectors(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+       struct adf_etr_bank_data *bank = bank_ptr;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+                                           0);
+       tasklet_hi_schedule(&bank->resp_handler);
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PCI_IOV
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
+       GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
+       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
+}
+
+static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       u32 pending;
+
+       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
+       pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
+       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
+
+       return pending;
+}
+
+static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
+{
+       bool irq_handled = false;
+       unsigned long vf_mask;
+
+       /* Get the interrupt sources triggered by VFs, except for those already disabled */
+       vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
+       if (vf_mask) {
+               struct adf_accel_vf_info *vf_info;
+               int i;
+
+               /*
+                * Handle VF2PF interrupt unless the VF is malicious and
+                * is attempting to flood the host OS with VF2PF interrupts.
+                */
+               for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
+                       vf_info = accel_dev->pf.vf_info + i;
+
+                       if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+                               dev_info(&GET_DEV(accel_dev),
+                                        "Too many ints from VF%d\n",
+                                         vf_info->vf_nr);
+                               continue;
+                       }
+
+                       adf_schedule_vf2pf_handler(vf_info);
+                       irq_handled = true;
+               }
+       }
+       return irq_handled;
+}
+#endif /* CONFIG_PCI_IOV */
+
+static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+       if (hw_data->handle_pm_interrupt &&
+           hw_data->handle_pm_interrupt(accel_dev))
+               return true;
+
+       return false;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+       struct adf_accel_dev *accel_dev = dev_ptr;
+
+#ifdef CONFIG_PCI_IOV
+       /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+       if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
+               return IRQ_HANDLED;
+#endif /* CONFIG_PCI_IOV */
+
+       if (adf_handle_pm_int(accel_dev))
+               return IRQ_HANDLED;
+
+       dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+               accel_dev->accel_id);
+
+       return IRQ_NONE;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int clust_irq = hw_data->num_banks;
+       int irq, i = 0;
+
+       if (pci_dev_info->msix_entries.num_entries > 1) {
+               for (i = 0; i < hw_data->num_banks; i++) {
+                       if (irqs[i].enabled) {
+                               irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+                               irq_set_affinity_hint(irq, NULL);
+                               free_irq(irq, &etr_data->banks[i]);
+                       }
+               }
+       }
+
+       if (irqs[i].enabled) {
+               irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+               free_irq(irq, accel_dev);
+       }
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int clust_irq = hw_data->num_banks;
+       int ret, irq, i = 0;
+       char *name;
+
+       /* Request msix irq for all banks unless SR-IOV enabled */
+       if (!accel_dev->pf.vf_info) {
+               for (i = 0; i < hw_data->num_banks; i++) {
+                       struct adf_etr_bank_data *bank = &etr_data->banks[i];
+                       unsigned int cpu, cpus = num_online_cpus();
+
+                       name = irqs[i].name;
+                       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                                "qat%d-bundle%d", accel_dev->accel_id, i);
+                       irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+                       if (unlikely(irq < 0)) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Failed to get IRQ number of device vector %d - %s\n",
+                                       i, name);
+                               ret = irq;
+                               goto err;
+                       }
+                       ret = request_irq(irq, adf_msix_isr_bundle, 0,
+                                         &name[0], bank);
+                       if (ret) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Failed to allocate IRQ %d for %s\n",
+                                       irq, name);
+                               goto err;
+                       }
+
+                       cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+                              i) % cpus;
+                       irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+                       irqs[i].enabled = true;
+               }
+       }
+
+       /* Request msix irq for AE */
+       name = irqs[i].name;
+       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat%d-ae-cluster", accel_dev->accel_id);
+       irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+       if (unlikely(irq < 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to get IRQ number of device vector %d - %s\n",
+                       i, name);
+               ret = irq;
+               goto err;
+       }
+       ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to allocate IRQ %d for %s\n", irq, name);
+               goto err;
+       }
+       irqs[i].enabled = true;
+       return ret;
+err:
+       adf_free_irqs(accel_dev);
+       return ret;
+}
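
Bundle IRQ affinity is spread across online CPUs with a simple modulo scheme. A quick demo of the formula used in adf_request_irqs(), with made-up counts (2 devices, 4 banks, 4 CPUs):

```c
#include <stdio.h>

int main(void)
{
	unsigned int num_banks = 4, cpus = 4;

	for (unsigned int accel_id = 0; accel_id < 2; accel_id++)
		for (unsigned int i = 0; i < num_banks; i++)
			printf("qat%u-bundle%u -> cpu%u\n", accel_id, i,
			       (accel_id * num_banks + i) % cpus);
	return 0;
}
```
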
+
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 msix_num_entries = 1;
+       struct adf_irq *irqs;
+
+       /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+       if (!accel_dev->pf.vf_info)
+               msix_num_entries += hw_data->num_banks;
+
+       irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+                           GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+       if (!irqs)
+               return -ENOMEM;
+
+       accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
+       accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
+       return 0;
+}
+
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
+{
+       kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+       accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++)
+               tasklet_init(&priv_data->banks[i].resp_handler,
+                            adf_response_handler,
+                            (unsigned long)&priv_data->banks[i]);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++) {
+               tasklet_disable(&priv_data->banks[i].resp_handler);
+               tasklet_kill(&priv_data->banks[i].resp_handler);
+       }
+}
+
+/**
+ * adf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for the acceleration device.
+ */
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       adf_free_irqs(accel_dev);
+       adf_cleanup_bh(accel_dev);
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+       adf_isr_free_msix_vectors_data(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_free);
+
+/**
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for the acceleration device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = adf_isr_alloc_msix_vectors_data(accel_dev);
+       if (ret)
+               goto err_out;
+
+       ret = adf_enable_msix(accel_dev);
+       if (ret)
+               goto err_free_msix_table;
+
+       ret = adf_setup_bh(accel_dev);
+       if (ret)
+               goto err_disable_msix;
+
+       ret = adf_request_irqs(accel_dev);
+       if (ret)
+               goto err_cleanup_bh;
+
+       return 0;
+
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+       adf_isr_free_msix_vectors_data(accel_dev);
+
+err_out:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
+
+/**
+ * adf_init_misc_wq() - Init misc workqueue
+ *
+ * Function initializes the workqueue 'qat_misc_wq' for general-purpose work.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_misc_wq(void)
+{
+       adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+
+       return !adf_misc_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_misc_wq(void)
+{
+       if (adf_misc_wq)
+               destroy_workqueue(adf_misc_wq);
+
+       adf_misc_wq = NULL;
+}
+
+bool adf_misc_wq_queue_work(struct work_struct *work)
+{
+       return queue_work(adf_misc_wq, work);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h
new file mode 100644 (file)
index 0000000..204a424
--- /dev/null
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#ifndef ADF_PFVF_MSG_H
+#define ADF_PFVF_MSG_H
+
+#include <linux/bits.h>
+
+/*
+ * PF<->VF Gen2 Messaging format
+ *
+ * The PF has an array of 32-bit PF2VF registers, one for each VF. The
+ * PF can access all these registers while each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register functionality is split into two parts:
+ * The bottom half is for PF->VF messages. In particular when the first
+ * bit of this register (bit 0) gets set an interrupt will be triggered
+ * in the respective VF.
+ * The top half is for VF->PF messages. In particular when the first bit
+ * of this half of register (bit 16) gets set an interrupt will be triggered
+ * in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and implement a collision control mechanism to prevent concurrent use of
+ * the PF2VF register by both the PF and VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are ignored.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see function
+ * adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
+ *
+ *
+ * PF<->VF Gen4 Messaging format
+ *
+ * Similarly to the gen2 messaging format, 32-bit long registers are used for
+ * communication between PF and VFs. However, each VF and PF share a pair of
+ * 32-bit registers to avoid collisions: one for PF to VF messages and one
+ * for VF to PF messages.
+ *
+ * Both the Interrupt bit and the Message Origin bit retain the same position
+ * and meaning, although non-system messages are now deprecated and not
+ * expected.
+ *
+ *  31 30              9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |   . . .   |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \_____________________/ \_______________/  ^  ^
+ *             ^                     ^         |  |
+ *             |                     |         |  PF/VF Int
+ *             |                     |         Message Origin
+ *             |                     Message Type
+ *             Message-specific Data/Reserved
+ *
+ * For both formats, the message reception is acknowledged by lowering the
+ * interrupt bit on the register where the message was sent.
+ */
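
A standalone sketch of packing and unpacking a message word in the Gen4 layout used by adf_gen4_pfvf.c (type in bits 7:2, data in bits 31:8); the sample type and data values are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define PFVF_INT	(1u << 0)	/* ADF_PFVF_INT */
#define MSGORIGIN_SYS	(1u << 1)	/* ADF_PFVF_MSGORIGIN_SYSTEM */
#define TYPE_SHIFT	2
#define TYPE_MASK	0x3Fu
#define DATA_SHIFT	8
#define DATA_MASK	0xFFFFFFu

static uint32_t pack(uint8_t type, uint32_t data)
{
	return ((type & TYPE_MASK) << TYPE_SHIFT) |
	       ((data & DATA_MASK) << DATA_SHIFT) |
	       MSGORIGIN_SYS | PFVF_INT;	/* ring the remote doorbell */
}

int main(void)
{
	uint32_t csr = pack(0x10, 3);	/* e.g. RP_RESET_RESP, RPRESET_TIMEOUT */

	printf("csr=0x%08x type=0x%x data=%u\n", csr,
	       (csr >> TYPE_SHIFT) & TYPE_MASK, (csr >> DATA_SHIFT) & DATA_MASK);
	return 0;
}
```
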
+
+/* PFVF message common bits */
+#define ADF_PFVF_INT                           BIT(0)
+#define ADF_PFVF_MSGORIGIN_SYSTEM              BIT(1)
+
+/* Different generations have different CSR layouts; use this struct
+ * to abstract these differences away
+ */
+struct pfvf_message {
+       u8 type;
+       u32 data;
+};
+
+/* PF->VF messages */
+enum pf2vf_msgtype {
+       ADF_PF2VF_MSGTYPE_RESTARTING            = 0x01,
+       ADF_PF2VF_MSGTYPE_VERSION_RESP          = 0x02,
+       ADF_PF2VF_MSGTYPE_BLKMSG_RESP           = 0x03,
+/* Values from 0x10 are Gen4 specific; the message type is only 4 bits in Gen2 devices. */
+       ADF_PF2VF_MSGTYPE_RP_RESET_RESP         = 0x10,
+};
+
+/* VF->PF messages */
+enum vf2pf_msgtype {
+       ADF_VF2PF_MSGTYPE_INIT                  = 0x03,
+       ADF_VF2PF_MSGTYPE_SHUTDOWN              = 0x04,
+       ADF_VF2PF_MSGTYPE_VERSION_REQ           = 0x05,
+       ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ        = 0x06,
+       ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ       = 0x07,
+       ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ      = 0x08,
+       ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ       = 0x09,
+/* Values from 0x10 are Gen4 specific; the message type is only 4 bits in Gen2 devices. */
+       ADF_VF2PF_MSGTYPE_RP_RESET              = 0x10,
+};
+
+/* VF/PF compatibility version. */
+enum pfvf_compatibility_version {
+       /* Support for extended capabilities */
+       ADF_PFVF_COMPAT_CAPABILITIES            = 0x02,
+       /* In-use pattern cleared by receiver */
+       ADF_PFVF_COMPAT_FAST_ACK                = 0x03,
+       /* Ring to service mapping support for non-standard mappings */
+       ADF_PFVF_COMPAT_RING_TO_SVC_MAP         = 0x04,
+       /* Reference to the latest version */
+       ADF_PFVF_COMPAT_THIS_VERSION            = 0x04,
+};
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK       GENMASK(7, 0)
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK     GENMASK(9, 8)
+
+enum pf2vf_compat_response {
+       ADF_PF2VF_VF_COMPATIBLE                 = 0x01,
+       ADF_PF2VF_VF_INCOMPATIBLE               = 0x02,
+       ADF_PF2VF_VF_COMPAT_UNKNOWN             = 0x03,
+};
+
+enum ring_reset_result {
+       RPRESET_SUCCESS                         = 0x00,
+       RPRESET_NOT_SUPPORTED                   = 0x01,
+       RPRESET_INVAL_BANK                      = 0x02,
+       RPRESET_TIMEOUT                         = 0x03,
+};
+
+#define ADF_VF2PF_RNG_RESET_RP_MASK            GENMASK(1, 0)
+#define ADF_VF2PF_RNG_RESET_RSVD_MASK          GENMASK(25, 2)
+
+/* PF->VF Block Responses */
+#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK                GENMASK(1, 0)
+#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK                GENMASK(9, 2)
+
+enum pf2vf_blkmsg_resp_type {
+       ADF_PF2VF_BLKMSG_RESP_TYPE_DATA         = 0x00,
+       ADF_PF2VF_BLKMSG_RESP_TYPE_CRC          = 0x01,
+       ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR        = 0x02,
+};
+
+/* PF->VF Block Error Code */
+enum pf2vf_blkmsg_error {
+       ADF_PF2VF_INVALID_BLOCK_TYPE            = 0x00,
+       ADF_PF2VF_INVALID_BYTE_NUM_REQ          = 0x01,
+       ADF_PF2VF_PAYLOAD_TRUNCATED             = 0x02,
+       ADF_PF2VF_UNSPECIFIED_ERROR             = 0x03,
+};
+
+/* VF->PF Block Requests */
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK                GENMASK(1, 0)
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK                GENMASK(8, 2)
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK       GENMASK(2, 0)
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK       GENMASK(8, 3)
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK                GENMASK(3, 0)
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK                GENMASK(8, 4)
+#define ADF_VF2PF_BLOCK_CRC_REQ_MASK           BIT(9)
+
+/* PF->VF Block Request Types
+ *  0..15 - 32 byte message
+ * 16..23 - 64 byte message
+ * 24..27 - 128 byte message
+ */
+enum vf2pf_blkmsg_req_type {
+       ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY        = 0x02,
+       ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP       = 0x03,
+};
+
+#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
+               (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
+
+#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
+               (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
+               ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
+
+#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
+               (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
+               ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
+
+#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
+               FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
+               FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
+
+#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
+               FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
+
+struct pfvf_blkmsg_header {
+       u8 version;
+       u8 payload_size;
+} __packed;
+
+#define ADF_PFVF_BLKMSG_HEADER_SIZE            (sizeof(struct pfvf_blkmsg_header))
+#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg)   (sizeof(blkmsg) - \
+                                                       ADF_PFVF_BLKMSG_HEADER_SIZE)
+#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg)       (ADF_PFVF_BLKMSG_HEADER_SIZE + \
+                                                       (blkmsg)->hdr.payload_size)
+#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE           128
+
+/* PF->VF Block message header bytes */
+#define ADF_PFVF_BLKMSG_VER_BYTE               0
+#define ADF_PFVF_BLKMSG_LEN_BYTE               1
+
+/* PF/VF Capabilities message values */
+enum blkmsg_capabilities_versions {
+       ADF_PFVF_CAPABILITIES_V1_VERSION        = 0x01,
+       ADF_PFVF_CAPABILITIES_V2_VERSION        = 0x02,
+       ADF_PFVF_CAPABILITIES_V3_VERSION        = 0x03,
+};
+
+struct capabilities_v1 {
+       struct pfvf_blkmsg_header hdr;
+       u32 ext_dc_caps;
+} __packed;
+
+struct capabilities_v2 {
+       struct pfvf_blkmsg_header hdr;
+       u32 ext_dc_caps;
+       u32 capabilities;
+} __packed;
+
+struct capabilities_v3 {
+       struct pfvf_blkmsg_header hdr;
+       u32 ext_dc_caps;
+       u32 capabilities;
+       u32 frequency;
+} __packed;
+
+/* PF/VF Ring to service mapping values */
+enum blkmsg_ring_to_svc_versions {
+       ADF_PFVF_RING_TO_SVC_VERSION            = 0x01,
+};
+
+struct ring_to_svc_map_v1 {
+       struct pfvf_blkmsg_header hdr;
+       u16 map;
+} __packed;
+
+#endif /* ADF_PFVF_MSG_H */
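
The header/payload size macros are easiest to see on a concrete message. A user-space sketch of capabilities_v2 sizing, using #pragma pack as a stand-in for __packed:

```c
#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)			/* mirrors __packed */
struct pfvf_blkmsg_header { uint8_t version; uint8_t payload_size; };
struct capabilities_v2 {
	struct pfvf_blkmsg_header hdr;
	uint32_t ext_dc_caps;
	uint32_t capabilities;
};
#pragma pack(pop)

#define HDR_SIZE	sizeof(struct pfvf_blkmsg_header)
#define PAYLOAD_SIZE(t)	(sizeof(t) - HDR_SIZE)

int main(void)
{
	/* payload_size counts only the bytes after the 2-byte header, so a
	 * receiver can size the copy from the header alone */
	printf("hdr=%zu payload=%zu total=%zu\n",
	       HDR_SIZE, PAYLOAD_SIZE(struct capabilities_v2),
	       HDR_SIZE + PAYLOAD_SIZE(struct capabilities_v2));	/* 2 8 10 */
	return 0;
}
```
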
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c
new file mode 100644 (file)
index 0000000..14c069f
--- /dev/null
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_vf_info *vf;
+       struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
+       int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+       for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+               if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send restarting msg to VF%d\n", i);
+       }
+}
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+                                    u8 *buffer, u8 compat)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct capabilities_v2 caps_msg;
+
+       caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
+       caps_msg.capabilities = hw_data->accel_capabilities_mask;
+
+       caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
+       caps_msg.hdr.payload_size =
+                       ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
+
+       memcpy(buffer, &caps_msg, sizeof(caps_msg));
+
+       return 0;
+}
+
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+                                   u8 *buffer, u8 compat)
+{
+       struct ring_to_svc_map_v1 rts_map_msg;
+
+       rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
+       rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
+       rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
+
+       memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
+
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h
new file mode 100644 (file)
index 0000000..e8982d1
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_MSG_H
+#define ADF_PFVF_PF_MSG_H
+
+#include "adf_accel_devices.h"
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+
+typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
+                                        u8 *buffer, u8 compat);
+
+int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
+                                    u8 *buffer, u8 compat);
+int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
+                                   u8 *buffer, u8 compat);
+
+#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c
new file mode 100644 (file)
index 0000000..388e58b
--- /dev/null
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+#include "adf_pfvf_pf_proto.h"
+#include "adf_pfvf_utils.h"
+
+typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
+
+static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
+       NULL,                             /* no message type defined for value 0 */
+       NULL,                             /* no message type defined for value 1 */
+       adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
+       adf_pf_ring_to_svc_msg_provider,  /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
+};
+
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr:     VF number to which the message will be sent
+ * @msg:       Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
+{
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
+
+       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+                                 &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
+}
+
+/**
+ * adf_recv_vf2pf_msg() - receive a VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr:     Number of the VF from where the message will be received
+ *
+ * This function allows the PF to receive a message from a specific VF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
+{
+       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
+
+       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
+}
+
+static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
+{
+       if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
+               return NULL;
+
+       return pf2vf_blkmsg_providers[type];
+}
+
+/* Byte pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
+{
+       return blkmsg[index];
+}
+
+/* CRC pf2vf_blkmsg_data_getter_fn callback */
+static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
+{
+       /* count is 0-based, turn it into a length */
+       return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
+}
+
+static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
+                                    u8 type, u8 byte, u8 max_size, u8 *data,
+                                    pf2vf_blkmsg_data_getter_fn data_getter)
+{
+       u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
+       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+       adf_pf2vf_blkmsg_provider provider;
+       u8 msg_size;
+
+       provider = get_blkmsg_response_provider(type);
+
+       if (unlikely(!provider)) {
+               pr_err("QAT: No registered provider for message %d\n", type);
+               *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
+               return -EINVAL;
+       }
+
+       if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
+               pr_err("QAT: unknown error from provider for message %d\n", type);
+               *data = ADF_PF2VF_UNSPECIFIED_ERROR;
+               return -EINVAL;
+       }
+
+       msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
+
+       if (unlikely(msg_size >= max_size)) {
+               pr_err("QAT: Invalid size %d provided for message type %d\n",
+                      msg_size, type);
+               *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
+               return -EINVAL;
+       }
+
+       if (unlikely(byte >= msg_size)) {
+               pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
+                      byte, msg_size);
+               *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
+               return -EINVAL;
+       }
+
+       *data = data_getter(blkmsg, byte);
+       return 0;
+}
+
+static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
+                                            struct pfvf_message req)
+{
+       u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
+       struct pfvf_message resp = { 0 };
+       u8 resp_data = 0;
+       u8 blk_type;
+       u8 blk_byte;
+       u8 byte_max;
+
+       switch (req.type) {
+       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+               blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
+                          + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
+               blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
+               byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+               break;
+       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+               blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
+                          + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
+               blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
+               byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+               break;
+       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+               blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
+               blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
+               byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+               break;
+       }
+
+       /* Is this a request for CRC or data? */
+       if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
+               dev_dbg(&GET_DEV(vf_info->accel_dev),
+                       "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
+                       blk_type, blk_byte + 1, vf_info->vf_nr);
+
+               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+                                              byte_max, &resp_data,
+                                              adf_pf2vf_blkmsg_get_crc))
+                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
+       } else {
+               dev_dbg(&GET_DEV(vf_info->accel_dev),
+                       "BlockMsg of type %d for data byte %d received from VF%d\n",
+                       blk_type, blk_byte, vf_info->vf_nr);
+
+               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
+                                              byte_max, &resp_data,
+                                              adf_pf2vf_blkmsg_get_byte))
+                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
+       }
+
+       resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
+       resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
+                   FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
+
+       return resp;
+}
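+
+/*
+ * Worked example for handle_blkmsg_req() (illustrative; the exact mask
+ * values live in adf_pfvf_msg.h): for a SMALL_BLOCK_REQ whose req.data
+ * decodes to blk_type = 2 and blk_byte = 5 with the CRC flag clear, the PF
+ * returns byte 5 of the capabilities summary in the response data field;
+ * with the CRC flag set, it instead returns a CRC computed over the first
+ * blk_byte + 1 = 6 bytes of that message. A full block transfer is thus a
+ * series of single-byte requests followed by one CRC request.
+ */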
+
+static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
+                                              struct pfvf_message req)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct pfvf_message resp = {
+               .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
+               .data = RPRESET_SUCCESS
+       };
+       u32 bank_number;
+       u32 rsvd_field;
+
+       bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
+       rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
+               vf_nr, bank_number);
+
+       if (!hw_data->ring_pair_reset || rsvd_field) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Ring Pair Reset for VF%d is not supported\n", vf_nr);
+               resp.data = RPRESET_NOT_SUPPORTED;
+               goto out;
+       }
+
+       if (bank_number >= hw_data->num_banks_per_vf) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
+                       bank_number, vf_nr);
+               resp.data = RPRESET_INVAL_BANK;
+               goto out;
+       }
+
+       /* Convert the VF provided value to PF bank number */
+       bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
+       if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Ring pair reset for VF%d failed\n", vf_nr);
+               resp.data = RPRESET_TIMEOUT;
+               goto out;
+       }
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "Ring pair reset for VF%d completed successfully\n", vf_nr);
+
+out:
+       return resp;
+}
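+
+/*
+ * Example of the VF-to-PF bank conversion above: with num_banks_per_vf = 4
+ * (a plausible value; the real one is device specific), a reset request
+ * from VF2 for its local bank 1 targets PF bank 2 * 4 + 1 = 9.
+ */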
+
+static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
+                               struct pfvf_message msg, struct pfvf_message *resp)
+{
+       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+
+       switch (msg.type) {
+       case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+               {
+               u8 vf_compat_ver = msg.data;
+               u8 compat;
+
+               dev_dbg(&GET_DEV(accel_dev),
+                       "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
+                       vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
+
+               if (vf_compat_ver == 0)
+                       compat = ADF_PF2VF_VF_INCOMPATIBLE;
+               else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
+                       compat = ADF_PF2VF_VF_COMPATIBLE;
+               else
+                       compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
+
+               vf_info->vf_compat_ver = vf_compat_ver;
+
+               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
+                                       ADF_PFVF_COMPAT_THIS_VERSION) |
+                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+               {
+               u8 compat;
+
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
+                       vf_nr);
+
+               /* legacy driver, VF compat_ver is 0 */
+               vf_info->vf_compat_ver = 0;
+
+               /* PF always newer than legacy VF */
+               compat = ADF_PF2VF_VF_COMPATIBLE;
+
+               /* Set legacy major and minor version to the latest, 1.1 */
+               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
+               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
+                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_INIT:
+               {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Init message received from VF%d\n", vf_nr);
+               vf_info->init = true;
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+               {
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Shutdown message received from VF%d\n", vf_nr);
+               vf_info->init = false;
+               }
+               break;
+       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
+       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
+       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
+               *resp = handle_blkmsg_req(vf_info, msg);
+               break;
+       case ADF_VF2PF_MSGTYPE_RP_RESET:
+               *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
+               break;
+       default:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
+                       vf_nr, msg.type, msg.data);
+               return -ENOMSG;
+       }
+
+       return 0;
+}
+
+bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+       struct pfvf_message req;
+       struct pfvf_message resp = {0};
+
+       req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
+       if (!req.type)  /* Legacy or no message */
+               return true;
+
+       if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
+               return false;
+
+       if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send response to VF%d\n", vf_nr);
+
+       return true;
+}
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from PF to VF
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+       adf_pfvf_crc_init();
+       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.h
new file mode 100644 (file)
index 0000000..165d266
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_PF_PROTO_H
+#define ADF_PFVF_PF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
+
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.c
new file mode 100644 (file)
index 0000000..c5f6d77
--- /dev/null
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2021 Intel Corporation */
+#include <linux/crc8.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+
+/* CRC Calculation */
+DECLARE_CRC8_TABLE(pfvf_crc8_table);
+#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
+
+void adf_pfvf_crc_init(void)
+{
+       crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
+}
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
+{
+       return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
+}
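+
+/*
+ * Minimal standalone sketch of the computation above (illustrative,
+ * mirroring what crc8() does with an MSB-first table for polynomial 0x97
+ * and an initial value of 0xFF):
+ *
+ *     u8 crc = 0xFF;
+ *
+ *     while (buf_len--)
+ *             crc = pfvf_crc8_table[crc ^ *buf++];
+ *     return crc;
+ */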
+
+static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
+                                u32 value, const struct pfvf_field_format *fmt)
+{
+       if (unlikely((value & fmt->mask) != value)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "PFVF message value 0x%X out of range, %u max allowed\n",
+                       value, fmt->mask);
+               return false;
+       }
+
+       *csr_msg |= value << fmt->offset;
+
+       return true;
+}
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
+                       struct pfvf_message msg,
+                       const struct pfvf_csr_format *fmt)
+{
+       u32 csr_msg = 0;
+
+       if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
+           !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
+               return 0;
+
+       return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
+}
+
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
+                                       const struct pfvf_csr_format *fmt)
+{
+       struct pfvf_message msg = { 0 };
+
+       msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
+       msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
+
+       if (unlikely(!msg.type))
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid PFVF msg with no type received\n");
+
+       return msg;
+}
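+
+/*
+ * Round-trip example for the two helpers above, using a hypothetical CSR
+ * layout with the type field at offset 0 (mask 0xF) and the data field at
+ * offset 4 (mask 0x3FF): packing { .type = 0x4, .data = 0x25 } yields
+ * (0x4 << 0) | (0x25 << 4) | ADF_PFVF_MSGORIGIN_SYSTEM, and unpacking that
+ * CSR value with the same format recovers type 0x4 and data 0x25, assuming
+ * the origin bit sits above both fields. An out-of-range value is rejected
+ * at pack time and adf_pfvf_csr_msg_of() returns 0 instead.
+ */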
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h
new file mode 100644 (file)
index 0000000..2be048e
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_UTILS_H
+#define ADF_PFVF_UTILS_H
+
+#include <linux/types.h>
+#include "adf_pfvf_msg.h"
+
+/* How long to wait for far side to acknowledge receipt */
+#define ADF_PFVF_MSG_ACK_DELAY_US      4
+#define ADF_PFVF_MSG_ACK_MAX_DELAY_US  (1 * USEC_PER_SEC)
+
+u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
+void adf_pfvf_crc_init(void);
+
+struct pfvf_field_format {
+       u8  offset;
+       u32 mask;
+};
+
+struct pfvf_csr_format {
+       struct pfvf_field_format type;
+       struct pfvf_field_format data;
+};
+
+u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                       const struct pfvf_csr_format *fmt);
+struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
+                                       const struct pfvf_csr_format *fmt);
+
+#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c
new file mode 100644 (file)
index 0000000..1141258
--- /dev/null
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+/**
+ * adf_vf2pf_notify_init() - send init msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
+
+       if (adf_send_vf2pf_msg(accel_dev, msg)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Init event to PF\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
+
+/**
+ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF.
+ *
+ * Return: void
+ */
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
+
+       if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+               if (adf_send_vf2pf_msg(accel_dev, msg))
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
+
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+       u8 pf_version;
+       int compat;
+       int ret;
+       struct pfvf_message resp;
+       struct pfvf_message msg = {
+               .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
+               .data = ADF_PFVF_COMPAT_THIS_VERSION,
+       };
+
+       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
+
+       ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to send Compatibility Version Request.\n");
+               return ret;
+       }
+
+       pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
+       compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
+
+       /* Response from PF received, check compatibility */
+       switch (compat) {
+       case ADF_PF2VF_VF_COMPATIBLE:
+               break;
+       case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+               /* VF is newer than PF - compatible for now */
+               break;
+       case ADF_PF2VF_VF_INCOMPATIBLE:
+               dev_err(&GET_DEV(accel_dev),
+                       "PF (vers %d) and VF (vers %d) are not compatible\n",
+                       pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
+               return -EINVAL;
+       default:
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid response from PF; assume not compatible\n");
+               return -EINVAL;
+       }
+
+       accel_dev->vf.pf_compat_ver = pf_version;
+       return 0;
+}
+
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct capabilities_v3 cap_msg = { 0 };
+       unsigned int len = sizeof(cap_msg);
+
+       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
+               /* The PF is too old to support the extended capabilities */
+               return 0;
+
+       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
+                                     (u8 *)&cap_msg, &len)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "QAT: Failed to get block message response\n");
+               return -EFAULT;
+       }
+
+       switch (cap_msg.hdr.version) {
+       default:
+               /* Newer version received, handle only the known parts */
+               fallthrough;
+       case ADF_PFVF_CAPABILITIES_V3_VERSION:
+               if (likely(len >= sizeof(struct capabilities_v3)))
+                       hw_data->clock_frequency = cap_msg.frequency;
+               else
+                       dev_info(&GET_DEV(accel_dev), "Could not get frequency\n");
+               fallthrough;
+       case ADF_PFVF_CAPABILITIES_V2_VERSION:
+               if (likely(len >= sizeof(struct capabilities_v2)))
+                       hw_data->accel_capabilities_mask = cap_msg.capabilities;
+               else
+                       dev_info(&GET_DEV(accel_dev), "Could not get capabilities\n");
+               fallthrough;
+       case ADF_PFVF_CAPABILITIES_V1_VERSION:
+               if (likely(len >= sizeof(struct capabilities_v1))) {
+                       hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
+               } else {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Capabilities message truncated to %d bytes\n", len);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
+{
+       struct ring_to_svc_map_v1 rts_map_msg = { 0 };
+       unsigned int len = sizeof(rts_map_msg);
+
+       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
+               /* Use already set default mappings */
+               return 0;
+
+       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+                                     (u8 *)&rts_map_msg, &len)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "QAT: Failed to get block message response\n");
+               return -EFAULT;
+       }
+
+       if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
+               dev_err(&GET_DEV(accel_dev),
+                       "RING_TO_SVC message truncated to %d bytes\n", len);
+               return -EFAULT;
+       }
+
+       /* Only v1 at present */
+       accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+       return 0;
+}
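+
+/*
+ * Example of the version handling in adf_vf2pf_get_capabilities() above:
+ * if a future PF replies with a version newer than v3, the default case
+ * falls through and only the known parts are consumed; and within each
+ * case the len check guards against a truncated payload, so a response
+ * that covers capabilities_v2 but not capabilities_v3 still yields the
+ * capability mask while the clock frequency is merely logged as
+ * unavailable.
+ */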
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h
new file mode 100644 (file)
index 0000000..71bc0e3
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_MSG_H
+#define ADF_PFVF_VF_MSG_H
+
+#if defined(CONFIG_PCI_IOV)
+int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+
+#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c
new file mode 100644 (file)
index 0000000..1015155
--- /dev/null
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pfvf_msg.h"
+#include "adf_pfvf_utils.h"
+#include "adf_pfvf_vf_msg.h"
+#include "adf_pfvf_vf_proto.h"
+
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY    10
+#define ADF_PFVF_MSG_ACK_DELAY                 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY             100
+
+/* How often to retry if there is no response */
+#define ADF_PFVF_MSG_RESP_RETRIES      5
+#define ADF_PFVF_MSG_RESP_TIMEOUT      (ADF_PFVF_MSG_ACK_DELAY * \
+                                        ADF_PFVF_MSG_ACK_MAX_RETRY + \
+                                        ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg:       Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
+{
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
+
+       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
+                                 &accel_dev->vf.vf2pf_lock);
+}
+
+/**
+ * adf_recv_pf2vf_msg() - receive a PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ *
+ * This function allows the VF to receive a message from the PF.
+ *
+ * Return: a valid message on success, zero otherwise.
+ */
+static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
+       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
+
+       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg:       Request message to send
+ * @resp:      Returned PF response
+ *
+ * This function sends a message that requires a response from the VF to the PF
+ * and waits for a reply.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                      struct pfvf_message *resp)
+{
+       unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+       unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
+       int ret;
+
+       reinit_completion(&accel_dev->vf.msg_received);
+
+       /* Send request from VF to PF */
+       do {
+               ret = adf_send_vf2pf_msg(accel_dev, msg);
+               if (ret) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to send request msg to PF\n");
+                       return ret;
+               }
+
+               /* Wait for response, if it times out retry */
+               ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
+                                                 timeout);
+               if (ret) {
+                       if (likely(resp))
+                               *resp = accel_dev->vf.response;
+
+                       /* Once copied, set to an invalid value */
+                       accel_dev->vf.response.type = 0;
+
+                       return 0;
+               }
+
+               dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
+       } while (--retries);
+
+       return -EIO;
+}
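+
+/*
+ * Timing example for the retry loop above: with the constants defined at
+ * the top of this file, ADF_PFVF_MSG_RESP_TIMEOUT is 2 * 100 + 10 = 210 ms
+ * per attempt (milliseconds, given the msecs_to_jiffies() conversion), so
+ * with ADF_PFVF_MSG_RESP_RETRIES = 5 a request gives up after roughly one
+ * second if the PF never answers.
+ */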
+
+static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
+                                    u8 *type, u8 *data)
+{
+       struct pfvf_message req = { 0 };
+       struct pfvf_message resp = { 0 };
+       u8 blk_type;
+       u8 blk_byte;
+       u8 msg_type;
+       u8 max_data;
+       int err;
+
+       /* Convert the block type to {small, medium, large} size category */
+       if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
+               msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
+               blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
+               blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
+               max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
+       } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
+               msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
+               blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
+                                     *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
+               blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
+               max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
+       } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
+               msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
+               blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
+                                     *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
+               blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
+               max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
+       } else {
+               dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
+               return -EINVAL;
+       }
+
+       /* Sanity check */
+       if (*data > max_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid byte %s %u for message type %u\n",
+                       crc ? "count" : "index", *data, *type);
+               return -EINVAL;
+       }
+
+       /* Build the block message */
+       req.type = msg_type;
+       req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
+
+       err = adf_send_vf2pf_req(accel_dev, req, &resp);
+       if (err)
+               return err;
+
+       *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
+       *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
+
+       return 0;
+}
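+
+/*
+ * Example of the size-category dispatch above: the two block messages
+ * defined so far, CAP_SUMMARY (type 2) and RING_SVC_MAP (type 3), both
+ * fall in the small range (see the provider table in adf_pfvf_pf_proto.c),
+ * so they go out as SMALL_BLOCK_REQ messages with the type, byte index and
+ * CRC flag packed via the SMALL_BLOCK masks, and each one-byte answer is
+ * unpacked from the BLKMSG_RESP fields on return.
+ */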
+
+static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
+                                    u8 index, u8 *data)
+{
+       int ret;
+
+       ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
+       if (ret < 0)
+               return ret;
+
+       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Unexpected BLKMSG response type %u, byte 0x%x\n",
+                       type, index);
+               return -EFAULT;
+       }
+
+       *data = index;
+       return 0;
+}
+
+static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
+                                   u8 bytes, u8 *crc)
+{
+       int ret;
+
+       /* The wire format carries a 0-based count rather than a length, which
+        * avoids overflowing the byte field; convert the length accordingly
+        * (an encoded value of 0 thus means a CRC over one byte).
+        */
+       --bytes;
+
+       ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
+       if (ret < 0)
+               return ret;
+
+       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
+                       type, bytes);
+               return  -EFAULT;
+       }
+
+       *crc = bytes;
+       return 0;
+}
+
+/**
+ * adf_send_vf2pf_blkmsg_req() - retrieve block message
+ * @accel_dev: Pointer to acceleration VF device.
+ * @type:      The block message type, see adf_pfvf_msg.h for allowed values
+ * @buffer:    buffer where the received data will be placed
+ * @buffer_len:        buffer length on input, number of bytes written on output
+ *
+ * Request a message of type 'type' over the block message transport.
+ * This function sends the required number of block message requests and
+ * returns the overall content back to the caller through the provided buffer.
+ * The buffer should be large enough to contain the requested message type,
+ * otherwise the response will be truncated.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+                             u8 *buffer, unsigned int *buffer_len)
+{
+       unsigned int index;
+       unsigned int msg_len;
+       int ret;
+       u8 remote_crc;
+       u8 local_crc;
+
+       if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
+               dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
+                       type);
+               return -EINVAL;
+       }
+
+       if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Buffer size too small for a block message\n");
+               return -EINVAL;
+       }
+
+       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+                                       ADF_PFVF_BLKMSG_VER_BYTE,
+                                       &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
+       if (unlikely(ret))
+               return ret;
+
+       if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid version 0 received for block request %u\n", type);
+               return -EFAULT;
+       }
+
+       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
+                                       ADF_PFVF_BLKMSG_LEN_BYTE,
+                                       &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
+       if (unlikely(ret))
+               return ret;
+
+       if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid size 0 received for block request %u\n", type);
+               return -EFAULT;
+       }
+
+       /* We need to pick the minimum since there is no way to request a
+        * specific version. As a consequence any scenario is possible:
+        * - PF has a newer (longer) version which doesn't fit in the buffer
+        * - VF expects a newer (longer) version, so we must not ask for
+        *   bytes in excess
+        * - PF and VF share the same version, no problem
+        */
+       msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
+       msg_len = min(*buffer_len, msg_len);
+
+       /* Get the payload */
+       for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
+               ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
+                                               &buffer[index]);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
+       if (unlikely(ret))
+               return ret;
+
+       local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
+       if (unlikely(local_crc != remote_crc)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "CRC error on msg type %d. Local %02X, remote %02X\n",
+                       type, local_crc, remote_crc);
+               return -EIO;
+       }
+
+       *buffer_len = msg_len;
+       return 0;
+}
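+
+/*
+ * End-to-end sketch of a retrieval through the helper above (this mirrors
+ * adf_vf2pf_get_ring_to_svc() in adf_pfvf_vf_msg.c):
+ *
+ *     struct ring_to_svc_map_v1 map = { 0 };
+ *     unsigned int len = sizeof(map);
+ *     int ret;
+ *
+ *     ret = adf_send_vf2pf_blkmsg_req(accel_dev,
+ *                                     ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
+ *                                     (u8 *)&map, &len);
+ *
+ * Internally this requests the version byte, the length byte, each payload
+ * byte in turn, and finally a CRC over the full message, as coded above.
+ */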
+
+static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
+                                struct pfvf_message msg)
+{
+       switch (msg.type) {
+       case ADF_PF2VF_MSGTYPE_RESTARTING:
+               dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
+
+               adf_pf2vf_handle_pf_restarting(accel_dev);
+               return false;
+       case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+       case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
+       case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
+               dev_dbg(&GET_DEV(accel_dev),
+                       "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
+                       msg.type, msg.data);
+               accel_dev->vf.response = msg;
+               complete(&accel_dev->vf.msg_received);
+               return true;
+       default:
+               dev_err(&GET_DEV(accel_dev),
+                       "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
+                       msg.type, msg.data);
+       }
+
+       return false;
+}
+
+bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
+{
+       struct pfvf_message msg;
+
+       msg = adf_recv_pf2vf_msg(accel_dev);
+       if (msg.type)  /* Valid message received */
+               return adf_handle_pf2vf_msg(accel_dev, msg);
+
+       /* No replies for PF->VF messages at present */
+
+       return true;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       adf_pfvf_crc_init();
+       adf_enable_pf2vf_interrupts(accel_dev);
+
+       ret = adf_vf2pf_request_version(accel_dev);
+       if (ret)
+               return ret;
+
+       ret = adf_vf2pf_get_capabilities(accel_dev);
+       if (ret)
+               return ret;
+
+       ret = adf_vf2pf_get_ring_to_svc(accel_dev);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
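+
+/*
+ * Usage note: adf_enable_vf2pf_comms() is the VF's entry point into the
+ * protocol and performs the handshake sequence in order: CRC table setup,
+ * interrupt enablement, version negotiation, then the optional capability
+ * and ring-to-service queries, each of which quietly falls back to
+ * defaults when the PF is too old to support it.
+ */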
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.h
new file mode 100644 (file)
index 0000000..f6ee9b3
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2021 Intel Corporation */
+#ifndef ADF_PFVF_VF_PROTO_H
+#define ADF_PFVF_VF_PROTO_H
+
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
+int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
+                      struct pfvf_message *resp);
+int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
+                             u8 *buffer, unsigned int *buffer_len);
+
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
new file mode 100644 (file)
index 0000000..f44025b
--- /dev/null
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pfvf_pf_msg.h"
+
+#define ADF_VF2PF_RATELIMIT_INTERVAL   8
+#define ADF_VF2PF_RATELIMIT_BURST      130
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+struct adf_pf2vf_resp {
+       struct work_struct pf2vf_resp_work;
+       struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+       struct adf_pf2vf_resp *pf2vf_resp =
+               container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+       struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
+       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+       u32 vf_nr = vf_info->vf_nr;
+       bool ret;
+
+       ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
+       if (ret)
+               /* re-enable interrupt on PF from this VF */
+               adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
+
+       kfree(pf2vf_resp);
+}
+
+void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
+{
+       struct adf_pf2vf_resp *pf2vf_resp;
+
+       pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+       if (!pf2vf_resp)
+               return;
+
+       pf2vf_resp->vf_info = vf_info;
+       INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+       queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       int totalvfs = pci_sriov_get_totalvfs(pdev);
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_accel_vf_info *vf_info;
+       int i;
+
+       for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+            i++, vf_info++) {
+               /* This ptr will be populated when VFs are created */
+               vf_info->accel_dev = accel_dev;
+               vf_info->vf_nr = i;
+               vf_info->vf_compat_ver = 0;
+
+               mutex_init(&vf_info->pf2vf_lock);
+               ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+                                    ADF_VF2PF_RATELIMIT_INTERVAL,
+                                    ADF_VF2PF_RATELIMIT_BURST);
+       }
+
+       /* Set Valid bits in AE Thread to PCIe Function Mapping */
+       if (hw_data->configure_iov_threads)
+               hw_data->configure_iov_threads(accel_dev, true);
+
+       /* Enable VF to PF interrupts for all VFs */
+       adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
+
+       /*
+        * Due to the hardware design, when SR-IOV and the ring arbiter
+        * are enabled all the VFs supported in hardware must be enabled in
+        * order for all the hardware resources (i.e. bundles) to be usable.
+        * When SR-IOV is enabled, each of the VFs will own one bundle.
+        */
+       return pci_enable_sriov(pdev, totalvfs);
+}
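+
+/*
+ * Example for the interrupt mask above: a device exposing 16 VFs yields
+ * BIT_ULL(16) - 1 = 0xFFFF, i.e. one VF2PF interrupt enable bit per
+ * possible VF, set before the VFs themselves are enabled.
+ */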
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev:  Pointer to accel device.
+ *
+ * Function disables SRIOV for the accel device.
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+       struct adf_accel_vf_info *vf;
+       int i;
+
+       if (!accel_dev->pf.vf_info)
+               return;
+
+       adf_pf2vf_notify_restarting(accel_dev);
+       pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+       /* Disable VF to PF interrupts */
+       adf_disable_all_vf2pf_interrupts(accel_dev);
+
+       /* Clear Valid bits in AE Thread to PCIe Function Mapping */
+       if (hw_data->configure_iov_threads)
+               hw_data->configure_iov_threads(accel_dev, false);
+
+       for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
+               mutex_destroy(&vf->pf2vf_lock);
+
+       kfree(accel_dev->pf.vf_info);
+       accel_dev->pf.vf_info = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev:  Pointer to PCI device.
+ * @numvfs: Number of virtual functions (VFs) to enable.
+ *
+ * Note that the @numvfs parameter is ignored and all VFs supported by the
+ * device are enabled due to the design of the hardware.
+ *
+ * Function enables SRIOV for the PCI device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+       int totalvfs = pci_sriov_get_totalvfs(pdev);
+       unsigned long val;
+       int ret;
+
+       if (!accel_dev) {
+               dev_err(&pdev->dev, "Failed to find accel_dev\n");
+               return -EFAULT;
+       }
+
+       if (!device_iommu_mapped(&pdev->dev))
+               dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
+
+       if (accel_dev->pf.vf_info) {
+               dev_info(&pdev->dev, "Already enabled for this device\n");
+               return -EINVAL;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               if (adf_devmgr_in_reset(accel_dev) ||
+                   adf_dev_in_use(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Device busy\n");
+                       return -EBUSY;
+               }
+
+               ret = adf_dev_down(accel_dev, true);
+               if (ret)
+                       return ret;
+       }
+
+       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+               return -EFAULT;
+       val = 0;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
+               return -EFAULT;
+       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+                                         &val, ADF_DEC);
+       if (ret)
+               return ret;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+       /* Allocate memory for VF info structs */
+       accel_dev->pf.vf_info = kcalloc(totalvfs,
+                                       sizeof(struct adf_accel_vf_info),
+                                       GFP_KERNEL);
+       if (!accel_dev->pf.vf_info)
+               return -ENOMEM;
+
+       if (adf_dev_up(accel_dev, false)) {
+               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+                       accel_dev->accel_id);
+               return -EFAULT;
+       }
+
+       ret = adf_enable_sriov(accel_dev);
+       if (ret)
+               return ret;
+
+       return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
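+
+/*
+ * adf_sriov_configure() has the signature of a pci_driver .sriov_configure
+ * callback and is exported for the per-device drivers to use, so VFs are
+ * typically enabled from user space via the standard PCI sysfs knob, e.g.:
+ *
+ *     echo 16 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
+ *
+ * (the requested count is ignored and all VFs are enabled, as noted in
+ * the kernel-doc above).
+ */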
+
+int __init adf_init_pf_wq(void)
+{
+       /* Workqueue for PF2VF responses */
+       pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+
+       return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+       if (pf2vf_resp_wq) {
+               destroy_workqueue(pf2vf_resp_wq);
+               pf2vf_resp_wq = NULL;
+       }
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
new file mode 100644 (file)
index 0000000..3eb6611
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static const char * const state_operations[] = {
+       [DEV_DOWN] = "down",
+       [DEV_UP] = "up",
+};
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct adf_accel_dev *accel_dev;
+       char *state;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       state = adf_dev_started(accel_dev) ? "up" : "down";
+       return sysfs_emit(buf, "%s\n", state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct adf_accel_dev *accel_dev;
+       u32 accel_id;
+       int ret;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       accel_id = accel_dev->accel_id;
+
+       if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
+               dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
+               return -EBUSY;
+       }
+
+       ret = sysfs_match_string(state_operations, buf);
+       if (ret < 0)
+               return ret;
+
+       switch (ret) {
+       case DEV_DOWN:
+               dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+
+               ret = adf_dev_down(accel_dev, true);
+               if (ret < 0)
+                       return -EINVAL;
+
+               break;
+       case DEV_UP:
+               dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+
+               ret = adf_dev_up(accel_dev, true);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to start device qat_dev%d\n",
+                               accel_id);
+                       adf_dev_down(accel_dev, true);
+                       return ret;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static const char * const services_operations[] = {
+       ADF_CFG_CY,
+       ADF_CFG_DC,
+};
+
+static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
+                                char *buf)
+{
+       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+       struct adf_accel_dev *accel_dev;
+       int ret;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                     ADF_SERVICES_ENABLED, services);
+       if (ret)
+               return ret;
+
+       return sysfs_emit(buf, "%s\n", services);
+}
+
+static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
+                                      const char *services)
+{
+       return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                          ADF_SERVICES_ENABLED, services,
+                                          ADF_STR);
+}
+
+static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct adf_hw_device_data *hw_data;
+       struct adf_accel_dev *accel_dev;
+       int ret;
+
+       ret = sysfs_match_string(services_operations, buf);
+       if (ret < 0)
+               return ret;
+
+       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+       if (!accel_dev)
+               return -EINVAL;
+
+       if (adf_dev_started(accel_dev)) {
+               dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
+                        accel_dev->accel_id);
+               return -EINVAL;
+       }
+
+       ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
+       if (ret < 0)
+               return ret;
+
+       hw_data = GET_HW_DATA(accel_dev);
+
+       /* Update capabilities mask after change in configuration.
+        * A call to this function is required as capabilities are, at the
+        * moment, tied to configuration.
+        */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+       if (!hw_data->accel_capabilities_mask)
+               return -EINVAL;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(state);
+static DEVICE_ATTR_RW(cfg_services);
+
+static struct attribute *qat_attrs[] = {
+       &dev_attr_state.attr,
+       &dev_attr_cfg_services.attr,
+       NULL,
+};
+
+static struct attribute_group qat_group = {
+       .attrs = qat_attrs,
+       .name = "qat",
+};
+
+int adf_sysfs_init(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to create qat attribute group: %d\n", ret);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_sysfs_init);
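+
+/*
+ * The attributes above are exposed under a "qat" group on the PCI device,
+ * so typical usage from user space looks like:
+ *
+ *     cat /sys/bus/pci/devices/<BDF>/qat/state
+ *     echo down > /sys/bus/pci/devices/<BDF>/qat/state
+ *     echo up > /sys/bus/pci/devices/<BDF>/qat/state
+ *
+ * cfg_services accepts the strings listed in services_operations[]
+ * (ADF_CFG_CY or ADF_CFG_DC) and, per the checks in cfg_services_store(),
+ * only while the device is down.
+ */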
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
new file mode 100644 (file)
index 0000000..630d048
--- /dev/null
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/delay.h>
+#include <linux/nospec.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+#define ADF_MAX_RING_THRESHOLD         80
+#define ADF_PERCENT(tot, percent)      (((tot) * (percent)) / 100)
+
+static inline u32 adf_modulo(u32 data, u32 shift)
+{
+       u32 div = data >> shift;
+       u32 mult = div << shift;
+
+       return data - mult;
+}
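+
+/*
+ * adf_modulo(data, shift) is data % (1 << shift) computed without a
+ * division: e.g. adf_modulo(0x1234, 10) == 0x1234 & 0x3FF == 0x234. It is
+ * used below to wrap ring head/tail offsets, which is why ring sizes are
+ * powers of two.
+ */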
+
+static inline int adf_check_ring_alignment(u64 addr, u64 size)
+{
+       if (((size - 1) & addr) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
+{
+       int i = ADF_MIN_RING_SIZE;
+
+       for (; i <= ADF_MAX_RING_SIZE; i++)
+               if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+                       return i;
+
+       return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
+{
+       spin_lock(&bank->lock);
+       if (bank->ring_mask & (1 << ring)) {
+               spin_unlock(&bank->lock);
+               return -EFAULT;
+       }
+       bank->ring_mask |= (1 << ring);
+       spin_unlock(&bank->lock);
+       return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
+{
+       spin_lock(&bank->lock);
+       bank->ring_mask &= ~(1 << ring);
+       spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask |= (1 << ring);
+       spin_unlock_bh(&bank->lock);
+       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+                                     bank->irq_mask);
+       csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
+                                      bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask &= ~(1 << ring);
+       spin_unlock_bh(&bank->lock);
+       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+                                     bank->irq_mask);
+}
+
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+       return atomic_read(ring->inflights) > ring->threshold;
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+
+       if (atomic_add_return(1, ring->inflights) >
+           ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+               atomic_dec(ring->inflights);
+               return -EAGAIN;
+       }
+       spin_lock_bh(&ring->lock);
+       memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
+              ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+       ring->tail = adf_modulo(ring->tail +
+                               ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                               ADF_RING_SIZE_MODULO(ring->ring_size));
+       csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
+                                    ring->bank->bank_number, ring->ring_number,
+                                    ring->tail);
+       spin_unlock_bh(&ring->lock);
+
+       return 0;
+}
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+       u32 msg_counter = 0;
+       u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
+
+       while (*msg != ADF_RING_EMPTY_SIG) {
+               ring->callback((u32 *)msg);
+               atomic_dec(ring->inflights);
+               *msg = ADF_RING_EMPTY_SIG;
+               ring->head = adf_modulo(ring->head +
+                                       ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                                       ADF_RING_SIZE_MODULO(ring->ring_size));
+               msg_counter++;
+               msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
+       }
+       if (msg_counter > 0) {
+               csr_ops->write_csr_ring_head(ring->bank->csr_addr,
+                                            ring->bank->bank_number,
+                                            ring->ring_number, ring->head);
+       }
+       return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+       u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+                                      ring->bank->bank_number,
+                                      ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+       u32 ring_config =
+                       BUILD_RESP_RING_CONFIG(ring->ring_size,
+                                              ADF_RING_NEAR_WATERMARK_512,
+                                              ADF_RING_NEAR_WATERMARK_0);
+
+       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+                                      ring->bank->bank_number,
+                                      ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       u64 ring_base;
+       u32 ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+       ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                            ring_size_bytes, &ring->dma_addr,
+                                            GFP_KERNEL);
+       if (!ring->base_addr)
+               return -ENOMEM;
+
+       memset(ring->base_addr, 0x7F, ring_size_bytes);
+       /* The base_addr has to be aligned to the size of the buffer */
+       if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+               dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
+               dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+                                 ring->base_addr, ring->dma_addr);
+               ring->base_addr = NULL;
+               return -EFAULT;
+       }
+
+       if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+               adf_configure_tx_ring(ring);
+       else
+               adf_configure_rx_ring(ring);
+
+       ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
+                                                     ring->ring_size);
+
+       csr_ops->write_csr_ring_base(ring->bank->csr_addr,
+                                    ring->bank->bank_number, ring->ring_number,
+                                    ring_base);
+       spin_lock_init(&ring->lock);
+       return 0;
+}
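[editor's note] adf_check_ring_alignment() is not part of this hunk; since the ring sizes here are powers of two, the check it performs presumably reduces to a mask test, along these lines (a sketch, not the driver's actual helper):

    /* Non-zero when the DMA address is not aligned to the ring size */
    static inline u64 example_ring_misaligned(u64 addr, u64 size)
    {
            return addr & (size - 1);       /* size must be a power of two */
    }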
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+       u32 ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+       if (ring->base_addr) {
+               memset(ring->base_addr, 0x7F, ring_size_bytes);
+               dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+                                 ring_size_bytes, ring->base_addr,
+                                 ring->dma_addr);
+       }
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   u32 bank_num, u32 num_msgs,
+                   u32 msg_size, const char *ring_name,
+                   adf_callback_fn callback, int poll_mode,
+                   struct adf_etr_ring_data **ring_ptr)
+{
+       struct adf_etr_data *transport_data = accel_dev->transport;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+       struct adf_etr_bank_data *bank;
+       struct adf_etr_ring_data *ring;
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       int max_inflights;
+       u32 ring_num;
+       int ret;
+
+       if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
+               return -EFAULT;
+       }
+       if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+               dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
+               return -EFAULT;
+       }
+       if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+                             ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Invalid ring size for given msg size\n");
+               return -EFAULT;
+       }
+       if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+               dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
+                       section, ring_name);
+               return -EFAULT;
+       }
+       if (kstrtouint(val, 10, &ring_num)) {
+               dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
+               return -EFAULT;
+       }
+       if (ring_num >= num_rings_per_bank) {
+               dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+               return -EFAULT;
+       }
+
+       ring_num = array_index_nospec(ring_num, num_rings_per_bank);
+       bank = &transport_data->banks[bank_num];
+       if (adf_reserve_ring(bank, ring_num)) {
+               dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
+                       ring_num, ring_name);
+               return -EFAULT;
+       }
+       ring = &bank->rings[ring_num];
+       ring->ring_number = ring_num;
+       ring->bank = bank;
+       ring->callback = callback;
+       ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+       ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+       ring->head = 0;
+       ring->tail = 0;
+       max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+       ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
+       atomic_set(ring->inflights, 0);
+       ret = adf_init_ring(ring);
+       if (ret)
+               goto err;
+
+       /* Enable HW arbitration for the given ring */
+       adf_update_ring_arb(ring);
+
+       if (adf_ring_debugfs_add(ring, ring_name)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Couldn't add ring debugfs entry\n");
+               ret = -EFAULT;
+               goto err;
+       }
+
+       /* Enable interrupts if needed */
+       if (callback && (!poll_mode))
+               adf_enable_ring_irq(bank, ring->ring_number);
+       *ring_ptr = ring;
+       return 0;
+err:
+       adf_cleanup_ring(ring);
+       adf_unreserve_ring(bank, ring_num);
+       adf_update_ring_arb(ring);
+       return ret;
+}
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       /* Disable interrupts for the given ring */
+       adf_disable_ring_irq(bank, ring->ring_number);
+
+       /* Clear the ring's CSRs (config and base address) */
+       csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
+                                      ring->ring_number, 0);
+       csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
+                                    ring->ring_number, 0);
+       adf_ring_debugfs_rm(ring);
+       adf_unreserve_ring(bank, ring->ring_number);
+       /* Disable HW arbitration for the given ring */
+       adf_update_ring_arb(ring);
+       adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+       unsigned long empty_rings;
+       int i;
+
+       empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
+                                              bank->bank_number);
+       empty_rings = ~empty_rings & bank->irq_mask;
+
+       for_each_set_bit(i, &empty_rings, num_rings_per_bank)
+               adf_handle_response(&bank->rings[i]);
+}
+
+void adf_response_handler(uintptr_t bank_addr)
+{
+       struct adf_etr_bank_data *bank = (void *)bank_addr;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       /* Handle all the responses and reenable IRQs */
+       adf_ring_response_handler(bank);
+
+       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+                                           bank->irq_mask);
+}
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+                                 const char *section, const char *format,
+                                 u32 key, u32 *value)
+{
+       char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+       if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+               return -EFAULT;
+
+       if (kstrtouint(val_buf, 10, value))
+               return -EFAULT;
+       return 0;
+}
+
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+                                 const char *section,
+                                 u32 bank_num_in_accel)
+{
+       if (adf_get_cfg_int(bank->accel_dev, section,
+                           ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+                           bank_num_in_accel, &bank->irq_coalesc_timer))
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+       if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+           ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+                        struct adf_etr_bank_data *bank,
+                        u32 bank_num, void __iomem *csr_addr)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+       u32 irq_mask = BIT(num_rings_per_bank) - 1;
+       struct adf_etr_ring_data *ring;
+       struct adf_etr_ring_data *tx_ring;
+       u32 i, coalesc_enabled = 0;
+       unsigned long ring_mask;
+       int size;
+
+       memset(bank, 0, sizeof(*bank));
+       bank->bank_number = bank_num;
+       bank->csr_addr = csr_addr;
+       bank->accel_dev = accel_dev;
+       spin_lock_init(&bank->lock);
+
+       /* Allocate the rings in the bank */
+       size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
+       bank->rings = kzalloc_node(size, GFP_KERNEL,
+                                  dev_to_node(&GET_DEV(accel_dev)));
+       if (!bank->rings)
+               return -ENOMEM;
+
+       /* Always enable IRQ coalescing, so the optimised flag and
+        * coalescing registers can be used. If coalescing is disabled
+        * in the config file, just use the minimum time value.
+        */
+       if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+                            ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+                            &coalesc_enabled) == 0) && coalesc_enabled)
+               adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
+       else
+               bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+       for (i = 0; i < num_rings_per_bank; i++) {
+               csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
+               csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
+
+               ring = &bank->rings[i];
+               if (hw_data->tx_rings_mask & (1 << i)) {
+                       ring->inflights =
+                               kzalloc_node(sizeof(atomic_t),
+                                            GFP_KERNEL,
+                                            dev_to_node(&GET_DEV(accel_dev)));
+                       if (!ring->inflights)
+                               goto err;
+               } else {
+                       if (i < hw_data->tx_rx_gap) {
+                               dev_err(&GET_DEV(accel_dev),
+                                       "Invalid tx rings mask config\n");
+                               goto err;
+                       }
+                       tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+                       ring->inflights = tx_ring->inflights;
+               }
+       }
+       if (adf_bank_debugfs_add(bank)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to add bank debugfs entry\n");
+               goto err;
+       }
+
+       csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
+       csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
+
+       return 0;
+err:
+       ring_mask = hw_data->tx_rings_mask;
+       for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
+               ring = &bank->rings[i];
+               kfree(ring->inflights);
+               ring->inflights = NULL;
+       }
+       kfree(bank->rings);
+       return -ENOMEM;
+}
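[editor's note] The inflight sharing in the loop above is worth spelling out. With GEN2-style values tx_rings_mask = 0xFF and tx_rx_gap = 8 (used here only as an illustration), the pairing looks like:

    /*
     *   ring 0 (tx)  <--shared atomic_t inflights-->  ring 8 (rx)
     *   ring 1 (tx)  <--shared atomic_t inflights-->  ring 9 (rx)
     *   ...
     *
     * adf_send_message() increments the counter on the tx ring and
     * adf_handle_response() decrements it on the paired rx ring, so each
     * request/response pair is throttled as a single queue.
     */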
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) for the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr_addr;
+       u32 size;
+       u32 num_banks = 0;
+       int i, ret;
+
+       etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+                               dev_to_node(&GET_DEV(accel_dev)));
+       if (!etr_data)
+               return -ENOMEM;
+
+       num_banks = GET_MAX_BANKS(accel_dev);
+       size = num_banks * sizeof(struct adf_etr_bank_data);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+                                      dev_to_node(&GET_DEV(accel_dev)));
+       if (!etr_data->banks) {
+               ret = -ENOMEM;
+               goto err_bank;
+       }
+
+       accel_dev->transport = etr_data;
+       i = hw_data->get_etr_bar_id(hw_data);
+       csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       etr_data->debug = debugfs_create_dir("transport",
+                                            accel_dev->debugfs_dir);
+
+       for (i = 0; i < num_banks; i++) {
+               ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+                                   csr_addr);
+               if (ret)
+                       goto err_bank_all;
+       }
+
+       return 0;
+
+err_bank_all:
+       debugfs_remove(etr_data->debug);
+       kfree(etr_data->banks);
+err_bank:
+       kfree(etr_data);
+       accel_dev->transport = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+       u32 i;
+
+       for (i = 0; i < num_rings_per_bank; i++) {
+               struct adf_etr_ring_data *ring = &bank->rings[i];
+
+               if (bank->ring_mask & (1 << i))
+                       adf_cleanup_ring(ring);
+
+               if (hw_data->tx_rings_mask & (1 << i))
+                       kfree(ring->inflights);
+       }
+       kfree(bank->rings);
+       adf_bank_debugfs_rm(bank);
+       memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       u32 i, num_banks = GET_MAX_BANKS(accel_dev);
+
+       for (i = 0; i < num_banks; i++)
+               cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+
+       if (etr_data) {
+               adf_cleanup_etr_handles(accel_dev);
+               debugfs_remove(etr_data->debug);
+               kfree(etr_data->banks->rings);
+               kfree(etr_data->banks);
+               kfree(etr_data);
+               accel_dev->transport = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
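[editor's note] A hedged sketch of how these two exported helpers pair up in a device-specific driver's bring-up and teardown paths (function names are illustrative; the real call sites live in the per-device code, not in this hunk):

    static int example_dev_init(struct adf_accel_dev *accel_dev)
    {
            int ret;

            ret = adf_init_etr_data(accel_dev);     /* banks, rings, debugfs */
            if (ret)
                    return ret;
            /* ... continue with admin queues, firmware load, ISRs ... */
            return 0;
    }

    static void example_dev_shutdown(struct adf_accel_dev *accel_dev)
    {
            adf_cleanup_etr_data(accel_dev);        /* frees what init built */
    }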
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.h b/drivers/crypto/intel/qat/qat_common/adf_transport.h
new file mode 100644 (file)
index 0000000..e6ef6f9
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   u32 bank_num, u32 num_msgs, u32 msg_size,
+                   const char *ring_name, adf_callback_fn callback,
+                   int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
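[editor's note] The header above is the entire surface other modules see. A sketch of creating a request/response ring pair with it (the section and key strings are illustrative placeholders, not values defined in this patch; the ring name must match a config entry holding the ring number):

    static void example_resp_cb(void *resp_msg)
    {
            /* decode the firmware response in *resp_msg */
    }

    static int example_create_pair(struct adf_accel_dev *accel_dev,
                                   struct adf_etr_ring_data **tx,
                                   struct adf_etr_ring_data **rx)
    {
            int ret;

            /* Tx ring: no callback, so no IRQ is enabled for it */
            ret = adf_create_ring(accel_dev, "EXAMPLE", 0, 512, 64,
                                  "ExampleTx", NULL, 0, tx);
            if (ret)
                    return ret;

            /* Rx ring: callback plus poll_mode == 0 enables its IRQ */
            ret = adf_create_ring(accel_dev, "EXAMPLE", 0, 512, 64,
                                  "ExampleRx", example_resp_cb, 0, rx);
            if (ret)
                    adf_remove_ring(*tx);
            return ret;
    }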
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/intel/qat/qat_common/adf_transport_access_macros.h
new file mode 100644 (file)
index 0000000..d3667db
--- /dev/null
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) ((SIZE) << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) ((SIZE) >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << ((SIZE) - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << ((SIZE) - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+       (((SIZE) < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+               ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : (SIZE))
+#define ADF_RING_SIZE_MODULO(SIZE) ((SIZE) + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) (((((SIZE) & 0x4) >> 1) | (((SIZE) & 0x4) >> 2) | \
+                               (SIZE)) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+       ((((1 << ((RING_SIZE) - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
+#define BUILD_RING_CONFIG(size)        \
+       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#endif
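[editor's note] Since all of these encodings are log-scale, a worked example helps; the numbers below follow directly from the macros:

    /* ADF_MSG_SIZE_TO_BYTES(ADF_MSG_SIZE_64)   = 0x02 << 5      = 64 bytes
     * ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_16K)
     *                                          = (1 << 7) << 7  = 16384 bytes
     * ADF_MAX_INFLIGHTS(ADF_RING_SIZE_16K, ADF_MSG_SIZE_64)
     *                                          = 16384 / 64 - 1 = 255
     * The "- 1" keeps one slot unused, presumably so a completely full
     * ring stays distinguishable from an empty one.
     */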
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
new file mode 100644 (file)
index 0000000..08bca1c
--- /dev/null
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       mutex_lock(&ring_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+       void __iomem *csr = ring->bank->csr_addr;
+
+       if (v == SEQ_START_TOKEN) {
+               int head, tail, empty;
+
+               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
+                                                  ring->ring_number);
+               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
+                                                  ring->ring_number);
+               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
+
+               seq_puts(sfile, "------- Ring configuration -------\n");
+               seq_printf(sfile, "ring name: %s\n",
+                          ring->ring_debug->ring_name);
+               seq_printf(sfile, "ring num %d, bank num %d\n",
+                          ring->ring_number, ring->bank->bank_number);
+               seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+                          head, tail, (empty & 1 << ring->ring_number)
+                          >> ring->ring_number);
+               seq_printf(sfile, "ring size %lld, msg size %d\n",
+                          (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+                          ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+               seq_puts(sfile, "----------- Ring data ------------\n");
+               return 0;
+       }
+       seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
+                    v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
+       return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_debug_sops = {
+       .start = adf_ring_start,
+       .next = adf_ring_next,
+       .stop = adf_ring_stop,
+       .show = adf_ring_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+       struct adf_etr_ring_debug_entry *ring_debug;
+       char entry_name[8];
+
+       ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+       if (!ring_debug)
+               return -ENOMEM;
+
+       strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+       snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+                ring->ring_number);
+
+       ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+                                               ring->bank->bank_debug_dir,
+                                               ring, &adf_ring_debug_fops);
+       ring->ring_debug = ring_debug;
+       return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+       if (ring->ring_debug) {
+               debugfs_remove(ring->ring_debug->debug);
+               kfree(ring->ring_debug);
+               ring->ring_debug = NULL;
+       }
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
+
+       mutex_lock(&bank_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= num_rings_per_bank)
+               return NULL;
+
+       return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
+
+       if (++(*pos) >= num_rings_per_bank)
+               return NULL;
+
+       return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(sfile, "------- Bank %d configuration -------\n",
+                          bank->bank_number);
+       } else {
+               int ring_id = *((int *)v) - 1;
+               struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+               void __iomem *csr = bank->csr_addr;
+               int head, tail, empty;
+
+               if (!(bank->ring_mask & 1 << ring_id))
+                       return 0;
+
+               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
+                                                  ring->ring_number);
+               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
+                                                  ring->ring_number);
+               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
+
+               seq_printf(sfile,
+                          "ring num %02d, head %04x, tail %04x, empty: %d\n",
+                          ring->ring_number, head, tail,
+                          (empty & 1 << ring->ring_number) >>
+                          ring->ring_number);
+       }
+       return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_debug_sops = {
+       .start = adf_bank_start,
+       .next = adf_bank_next,
+       .stop = adf_bank_stop,
+       .show = adf_bank_show
+};
+
+DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
+
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct dentry *parent = accel_dev->transport->debug;
+       char name[8];
+
+       snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+       bank->bank_debug_dir = debugfs_create_dir(name, parent);
+       bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+                                                  bank->bank_debug_dir, bank,
+                                                  &adf_bank_debug_fops);
+       return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+       debugfs_remove(bank->bank_debug_cfg);
+       debugfs_remove(bank->bank_debug_dir);
+}
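[editor's note] Together the two seq_file implementations above create a small tree under the device's debugfs directory; an illustrative layout and read (the formats come from the seq_printf calls above, the concrete values are invented):

    <debugfs>/<device dir>/transport/bank_00/config
    <debugfs>/<device dir>/transport/bank_00/ring_00
    ...

    Reading bank_00/config might then show:

    ------- Bank 0 configuration -------
    ring num 00, head 0040, tail 0040, empty: 1
    ring num 08, head 0000, tail 0040, empty: 0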
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h b/drivers/crypto/intel/qat/qat_common/adf_transport_internal.h
new file mode 100644 (file)
index 0000000..8b2c92b
--- /dev/null
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+       char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+       void *base_addr;
+       atomic_t *inflights;
+       adf_callback_fn callback;
+       struct adf_etr_bank_data *bank;
+       dma_addr_t dma_addr;
+       struct adf_etr_ring_debug_entry *ring_debug;
+       spinlock_t lock;        /* protects ring data struct */
+       u16 head;
+       u16 tail;
+       u32 threshold;
+       u8 ring_number;
+       u8 ring_size;
+       u8 msg_size;
+};
+
+struct adf_etr_bank_data {
+       struct adf_etr_ring_data *rings;
+       struct tasklet_struct resp_handler;
+       void __iomem *csr_addr;
+       u32 irq_coalesc_timer;
+       u32 bank_number;
+       u16 ring_mask;
+       u16 irq_mask;
+       spinlock_t lock;        /* protects bank data struct */
+       struct adf_accel_dev *accel_dev;
+       struct dentry *bank_debug_dir;
+       struct dentry *bank_debug_cfg;
+};
+
+struct adf_etr_data {
+       struct adf_etr_bank_data *banks;
+       struct dentry *debug;
+};
+
+void adf_response_handler(uintptr_t bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+                                      const char *name)
+{
+       return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
new file mode 100644 (file)
index 0000000..b05c395
--- /dev/null
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_cfg_common.h"
+#include "adf_transport_access_macros.h"
+#include "adf_transport_internal.h"
+
+#define ADF_VINTSOU_OFFSET     0x204
+#define ADF_VINTMSK_OFFSET     0x208
+#define ADF_VINTSOU_BUN                BIT(0)
+#define ADF_VINTSOU_PF2VF      BIT(1)
+
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+       struct adf_accel_dev *accel_dev;
+       struct work_struct work;
+};
+
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
+}
+
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
+                                        PCI_IRQ_MSI);
+
+       if (unlikely(stat < 0)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to enable MSI interrupt: %d\n", stat);
+               return stat;
+       }
+
+       return 0;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       pci_free_irq_vectors(pdev);
+}
+
+static void adf_dev_stop_async(struct work_struct *work)
+{
+       struct adf_vf_stop_data *stop_data =
+               container_of(work, struct adf_vf_stop_data, work);
+       struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+       adf_dev_restarting_notify(accel_dev);
+       adf_dev_down(accel_dev, false);
+
+       /* Re-enable PF2VF interrupts */
+       adf_enable_pf2vf_interrupts(accel_dev);
+       kfree(stop_data);
+}
+
+int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
+{
+       struct adf_vf_stop_data *stop_data;
+
+       clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+       stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+       if (!stop_data) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Couldn't schedule stop for vf_%d\n",
+                       accel_dev->accel_id);
+               return -ENOMEM;
+       }
+       stop_data->accel_dev = accel_dev;
+       INIT_WORK(&stop_data->work, adf_dev_stop_async);
+       queue_work(adf_vf_stop_wq, &stop_data->work);
+
+       return 0;
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+       struct adf_accel_dev *accel_dev = data;
+       bool ret;
+
+       ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
+       if (ret)
+               /* Re-enable PF2VF interrupts */
+               adf_enable_pf2vf_interrupts(accel_dev);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+       tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+                    (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+       mutex_init(&accel_dev->vf.vf2pf_lock);
+       return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+       tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+       tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+       mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+       struct adf_accel_dev *accel_dev = privdata;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+       struct adf_bar *pmisc =
+                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+       void __iomem *pmisc_bar_addr = pmisc->virt_addr;
+       bool handled = false;
+       u32 v_int, v_mask;
+
+       /* Read VF INT source CSR to determine the source of VF interrupt */
+       v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
+
+       /* Read VF INT mask CSR to determine which sources are masked */
+       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
+
+       /*
+        * Recompute v_int ignoring sources that are masked. This is to
+        * avoid rescheduling the tasklet for interrupts already handled
+        */
+       v_int &= ~v_mask;
+
+       /* Check for PF2VF interrupt */
+       if (v_int & ADF_VINTSOU_PF2VF) {
+               /* Disable PF to VF interrupt */
+               adf_disable_pf2vf_interrupts(accel_dev);
+
+               /* Schedule tasklet to handle interrupt BH */
+               tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+               handled = true;
+       }
+
+       /* Check bundle interrupt */
+       if (v_int & ADF_VINTSOU_BUN) {
+               struct adf_etr_data *etr_data = accel_dev->transport;
+               struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+               /* Disable Flag and Coalesce Ring Interrupts */
+               csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
+                                                   bank->bank_number, 0);
+               tasklet_hi_schedule(&bank->resp_handler);
+               handled = true;
+       }
+
+       return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       unsigned int cpu;
+       int ret;
+
+       snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+                PCI_FUNC(pdev->devfn));
+       ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+                         (void *)accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+                       accel_dev->vf.irq_name);
+               return ret;
+       }
+       cpu = accel_dev->accel_id % num_online_cpus();
+       irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+       accel_dev->vf.irq_enabled = true;
+
+       return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+
+       tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+                    (unsigned long)priv_data->banks);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+
+       tasklet_disable(&priv_data->banks[0].resp_handler);
+       tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+/**
+ * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function frees interrupts for acceleration device virtual function.
+ */
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       if (accel_dev->vf.irq_enabled) {
+               irq_set_affinity_hint(pdev->irq, NULL);
+               free_irq(pdev->irq, accel_dev);
+       }
+       adf_cleanup_bh(accel_dev);
+       adf_cleanup_pf2vf_bh(accel_dev);
+       adf_disable_msi(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
+
+/**
+ * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function allocates interrupts for acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       if (adf_enable_msi(accel_dev))
+               goto err_out;
+
+       if (adf_setup_pf2vf_bh(accel_dev))
+               goto err_disable_msi;
+
+       if (adf_setup_bh(accel_dev))
+               goto err_cleanup_pf2vf_bh;
+
+       if (adf_request_msi_irq(accel_dev))
+               goto err_cleanup_bh;
+
+       return 0;
+
+err_cleanup_bh:
+       adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+       adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+       adf_disable_msi(accel_dev);
+
+err_out:
+       return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+/**
+ * adf_flush_vf_wq() - Flush workqueue for VF
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables the PF/VF interrupts on the VF so that no new messages
+ * are received and flushes the workqueue 'adf_vf_stop_wq'.
+ *
+ * Return: void.
+ */
+void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
+{
+       adf_disable_pf2vf_interrupts(accel_dev);
+
+       flush_workqueue(adf_vf_stop_wq);
+}
+EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
+
+/**
+ * adf_init_vf_wq() - Init workqueue for VF
+ *
+ * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int __init adf_init_vf_wq(void)
+{
+       adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+
+       return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+       if (adf_vf_stop_wq)
+               destroy_workqueue(adf_vf_stop_wq);
+
+       adf_vf_stop_wq = NULL;
+}
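[editor's note] adf_init_vf_wq() and adf_exit_vf_wq() are meant to bracket the module lifetime; a sketch of the pairing (hypothetical init/exit functions; the real call sites live elsewhere in qat_common, not in this hunk):

    static int __init example_module_init(void)
    {
            int ret;

            ret = adf_init_vf_wq();
            if (ret)
                    return ret;
            /* ... register the rest of the common module ... */
            return 0;
    }

    static void __exit example_module_exit(void)
    {
            /* safe even if init failed: adf_exit_vf_wq() checks for NULL */
            adf_exit_vf_wq();
    }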
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
new file mode 100644 (file)
index 0000000..c141160
--- /dev/null
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+               (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+       (((flags) >> (bitpos)) & (mask))
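[editor's note] QAT_FIELD_SET/GET are plain read-modify-write helpers over a flags word. A worked example using the valid-flag position and mask defined later in this header (bitpos 7, mask 0x1):

    u8 hdr_flags = 0;

    QAT_FIELD_SET(hdr_flags, 1, ICP_QAT_FW_COMN_VALID_FLAG_BITPOS,
                  ICP_QAT_FW_COMN_VALID_FLAG_MASK);
    /* hdr_flags is now 0x80 */

    QAT_FIELD_GET(hdr_flags, ICP_QAT_FW_COMN_VALID_FLAG_BITPOS,
                  ICP_QAT_FW_COMN_VALID_FLAG_MASK);   /* evaluates to 1 */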
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+       ICP_QAT_FW_COMN_RESP_SERV_NULL,
+       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+       ICP_QAT_FW_COMN_REQ_NULL = 0,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+       ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 serv_specif_fields[4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+       __u64 opaque_data;
+       __u64 src_data_addr;
+       __u64 dest_data_addr;
+       __u32 src_length;
+       __u32 dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+       __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+       __u8 resrvd1;
+       __u8 service_cmd_id;
+       __u8 service_type;
+       __u8 hdr_flags;
+       __u16 serv_specif_flags;
+       __u16 comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+       __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+       __u8 xlat_err_code;
+       __u8 cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+       __u8 resrvd1;
+       __u8 service_id;
+       __u8 response_type;
+       __u8 hdr_flags;
+       struct icp_qat_fw_comn_error comn_error;
+       __u8 comn_status;
+       __u8 cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_hdr;
+       __u64 opaque_data;
+       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+                       QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+       QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+       QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+       ICP_QAT_FW_SLICE_NULL = 0,
+       ICP_QAT_FW_SLICE_CIPHER = 1,
+       ICP_QAT_FW_SLICE_AUTH = 2,
+       ICP_QAT_FW_SLICE_DRAM_RD = 3,
+       ICP_QAT_FW_SLICE_DRAM_WR = 4,
+       ICP_QAT_FW_SLICE_COMP = 5,
+       ICP_QAT_FW_SLICE_XLAT = 6,
+       ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
new file mode 100644 (file)
index 0000000..a03d43f
--- /dev/null
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+       ICP_QAT_FW_COMP_CMD_STATIC = 0,
+       ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+       ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+       ICP_QAT_FW_COMP_CMD_DELIMITER
+};
+
+enum icp_qat_fw_comp_20_cmd_id {
+       ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3,
+       ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
+       ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
+       ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
+       ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
+       ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
+       ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
+       ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
+       ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
+       ICP_QAT_FW_COMP_20_CMD_DELIMITER
+};
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+       ret_uncomp, secure_ram) \
+       ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \
+       ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+       (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \
+       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+       (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \
+       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+       (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \
+       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+       (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \
+       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
+       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \
+       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \
+       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)
+
+#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \
+       QAT_FIELD_GET(flags, \
+       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \
+       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)
+
+#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \
+       QAT_FIELD_GET(flags, \
+       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \
+       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)
+
+struct icp_qat_fw_comp_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+                       __u32 content_desc_resrvd4;
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_comp_req_params {
+       __u32 comp_len;
+       __u32 out_buffer_sz;
+       union {
+               struct {
+                       __u32 initial_crc32;
+                       __u32 initial_adler;
+               } legacy;
+               __u64 crc_data_addr;
+       } crc;
+       __u32 req_par_flags;
+       __u32 rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
+                                             cnvdfx, crc, xxhash_acc, \
+                                             cnv_error_type, append_crc, \
+                                             drop_data) \
+       ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
+       ICP_QAT_FW_COMP_SOP_BITPOS) | \
+       (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
+       ICP_QAT_FW_COMP_EOP_BITPOS) | \
+       (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
+       << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+       (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \
+       ICP_QAT_FW_COMP_CNV_BITPOS) | \
+       (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \
+       << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
+       (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
+       << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
+       (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
+       << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \
+       (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \
+       << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \
+       (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \
+       << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \
+       (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
+       << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
+       (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
+       << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+#define ICP_QAT_FW_COMP_SOP 1
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+#define ICP_QAT_FW_COMP_EOP 1
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+#define ICP_QAT_FW_COMP_BFINAL 1
+#define ICP_QAT_FW_COMP_NO_CNV 0
+#define ICP_QAT_FW_COMP_CNV 1
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
+#define ICP_QAT_FW_COMP_CNV_DFX 1
+#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
+#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1
+#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0
+#define ICP_QAT_FW_COMP_XXHASH_ACC 1
+#define ICP_QAT_FW_COMP_APPEND_CRC 1
+#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
+#define ICP_QAT_FW_COMP_DROP_DATA 1
+#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
+#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
+#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
+#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
+#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
+#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21
+#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111
+#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000
+#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011
+#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100
+#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101
+#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24
+#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
+#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
+#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+
+#define ICP_QAT_FW_COMP_SOP_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
+       ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \
+       ICP_QAT_FW_COMP_SOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \
+       ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \
+       ICP_QAT_FW_COMP_EOP_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+       ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
+       ICP_QAT_FW_COMP_BFINAL_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_MASK)
+
+#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \
+       ICP_QAT_FW_COMP_CNVNR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_DFX_MASK)
+
+#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
+       ICP_QAT_FW_COMP_CRC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
+       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
+#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
+       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
+
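+/*
+ * Illustrative usage (editor's sketch): request parameter flags for a
+ * single-shot request covering a whole buffer: start and end of packet,
+ * final deflate block, compress-and-verify with recovery, and the
+ * optional features disabled. All constants are defined above:
+ *
+ *	req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ *			ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ *			ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ *			ICP_QAT_FW_COMP_CNV_RECOVERY,
+ *			ICP_QAT_FW_COMP_NO_CNV_DFX,
+ *			ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
+ *			ICP_QAT_FW_COMP_NO_XXHASH_ACC,
+ *			ICP_QAT_FW_COMP_CNV_ERROR_NONE,
+ *			ICP_QAT_FW_COMP_NO_APPEND_CRC,
+ *			ICP_QAT_FW_COMP_NO_DROP_DATA);
+ */
+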
+struct icp_qat_fw_xlt_req_params {
+       __u64 inter_buff_ptr;
+};
+
+struct icp_qat_fw_comp_cd_hdr {
+       __u16 ram_bank_flags;
+       __u8 comp_cfg_offset;
+       __u8 next_curr_id;
+       __u32 resrvd;
+       __u64 comp_state_addr;
+       __u64 ram_banks_addr;
+};
+
+#define COMP_CPR_INITIAL_CRC 0
+#define COMP_CPR_INITIAL_ADLER 1
+
+struct icp_qat_fw_xlt_cd_hdr {
+       __u16 resrvd1;
+       __u8 resrvd2;
+       __u8 next_curr_id;
+       __u32 resrvd3;
+};
+
+struct icp_qat_fw_comp_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comp_req_params comp_pars;
+       union {
+               struct icp_qat_fw_xlt_req_params xlt_pars;
+               __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+       } u1;
+       __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+       struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+       union {
+               struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+       } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+       __u32 input_byte_counter;
+       __u32 output_byte_counter;
+       union {
+               struct {
+                       __u32 curr_crc32;
+                       __u32 curr_adler_32;
+               } legacy;
+               __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2];
+       } crc;
+};
+
+struct icp_qat_fw_comp_state {
+       __u32 rd8_counter;
+       __u32 status_flags;
+       __u32 in_counter;
+       __u32 out_counter;
+       __u64 intermediate_state;
+       __u32 lobc;
+       __u32 replaybc;
+       __u64 pcrc64_poly;
+       __u32 crc32;
+       __u32 adler_xxhash32;
+       __u64 pcrc64_xorout;
+       __u32 out_buf_size;
+       __u32 in_buf_size;
+       __u64 in_pcrc64;
+       __u64 out_pcrc64;
+       __u32 lobs;
+       __u32 libc;
+       __u64 reserved;
+       __u32 xxhash_state[4];
+       __u32 cleartext[4];
+};
+
+struct icp_qat_fw_comp_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       __u64 opaque_data;
+       struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+};
+
+#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
+#define QAT_FW_COMP_BANK_I_BITPOS 8
+#define QAT_FW_COMP_BANK_H_BITPOS 7
+#define QAT_FW_COMP_BANK_G_BITPOS 6
+#define QAT_FW_COMP_BANK_F_BITPOS 5
+#define QAT_FW_COMP_BANK_E_BITPOS 4
+#define QAT_FW_COMP_BANK_D_BITPOS 3
+#define QAT_FW_COMP_BANK_C_BITPOS 2
+#define QAT_FW_COMP_BANK_B_BITPOS 1
+#define QAT_FW_COMP_BANK_A_BITPOS 0
+
+enum icp_qat_fw_comp_bank_enabled {
+       ICP_QAT_FW_COMP_BANK_DISABLED = 0,
+       ICP_QAT_FW_COMP_BANK_ENABLED = 1,
+       ICP_QAT_FW_COMP_BANK_DELIMITER = 2
+};
+
+#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \
+                                       bank_g_enable, bank_f_enable, \
+                                       bank_e_enable, bank_d_enable, \
+                                       bank_c_enable, bank_b_enable, \
+                                       bank_a_enable) \
+       ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_I_BITPOS) | \
+       (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_H_BITPOS) | \
+       (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_G_BITPOS) | \
+       (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_F_BITPOS) | \
+       (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_E_BITPOS) | \
+       (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_D_BITPOS) | \
+       (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_C_BITPOS) | \
+       (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_B_BITPOS) | \
+       (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
+       QAT_FW_COMP_BANK_A_BITPOS))
+
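+/*
+ * Illustrative usage (editor's sketch): ram_bank_flags in struct
+ * icp_qat_fw_comp_cd_hdr with banks A-D enabled; the arguments run
+ * from bank I down to bank A, matching the parameter order above:
+ *
+ *	ram_bank_flags = ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,
+ *			ICP_QAT_FW_COMP_BANK_DISABLED,
+ *			ICP_QAT_FW_COMP_BANK_ENABLED,
+ *			ICP_QAT_FW_COMP_BANK_ENABLED,
+ *			ICP_QAT_FW_COMP_BANK_ENABLED,
+ *			ICP_QAT_FW_COMP_BANK_ENABLED);
+ */
+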
+struct icp_qat_fw_comp_crc_data_struct {
+       __u32 crc32;
+       union {
+               __u32 adler;
+               __u32 xxhash;
+       } adler_xxhash_u;
+       __u32 cpr_in_crc_lo;
+       __u32 cpr_in_crc_hi;
+       __u32 cpr_out_crc_lo;
+       __u32 cpr_out_crc_hi;
+       __u32 xlt_in_crc_lo;
+       __u32 xlt_in_crc_hi;
+       __u32 xlt_out_crc_lo;
+       __u32 xlt_out_crc_hi;
+       __u32 prog_crc_poly_lo;
+       __u32 prog_crc_poly_hi;
+       __u32 xor_out_lo;
+       __u32 xor_out_hi;
+       __u32 append_crc_lo;
+       __u32 append_crc_hi;
+};
+
+struct xxhash_acc_state_buff {
+       __u32 in_counter;
+       __u32 out_counter;
+       __u32 xxhash_state[4];
+       __u32 clear_txt[4];
+};
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644 (file)
index 0000000..56cb827
--- /dev/null
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+       ICP_QAT_FW_INIT_AE = 0,
+       ICP_QAT_FW_TRNG_ENABLE = 1,
+       ICP_QAT_FW_TRNG_DISABLE = 2,
+       ICP_QAT_FW_CONSTANTS_CFG = 3,
+       ICP_QAT_FW_STATUS_GET = 4,
+       ICP_QAT_FW_COUNTERS_GET = 5,
+       ICP_QAT_FW_LOOPBACK = 6,
+       ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+       ICP_QAT_FW_HEARTBEAT_GET = 8,
+       ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+       ICP_QAT_FW_PM_STATE_CONFIG = 128,
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+       ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+       ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+       __u16 init_cfg_sz;
+       __u8 resrvd1;
+       __u8 cmd_id;
+       __u32 resrvd2;
+       __u64 opaque_data;
+       __u64 init_cfg_ptr;
+
+       union {
+               struct {
+                       __u16 ibuf_size_in_kb;
+                       __u16 resrvd3;
+               };
+               __u32 idle_filter;
+       };
+
+       __u32 resrvd4;
+} __packed;
+
+struct icp_qat_fw_init_admin_resp {
+       __u8 flags;
+       __u8 resrvd1;
+       __u8 status;
+       __u8 cmd_id;
+       union {
+               __u32 resrvd2;
+               struct {
+                       __u16 version_minor_num;
+                       __u16 version_major_num;
+               };
+               __u32 extended_features;
+       };
+       __u64 opaque_data;
+       union {
+               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       __u32 version_patch_num;
+                       __u8 context_id;
+                       __u8 ae_id;
+                       __u16 resrvd4;
+                       __u64 resrvd5;
+               };
+               struct {
+                       __u64 req_rec_count;
+                       __u64 resp_sent_count;
+               };
+               struct {
+                       __u16 compression_algos;
+                       __u16 checksum_algos;
+                       __u32 deflate_capabilities;
+                       __u32 resrvd6;
+                       __u32 lzs_capabilities;
+               };
+               struct {
+                       __u32 cipher_algos;
+                       __u32 hash_algos;
+                       __u16 keygen_algos;
+                       __u16 other;
+                       __u16 public_key_algos;
+                       __u16 prime_algos;
+               };
+               struct {
+                       __u64 timestamp;
+                       __u64 resrvd7;
+               };
+               struct {
+                       __u32 successful_count;
+                       __u32 unsuccessful_count;
+                       __u64 resrvd8;
+               };
+       };
+} __packed;
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
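+
+/*
+ * Illustrative usage (editor's sketch): after an ICP_QAT_FW_HEARTBEAT_GET
+ * admin command, the flag in the response tells whether the firmware is
+ * still servicing requests:
+ *
+ *	if (ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(resp) ==
+ *	    ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED)
+ *		recover the device;
+ *
+ * where resp is a struct icp_qat_fw_init_admin_resp.
+ */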
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_la.h
new file mode 100644 (file)
index 0000000..28fa17f
--- /dev/null
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+       ICP_QAT_FW_LA_CMD_CIPHER = 0,
+       ICP_QAT_FW_LA_CMD_AUTH = 1,
+       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+       ICP_QAT_FW_LA_CMD_MGF1 = 9,
+       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+       ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
+#define QAT_LA_SLICE_TYPE_BITPOS 14
+#define QAT_LA_SLICE_TYPE_MASK 0x3
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO        2
+#define ICP_QAT_FW_LA_CCM_PROTO        1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+       cmp_auth, ret_auth, update_state, \
+       ciph_iv, ciphcfg, partial) \
+       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+       ((proto & QAT_LA_PROTO_MASK) << \
+       QAT_LA_PROTO_BITPOS)    | \
+       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+       QAT_LA_CMP_AUTH_RES_BITPOS) | \
+       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+       QAT_LA_RET_AUTH_RES_BITPOS) | \
+       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+       QAT_LA_UPDATE_STATE_BITPOS) | \
+       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+       QAT_LA_CIPH_IV_FLD_BITPOS) | \
+       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+       ((partial & QAT_LA_PARTIAL_MASK) << \
+       QAT_LA_PARTIAL_BITPOS))
+
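+/*
+ * Illustrative usage (editor's sketch): service-specific flags for an
+ * AES-GCM request with a 12-byte IV, the digest carried in the request
+ * buffer, the auth result returned rather than compared, and no partial
+ * state. All constants are defined above:
+ *
+ *	flags = ICP_QAT_FW_LA_FLAGS_BUILD(0,
+ *			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,
+ *			ICP_QAT_FW_LA_DIGEST_IN_BUFFER,
+ *			ICP_QAT_FW_LA_GCM_PROTO,
+ *			ICP_QAT_FW_LA_NO_CMP_AUTH_RES,
+ *			ICP_QAT_FW_LA_RET_AUTH_RES,
+ *			ICP_QAT_FW_LA_NO_UPDATE_STATE,
+ *			ICP_QAT_FW_CIPH_IV_16BYTE_DATA,
+ *			ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
+ *			ICP_QAT_FW_LA_PARTIAL_NONE);
+ */
+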
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+       QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_SLICE_TYPE_BITPOS, \
+       QAT_LA_SLICE_TYPE_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+       union {
+               struct {
+                       __u64 content_desc_addr;
+                       __u16 content_desc_resrvd1;
+                       __u8 content_desc_params_sz;
+                       __u8 content_desc_hdr_resrvd2;
+                       __u32 content_desc_resrvd3;
+               } s;
+               struct {
+                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+       __u8 cipher_state_sz;
+       __u8 cipher_key_sz;
+       __u8 cipher_cfg_offset;
+       __u8 next_curr_id;
+       __u8 cipher_padding_sz;
+       __u8 resrvd1;
+       __u16 resrvd2;
+       __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+       __u32 resrvd1;
+       __u8 resrvd2;
+       __u8 hash_flags;
+       __u8 hash_cfg_offset;
+       __u8 next_curr_id;
+       __u8 resrvd3;
+       __u8 outer_prefix_sz;
+       __u8 final_sz;
+       __u8 inner_res_sz;
+       __u8 resrvd4;
+       __u8 inner_state1_sz;
+       __u8 inner_state2_offset;
+       __u8 inner_state2_sz;
+       __u8 outer_config_offset;
+       __u8 outer_state1_sz;
+       __u8 outer_res_sz;
+       __u8 outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+       __u8 cipher_state_sz;
+       __u8 cipher_key_sz;
+       __u8 cipher_cfg_offset;
+       __u8 next_curr_id_cipher;
+       __u8 cipher_padding_sz;
+       __u8 hash_flags;
+       __u8 hash_cfg_offset;
+       __u8 next_curr_id_auth;
+       __u8 resrvd1;
+       __u8 outer_prefix_sz;
+       __u8 final_sz;
+       __u8 inner_res_sz;
+       __u8 resrvd2;
+       __u8 inner_state1_sz;
+       __u8 inner_state2_offset;
+       __u8 inner_state2_sz;
+       __u8 outer_config_offset;
+       __u8 outer_state1_sz;
+       __u8 outer_res_sz;
+       __u8 outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+       (sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+       __u32 cipher_offset;
+       __u32 cipher_length;
+       union {
+               __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       __u64 cipher_IV_ptr;
+                       __u64 resrvd1;
+               } s;
+       } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+       __u32 auth_off;
+       __u32 auth_len;
+       union {
+               __u64 auth_partial_st_prefix;
+               __u64 aad_adr;
+       } u1;
+       __u64 auth_res_addr;
+       union {
+               __u8 inner_prefix_sz;
+               __u8 aad_sz;
+       } u2;
+       __u8 resrvd1;
+       __u8 hash_state_sz;
+       __u8 auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+       union {
+               __u8 inner_prefix_sz;
+               __u8 aad_sz;
+       } u2;
+       __u8 resrvd1;
+       __u16 resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       __u64 opaque_data;
+       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
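+/*
+ * Illustrative usage (editor's sketch): chaining the cipher slice into
+ * the auth slice of a cipher-then-hash content descriptor; the
+ * ICP_QAT_FW_SLICE_* ids are assumed to come from icp_qat_fw.h:
+ *
+ *	ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ *	ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ *	ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+ *	ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+ *
+ * with cd_ctrl pointing at a struct icp_qat_fw_cipher_auth_cd_ctrl_hdr.
+ */
+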
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644 (file)
index 0000000..7eb5dae
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+       unsigned int state;
+       unsigned int ustore_size;
+       unsigned int free_addr;
+       unsigned int free_size;
+       unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+       struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+       unsigned int ae_mask;
+       unsigned int admin_ae_mask;
+       unsigned int slice_mask;
+       unsigned int revision_id;
+       unsigned int ae_max_num;
+       unsigned int upc_mask;
+       unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_chip_info {
+       int mmp_sram_size;
+       bool nn;
+       bool lm2lm3;
+       u32 lm_size;
+       u32 icp_rst_csr;
+       u32 icp_rst_mask;
+       u32 glb_clk_enable_csr;
+       u32 misc_ctl_csr;
+       u32 wakeup_event_val;
+       bool fw_auth;
+       bool css_3k;
+       bool tgroup_share_ustore;
+       u32 fcu_ctl_csr;
+       u32 fcu_sts_csr;
+       u32 fcu_dram_addr_hi;
+       u32 fcu_dram_addr_lo;
+       u32 fcu_loaded_ae_csr;
+       u8 fcu_loaded_ae_pos;
+};
+
+struct icp_qat_fw_loader_handle {
+       struct icp_qat_fw_loader_hal_handle *hal_handle;
+       struct icp_qat_fw_loader_chip_info *chip_info;
+       struct pci_dev *pci_dev;
+       void *obj_handle;
+       void *sobj_handle;
+       void *mobj_handle;
+       unsigned int cfg_ae_mask;
+       void __iomem *hal_sram_addr_v;
+       void __iomem *hal_cap_g_ctl_csr_addr_v;
+       void __iomem *hal_cap_ae_xfer_csr_addr_v;
+       void __iomem *hal_cap_ae_local_csr_addr_v;
+       void __iomem *hal_ep_csr_addr_v;
+};
+
+struct icp_firml_dram_desc {
+       void __iomem *dram_base_addr;
+       void *dram_base_addr_v;
+       dma_addr_t dram_bus_addr;
+       u64 dram_size;
+};
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644 (file)
index 0000000..9dddae0
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+       __u64 content_desc_addr;
+       __u32 content_desc_resrvd;
+       __u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+       __u64 opaque;
+       __u64 src_data_addr;
+       __u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+       __u8 resrvd1;
+       __u8 resrvd2;
+       __u8 service_type;
+       __u8 hdr_flags;
+       __u16 comn_req_flags;
+       __u16 resrvd4;
+       struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+       struct icp_qat_fw_req_pke_hdr pke_hdr;
+       struct icp_qat_fw_req_pke_mid pke_mid;
+       __u8 output_param_count;
+       __u8 input_param_count;
+       __u16 resrvd1;
+       __u32 resrvd2;
+       __u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+       __u8 resrvd1;
+       __u8 resrvd2;
+       __u8 response_type;
+       __u8 hdr_flags;
+       __u16 comn_resp_flags;
+       __u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+       struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+       __u64 opaque;
+       __u64 src_data_addr;
+       __u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+       QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+               ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+               QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+               QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+               ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+               ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
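+
+/*
+ * Illustrative usage (editor's sketch): mark a PKE request header valid
+ * before submission and check the status byte on completion (assuming
+ * the common status flags from icp_qat_fw.h encode 0 as success):
+ *
+ *	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(req->pke_hdr, 1);
+ *	...
+ *	if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ *			resp->pke_resp_hdr.comn_resp_flags))
+ *		the request failed;
+ */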
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hal.h
new file mode 100644 (file)
index 0000000..20b2ee1
--- /dev/null
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+       MISC_CONTROL = 0xA04,
+       ICP_RESET = 0xA0c,
+       ICP_GLOBAL_CLK_ENABLE = 0xA50
+};
+
+enum {
+       MISC_CONTROL_C4XXX = 0xAA0,
+       ICP_RESET_CPP0 = 0x938,
+       ICP_RESET_CPP1 = 0x93c,
+       ICP_GLOBAL_CLK_ENABLE_CPP0 = 0x964,
+       ICP_GLOBAL_CLK_ENABLE_CPP1 = 0x968
+};
+
+enum hal_ae_csr {
+       USTORE_ADDRESS = 0x000,
+       USTORE_DATA_LOWER = 0x004,
+       USTORE_DATA_UPPER = 0x008,
+       ALU_OUT = 0x010,
+       CTX_ARB_CNTL = 0x014,
+       CTX_ENABLES = 0x018,
+       CC_ENABLE = 0x01c,
+       CSR_CTX_POINTER = 0x020,
+       CTX_STS_INDIRECT = 0x040,
+       ACTIVE_CTX_STATUS = 0x044,
+       CTX_SIG_EVENTS_INDIRECT = 0x048,
+       CTX_SIG_EVENTS_ACTIVE = 0x04c,
+       CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+       LM_ADDR_0_INDIRECT = 0x060,
+       LM_ADDR_1_INDIRECT = 0x068,
+       LM_ADDR_2_INDIRECT = 0x0cc,
+       LM_ADDR_3_INDIRECT = 0x0d4,
+       INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+       INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+       INDIRECT_LM_ADDR_2_BYTE_INDEX = 0x10c,
+       INDIRECT_LM_ADDR_3_BYTE_INDEX = 0x114,
+       INDIRECT_T_INDEX = 0x0f8,
+       INDIRECT_T_INDEX_BYTE_INDEX = 0x0fc,
+       FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+       TIMESTAMP_LOW = 0x0c0,
+       TIMESTAMP_HIGH = 0x0c4,
+       PROFILE_COUNT = 0x144,
+       SIGNATURE_ENABLE = 0x150,
+       AE_MISC_CONTROL = 0x160,
+       LOCAL_CSR_STATUS = 0x180,
+};
+
+enum fcu_csr {
+       FCU_CONTROL           = 0x8c0,
+       FCU_STATUS            = 0x8c4,
+       FCU_STATUS1           = 0x8c8,
+       FCU_DRAM_ADDR_LO      = 0x8cc,
+       FCU_DRAM_ADDR_HI      = 0x8d0,
+       FCU_RAMBASE_ADDR_HI   = 0x8d4,
+       FCU_RAMBASE_ADDR_LO   = 0x8d8
+};
+
+enum fcu_csr_4xxx {
+       FCU_CONTROL_4XXX           = 0x1000,
+       FCU_STATUS_4XXX            = 0x1004,
+       FCU_ME_BROADCAST_MASK_TYPE = 0x1008,
+       FCU_AE_LOADED_4XXX         = 0x1010,
+       FCU_DRAM_ADDR_LO_4XXX      = 0x1014,
+       FCU_DRAM_ADDR_HI_4XXX      = 0x1018,
+};
+
+enum fcu_cmd {
+       FCU_CTRL_CMD_NOOP  = 0,
+       FCU_CTRL_CMD_AUTH  = 1,
+       FCU_CTRL_CMD_LOAD  = 2,
+       FCU_CTRL_CMD_START = 3
+};
+
+enum fcu_sts {
+       FCU_STS_NO_STS    = 0,
+       FCU_STS_VERI_DONE = 1,
+       FCU_STS_LOAD_DONE = 2,
+       FCU_STS_VERI_FAIL = 3,
+       FCU_STS_LOAD_FAIL = 4,
+       FCU_STS_BUSY      = 5
+};
+
+#define ALL_AE_MASK                 0xFFFFFFFF
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_LMADDR_2_GLOBAL_BITPOS   22
+#define CE_LMADDR_3_GLOBAL_BITPOS   23
+#define CE_T_INDEX_GLOBAL_BITPOS    21
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define WAKEUP_EVENT 0x10000
+#define FCU_CTRL_BROADCAST_POS   0x4
+#define FCU_CTRL_AE_POS     0x8
+#define FCU_AUTH_STS_MASK   0x7
+#define FCU_STS_DONE_POS    0x9
+#define FCU_STS_AUTHFWLD_POS 0x8
+#define FCU_LOADED_AE_POS   0x16
+#define FW_AUTH_WAIT_PERIOD 10
+#define FW_AUTH_MAX_RETRY   300
+#define ICP_QAT_AE_OFFSET 0x20000
+#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET 0x800
+#define ICP_QAT_EP_OFFSET 0x3a000
+#define ICP_QAT_EP_OFFSET_4XXX   0x200000 /* HI MMIO CSRs */
+#define ICP_QAT_AE_OFFSET_4XXX   0x600000
+#define ICP_QAT_CAP_OFFSET_4XXX  0x640000
+#define SET_CAP_CSR(handle, csr, val) \
+       ADF_CSR_WR((handle)->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+       ADF_CSR_RD((handle)->hal_cap_g_ctl_csr_addr_v, csr)
+#define AE_CSR(handle, ae) \
+       ((char __iomem *)(handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr)))
+#define SET_AE_CSR(handle, ae, csr, val) \
+       ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+       ((char __iomem *)(handle)->hal_cap_ae_xfer_csr_addr_v + ((ae) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+       (((reg) & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+       ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+       ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val)
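+
+/*
+ * Illustrative usage (editor's sketch): each AE gets a 4 KB CSR window
+ * (ae << 12) and the CSR offset is masked to 0x3ff, so a
+ * read-modify-write of an accel-engine local CSR looks like:
+ *
+ *	unsigned int ctx = GET_AE_CSR(handle, ae, CTX_ENABLES);
+ *	SET_AE_CSR(handle, ae, CTX_ENABLES, ctx | CE_NN_MODE);
+ */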
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
new file mode 100644 (file)
index 0000000..4042739
--- /dev/null
@@ -0,0 +1,376 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+       ICP_QAT_HW_AE_0 = 0,
+       ICP_QAT_HW_AE_1 = 1,
+       ICP_QAT_HW_AE_2 = 2,
+       ICP_QAT_HW_AE_3 = 3,
+       ICP_QAT_HW_AE_4 = 4,
+       ICP_QAT_HW_AE_5 = 5,
+       ICP_QAT_HW_AE_6 = 6,
+       ICP_QAT_HW_AE_7 = 7,
+       ICP_QAT_HW_AE_8 = 8,
+       ICP_QAT_HW_AE_9 = 9,
+       ICP_QAT_HW_AE_10 = 10,
+       ICP_QAT_HW_AE_11 = 11,
+       ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+       ICP_QAT_HW_QAT_0 = 0,
+       ICP_QAT_HW_QAT_1 = 1,
+       ICP_QAT_HW_QAT_2 = 2,
+       ICP_QAT_HW_QAT_3 = 3,
+       ICP_QAT_HW_QAT_4 = 4,
+       ICP_QAT_HW_QAT_5 = 5,
+       ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+       ICP_QAT_HW_AUTH_MODE0 = 0,
+       ICP_QAT_HW_AUTH_MODE1 = 1,
+       ICP_QAT_HW_AUTH_MODE2 = 2,
+       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+       __u32 config;
+       __u32 reserved;
+};
+
+struct icp_qat_hw_ucs_cipher_config {
+       __u32 val;
+       __u32 reserved[3];
+};
+
+enum icp_qat_slice_mask {
+       ICP_ACCEL_MASK_CIPHER_SLICE = BIT(0),
+       ICP_ACCEL_MASK_AUTH_SLICE = BIT(1),
+       ICP_ACCEL_MASK_PKE_SLICE = BIT(2),
+       ICP_ACCEL_MASK_COMPRESS_SLICE = BIT(3),
+       ICP_ACCEL_MASK_LZS_SLICE = BIT(4),
+       ICP_ACCEL_MASK_EIA3_SLICE = BIT(5),
+       ICP_ACCEL_MASK_SHA3_SLICE = BIT(6),
+};
+
+enum icp_qat_capabilities_mask {
+       ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = BIT(0),
+       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = BIT(1),
+       ICP_ACCEL_CAPABILITIES_CIPHER = BIT(2),
+       ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
+       ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
+       ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
+       ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6),
+       ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
+       ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
+       ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
+       /* Bits 10-11 are currently reserved */
+       ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
+       ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
+       /* Bit 14 is currently reserved */
+       ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
+       ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
+       ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
+       /* Bits 18-21 are currently reserved */
+       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
+       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
+       ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
+       ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
+       ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+       (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+       ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+       (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+        QAT_AUTH_ALGO_SHA3_BITPOS) | \
+        (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+       (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+       ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
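+/*
+ * Illustrative usage (editor's sketch): a mode-1 (keyed) SHA-256 config
+ * word with a 32-byte digest comparison length; the SHA-3 padding bit
+ * is derived automatically by the macro:
+ *
+ *	config = ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ *			ICP_QAT_HW_AUTH_ALGO_SHA256, 32);
+ */
+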
+struct icp_qat_hw_auth_counter {
+       __be32 counter;
+       __u32 reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+       struct icp_qat_hw_auth_config auth_config;
+       struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+       ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+       struct icp_qat_hw_auth_setup inner_setup;
+       __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+       struct icp_qat_hw_auth_setup outer_setup;
+       __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+       struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+       ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+       ICP_QAT_HW_CIPHER_F8_MODE = 3,
+       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+       __u32 val;
+       __u32 reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+       ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+       (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+       ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+       ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+       ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+       struct icp_qat_hw_cipher_config cipher_config;
+       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_ucs_cipher_aes256_f8 {
+       struct icp_qat_hw_ucs_cipher_config cipher_config;
+       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+       union {
+               struct icp_qat_hw_cipher_aes256_f8 aes;
+               struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes;
+       };
+} __aligned(64);
+
+enum icp_qat_hw_compression_direction {
+       ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+       ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+       ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+       ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+       ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+       ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_depth {
+       ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+       ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+       ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+       ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+       ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4,
+       ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5
+};
+
+enum icp_qat_hw_compression_file_type {
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+       ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+       __u32 lower_val;
+       __u32 upper_val;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \
+       algo, depth, filetype) \
+       ((((dir) & QAT_COMPRESSION_DIR_MASK) << \
+       QAT_COMPRESSION_DIR_BITPOS) | \
+       (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
+       QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+       (((algo) & QAT_COMPRESSION_ALGO_MASK) << \
+       QAT_COMPRESSION_ALGO_BITPOS) | \
+       (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \
+       QAT_COMPRESSION_DEPTH_BITPOS) | \
+       (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \
+       QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
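+/*
+ * Illustrative usage (editor's sketch): the lower_val half of struct
+ * icp_qat_hw_compression_config for deflate compression at search
+ * depth 8 with delayed match disabled:
+ *
+ *	lower_val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+ *			ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ *			ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ *			ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ *			ICP_QAT_HW_COMPRESSION_DEPTH_8,
+ *			ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+ */
+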
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
new file mode 100644 (file)
index 0000000..7ea8962
--- /dev/null
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_H_
+#define _ICP_QAT_HW_20_COMP_H_
+
+#include "icp_qat_hw_20_comp_defs.h"
+#include "icp_qat_fw.h"
+
+struct icp_qat_hw_comp_20_config_csr_lower {
+       enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
+       enum icp_qat_hw_comp_20_hw_comp_format algo;
+       enum icp_qat_hw_comp_20_search_depth sd;
+       enum icp_qat_hw_comp_20_hbs_control hbs;
+       enum icp_qat_hw_comp_20_abd abd;
+       enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
+       enum icp_qat_hw_comp_20_min_match_control mmctrl;
+       enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
+       enum icp_qat_hw_comp_20_skip_hash_update hash_update;
+       enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.algo,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
+       QAT_FIELD_SET(val32, csr.sd,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
+       QAT_FIELD_SET(val32, csr.edmm,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
+       QAT_FIELD_SET(val32, csr.hbs,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lllbd,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
+       QAT_FIELD_SET(val32, csr.mmctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.hash_col,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
+       QAT_FIELD_SET(val32, csr.hash_update,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
+       QAT_FIELD_SET(val32, csr.skip_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
+       QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_comp_20_config_csr_upper {
+       enum icp_qat_hw_comp_20_scb_control scb_ctrl;
+       enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
+       enum icp_qat_hw_comp_20_som_control som_ctrl;
+       enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
+       enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
+       enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
+       enum icp_qat_hw_comp_20_lbms lbms;
+       enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
+       __u16 lazy;
+       __u16 nice;
+};
+
+static inline __u32
+ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.scb_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.rmb_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.som_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lbms,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
+       QAT_FIELD_SET(val32, csr.scb_mode_reset,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+       QAT_FIELD_SET(val32, csr.lazy,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
+       QAT_FIELD_SET(val32, csr.nice,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
+                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
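+/*
+ * Illustrative usage (editor's sketch): fill the two descriptors and
+ * derive both halves of the compression slice config.  The enum values
+ * shown are assumptions based on icp_qat_hw_20_comp_defs.h:
+ *
+ *	struct icp_qat_hw_comp_20_config_csr_lower lo = { 0 };
+ *	struct icp_qat_hw_comp_20_config_csr_upper up = { 0 };
+ *
+ *	lo.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE;
+ *	lo.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ *	lower = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(lo);
+ *	upper = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(up);
+ *
+ * Both builders byte-swap the assembled word before returning it.
+ */
+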
+struct icp_qat_hw_decomp_20_config_csr_lower {
+       enum icp_qat_hw_decomp_20_hbs_control hbs;
+       enum icp_qat_hw_decomp_20_lbms lbms;
+       enum icp_qat_hw_decomp_20_hw_comp_format algo;
+       enum icp_qat_hw_decomp_20_min_match_control mmctrl;
+       enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.hbs,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lbms,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
+       QAT_FIELD_SET(val32, csr.algo,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
+       QAT_FIELD_SET(val32, csr.mmctrl,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.lbc,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
+struct icp_qat_hw_decomp_20_config_csr_upper {
+       enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
+       enum icp_qat_hw_decomp_20_mini_cam_control mcc;
+};
+
+static inline __u32
+ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
+{
+       u32 val32 = 0;
+
+       QAT_FIELD_SET(val32, csr.sdc,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
+       QAT_FIELD_SET(val32, csr.mcc,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
+                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
+
+       return __builtin_bswap32(val32);
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp_defs.h
new file mode 100644 (file)
index 0000000..208d455
--- /dev/null
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
+#define _ICP_QAT_HW_20_COMP_DEFS_H
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_control {
+       ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_rmb_control {
+       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
+       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
+
+enum icp_qat_hw_comp_20_som_control {
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_rd_control {
+       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_unload_control {
+       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
+       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_disable_token_fusion_control {
+       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_comp_20_lbms {
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
+
+enum icp_qat_hw_comp_20_scb_mode_reset_mask {
+       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
+       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_comp_20_hbs_control {
+       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+       ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
+
+enum icp_qat_hw_comp_20_abd {
+       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
+       ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
+
+enum icp_qat_hw_comp_20_lllbd_ctrl {
+       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
+
+enum icp_qat_hw_comp_20_search_depth {
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_comp_20_hw_comp_format {
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
+       ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_comp_20_min_match_control {
+       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_collision {
+       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
+       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
+
+enum icp_qat_hw_comp_20_skip_hash_update {
+       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
+       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
+
+enum icp_qat_hw_comp_20_byte_skip {
+       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
+
+enum icp_qat_hw_comp_20_extended_delay_match_mode {
+       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
+       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
+       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_speculative_decoder_control {
+       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_mini_cam_control {
+       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
+       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hbs_control {
+       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
+
+enum icp_qat_hw_decomp_20_lbms {
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
+
+enum icp_qat_hw_decomp_20_hw_comp_format {
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
+       ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
+
+enum icp_qat_hw_decomp_20_min_match_control {
+       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
+
+enum icp_qat_hw_decomp_20_lz4_block_checksum_present {
+       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
+       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
+       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
+
+#endif
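
With the bit positions, masks, and DEFAULT_VAL macros above, assembling a baseline register is mechanical. A hedged sketch that fills the upper compression CSR struct with the documented defaults and runs it through the builder; it assumes the builder header added earlier in this patch is icp_qat_hw_20_comp.h and that it pulls in these definitions, and the wrapper name is mine:

    /* Sketch: build the default upper compression CSR word.  All names
     * except the wrapper come from the two headers in this patch. */
    #include "icp_qat_hw_20_comp.h"

    static __u32 default_comp_csr_upper(void)
    {
            struct icp_qat_hw_comp_20_config_csr_upper csr = {
                    .scb_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL,
                    .rmb_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL,
                    .som_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL,
                    .skip_hash_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL,
                    .scb_unload_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL,
                    .disable_token_fusion_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL,
                    .lbms = ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL,
                    .scb_mode_reset = ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL,
                    .lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL, /* 258 */
                    .nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL, /* 259 */
            };

            return ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(csr);
    }
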
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
new file mode 100644 (file)
index 0000000..69482ab
--- /dev/null
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
+#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
+#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
+#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
+#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+#define ICP_QAT_SUOF_OBJ_ID_LEN             8
+#define ICP_QAT_SUOF_FID  0x53554f46
+#define ICP_QAT_SUOF_MAJVER 0x0
+#define ICP_QAT_SUOF_MINVER 0x1
+#define ICP_QAT_SUOF_OBJ_NAME_LEN 128
+#define ICP_QAT_MOF_OBJ_ID_LEN 8
+#define ICP_QAT_MOF_OBJ_CHUNKID_LEN 8
+#define ICP_QAT_MOF_FID 0x00666f6d
+#define ICP_QAT_MOF_MAJVER 0x0
+#define ICP_QAT_MOF_MINVER 0x1
+#define ICP_QAT_MOF_SYM_OBJS "SYM_OBJS"
+#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
+#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
+#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
+#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
+
+#define DSS_FWSK_MODULUS_LEN    384 /* RSA3K */
+#define DSS_FWSK_EXPONENT_LEN   4
+#define DSS_FWSK_PADDING_LEN    380
+#define DSS_SIGNATURE_LEN       384 /* RSA3K */
+
+#define CSS_FWSK_MODULUS_LEN    256 /* RSA2K */
+#define CSS_FWSK_EXPONENT_LEN   4
+#define CSS_FWSK_PADDING_LEN    252
+#define CSS_SIGNATURE_LEN       256 /* RSA2K */
+
+#define ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)   ((handle)->chip_info->css_3k ? \
+                                               DSS_FWSK_MODULUS_LEN  : \
+                                               CSS_FWSK_MODULUS_LEN)
+
+#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)  ((handle)->chip_info->css_3k ? \
+                                               DSS_FWSK_EXPONENT_LEN : \
+                                               CSS_FWSK_EXPONENT_LEN)
+
+#define ICP_QAT_CSS_FWSK_PAD_LEN(handle)       ((handle)->chip_info->css_3k ? \
+                                               DSS_FWSK_PADDING_LEN : \
+                                               CSS_FWSK_PADDING_LEN)
+
+#define ICP_QAT_CSS_FWSK_PUB_LEN(handle)       (ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
+                                               ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
+                                               ICP_QAT_CSS_FWSK_PAD_LEN(handle))
+
+#define ICP_QAT_CSS_SIGNATURE_LEN(handle)      ((handle)->chip_info->css_3k ? \
+                                               DSS_SIGNATURE_LEN : \
+                                               CSS_SIGNATURE_LEN)
+
+#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
+                                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
+                                   ICP_QAT_SIMG_AE_INSTS_LEN)
+#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
+                                       ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
+                                       ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
+                                       ICP_QAT_CSS_AE_IMG_LEN)
+#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
+                                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
+                                       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
+                                       ICP_QAT_CSS_SIGNATURE_LEN(handle))
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN    0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN    0x30000
+
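
The LEN/OFFSET macros above encode the signed-image layout: a CSS header, then the firmware signing key (modulus, exponent, padding), then the signature, then the AE image, with RSA2K (CSS) or RSA3K (DSS) field widths selected by chip_info->css_3k. A self-contained sketch of the ICP_QAT_AE_IMG_OFFSET() arithmetic, taking sizeof(struct icp_qat_css_hdr) as 128 bytes (32 u32 fields, per the struct later in this header); the macro and helper names here are mine:

    #include <stdio.h>

    #define CSS_HDR_LEN 128                           /* 32 u32 fields */
    #define MODULUS_LEN(css_3k)   ((css_3k) ? 384 : 256)
    #define EXPONENT_LEN          4
    #define SIGNATURE_LEN(css_3k) ((css_3k) ? 384 : 256)

    /* Byte offset of the AE image, mirroring ICP_QAT_AE_IMG_OFFSET():
     * header + modulus + exponent + signature (padding not included). */
    static unsigned int ae_img_offset(int css_3k)
    {
            return CSS_HDR_LEN + MODULUS_LEN(css_3k) + EXPONENT_LEN +
                   SIGNATURE_LEN(css_3k);
    }

    int main(void)
    {
            printf("RSA2K: AE image at byte %u\n", ae_img_offset(0)); /* 644 */
            printf("RSA3K: AE image at byte %u\n", ae_img_offset(1)); /* 900 */
            return 0;
    }
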
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+#define ICP_QAT_LOC_MEM2_MODE(ae_mode) (((ae_mode) >> 0x6) & 0x1)
+#define ICP_QAT_LOC_MEM3_MODE(ae_mode) (((ae_mode) >> 0x7) & 0x1)
+#define ICP_QAT_LOC_TINDEX_MODE(ae_mode) (((ae_mode) >> 0xe) & 0x1)
+
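
The AE-mode accessors above slice one packed mode word (carried as icp_qat_uof_image.ae_mode later in this header) into per-engine settings. A small decoder using the same shifts and masks; the print labels and the example value are mine:

    #include <stdio.h>

    /* Decode a packed ae_mode word with the shifts/masks defined above. */
    static void dump_ae_mode(unsigned int ae_mode)
    {
            printf("ctx mode:       %u\n", ae_mode & 0xf);
            printf("nn mode:        %u\n", (ae_mode >> 4) & 0xf);
            printf("lmem0 mode:     %u\n", (ae_mode >> 8) & 0x1);
            printf("lmem1 mode:     %u\n", (ae_mode >> 9) & 0x1);
            printf("shared ustore:  %u\n", (ae_mode >> 11) & 0x1);
            printf("reloadable ctx: %u\n", (ae_mode >> 12) & 0x1);
            printf("tindex mode:    %u\n", (ae_mode >> 14) & 0x1);
    }

    int main(void)
    {
            dump_ae_mode(0x0814); /* ctx mode 4, nn mode 1, shared ustore */
            return 0;
    }
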
+enum icp_qat_uof_mem_region {
+       ICP_QAT_UOF_SRAM_REGION = 0x0,
+       ICP_QAT_UOF_LMEM_REGION = 0x3,
+       ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+       ICP_NO_DEST     = 0,
+       ICP_GPA_REL     = 1,
+       ICP_GPA_ABS     = 2,
+       ICP_GPB_REL     = 3,
+       ICP_GPB_ABS     = 4,
+       ICP_SR_REL      = 5,
+       ICP_SR_RD_REL   = 6,
+       ICP_SR_WR_REL   = 7,
+       ICP_SR_ABS      = 8,
+       ICP_SR_RD_ABS   = 9,
+       ICP_SR_WR_ABS   = 10,
+       ICP_DR_REL      = 19,
+       ICP_DR_RD_REL   = 20,
+       ICP_DR_WR_REL   = 21,
+       ICP_DR_ABS      = 22,
+       ICP_DR_RD_ABS   = 23,
+       ICP_DR_WR_ABS   = 24,
+       ICP_LMEM        = 26,
+       ICP_LMEM0       = 27,
+       ICP_LMEM1       = 28,
+       ICP_NEIGH_REL   = 31,
+       ICP_LMEM2       = 61,
+       ICP_LMEM3       = 62,
+};
+
+enum icp_qat_css_fwtype {
+       CSS_AE_FIRMWARE = 0,
+       CSS_MMP_FIRMWARE = 1
+};
+
+struct icp_qat_uclo_page {
+       struct icp_qat_uclo_encap_page *encap_page;
+       struct icp_qat_uclo_region *region;
+       unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+       struct icp_qat_uclo_page *loaded;
+       struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+       struct icp_qat_uclo_region *region;
+       struct icp_qat_uclo_page *page;
+       struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+       struct icp_qat_uclo_encapme *encap_image;
+       unsigned int ctx_mask_assigned;
+       unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+       unsigned int slice_num;
+       unsigned int eff_ustore_size;
+       struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+       char *beg_uof;
+       struct icp_qat_uof_objhdr *obj_hdr;
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+       struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+       unsigned int start_addr;
+       unsigned int words_num;
+       u64 micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+       unsigned int def_page;
+       unsigned int page_region;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int micro_words_num;
+       unsigned int uwblock_num;
+       struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+       struct icp_qat_uof_image *img_ptr;
+       struct icp_qat_uclo_encap_page *page;
+       unsigned int ae_reg_num;
+       struct icp_qat_uof_ae_reg *ae_reg;
+       unsigned int init_regsym_num;
+       struct icp_qat_uof_init_regsym *init_regsym;
+       unsigned int sbreak_num;
+       struct icp_qat_uof_sbreak *sbreak;
+       unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+       unsigned int entry_num;
+       struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+       char *file_buff;
+       unsigned int checksum;
+       unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+       unsigned int table_len;
+       unsigned int reserved;
+       u64 strings;
+};
+
+struct icp_qat_uclo_objhandle {
+       unsigned int prod_type;
+       unsigned int prod_rev;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       struct icp_qat_uof_encap_obj encap_uof_obj;
+       struct icp_qat_uof_strtable str_table;
+       struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+       struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uclo_init_mem_table init_mem_tab;
+       struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+       int uimage_num;
+       int uword_in_bytes;
+       int global_inited;
+       unsigned int ae_num;
+       unsigned int ustore_phy_size;
+       void *obj_buf;
+       u64 *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+       unsigned int start_addr;
+       unsigned int words_num;
+       unsigned int uword_offset;
+       unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+       unsigned short file_id;
+       unsigned short reserved1;
+       char min_ver;
+       char maj_ver;
+       unsigned short reserved2;
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int checksum;
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+       unsigned int ac_dev_type;
+       unsigned short min_cpu_ver;
+       unsigned short max_cpu_ver;
+       short max_chunks;
+       short num_chunks;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+       unsigned int offset_in_byte;
+       unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+       unsigned int sym_name;
+       char region;
+       char scope;
+       unsigned short reserved1;
+       unsigned int addr;
+       unsigned int num_in_bytes;
+       unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+       unsigned int sym_name;
+       char init_type;
+       char value_type;
+       char reg_type;
+       unsigned char ctx;
+       unsigned int reg_addr;
+       unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+       unsigned int sram_base;
+       unsigned int sram_size;
+       unsigned int sram_alignment;
+       unsigned int sdram_base;
+       unsigned int sdram_size;
+       unsigned int sdram_alignment;
+       unsigned int sdram1_base;
+       unsigned int sdram1_size;
+       unsigned int sdram1_alignment;
+       unsigned int scratch_base;
+       unsigned int scratch_size;
+       unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+       char tool_id[ICP_QAT_UOF_OBJID_LEN];
+       int tool_ver;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+       unsigned int page_num;
+       unsigned int virt_uaddr;
+       unsigned char sbreak_type;
+       unsigned char reg_type;
+       unsigned short reserved1;
+       unsigned int addr_offset;
+       unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+       unsigned int page_region;
+       unsigned int page_num;
+       unsigned char def_page;
+       unsigned char reserved2;
+       unsigned short reserved1;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int neigh_reg_tab_offset;
+       unsigned int uc_var_tab_offset;
+       unsigned int imp_var_tab_offset;
+       unsigned int imp_expr_tab_offset;
+       unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+       unsigned int img_name;
+       unsigned int ae_assigned;
+       unsigned int ctx_assigned;
+       unsigned int ac_dev_type;
+       unsigned int entry_address;
+       unsigned int fill_pattern[2];
+       unsigned int reloadable_size;
+       unsigned char sensitivity;
+       unsigned char reserved;
+       unsigned short ae_mode;
+       unsigned short max_ver;
+       unsigned short min_ver;
+       unsigned short image_attrib;
+       unsigned short reserved2;
+       unsigned short page_region_num;
+       unsigned short numpages;
+       unsigned int reg_tab_offset;
+       unsigned int init_reg_sym_tab;
+       unsigned int sbreak_tab;
+       unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+       unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+       unsigned int name;
+       unsigned int vis_name;
+       unsigned short type;
+       unsigned short addr;
+       unsigned short access_mode;
+       unsigned char visible;
+       unsigned char reserved1;
+       unsigned short ref_count;
+       unsigned short reserved2;
+       unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+       unsigned int micro_words_num;
+       unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+       unsigned int ae;
+       unsigned int addr;
+       unsigned int *value;
+       unsigned int size;
+       struct icp_qat_uof_batch_init *next;
+};
+
+struct icp_qat_suof_img_hdr {
+       char          *simg_buf;
+       unsigned long simg_len;
+       char          *css_header;
+       char          *css_key;
+       char          *css_signature;
+       char          *css_simg;
+       unsigned long simg_size;
+       unsigned int  ae_num;
+       unsigned int  ae_mask;
+       unsigned int  fw_type;
+       unsigned long simg_name;
+       unsigned long appmeta_data;
+};
+
+struct icp_qat_suof_img_tbl {
+       unsigned int num_simgs;
+       struct icp_qat_suof_img_hdr *simg_hdr;
+};
+
+struct icp_qat_suof_handle {
+       unsigned int  file_id;
+       unsigned int  check_sum;
+       char          min_ver;
+       char          maj_ver;
+       char          fw_type;
+       char          *suof_buf;
+       unsigned int  suof_size;
+       char          *sym_str;
+       unsigned int  sym_size;
+       struct icp_qat_suof_img_tbl img_table;
+};
+
+struct icp_qat_fw_auth_desc {
+       unsigned int   img_len;
+       unsigned int   ae_mask;
+       unsigned int   css_hdr_high;
+       unsigned int   css_hdr_low;
+       unsigned int   img_high;
+       unsigned int   img_low;
+       unsigned int   signature_high;
+       unsigned int   signature_low;
+       unsigned int   fwsk_pub_high;
+       unsigned int   fwsk_pub_low;
+       unsigned int   img_ae_mode_data_high;
+       unsigned int   img_ae_mode_data_low;
+       unsigned int   img_ae_init_data_high;
+       unsigned int   img_ae_init_data_low;
+       unsigned int   img_ae_insts_high;
+       unsigned int   img_ae_insts_low;
+};
+
+struct icp_qat_auth_chunk {
+       struct icp_qat_fw_auth_desc fw_auth_desc;
+       u64 chunk_size;
+       u64 chunk_bus_addr;
+};
+
+struct icp_qat_css_hdr {
+       unsigned int module_type;
+       unsigned int header_len;
+       unsigned int header_ver;
+       unsigned int module_id;
+       unsigned int module_vendor;
+       unsigned int date;
+       unsigned int size;
+       unsigned int key_size;
+       unsigned int module_size;
+       unsigned int exponent_size;
+       unsigned int fw_type;
+       unsigned int reserved[21];
+};
+
+struct icp_qat_simg_ae_mode {
+       unsigned int     file_id;
+       unsigned short   maj_ver;
+       unsigned short   min_ver;
+       unsigned int     dev_type;
+       unsigned short   devmax_ver;
+       unsigned short   devmin_ver;
+       unsigned int     ae_mask;
+       unsigned int     ctx_enables;
+       char             fw_type;
+       char             ctx_mode;
+       char             nn_mode;
+       char             lm0_mode;
+       char             lm1_mode;
+       char             scs_mode;
+       char             lm2_mode;
+       char             lm3_mode;
+       char             tindex_mode;
+       unsigned char    reserved[7];
+       char             simg_name[256];
+       char             appmeta_data[256];
+};
+
+struct icp_qat_suof_filehdr {
+       unsigned int     file_id;
+       unsigned int     check_sum;
+       char             min_ver;
+       char             maj_ver;
+       char             fw_type;
+       char             reserved;
+       unsigned short   max_chunks;
+       unsigned short   num_chunks;
+};
+
+struct icp_qat_suof_chunk_hdr {
+       char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
+       u64 offset;
+       u64 size;
+};
+
+struct icp_qat_suof_strtable {
+       unsigned int tab_length;
+       unsigned int strings;
+};
+
+struct icp_qat_suof_objhdr {
+       unsigned int img_length;
+       unsigned int reserved;
+};
+
+struct icp_qat_mof_file_hdr {
+       unsigned int file_id;
+       unsigned int checksum;
+       char min_ver;
+       char maj_ver;
+       unsigned short reserved;
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+};
+
+struct icp_qat_mof_chunkhdr {
+       char chunk_id[ICP_QAT_MOF_OBJ_ID_LEN];
+       u64 offset;
+       u64 size;
+};
+
+struct icp_qat_mof_str_table {
+       unsigned int tab_len;
+       unsigned int strings;
+};
+
+struct icp_qat_mof_obj_hdr {
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+       unsigned int reserved;
+};
+
+struct icp_qat_mof_obj_chunkhdr {
+       char chunk_id[ICP_QAT_MOF_OBJ_CHUNKID_LEN];
+       u64 offset;
+       u64 size;
+       unsigned int name;
+       unsigned int reserved;
+};
+
+struct icp_qat_mof_objhdr {
+       char *obj_name;
+       char *obj_buf;
+       unsigned int obj_size;
+};
+
+struct icp_qat_mof_table {
+       unsigned int num_objs;
+       struct icp_qat_mof_objhdr *obj_hdr;
+};
+
+struct icp_qat_mof_handle {
+       unsigned int file_id;
+       unsigned int checksum;
+       char min_ver;
+       char maj_ver;
+       char *mof_buf;
+       u32 mof_size;
+       char *sym_str;
+       unsigned int sym_size;
+       char *uobjs_hdr;
+       char *sobjs_hdr;
+       struct icp_qat_mof_table obj_table;
+};
+#endif
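
The UOF container opens with an icp_qat_uof_filehdr (magic ICP_QAT_UOF_FID) followed by num_chunks icp_qat_uof_filechunkhdr records naming chunks such as UOF_OBJS and UOF_STRT; the loader locates each object by walking this table. A hedged, read-only walk of that layout, assuming the buffer holds a complete file on a little-endian host (no bounds checking, sketch only):

    #include <stdio.h>
    #include "icp_qat_uclo.h"

    static void uof_list_chunks(const char *buf)
    {
            const struct icp_qat_uof_filehdr *fhdr = (const void *)buf;
            const struct icp_qat_uof_filechunkhdr *chunk =
                    (const void *)(buf + sizeof(*fhdr));
            int i;

            if (fhdr->file_id != ICP_QAT_UOF_FID)
                    return; /* not a UOF image (magic is 0xc6c2) */

            /* chunk_id is not NUL-terminated, hence the %.8s width. */
            for (i = 0; i < fhdr->num_chunks; i++, chunk++)
                    printf("%.8s: offset=0x%x size=0x%x\n",
                           chunk->chunk_id, chunk->offset, chunk->size);
    }
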
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
new file mode 100644 (file)
index 0000000..538dcbf
--- /dev/null
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+#include "qat_bl.h"
+
+#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
+                                      ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+                                      ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+                                      ICP_QAT_HW_CIPHER_DECRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
+                                      ICP_QAT_HW_CIPHER_DECRYPT)
+
+#define HW_CAP_AES_V2(accel_dev) \
+       (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
+        ICP_ACCEL_CAPABILITIES_AES_V2)
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+/* Common content descriptor */
+struct qat_alg_cd {
+       union {
+               struct qat_enc { /* Encrypt content desc */
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+                       struct icp_qat_hw_auth_algo_blk hash;
+               } qat_enc_cd;
+               struct qat_dec { /* Decrypt content desc */
+                       struct icp_qat_hw_auth_algo_blk hash;
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+               } qat_dec_cd;
+       };
+} __aligned(64);
+
+struct qat_alg_aead_ctx {
+       struct qat_alg_cd *enc_cd;
+       struct qat_alg_cd *dec_cd;
+       dma_addr_t enc_cd_paddr;
+       dma_addr_t dec_cd_paddr;
+       struct icp_qat_fw_la_bulk_req enc_fw_req;
+       struct icp_qat_fw_la_bulk_req dec_fw_req;
+       struct crypto_shash *hash_tfm;
+       enum icp_qat_hw_auth_algo qat_hash_alg;
+       struct qat_crypto_instance *inst;
+       union {
+               struct sha1_state sha1;
+               struct sha256_state sha256;
+               struct sha512_state sha512;
+       };
+       char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+       char opad[SHA512_BLOCK_SIZE];
+};
+
+struct qat_alg_skcipher_ctx {
+       struct icp_qat_hw_cipher_algo_blk *enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *dec_cd;
+       dma_addr_t enc_cd_paddr;
+       dma_addr_t dec_cd_paddr;
+       struct icp_qat_fw_la_bulk_req enc_fw_req;
+       struct icp_qat_fw_la_bulk_req dec_fw_req;
+       struct qat_crypto_instance *inst;
+       struct crypto_skcipher *ftfm;
+       struct crypto_cipher *tweak;
+       bool fallback;
+       int mode;
+};
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+       switch (qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               return ICP_QAT_HW_SHA1_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               return ICP_QAT_HW_SHA256_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               return ICP_QAT_HW_SHA512_STATE1_SZ;
+       default:
+               return -EFAULT;
+       }
+}
+
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+                                 struct qat_alg_aead_ctx *ctx,
+                                 const u8 *auth_key,
+                                 unsigned int auth_keylen)
+{
+       SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+       __be32 *hash_state_out;
+       __be64 *hash512_state_out;
+       int i, offset;
+
+       memset(ctx->ipad, 0, block_size);
+       memset(ctx->opad, 0, block_size);
+       shash->tfm = ctx->hash_tfm;
+
+       if (auth_keylen > block_size) {
+               int ret = crypto_shash_digest(shash, auth_key,
+                                             auth_keylen, ctx->ipad);
+               if (ret)
+                       return ret;
+
+               memcpy(ctx->opad, ctx->ipad, digest_size);
+       } else {
+               memcpy(ctx->ipad, auth_key, auth_keylen);
+               memcpy(ctx->opad, auth_key, auth_keylen);
+       }
+
+       for (i = 0; i < block_size; i++) {
+               char *ipad_ptr = ctx->ipad + i;
+               char *opad_ptr = ctx->opad + i;
+               *ipad_ptr ^= HMAC_IPAD_VALUE;
+               *opad_ptr ^= HMAC_OPAD_VALUE;
+       }
+
+       if (crypto_shash_init(shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(shash, ctx->ipad, block_size))
+               return -EFAULT;
+
+       hash_state_out = (__be32 *)hash->sha.state1;
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(shash, &ctx->sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(shash, &ctx->sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(shash, &ctx->sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+               break;
+       default:
+               return -EFAULT;
+       }
+
+       if (crypto_shash_init(shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(shash, ctx->opad, block_size))
+               return -EFAULT;
+
+       offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+       if (offset < 0)
+               return -EFAULT;
+
+       hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(shash, &ctx->sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(shash, &ctx->sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(shash, &ctx->sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+               break;
+       default:
+               return -EFAULT;
+       }
+       memzero_explicit(ctx->ipad, block_size);
+       memzero_explicit(ctx->opad, block_size);
+       return 0;
+}
+
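
qat_alg_do_precomputes() is the standard HMAC precompute: XOR the key into ipad/opad blocks, hash each block once, export the partial state, and store it big-endian in the content descriptor so the hardware can resume from that state on every request. A self-contained sketch of just the pad derivation; 0x36 and 0x5c are HMAC_IPAD_VALUE and HMAC_OPAD_VALUE from <crypto/hmac.h>, and the long-key digest step the driver performs first is noted but omitted:

    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE 64  /* SHA-1/SHA-256 block size */
    #define IPAD 0x36      /* HMAC_IPAD_VALUE */
    #define OPAD 0x5c      /* HMAC_OPAD_VALUE */

    /* Derive the two padded blocks HMAC hashes first.  A key longer
     * than the block size would be hashed down beforehand (as the
     * driver does with crypto_shash_digest); omitted here. */
    static void hmac_pads(const unsigned char *key, size_t keylen,
                          unsigned char ipad[BLOCK_SIZE],
                          unsigned char opad[BLOCK_SIZE])
    {
            size_t i;

            memset(ipad, 0, BLOCK_SIZE);
            memset(opad, 0, BLOCK_SIZE);
            memcpy(ipad, key, keylen);
            memcpy(opad, key, keylen);

            for (i = 0; i < BLOCK_SIZE; i++) {
                    ipad[i] ^= IPAD;
                    opad[i] ^= OPAD;
            }
    }

    int main(void)
    {
            unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];
            const unsigned char key[] = "Jefe";

            hmac_pads(key, sizeof(key) - 1, ipad, opad);
            printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
            return 0;
    }
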
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+                                 ICP_QAT_FW_LA_PARTIAL_NONE);
+       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+       ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+                                      ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
+                                        int alg,
+                                        struct crypto_authenc_keys *keys,
+                                        int mode)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+       struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+       struct icp_qat_hw_auth_algo_blk *hash =
+               (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg, digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       return 0;
+}
+
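
One detail worth calling out in the session setup: every size and offset written into the CD control headers is shifted right by 3 because the firmware counts in 8-byte quadwords, not bytes. A trivial check of the conversions used above, offered as my own illustration:

    #include <assert.h>

    int main(void)
    {
            /* AES-256 key: 32 bytes -> cipher_key_sz of 4 quadwords */
            assert((32 >> 3) == 4);
            /* AES block/IV: 16 bytes -> cipher_state_sz of 2 quadwords */
            assert((16 >> 3) == 2);
            return 0;
    }
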
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
+                                        int alg,
+                                        struct crypto_authenc_keys *keys,
+                                        int mode)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
+       struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+       struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+       struct icp_qat_hw_cipher_algo_blk *cipher =
+               (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) +
+               roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+       struct icp_qat_fw_la_auth_req_params *auth_param =
+               (struct icp_qat_fw_la_auth_req_params *)
+               ((char *)&req_tmpl->serv_specif_rqpars +
+               sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg,
+                                            digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset =
+               (sizeof(struct icp_qat_hw_auth_setup) +
+                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = 0;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       auth_param->auth_res_sz = digestsize;
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       return 0;
+}
+
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+                                     struct icp_qat_fw_la_bulk_req *req,
+                                     struct icp_qat_hw_cipher_algo_blk *cd,
+                                     const u8 *key, unsigned int keylen)
+{
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+       int mode = ctx->mode;
+
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+       cd_pars->u.s.content_desc_params_sz =
+                               sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+
+       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+
+               /* Store both XTS keys in the CD; only the first key is
+                * sent to the HW, the second is used for tweak
+                * calculation.
+                */
+               memcpy(cd->ucs_aes.key, key, keylen);
+               keylen = keylen / 2;
+       } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+               memcpy(cd->ucs_aes.key, key, keylen);
+               keylen = round_up(keylen, 16);
+       } else {
+               memcpy(cd->aes.key, key, keylen);
+       }
+
+       /* Cipher CD config setup */
+       cd_ctrl->cipher_key_sz = keylen >> 3;
+       cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
+
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+                                     int alg, const u8 *key,
+                                     unsigned int keylen, int mode)
+{
+       struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+       struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+       qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
+}
+
+static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
+                                   u8 *key_reverse)
+{
+       struct crypto_aes_ctx aes_expanded;
+       int nrounds;
+       u8 *key;
+
+       aes_expandkey(&aes_expanded, key_forward, keylen);
+       if (keylen == AES_KEYSIZE_128) {
+               nrounds = 10;
+               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+               memcpy(key_reverse, key, AES_BLOCK_SIZE);
+       } else {
+               /* AES_KEYSIZE_256 */
+               nrounds = 14;
+               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+               memcpy(key_reverse, key, AES_BLOCK_SIZE);
+               memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
+                      AES_BLOCK_SIZE);
+       }
+}
+
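
qat_alg_xts_reverse_key() exploits the AES key-schedule symmetry: decryption starts from the last encryption round key (AES-128) or the last two (AES-256), which sit at fixed offsets in the expanded key_enc array of (nrounds + 1) 16-byte round keys. A standalone sketch of the same offset arithmetic:

    #include <stdio.h>

    /* Where qat_alg_xts_reverse_key() copies from: byte offsets into
     * the expanded encryption schedule (key_enc). */
    int main(void)
    {
            int aes128_rounds = 10, aes256_rounds = 14;

            printf("AES-128: copy 16 bytes from offset %d\n",
                   16 * aes128_rounds);          /* 160 */
            printf("AES-256: copy 16 bytes from offset %d\n",
                   16 * aes256_rounds);          /* 224 */
            printf("AES-256: plus 16 bytes from offset %d\n",
                   16 * (aes256_rounds - 1));    /* 208 */
            return 0;
    }
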
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+                                     int alg, const u8 *key,
+                                     unsigned int keylen, int mode)
+{
+       struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+       struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+
+       qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+
+       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+               /* Key reversing not supported, set no convert */
+               dec_cd->aes.cipher_config.val =
+                               QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
+
+               /* In-place key reversal */
+               qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
+                                       dec_cd->ucs_aes.key);
+       } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
+               dec_cd->aes.cipher_config.val =
+                                       QAT_AES_HW_CONFIG_DEC(alg, mode);
+       } else {
+               dec_cd->aes.cipher_config.val =
+                                       QAT_AES_HW_CONFIG_ENC(alg, mode);
+       }
+}
+
+static int qat_alg_validate_key(int key_len, int *alg, int mode)
+{
+       if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
+               switch (key_len) {
+               case AES_KEYSIZE_128:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+                       break;
+               case AES_KEYSIZE_192:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+                       break;
+               case AES_KEYSIZE_256:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else {
+               switch (key_len) {
+               case AES_KEYSIZE_128 << 1:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+                       break;
+               case AES_KEYSIZE_256 << 1:
+                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
+                                     unsigned int keylen,  int mode)
+{
+       struct crypto_authenc_keys keys;
+       int alg;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen))
+               goto bad_key;
+
+       if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
+               goto bad_key;
+
+       if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
+               goto error;
+
+       if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
+               goto error;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return 0;
+bad_key:
+       memzero_explicit(&keys, sizeof(keys));
+       return -EINVAL;
+error:
+       memzero_explicit(&keys, sizeof(keys));
+       return -EFAULT;
+}
+
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+                                         const u8 *key,
+                                         unsigned int keylen,
+                                         int mode)
+{
+       int alg;
+
+       if (qat_alg_validate_key(keylen, &alg, mode))
+               return -EINVAL;
+
+       qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+       qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
+       return 0;
+}
+
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
+                             unsigned int keylen)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+       return qat_alg_aead_init_sessions(tfm, key, keylen,
+                                         ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+       struct qat_crypto_instance *inst = NULL;
+       int node = numa_node_id();
+       struct device *dev;
+       int ret;
+
+       inst = qat_crypto_get_instance_node(node);
+       if (!inst)
+               return -EINVAL;
+       dev = &GET_DEV(inst->accel_dev);
+       ctx->inst = inst;
+       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                        &ctx->enc_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->enc_cd) {
+               ret = -ENOMEM;
+               goto out_free_inst;
+       }
+       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                        &ctx->dec_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->dec_cd) {
+               ret = -ENOMEM;
+               goto out_free_enc;
+       }
+
+       ret = qat_alg_aead_init_sessions(tfm, key, keylen,
+                                        ICP_QAT_HW_CIPHER_CBC_MODE);
+       if (ret)
+               goto out_free_all;
+
+       return 0;
+
+out_free_all:
+       memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+out_free_enc:
+       memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+out_free_inst:
+       ctx->inst = NULL;
+       qat_crypto_put_instance(inst);
+       return ret;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+       if (ctx->enc_cd)
+               return qat_alg_aead_rekey(tfm, key, keylen);
+       else
+               return qat_alg_aead_newkey(tfm, key, keylen);
+}
+
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+                                 struct qat_crypto_request *qat_req)
+{
+       struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct aead_request *areq = qat_req->aead_req;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EBADMSG;
+       aead_request_complete(areq, res);
+}
+
+static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
+{
+       struct skcipher_request *sreq = qat_req->skcipher_req;
+       u64 iv_lo_prev;
+       u64 iv_lo;
+       u64 iv_hi;
+
+       memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
+
+       iv_lo = be64_to_cpu(qat_req->iv_lo);
+       iv_hi = be64_to_cpu(qat_req->iv_hi);
+
+       iv_lo_prev = iv_lo;
+       iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
+       if (iv_lo < iv_lo_prev)
+               iv_hi++;
+
+       qat_req->iv_lo = cpu_to_be64(iv_lo);
+       qat_req->iv_hi = cpu_to_be64(iv_hi);
+}
+
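qat_alg_update_iv_ctr_mode() above advances the 128-bit big-endian
counter by the number of AES blocks just processed, carrying from the
low 64-bit half into the high half on unsigned wrap-around; e.g. a
48-byte request advances it by DIV_ROUND_UP(48, 16) = 3 blocks. The
same arithmetic as a self-contained sketch (ctr128_add() is a
hypothetical helper operating on host-order halves after the be64
conversions):

  /* Advance a 128-bit counter split into two 64-bit halves by
   * nblocks, carrying into the high half when the low half wraps. */
  static void ctr128_add(u64 *hi, u64 *lo, u64 nblocks)
  {
          u64 prev = *lo;

          *lo += nblocks;
          if (*lo < prev)         /* unsigned overflow => carry */
                  (*hi)++;
  }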
+static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
+{
+       struct skcipher_request *sreq = qat_req->skcipher_req;
+       int offset = sreq->cryptlen - AES_BLOCK_SIZE;
+       struct scatterlist *sgl;
+
+       if (qat_req->encryption)
+               sgl = sreq->dst;
+       else
+               sgl = sreq->src;
+
+       scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
+}
+
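For CBC the chaining value is simply the last ciphertext block, which
sits in dst after an encryption and in src before a decryption, hence
the sgl selection above. Once the ciphertext is contiguous, the copy
amounts to (illustrative sketch only):

  /* CBC chaining: the next IV is the final ciphertext block. */
  static void cbc_next_iv(const u8 *ct, unsigned int len, u8 *iv)
  {
          memcpy(iv, ct + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
  }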
+static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
+{
+       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       switch (ctx->mode) {
+       case ICP_QAT_HW_CIPHER_CTR_MODE:
+               qat_alg_update_iv_ctr_mode(qat_req);
+               break;
+       case ICP_QAT_HW_CIPHER_CBC_MODE:
+               qat_alg_update_iv_cbc_mode(qat_req);
+               break;
+       case ICP_QAT_HW_CIPHER_XTS_MODE:
+               break;
+       default:
+               dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
+                        ctx->mode);
+       }
+}
+
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+                                     struct qat_crypto_request *qat_req)
+{
+       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct skcipher_request *sreq = qat_req->skcipher_req;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EINVAL;
+
+       if (qat_req->encryption)
+               qat_alg_update_iv(qat_req);
+
+       memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
+
+       skcipher_request_complete(sreq, res);
+}
+
+void qat_alg_callback(void *resp)
+{
+       struct icp_qat_fw_la_resp *qat_resp = resp;
+       struct qat_crypto_request *qat_req =
+                               (void *)(__force long)qat_resp->opaque_data;
+       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+       qat_req->cb(qat_resp, qat_req);
+
+       qat_alg_send_backlog(backlog);
+}
+
+static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
+                                   struct qat_crypto_instance *inst,
+                                   struct crypto_async_request *base)
+{
+       struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+       alg_req->fw_req = (u32 *)&qat_req->req;
+       alg_req->tx_ring = inst->sym_tx;
+       alg_req->base = base;
+       alg_req->backlog = &inst->backlog;
+
+       return qat_alg_send_message(alg_req);
+}
+
+static int qat_alg_aead_dec(struct aead_request *areq)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int digest_size = crypto_aead_authsize(aead_tfm);
+       gfp_t f = qat_algs_alloc_flags(&areq->base);
+       int ret;
+       u32 cipher_len;
+
+       cipher_len = areq->cryptlen - digest_size;
+       if (cipher_len % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req;
+       qat_req->aead_ctx = ctx;
+       qat_req->aead_req = areq;
+       qat_req->cb = qat_aead_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = cipher_len;
+       cipher_param->cipher_offset = areq->assoclen;
+       memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_aead_enc(struct aead_request *areq)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       gfp_t f = qat_algs_alloc_flags(&areq->base);
+       struct icp_qat_fw_la_bulk_req *msg;
+       u8 *iv = areq->iv;
+       int ret;
+
+       if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req;
+       qat_req->aead_ctx = ctx;
+       qat_req->aead_req = areq;
+       qat_req->cb = qat_aead_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
+
+       memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+       cipher_param->cipher_length = areq->cryptlen;
+       cipher_param->cipher_offset = areq->assoclen;
+
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + areq->cryptlen;
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+                                 const u8 *key, unsigned int keylen,
+                                 int mode)
+{
+       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+
+       return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
+}
+
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+                                  const u8 *key, unsigned int keylen,
+                                  int mode)
+{
+       struct qat_crypto_instance *inst = NULL;
+       struct device *dev;
+       int node = numa_node_id();
+       int ret;
+
+       inst = qat_crypto_get_instance_node(node);
+       if (!inst)
+               return -EINVAL;
+       dev = &GET_DEV(inst->accel_dev);
+       ctx->inst = inst;
+       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
+                                        &ctx->enc_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->enc_cd) {
+               ret = -ENOMEM;
+               goto out_free_instance;
+       }
+       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
+                                        &ctx->dec_cd_paddr,
+                                        GFP_ATOMIC);
+       if (!ctx->dec_cd) {
+               ret = -ENOMEM;
+               goto out_free_enc;
+       }
+
+       ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
+       if (ret)
+               goto out_free_all;
+
+       return 0;
+
+out_free_all:
+       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+       dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+out_free_enc:
+       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+       dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+out_free_instance:
+       ctx->inst = NULL;
+       qat_crypto_put_instance(inst);
+       return ret;
+}
+
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+                                  const u8 *key, unsigned int keylen,
+                                  int mode)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       ctx->mode = mode;
+
+       if (ctx->enc_cd)
+               return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
+       else
+               return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
+}
+
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+       return qat_alg_skcipher_setkey(tfm, key, keylen,
+                                      ICP_QAT_HW_CIPHER_CBC_MODE);
+}
+
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+       return qat_alg_skcipher_setkey(tfm, key, keylen,
+                                      ICP_QAT_HW_CIPHER_CTR_MODE);
+}
+
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+                                      const u8 *key, unsigned int keylen)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       int ret;
+
+       ret = xts_verify_key(tfm, key, keylen);
+       if (ret)
+               return ret;
+
+       if (keylen >> 1 == AES_KEYSIZE_192) {
+               ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
+               if (ret)
+                       return ret;
+
+               ctx->fallback = true;
+
+               return 0;
+       }
+
+       ctx->fallback = false;
+
+       ret = qat_alg_skcipher_setkey(tfm, key, keylen,
+                                     ICP_QAT_HW_CIPHER_XTS_MODE);
+       if (ret)
+               return ret;
+
+       if (HW_CAP_AES_V2(ctx->inst->accel_dev))
+               ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
+                                          keylen / 2);
+
+       return ret;
+}
+
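An XTS key is two equal-length AES keys concatenated, which is why
qat_alg_validate_key() matches the doubled sizes and why the tweak
cipher above is keyed with the second half (key + keylen / 2). AES-192
(keylen / 2 == AES_KEYSIZE_192) is not supported by the hardware in
XTS mode, so those keys go to the software fallback tfm instead. The
split convention as a sketch (xts_split_key() is illustrative only):

  /* XTS key layout: key1 || key2, each keylen / 2 bytes long. */
  static void xts_split_key(const u8 *key, unsigned int keylen,
                            const u8 **key1, const u8 **key2)
  {
          *key1 = key;                    /* data-unit encryption key */
          *key2 = key + keylen / 2;       /* tweak encryption key */
  }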
+static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
+{
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+       u8 *iv = qat_req->skcipher_req->iv;
+
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+
+       if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
+               crypto_cipher_encrypt_one(ctx->tweak,
+                                         (u8 *)cipher_param->u.cipher_IV_array,
+                                         iv);
+       else
+               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+}
+
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       gfp_t f = qat_algs_alloc_flags(&req->base);
+       struct icp_qat_fw_la_bulk_req *msg;
+       int ret;
+
+       if (req->cryptlen == 0)
+               return 0;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req;
+       qat_req->skcipher_ctx = ctx;
+       qat_req->skcipher_req = req;
+       qat_req->cb = qat_skcipher_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       qat_req->encryption = true;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = req->cryptlen;
+       cipher_param->cipher_offset = 0;
+
+       qat_alg_set_req_iv(qat_req);
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
+{
+       if (req->cryptlen % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       return qat_alg_skcipher_encrypt(req);
+}
+
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+       struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+       if (req->cryptlen < XTS_BLOCK_SIZE)
+               return -EINVAL;
+
+       if (ctx->fallback) {
+               memcpy(nreq, req, sizeof(*req));
+               skcipher_request_set_tfm(nreq, ctx->ftfm);
+               return crypto_skcipher_encrypt(nreq);
+       }
+
+       return qat_alg_skcipher_encrypt(req);
+}
+
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       gfp_t f = qat_algs_alloc_flags(&req->base);
+       struct icp_qat_fw_la_bulk_req *msg;
+       int ret;
+
+       if (req->cryptlen == 0)
+               return 0;
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
+                                &qat_req->buf, NULL, f);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req;
+       qat_req->skcipher_ctx = ctx;
+       qat_req->skcipher_req = req;
+       qat_req->cb = qat_skcipher_alg_callback;
+       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       qat_req->encryption = false;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = req->cryptlen;
+       cipher_param->cipher_offset = 0;
+
+       qat_alg_set_req_iv(qat_req);
+       qat_alg_update_iv(qat_req);
+
+       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
+{
+       if (req->cryptlen % AES_BLOCK_SIZE != 0)
+               return -EINVAL;
+
+       return qat_alg_skcipher_decrypt(req);
+}
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+       struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+       if (req->cryptlen < XTS_BLOCK_SIZE)
+               return -EINVAL;
+
+       if (ctx->fallback) {
+               memcpy(nreq, req, sizeof(*req));
+               skcipher_request_set_tfm(nreq, ctx->ftfm);
+               return crypto_skcipher_decrypt(nreq);
+       }
+
+       return qat_alg_skcipher_decrypt(req);
+}
+
+static int qat_alg_aead_init(struct crypto_aead *tfm,
+                            enum icp_qat_hw_auth_algo hash,
+                            const char *hash_name)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+       ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+       if (IS_ERR(ctx->hash_tfm))
+               return PTR_ERR(ctx->hash_tfm);
+       ctx->qat_hash_alg = hash;
+       crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+       return 0;
+}
+
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
+{
+       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
+{
+       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
+{
+       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
+{
+       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+
+       crypto_free_shash(ctx->hash_tfm);
+
+       if (!inst)
+               return;
+
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd) {
+               memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       }
+       if (ctx->dec_cd) {
+               memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       }
+       qat_crypto_put_instance(inst);
+}
+
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+       return 0;
+}
+
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       int reqsize;
+
+       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
+                                         CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->ftfm))
+               return PTR_ERR(ctx->ftfm);
+
+       ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
+       if (IS_ERR(ctx->tweak)) {
+               crypto_free_skcipher(ctx->ftfm);
+               return PTR_ERR(ctx->tweak);
+       }
+
+       reqsize = max(sizeof(struct qat_crypto_request),
+                     sizeof(struct skcipher_request) +
+                     crypto_skcipher_reqsize(ctx->ftfm));
+       crypto_skcipher_set_reqsize(tfm, reqsize);
+
+       return 0;
+}
+
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+
+       if (!inst)
+               return;
+
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd) {
+               memset(ctx->enc_cd, 0,
+                      sizeof(struct icp_qat_hw_cipher_algo_blk));
+               dma_free_coherent(dev,
+                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       }
+       if (ctx->dec_cd) {
+               memset(ctx->dec_cd, 0,
+                      sizeof(struct icp_qat_hw_cipher_algo_blk));
+               dma_free_coherent(dev,
+                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       }
+       qat_crypto_put_instance(inst);
+}
+
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
+{
+       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       if (ctx->ftfm)
+               crypto_free_skcipher(ctx->ftfm);
+
+       if (ctx->tweak)
+               crypto_free_cipher(ctx->tweak);
+
+       qat_alg_skcipher_exit_tfm(tfm);
+}
+
+static struct aead_alg qat_aeads[] = { {
+       .base = {
+               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha1_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+       .base = {
+               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha256_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+       .base = {
+               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+               .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_alg_aead_sha512_init,
+       .exit = qat_alg_aead_exit,
+       .setkey = qat_alg_aead_setkey,
+       .decrypt = qat_alg_aead_dec,
+       .encrypt = qat_alg_aead_enc,
+       .ivsize = AES_BLOCK_SIZE,
+       .maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
+static struct skcipher_alg qat_skciphers[] = { {
+       .base.cra_name = "cbc(aes)",
+       .base.cra_driver_name = "qat_aes_cbc",
+       .base.cra_priority = 4001,
+       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+       .base.cra_blocksize = AES_BLOCK_SIZE,
+       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+       .base.cra_alignmask = 0,
+       .base.cra_module = THIS_MODULE,
+
+       .init = qat_alg_skcipher_init_tfm,
+       .exit = qat_alg_skcipher_exit_tfm,
+       .setkey = qat_alg_skcipher_cbc_setkey,
+       .decrypt = qat_alg_skcipher_blk_decrypt,
+       .encrypt = qat_alg_skcipher_blk_encrypt,
+       .min_keysize = AES_MIN_KEY_SIZE,
+       .max_keysize = AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+}, {
+       .base.cra_name = "ctr(aes)",
+       .base.cra_driver_name = "qat_aes_ctr",
+       .base.cra_priority = 4001,
+       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+       .base.cra_blocksize = 1,
+       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+       .base.cra_alignmask = 0,
+       .base.cra_module = THIS_MODULE,
+
+       .init = qat_alg_skcipher_init_tfm,
+       .exit = qat_alg_skcipher_exit_tfm,
+       .setkey = qat_alg_skcipher_ctr_setkey,
+       .decrypt = qat_alg_skcipher_decrypt,
+       .encrypt = qat_alg_skcipher_encrypt,
+       .min_keysize = AES_MIN_KEY_SIZE,
+       .max_keysize = AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+}, {
+       .base.cra_name = "xts(aes)",
+       .base.cra_driver_name = "qat_aes_xts",
+       .base.cra_priority = 4001,
+       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+                         CRYPTO_ALG_ALLOCATES_MEMORY,
+       .base.cra_blocksize = AES_BLOCK_SIZE,
+       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+       .base.cra_alignmask = 0,
+       .base.cra_module = THIS_MODULE,
+
+       .init = qat_alg_skcipher_init_xts_tfm,
+       .exit = qat_alg_skcipher_exit_xts_tfm,
+       .setkey = qat_alg_skcipher_xts_setkey,
+       .decrypt = qat_alg_skcipher_xts_decrypt,
+       .encrypt = qat_alg_skcipher_xts_encrypt,
+       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+       .max_keysize = 2 * AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+} };
+
+int qat_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs != 1)
+               goto unlock;
+
+       ret = crypto_register_skciphers(qat_skciphers,
+                                       ARRAY_SIZE(qat_skciphers));
+       if (ret)
+               goto unlock;
+
+       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       if (ret)
+               goto unreg_algs;
+
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+
+unreg_algs:
+       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+       goto unlock;
+}
+
+void qat_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs != 0)
+               goto unlock;
+
+       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
+
+unlock:
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.c b/drivers/crypto/intel/qat/qat_common/qat_algs_send.c
new file mode 100644 (file)
index 0000000..bb80455
--- /dev/null
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2022 Intel Corporation */
+#include <crypto/algapi.h>
+#include "adf_transport.h"
+#include "qat_algs_send.h"
+#include "qat_crypto.h"
+
+#define ADF_MAX_RETRIES                20
+
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
+{
+       int ret = 0, ctr = 0;
+
+       do {
+               ret = adf_send_message(req->tx_ring, req->fw_req);
+       } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
+
+       if (ret == -EAGAIN)
+               return -ENOSPC;
+
+       return -EINPROGRESS;
+}
+
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
+{
+       struct qat_alg_req *req, *tmp;
+
+       spin_lock_bh(&backlog->lock);
+       list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+               if (adf_send_message(req->tx_ring, req->fw_req)) {
+                       /* The HW ring is full. Do nothing.
+                        * qat_alg_send_backlog() will be invoked again by
+                        * another callback.
+                        */
+                       break;
+               }
+               list_del(&req->list);
+               crypto_request_complete(req->base, -EINPROGRESS);
+       }
+       spin_unlock_bh(&backlog->lock);
+}
+
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+                               struct qat_instance_backlog *backlog)
+{
+       INIT_LIST_HEAD(&req->list);
+
+       spin_lock_bh(&backlog->lock);
+       list_add_tail(&req->list, &backlog->list);
+       spin_unlock_bh(&backlog->lock);
+}
+
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
+{
+       struct qat_instance_backlog *backlog = req->backlog;
+       struct adf_etr_ring_data *tx_ring = req->tx_ring;
+       u32 *fw_req = req->fw_req;
+
+       /* If any request is already backlogged, then add to backlog list */
+       if (!list_empty(&backlog->list))
+               goto enqueue;
+
+       /* If ring is nearly full, then add to backlog list */
+       if (adf_ring_nearly_full(tx_ring))
+               goto enqueue;
+
+       /* If adding request to HW ring fails, then add to backlog list */
+       if (adf_send_message(tx_ring, fw_req))
+               goto enqueue;
+
+       return -EINPROGRESS;
+
+enqueue:
+       qat_alg_backlog_req(req, backlog);
+
+       return -EBUSY;
+}
+
+int qat_alg_send_message(struct qat_alg_req *req)
+{
+       u32 flags = req->base->flags;
+
+       if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+               return qat_alg_send_message_maybacklog(req);
+       else
+               return qat_alg_send_message_retry(req);
+}
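qat_alg_send_message() therefore picks one of two strategies based on
CRYPTO_TFM_REQ_MAY_BACKLOG: a bounded retry loop that maps ring
exhaustion to -ENOSPC, or a backlog path that parks the request,
returns -EBUSY and leaves qat_alg_send_backlog() to drain it from a
later response callback. A condensed sketch of the backlog decision,
with hypothetical try_send()/enqueue_backlog() helpers standing in for
adf_send_message() and the locked list handling:

  /* Hypothetical helpers standing in for the real ring/backlog ops. */
  int try_send(u32 *msg);         /* 0 on success, -EAGAIN if full */
  bool ring_nearly_full(void);
  bool backlog_empty(void);
  void enqueue_backlog(u32 *msg);

  static int send_maybacklog_sketch(u32 *msg)
  {
          /* Preserve ordering: once anything is backlogged, or the
           * ring is close to full, new requests join the backlog. */
          if (!backlog_empty() || ring_nearly_full() || try_send(msg)) {
                  enqueue_backlog(msg);
                  return -EBUSY;          /* queued, completed later */
          }

          return -EINPROGRESS;            /* accepted by the HW ring */
  }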
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs_send.h b/drivers/crypto/intel/qat/qat_common/qat_algs_send.h
new file mode 100644 (file)
index 0000000..0baca16
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef QAT_ALGS_SEND_H
+#define QAT_ALGS_SEND_H
+
+#include <linux/list.h>
+#include "adf_transport_internal.h"
+
+struct qat_instance_backlog {
+       struct list_head list;
+       spinlock_t lock; /* protects backlog list */
+};
+
+struct qat_alg_req {
+       u32 *fw_req;
+       struct adf_etr_ring_data *tx_ring;
+       struct crypto_async_request *base;
+       struct list_head list;
+       struct qat_instance_backlog *backlog;
+};
+
+int qat_alg_send_message(struct qat_alg_req *req);
+void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
new file mode 100644 (file)
index 0000000..935a7e0
--- /dev/null
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include <crypto/scatterwalk.h>
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+       union {
+               struct {
+                       dma_addr_t m;
+                       dma_addr_t e;
+                       dma_addr_t n;
+               } enc;
+               struct {
+                       dma_addr_t c;
+                       dma_addr_t d;
+                       dma_addr_t n;
+               } dec;
+               struct {
+                       dma_addr_t c;
+                       dma_addr_t p;
+                       dma_addr_t q;
+                       dma_addr_t dp;
+                       dma_addr_t dq;
+                       dma_addr_t qinv;
+               } dec_crt;
+               u64 in_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+       union {
+               struct {
+                       dma_addr_t c;
+               } enc;
+               struct {
+                       dma_addr_t m;
+               } dec;
+               u64 out_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+       char *n;
+       char *e;
+       char *d;
+       char *p;
+       char *q;
+       char *dp;
+       char *dq;
+       char *qinv;
+       dma_addr_t dma_n;
+       dma_addr_t dma_e;
+       dma_addr_t dma_d;
+       dma_addr_t dma_p;
+       dma_addr_t dma_q;
+       dma_addr_t dma_dp;
+       dma_addr_t dma_dq;
+       dma_addr_t dma_qinv;
+       unsigned int key_sz;
+       bool crt_mode;
+       struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+       union {
+               struct {
+                       dma_addr_t b;
+                       dma_addr_t xa;
+                       dma_addr_t p;
+               } in;
+               struct {
+                       dma_addr_t xa;
+                       dma_addr_t p;
+               } in_g2;
+               u64 in_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+       union {
+               dma_addr_t r;
+               u64 out_tab[8];
+       };
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+       char *g;
+       char *xa;
+       char *p;
+       dma_addr_t dma_g;
+       dma_addr_t dma_xa;
+       dma_addr_t dma_p;
+       unsigned int p_size;
+       bool g2;
+       struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_asym_request {
+       union {
+               struct qat_rsa_input_params rsa;
+               struct qat_dh_input_params dh;
+       } in;
+       union {
+               struct qat_rsa_output_params rsa;
+               struct qat_dh_output_params dh;
+       } out;
+       dma_addr_t phy_in;
+       dma_addr_t phy_out;
+       char *src_align;
+       char *dst_align;
+       struct icp_qat_fw_pke_request req;
+       union {
+               struct qat_rsa_ctx *rsa;
+               struct qat_dh_ctx *dh;
+       } ctx;
+       union {
+               struct akcipher_request *rsa;
+               struct kpp_request *dh;
+       } areq;
+       int err;
+       void (*cb)(struct icp_qat_fw_pke_resp *resp);
+       struct qat_alg_req alg_req;
+} __aligned(64);
+
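qat_asym_request is declared __aligned(64), but the request context
area handed out by the crypto API carries no such guarantee; that is
why qat_dh_init_tfm() below reserves sizeof(struct qat_asym_request) +
64 bytes via kpp_set_reqsize() and the handlers recover the aligned
pointer with PTR_ALIGN(..., 64). The idiom spelled out as a sketch:

  /* Over-allocate by the alignment, then round the pointer up;
   * equivalent to PTR_ALIGN(p, 64) on a (size + 64) byte area. */
  static void *align64(void *p)
  {
          return (void *)(((unsigned long)p + 63) & ~63UL);
  }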
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
+                                    struct qat_crypto_instance *inst,
+                                    struct crypto_async_request *base)
+{
+       struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+       alg_req->fw_req = (u32 *)&qat_req->req;
+       alg_req->tx_ring = inst->pke_tx;
+       alg_req->base = base;
+       alg_req->backlog = &inst->backlog;
+
+       return qat_alg_send_message(alg_req);
+}
+
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
+{
+       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+       struct kpp_request *areq = req->areq.dh;
+       struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
+       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+                               resp->pke_resp_hdr.comn_resp_flags);
+
+       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+       if (areq->src) {
+               dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
+                                DMA_TO_DEVICE);
+               kfree_sensitive(req->src_align);
+       }
+
+       areq->dst_len = req->ctx.dh->p_size;
+       if (req->dst_align) {
+               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+                                        areq->dst_len, 1);
+               kfree_sensitive(req->dst_align);
+       }
+
+       dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+                        DMA_FROM_DEVICE);
+
+       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+                        DMA_TO_DEVICE);
+       dma_unmap_single(dev, req->phy_out,
+                        sizeof(struct qat_dh_output_params),
+                        DMA_TO_DEVICE);
+
+       kpp_request_complete(areq, err);
+}
+
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 1536:
+               return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+       case 2048:
+               return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+       case 3072:
+               return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+       case 4096:
+               return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+       default:
+               return 0;
+       }
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_asym_request *qat_req =
+                       PTR_ALIGN(kpp_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       gfp_t flags = qat_algs_alloc_flags(&req->base);
+       int n_input_params = 0;
+       u8 *vaddr;
+       int ret;
+
+       if (unlikely(!ctx->xa))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->p_size) {
+               req->dst_len = ctx->p_size;
+               return -EOVERFLOW;
+       }
+
+       if (req->src_len > ctx->p_size)
+               return -EINVAL;
+
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+       msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+                                                   !req->src && ctx->g2);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->cb = qat_dh_cb;
+       qat_req->ctx.dh = ctx;
+       qat_req->areq.dh = req;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       /*
+        * If no source is provided use g as base
+        */
+       if (req->src) {
+               qat_req->in.dh.in.xa = ctx->dma_xa;
+               qat_req->in.dh.in.p = ctx->dma_p;
+               n_input_params = 3;
+       } else {
+               if (ctx->g2) {
+                       qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+                       qat_req->in.dh.in_g2.p = ctx->dma_p;
+                       n_input_params = 2;
+               } else {
+                       qat_req->in.dh.in.b = ctx->dma_g;
+                       qat_req->in.dh.in.xa = ctx->dma_xa;
+                       qat_req->in.dh.in.p = ctx->dma_p;
+                       n_input_params = 3;
+               }
+       }
+
+       ret = -ENOMEM;
+       if (req->src) {
+               /*
+                * src can be of any size in the valid range, but the HW
+                * expects it to be the same size as the modulus p, so if it
+                * differs we need to allocate a new buffer and copy the src
+                * data into it. Otherwise we just map the user provided
+                * buffer, which must also be contiguous.
+                */
+               if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+                       qat_req->src_align = NULL;
+                       vaddr = sg_virt(req->src);
+               } else {
+                       int shift = ctx->p_size - req->src_len;
+
+                       qat_req->src_align = kzalloc(ctx->p_size, flags);
+                       if (unlikely(!qat_req->src_align))
+                               return ret;
+
+                       scatterwalk_map_and_copy(qat_req->src_align + shift,
+                                                req->src, 0, req->src_len, 0);
+
+                       vaddr = qat_req->src_align;
+               }
+
+               qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
+                                                    DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
+                       goto unmap_src;
+       }
+       /*
+        * dst can be of any size in the valid range, but the HW expects it
+        * to be the same size as the modulus p, so if it differs we need to
+        * allocate a new buffer and copy the result back out on completion.
+        * Otherwise we just map the user provided buffer, which must also
+        * be contiguous.
+        */
+       if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+               qat_req->dst_align = NULL;
+               vaddr = sg_virt(req->dst);
+       } else {
+               qat_req->dst_align = kzalloc(ctx->p_size, flags);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+
+               vaddr = qat_req->dst_align;
+       }
+       qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
+                                          DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+               goto unmap_dst;
+
+       qat_req->in.dh.in_tab[n_input_params] = 0;
+       qat_req->out.dh.out_tab[1] = 0;
+       /* Mapping in.in.b or in.in_g2.xa is the same */
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+                                        sizeof(struct qat_dh_input_params),
+                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+                                         sizeof(struct qat_dh_output_params),
+                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (u64)(__force long)qat_req;
+       msg->input_param_count = n_input_params;
+       msg->output_param_count = 1;
+
+       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+       if (ret == -ENOSPC)
+               goto unmap_all;
+
+       return ret;
+
+unmap_all:
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_dh_output_params),
+                                DMA_TO_DEVICE);
+unmap_in_params:
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_dh_input_params),
+                                DMA_TO_DEVICE);
+unmap_dst:
+       if (!dma_mapping_error(dev, qat_req->out.dh.r))
+               dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+                                DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
+unmap_src:
+       if (req->src) {
+               if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+                       dma_unmap_single(dev, qat_req->in.dh.in.b,
+                                        ctx->p_size,
+                                        DMA_TO_DEVICE);
+               kfree_sensitive(qat_req->src_align);
+       }
+       return ret;
+}
+
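Both the DH and RSA paths normalize operands this way: the PKE engine
wants each input in one contiguous buffer of exactly the operand size,
so a shorter big-endian value is copied into a zeroed buffer at offset
(op_size - src_len), zero-padding its most significant bytes. The
padding step as a standalone sketch (pke_pad_operand() is hypothetical;
scatterwalk_map_and_copy() is the helper used above):

  /* Left-pad a big-endian operand to the fixed size the PKE engine
   * expects; returns NULL on allocation failure. */
  static u8 *pke_pad_operand(struct scatterlist *src,
                             unsigned int src_len,
                             unsigned int op_size, gfp_t flags)
  {
          unsigned int shift = op_size - src_len; /* zero-filled MSBs */
          u8 *buf = kzalloc(op_size, flags);

          if (!buf)
                  return NULL;
          scatterwalk_map_and_copy(buf + shift, src, 0, src_len, 0);
          return buf;
  }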
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+       switch (p_len) {
+       case 1536:
+       case 2048:
+       case 3072:
+       case 4096:
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+
+       if (qat_dh_check_params_length(params->p_size << 3))
+               return -EINVAL;
+
+       ctx->p_size = params->p_size;
+       ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+       if (!ctx->p)
+               return -ENOMEM;
+       memcpy(ctx->p, params->p, ctx->p_size);
+
+       /* If g equals 2 don't copy it */
+       if (params->g_size == 1 && *(char *)params->g == 0x02) {
+               ctx->g2 = true;
+               return 0;
+       }
+
+       ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+       if (!ctx->g)
+               return -ENOMEM;
+       memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+              params->g_size);
+
+       return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+       if (ctx->g) {
+               memset(ctx->g, 0, ctx->p_size);
+               dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+               ctx->g = NULL;
+       }
+       if (ctx->xa) {
+               memset(ctx->xa, 0, ctx->p_size);
+               dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+               ctx->xa = NULL;
+       }
+       if (ctx->p) {
+               memset(ctx->p, 0, ctx->p_size);
+               dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+               ctx->p = NULL;
+       }
+       ctx->p_size = 0;
+       ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+                            unsigned int len)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+       struct dh params;
+       int ret;
+
+       if (crypto_dh_decode_key(buf, len, &params) < 0)
+               return -EINVAL;
+
+       /* Free old secret if any */
+       qat_dh_clear_ctx(dev, ctx);
+
+       ret = qat_dh_set_params(ctx, &params);
+       if (ret < 0)
+               goto err_clear_ctx;
+
+       ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+                                    GFP_KERNEL);
+       if (!ctx->xa) {
+               ret = -ENOMEM;
+               goto err_clear_ctx;
+       }
+       memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+              params.key_size);
+
+       return 0;
+
+err_clear_ctx:
+       qat_dh_clear_ctx(dev, ctx);
+       return ret;
+}
+
+static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+       return ctx->p_size;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst =
+                       qat_crypto_get_instance_node(numa_node_id());
+
+       if (!inst)
+               return -EINVAL;
+
+       kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
+       ctx->p_size = 0;
+       ctx->g2 = false;
+       ctx->inst = inst;
+       return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       qat_dh_clear_ctx(dev, ctx);
+       qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+       struct akcipher_request *areq = req->areq.rsa;
+       struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+                               resp->pke_resp_hdr.comn_resp_flags);
+
+       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+       kfree_sensitive(req->src_align);
+
+       dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+                        DMA_TO_DEVICE);
+
+       areq->dst_len = req->ctx.rsa->key_sz;
+       if (req->dst_align) {
+               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+                                        areq->dst_len, 1);
+
+               kfree_sensitive(req->dst_align);
+       }
+
+       dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+                        DMA_FROM_DEVICE);
+
+       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+                        DMA_TO_DEVICE);
+       dma_unmap_single(dev, req->phy_out,
+                        sizeof(struct qat_rsa_output_params),
+                        DMA_TO_DEVICE);
+
+       akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+       struct icp_qat_fw_pke_resp *resp = _resp;
+       struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
+       struct qat_instance_backlog *backlog = areq->alg_req.backlog;
+
+       areq->cb(resp);
+
+       qat_alg_send_backlog(backlog);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_EP_512;
+       case 1024:
+               return PKE_RSA_EP_1024;
+       case 1536:
+               return PKE_RSA_EP_1536;
+       case 2048:
+               return PKE_RSA_EP_2048;
+       case 3072:
+               return PKE_RSA_EP_3072;
+       case 4096:
+               return PKE_RSA_EP_4096;
+       default:
+               return 0;
+       }
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_DP1_512;
+       case 1024:
+               return PKE_RSA_DP1_1024;
+       case 1536:
+               return PKE_RSA_DP1_1536;
+       case 2048:
+               return PKE_RSA_DP1_2048;
+       case 3072:
+               return PKE_RSA_DP1_3072;
+       case 4096:
+               return PKE_RSA_DP1_4096;
+       default:
+               return 0;
+       }
+}
+
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+       unsigned int bitslen = len << 3;
+
+       switch (bitslen) {
+       case 512:
+               return PKE_RSA_DP2_512;
+       case 1024:
+               return PKE_RSA_DP2_1024;
+       case 1536:
+               return PKE_RSA_DP2_1536;
+       case 2048:
+               return PKE_RSA_DP2_2048;
+       case 3072:
+               return PKE_RSA_DP2_3072;
+       case 4096:
+               return PKE_RSA_DP2_4096;
+       default:
+               return 0;
+       }
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_asym_request *qat_req =
+                       PTR_ALIGN(akcipher_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       gfp_t flags = qat_algs_alloc_flags(&req->base);
+       u8 *vaddr;
+       int ret;
+
+       if (unlikely(!ctx->n || !ctx->e))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->key_sz) {
+               req->dst_len = ctx->key_sz;
+               return -EOVERFLOW;
+       }
+
+       if (req->src_len > ctx->key_sz)
+               return -EINVAL;
+
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->cb = qat_rsa_cb;
+       qat_req->ctx.rsa = ctx;
+       qat_req->areq.rsa = req;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       qat_req->in.rsa.enc.e = ctx->dma_e;
+       qat_req->in.rsa.enc.n = ctx->dma_n;
+       ret = -ENOMEM;
+
+       /*
+        * The source can be any size within the valid range, but the HW
+        * expects it to be the same length as the modulus n. If the sizes
+        * differ, allocate a new buffer and copy the source data into it,
+        * left-padded with zeros; otherwise just map the user-provided
+        * buffer. The data must also be contiguous.
+        */
+       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+               qat_req->src_align = NULL;
+               vaddr = sg_virt(req->src);
+       } else {
+               int shift = ctx->key_sz - req->src_len;
+
+               qat_req->src_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->src_align))
+                       return ret;
+
+               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+                                        0, req->src_len, 0);
+               vaddr = qat_req->src_align;
+       }
+
+       qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+                                              DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+               goto unmap_src;
+
+       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+               qat_req->dst_align = NULL;
+               vaddr = sg_virt(req->dst);
+       } else {
+               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+               vaddr = qat_req->dst_align;
+       }
+
+       qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+                                               DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+               goto unmap_dst;
+
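+       /* Zero-terminate the three-entry input and one-entry output tables */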
+       qat_req->in.rsa.in_tab[3] = 0;
+       qat_req->out.rsa.out_tab[1] = 0;
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+                                        sizeof(struct qat_rsa_input_params),
+                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+                                         sizeof(struct qat_rsa_output_params),
+                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (u64)(__force long)qat_req;
+       msg->input_param_count = 3;
+       msg->output_param_count = 1;
+
+       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+       if (ret == -ENOSPC)
+               goto unmap_all;
+
+       return ret;
+
+unmap_all:
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_rsa_output_params),
+                                DMA_TO_DEVICE);
+unmap_in_params:
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_rsa_input_params),
+                                DMA_TO_DEVICE);
+unmap_dst:
+       if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+               dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+                                ctx->key_sz, DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
+unmap_src:
+       if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+               dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+                                DMA_TO_DEVICE);
+       kfree_sensitive(qat_req->src_align);
+       return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_asym_request *qat_req =
+                       PTR_ALIGN(akcipher_request_ctx(req), 64);
+       struct icp_qat_fw_pke_request *msg = &qat_req->req;
+       gfp_t flags = qat_algs_alloc_flags(&req->base);
+       u8 *vaddr;
+       int ret;
+
+       if (unlikely(!ctx->n || !ctx->d))
+               return -EINVAL;
+
+       if (req->dst_len < ctx->key_sz) {
+               req->dst_len = ctx->key_sz;
+               return -EOVERFLOW;
+       }
+
+       if (req->src_len > ctx->key_sz)
+               return -EINVAL;
+
+       memset(msg, '\0', sizeof(*msg));
+       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+               qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+               qat_rsa_dec_fn_id(ctx->key_sz);
+       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+               return -EINVAL;
+
+       qat_req->cb = qat_rsa_cb;
+       qat_req->ctx.rsa = ctx;
+       qat_req->areq.rsa = req;
+       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+       msg->pke_hdr.comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+       if (ctx->crt_mode) {
+               qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+               qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+               qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+               qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+               qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+       } else {
+               qat_req->in.rsa.dec.d = ctx->dma_d;
+               qat_req->in.rsa.dec.n = ctx->dma_n;
+       }
+       ret = -ENOMEM;
+
+       /*
+        * The source can be any size within the valid range, but the HW
+        * expects it to be the same length as the modulus n. If the sizes
+        * differ, allocate a new buffer and copy the source data into it,
+        * left-padded with zeros; otherwise just map the user-provided
+        * buffer. The data must also be contiguous.
+        */
+       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
+               qat_req->src_align = NULL;
+               vaddr = sg_virt(req->src);
+       } else {
+               int shift = ctx->key_sz - req->src_len;
+
+               qat_req->src_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->src_align))
+                       return ret;
+
+               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
+                                        0, req->src_len, 0);
+               vaddr = qat_req->src_align;
+       }
+
+       qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+                                              DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+               goto unmap_src;
+
+       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+               qat_req->dst_align = NULL;
+               vaddr = sg_virt(req->dst);
+       } else {
+               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
+               if (unlikely(!qat_req->dst_align))
+                       goto unmap_src;
+               vaddr = qat_req->dst_align;
+       }
+       qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+                                               DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+               goto unmap_dst;
+
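+       /* Zero-terminate the input table: six entries in CRT mode, three otherwise */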
+       if (ctx->crt_mode)
+               qat_req->in.rsa.in_tab[6] = 0;
+       else
+               qat_req->in.rsa.in_tab[3] = 0;
+       qat_req->out.rsa.out_tab[1] = 0;
+       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+                                        sizeof(struct qat_rsa_input_params),
+                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+               goto unmap_dst;
+
+       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+                                         sizeof(struct qat_rsa_output_params),
+                                         DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+               goto unmap_in_params;
+
+       msg->pke_mid.src_data_addr = qat_req->phy_in;
+       msg->pke_mid.dest_data_addr = qat_req->phy_out;
+       msg->pke_mid.opaque = (u64)(__force long)qat_req;
+       if (ctx->crt_mode)
+               msg->input_param_count = 6;
+       else
+               msg->input_param_count = 3;
+
+       msg->output_param_count = 1;
+
+       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
+       if (ret == -ENOSPC)
+               goto unmap_all;
+
+       return ret;
+
+unmap_all:
+       if (!dma_mapping_error(dev, qat_req->phy_out))
+               dma_unmap_single(dev, qat_req->phy_out,
+                                sizeof(struct qat_rsa_output_params),
+                                DMA_TO_DEVICE);
+unmap_in_params:
+       if (!dma_mapping_error(dev, qat_req->phy_in))
+               dma_unmap_single(dev, qat_req->phy_in,
+                                sizeof(struct qat_rsa_input_params),
+                                DMA_TO_DEVICE);
+unmap_dst:
+       if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+               dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+                                ctx->key_sz, DMA_FROM_DEVICE);
+       kfree_sensitive(qat_req->dst_align);
+unmap_src:
+       if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+               dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+                                DMA_TO_DEVICE);
+       kfree_sensitive(qat_req->src_align);
+       return ret;
+}
+
+static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
+                        size_t vlen)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+       int ret;
+
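+       /* Skip leading zero bytes of the big-endian value */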
+       while (!*ptr && vlen) {
+               ptr++;
+               vlen--;
+       }
+
+       ctx->key_sz = vlen;
+       ret = -EINVAL;
+       /* invalid key size provided */
+       if (!qat_rsa_enc_fn_id(ctx->key_sz))
+               goto err;
+
+       ret = -ENOMEM;
+       ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+       if (!ctx->n)
+               goto err;
+
+       memcpy(ctx->n, ptr, ctx->key_sz);
+       return 0;
+err:
+       ctx->key_sz = 0;
+       ctx->n = NULL;
+       return ret;
+}
+
+static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
+                        size_t vlen)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+
+       while (!*ptr && vlen) {
+               ptr++;
+               vlen--;
+       }
+
+       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+               ctx->e = NULL;
+               return -EINVAL;
+       }
+
+       ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+       if (!ctx->e)
+               return -ENOMEM;
+
+       memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+       return 0;
+}
+
+static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
+                        size_t vlen)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr = value;
+       int ret;
+
+       while (!*ptr && vlen) {
+               ptr++;
+               vlen--;
+       }
+
+       ret = -EINVAL;
+       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+               goto err;
+
+       ret = -ENOMEM;
+       ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+       if (!ctx->d)
+               goto err;
+
+       memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+       return 0;
+err:
+       ctx->d = NULL;
+       return ret;
+}
+
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
+{
+       while (!**ptr && *len) {
+               (*ptr)++;
+               (*len)--;
+       }
+}
+
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       const char *ptr;
+       unsigned int len;
+       unsigned int half_key_sz = ctx->key_sz / 2;
+
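+       /*
+        * Copy each CRT component into DMA-coherent memory. If a component
+        * is missing or an allocation fails, clean up and fall back to
+        * non-CRT decryption, leaving crt_mode false.
+        */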
+       /* p */
+       ptr = rsa_key->p;
+       len = rsa_key->p_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto err;
+       ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+       if (!ctx->p)
+               goto err;
+       memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+       /* q */
+       ptr = rsa_key->q;
+       len = rsa_key->q_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_p;
+       ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+       if (!ctx->q)
+               goto free_p;
+       memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+       /* dp */
+       ptr = rsa_key->dp;
+       len = rsa_key->dp_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_q;
+       ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+                                    GFP_KERNEL);
+       if (!ctx->dp)
+               goto free_q;
+       memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+       /* dq */
+       ptr = rsa_key->dq;
+       len = rsa_key->dq_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_dp;
+       ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+                                    GFP_KERNEL);
+       if (!ctx->dq)
+               goto free_dp;
+       memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+       /* qinv */
+       ptr = rsa_key->qinv;
+       len = rsa_key->qinv_sz;
+       qat_rsa_drop_leading_zeros(&ptr, &len);
+       if (!len)
+               goto free_dq;
+       ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+                                      GFP_KERNEL);
+       if (!ctx->qinv)
+               goto free_dq;
+       memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+       ctx->crt_mode = true;
+       return;
+
+free_dq:
+       memset(ctx->dq, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+       ctx->dq = NULL;
+free_dp:
+       memset(ctx->dp, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+       ctx->dp = NULL;
+free_q:
+       memset(ctx->q, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+       ctx->q = NULL;
+free_p:
+       memset(ctx->p, '\0', half_key_sz);
+       dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+       ctx->p = NULL;
+err:
+       ctx->crt_mode = false;
+}
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+       unsigned int half_key_sz = ctx->key_sz / 2;
+
+       /* Free the old key if any */
+       if (ctx->n)
+               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+       if (ctx->e)
+               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+       if (ctx->d) {
+               memset(ctx->d, '\0', ctx->key_sz);
+               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+       }
+       if (ctx->p) {
+               memset(ctx->p, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+       }
+       if (ctx->q) {
+               memset(ctx->q, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+       }
+       if (ctx->dp) {
+               memset(ctx->dp, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+       }
+       if (ctx->dq) {
+               memset(ctx->dq, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+       }
+       if (ctx->qinv) {
+               memset(ctx->qinv, '\0', half_key_sz);
+               dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+       }
+
+       ctx->n = NULL;
+       ctx->e = NULL;
+       ctx->d = NULL;
+       ctx->p = NULL;
+       ctx->q = NULL;
+       ctx->dp = NULL;
+       ctx->dq = NULL;
+       ctx->qinv = NULL;
+       ctx->crt_mode = false;
+       ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+                         unsigned int keylen, bool private)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+       struct rsa_key rsa_key;
+       int ret;
+
+       qat_rsa_clear_ctx(dev, ctx);
+
+       if (private)
+               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
+       else
+               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+       if (ret < 0)
+               goto free;
+
+       ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
+       if (ret < 0)
+               goto free;
+       ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+       if (ret < 0)
+               goto free;
+       if (private) {
+               ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+               if (ret < 0)
+                       goto free;
+               qat_rsa_setkey_crt(ctx, &rsa_key);
+       }
+
+       if (!ctx->n || !ctx->e) {
+               /* invalid key provided */
+               ret = -EINVAL;
+               goto free;
+       }
+       if (private && !ctx->d) {
+               /* invalid private key provided */
+               ret = -EINVAL;
+               goto free;
+       }
+
+       return 0;
+free:
+       qat_rsa_clear_ctx(dev, ctx);
+       return ret;
+}
+
+static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+                            unsigned int keylen)
+{
+       return qat_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+                             unsigned int keylen)
+{
+       return qat_rsa_setkey(tfm, key, keylen, true);
+}
+
+static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+       return ctx->key_sz;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst =
+                       qat_crypto_get_instance_node(numa_node_id());
+
+       if (!inst)
+               return -EINVAL;
+
+       akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
+       ctx->key_sz = 0;
+       ctx->inst = inst;
+       return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+       qat_rsa_clear_ctx(dev, ctx);
+       qat_crypto_put_instance(ctx->inst);
+}
+
+static struct akcipher_alg rsa = {
+       .encrypt = qat_rsa_enc,
+       .decrypt = qat_rsa_dec,
+       .set_pub_key = qat_rsa_setpubkey,
+       .set_priv_key = qat_rsa_setprivkey,
+       .max_size = qat_rsa_max_size,
+       .init = qat_rsa_init_tfm,
+       .exit = qat_rsa_exit_tfm,
+       .base = {
+               .cra_name = "rsa",
+               .cra_driver_name = "qat-rsa",
+               .cra_priority = 1000,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct qat_rsa_ctx),
+       },
+};
+
+static struct kpp_alg dh = {
+       .set_secret = qat_dh_set_secret,
+       .generate_public_key = qat_dh_compute_value,
+       .compute_shared_secret = qat_dh_compute_value,
+       .max_size = qat_dh_max_size,
+       .init = qat_dh_init_tfm,
+       .exit = qat_dh_exit_tfm,
+       .base = {
+               .cra_name = "dh",
+               .cra_driver_name = "qat-dh",
+               .cra_priority = 1000,
+               .cra_module = THIS_MODULE,
+               .cra_ctxsize = sizeof(struct qat_dh_ctx),
+       },
+};
+
+int qat_asym_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
+               rsa.base.cra_flags = 0;
+               ret = crypto_register_akcipher(&rsa);
+               if (ret)
+                       goto unlock;
+               ret = crypto_register_kpp(&dh);
+       }
+unlock:
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0) {
+               crypto_unregister_akcipher(&rsa);
+               crypto_unregister_kpp(&dh);
+       }
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
new file mode 100644 (file)
index 0000000..76baed0
--- /dev/null
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_bl.h"
+#include "qat_crypto.h"
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+                     struct qat_request_buffs *buf)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       struct qat_alg_buf_list *bl = buf->bl;
+       struct qat_alg_buf_list *blout = buf->blout;
+       dma_addr_t blp = buf->blp;
+       dma_addr_t blpout = buf->bloutp;
+       size_t sz = buf->sz;
+       size_t sz_out = buf->sz_out;
+       int bl_dma_dir;
+       int i;
+
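+       /* Buffers of in-place requests (src == dst) were mapped bidirectionally */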
+       bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+       for (i = 0; i < bl->num_bufs; i++)
+               dma_unmap_single(dev, bl->buffers[i].addr,
+                                bl->buffers[i].len, bl_dma_dir);
+
+       dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+       if (!buf->sgl_src_valid)
+               kfree(bl);
+
+       if (blp != blpout) {
+               for (i = 0; i < blout->num_mapped_bufs; i++) {
+                       dma_unmap_single(dev, blout->buffers[i].addr,
+                                        blout->buffers[i].len,
+                                        DMA_FROM_DEVICE);
+               }
+               dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
+
+               if (!buf->sgl_dst_valid)
+                       kfree(blout);
+       }
+}
+
+static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+                               struct scatterlist *sgl,
+                               struct scatterlist *sglout,
+                               struct qat_request_buffs *buf,
+                               dma_addr_t extra_dst_buff,
+                               size_t sz_extra_dst_buff,
+                               unsigned int sskip,
+                               unsigned int dskip,
+                               gfp_t flags)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       int i, sg_nctr = 0;
+       int n = sg_nents(sgl);
+       struct qat_alg_buf_list *bufl;
+       struct qat_alg_buf_list *buflout = NULL;
+       dma_addr_t blp = DMA_MAPPING_ERROR;
+       dma_addr_t bloutp = DMA_MAPPING_ERROR;
+       struct scatterlist *sg;
+       size_t sz_out, sz = struct_size(bufl, buffers, n);
+       int node = dev_to_node(&GET_DEV(accel_dev));
+       unsigned int left;
+       int bufl_dma_dir;
+
+       if (unlikely(!n))
+               return -EINVAL;
+
+       buf->sgl_src_valid = false;
+       buf->sgl_dst_valid = false;
+
+       if (n > QAT_MAX_BUFF_DESC) {
+               bufl = kzalloc_node(sz, flags, node);
+               if (unlikely(!bufl))
+                       return -ENOMEM;
+       } else {
+               bufl = &buf->sgl_src.sgl_hdr;
+               memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+               buf->sgl_src_valid = true;
+       }
+
+       bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+       for (i = 0; i < n; i++)
+               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
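+       /* Map the source list, skipping the first sskip bytes */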
+       left = sskip;
+
+       for_each_sg(sgl, sg, n, i) {
+               int y = sg_nctr;
+
+               if (!sg->length)
+                       continue;
+
+               if (left >= sg->length) {
+                       left -= sg->length;
+                       continue;
+               }
+               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+                                                      sg->length - left,
+                                                      bufl_dma_dir);
+               bufl->buffers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
+                       goto err_in;
+               sg_nctr++;
+               if (left) {
+                       bufl->buffers[y].len -= left;
+                       left = 0;
+               }
+       }
+       bufl->num_bufs = sg_nctr;
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+               goto err_in;
+       buf->bl = bufl;
+       buf->blp = blp;
+       buf->sz = sz;
+       /* Handle an out-of-place operation */
+       if (sgl != sglout) {
+               struct qat_alg_buf *buffers;
+               int extra_buff = extra_dst_buff ? 1 : 0;
+               int n_sglout = sg_nents(sglout);
+
+               n = n_sglout + extra_buff;
+               sz_out = struct_size(buflout, buffers, n);
+               left = dskip;
+
+               sg_nctr = 0;
+
+               if (n > QAT_MAX_BUFF_DESC) {
+                       buflout = kzalloc_node(sz_out, flags, node);
+                       if (unlikely(!buflout))
+                               goto err_in;
+               } else {
+                       buflout = &buf->sgl_dst.sgl_hdr;
+                       memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+                       buf->sgl_dst_valid = true;
+               }
+
+               buffers = buflout->buffers;
+               for (i = 0; i < n; i++)
+                       buffers[i].addr = DMA_MAPPING_ERROR;
+
+               for_each_sg(sglout, sg, n_sglout, i) {
+                       int y = sg_nctr;
+
+                       if (!sg->length)
+                               continue;
+
+                       if (left >= sg->length) {
+                               left -= sg->length;
+                               continue;
+                       }
+                       buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+                                                        sg->length - left,
+                                                        DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
+                               goto err_out;
+                       buffers[y].len = sg->length;
+                       sg_nctr++;
+                       if (left) {
+                               buffers[y].len -= left;
+                               left = 0;
+                       }
+               }
+               if (extra_buff) {
+                       buffers[sg_nctr].addr = extra_dst_buff;
+                       buffers[sg_nctr].len = sz_extra_dst_buff;
+               }
+
+               buflout->num_bufs = sg_nctr;
+               buflout->num_bufs += extra_buff;
+               buflout->num_mapped_bufs = sg_nctr;
+               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+                       goto err_out;
+               buf->blout = buflout;
+               buf->bloutp = bloutp;
+               buf->sz_out = sz_out;
+       } else {
+               /* Otherwise set the src and dst to the same address */
+               buf->bloutp = buf->blp;
+               buf->sz_out = 0;
+       }
+       return 0;
+
+err_out:
+       if (!dma_mapping_error(dev, bloutp))
+               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
+       n = sg_nents(sglout);
+       for (i = 0; i < n; i++) {
+               if (buflout->buffers[i].addr == extra_dst_buff)
+                       break;
+               if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+                       dma_unmap_single(dev, buflout->buffers[i].addr,
+                                        buflout->buffers[i].len,
+                                        DMA_FROM_DEVICE);
+       }
+
+       if (!buf->sgl_dst_valid)
+               kfree(buflout);
+
+err_in:
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
+       n = sg_nents(sgl);
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+                       dma_unmap_single(dev, bufl->buffers[i].addr,
+                                        bufl->buffers[i].len,
+                                        bufl_dma_dir);
+
+       if (!buf->sgl_src_valid)
+               kfree(bufl);
+
+       dev_err(dev, "Failed to map buf for dma\n");
+       return -ENOMEM;
+}
+
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+                      struct scatterlist *sgl,
+                      struct scatterlist *sglout,
+                      struct qat_request_buffs *buf,
+                      struct qat_sgl_to_bufl_params *params,
+                      gfp_t flags)
+{
+       dma_addr_t extra_dst_buff = 0;
+       size_t sz_extra_dst_buff = 0;
+       unsigned int sskip = 0;
+       unsigned int dskip = 0;
+
+       if (params) {
+               extra_dst_buff = params->extra_dst_buff;
+               sz_extra_dst_buff = params->sz_extra_dst_buff;
+               sskip = params->sskip;
+               dskip = params->dskip;
+       }
+
+       return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
+                                   extra_dst_buff, sz_extra_dst_buff,
+                                   sskip, dskip, flags);
+}
+
+static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
+                            struct qat_alg_buf_list *bl)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       int n = bl->num_bufs;
+       int i;
+
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bl->buffers[i].addr))
+                       dma_unmap_single(dev, bl->buffers[i].addr,
+                                        bl->buffers[i].len, DMA_FROM_DEVICE);
+}
+
+static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
+                         struct scatterlist *sgl,
+                         struct qat_alg_buf_list **bl)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       struct qat_alg_buf_list *bufl;
+       int node = dev_to_node(dev);
+       struct scatterlist *sg;
+       int n, i, sg_nctr;
+       size_t sz;
+
+       n = sg_nents(sgl);
+       sz = struct_size(bufl, buffers, n);
+       bufl = kzalloc_node(sz, GFP_KERNEL, node);
+       if (unlikely(!bufl))
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++)
+               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+       sg_nctr = 0;
+       for_each_sg(sgl, sg, n, i) {
+               int y = sg_nctr;
+
+               if (!sg->length)
+                       continue;
+
+               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+                                                      sg->length,
+                                                      DMA_FROM_DEVICE);
+               bufl->buffers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
+                       goto err_map;
+               sg_nctr++;
+       }
+       bufl->num_bufs = sg_nctr;
+       bufl->num_mapped_bufs = sg_nctr;
+
+       *bl = bufl;
+
+       return 0;
+
+err_map:
+       for (i = 0; i < n; i++)
+               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+                       dma_unmap_single(dev, bufl->buffers[i].addr,
+                                        bufl->buffers[i].len,
+                                        DMA_FROM_DEVICE);
+       kfree(bufl);
+       *bl = NULL;
+
+       return -ENOMEM;
+}
+
+static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
+                                 struct scatterlist *sgl,
+                                 struct qat_alg_buf_list *bl,
+                                 bool free_bl)
+{
+       if (bl) {
+               qat_bl_sgl_unmap(accel_dev, bl);
+
+               if (free_bl)
+                       kfree(bl);
+       }
+       if (sgl)
+               sgl_free(sgl);
+}
+
+static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
+                               struct scatterlist **sgl,
+                               struct qat_alg_buf_list **bl,
+                               unsigned int dlen,
+                               gfp_t gfp)
+{
+       struct scatterlist *dst;
+       int ret;
+
+       dst = sgl_alloc(dlen, gfp, NULL);
+       if (!dst) {
+               dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
+               return -ENOMEM;
+       }
+
+       ret = qat_bl_sgl_map(accel_dev, dst, bl);
+       if (ret)
+               goto err;
+
+       *sgl = dst;
+
+       return 0;
+
+err:
+       sgl_free(dst);
+       *sgl = NULL;
+       return ret;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+                              struct scatterlist **sg,
+                              unsigned int dlen,
+                              struct qat_request_buffs *qat_bufs,
+                              gfp_t gfp)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       dma_addr_t new_blp = DMA_MAPPING_ERROR;
+       struct qat_alg_buf_list *new_bl;
+       struct scatterlist *new_sg;
+       size_t new_bl_size;
+       int ret;
+
+       ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
+       if (ret)
+               return ret;
+
+       new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
+
+       /* Map new firmware SGL descriptor */
+       new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, new_blp)))
+               goto err;
+
+       /* Unmap old firmware SGL descriptor */
+       dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
+
+       /* Free and unmap old scatterlist */
+       qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
+                             !qat_bufs->sgl_dst_valid);
+
+       qat_bufs->sgl_dst_valid = false;
+       qat_bufs->blout = new_bl;
+       qat_bufs->bloutp = new_blp;
+       qat_bufs->sz_out = new_bl_size;
+
+       *sg = new_sg;
+
+       return 0;
+err:
+       qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
+
+       if (!dma_mapping_error(dev, new_blp))
+               dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
+
+       return -ENOMEM;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h
new file mode 100644 (file)
index 0000000..d87e4f3
--- /dev/null
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2014 - 2022 Intel Corporation */
+#ifndef QAT_BL_H
+#define QAT_BL_H
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#define QAT_MAX_BUFF_DESC      4
+
+struct qat_alg_buf {
+       u32 len;
+       u32 resrvd;
+       u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+       u64 resrvd;
+       u32 num_bufs;
+       u32 num_mapped_bufs;
+       struct qat_alg_buf buffers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+       struct qat_alg_buf_list sgl_hdr;
+       struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
+struct qat_request_buffs {
+       struct qat_alg_buf_list *bl;
+       dma_addr_t blp;
+       struct qat_alg_buf_list *blout;
+       dma_addr_t bloutp;
+       size_t sz;
+       size_t sz_out;
+       bool sgl_src_valid;
+       bool sgl_dst_valid;
+       struct qat_alg_fixed_buf_list sgl_src;
+       struct qat_alg_fixed_buf_list sgl_dst;
+};
+
+struct qat_sgl_to_bufl_params {
+       dma_addr_t extra_dst_buff;
+       size_t sz_extra_dst_buff;
+       unsigned int sskip;
+       unsigned int dskip;
+};
+
+void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
+                     struct qat_request_buffs *buf);
+int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+                      struct scatterlist *sgl,
+                      struct scatterlist *sglout,
+                      struct qat_request_buffs *buf,
+                      struct qat_sgl_to_bufl_params *params,
+                      gfp_t flags);
+
+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+       return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
+                              struct scatterlist **newd,
+                              unsigned int dlen,
+                              struct qat_request_buffs *qat_bufs,
+                              gfp_t gfp);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
new file mode 100644 (file)
index 0000000..b533984
--- /dev/null
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/crypto.h>
+#include <crypto/acompress.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "qat_bl.h"
+#include "qat_comp_req.h"
+#include "qat_compression.h"
+#include "qat_algs_send.h"
+
+#define QAT_RFC_1950_HDR_SIZE 2
+#define QAT_RFC_1950_FOOTER_SIZE 4
+#define QAT_RFC_1950_CM_DEFLATE 8
+#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
+#define QAT_RFC_1950_CM_MASK 0x0f
+#define QAT_RFC_1950_CM_OFFSET 4
+#define QAT_RFC_1950_DICT_MASK 0x20
+#define QAT_RFC_1950_COMP_HDR 0x785e
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+enum direction {
+       DECOMPRESSION = 0,
+       COMPRESSION = 1,
+};
+
+struct qat_compression_req;
+
+struct qat_compression_ctx {
+       u8 comp_ctx[QAT_COMP_CTX_SIZE];
+       struct qat_compression_instance *inst;
+       int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
+};
+
+struct qat_dst {
+       bool is_null;
+       int resubmitted;
+};
+
+struct qat_compression_req {
+       u8 req[QAT_COMP_REQ_SIZE];
+       struct qat_compression_ctx *qat_compression_ctx;
+       struct acomp_req *acompress_req;
+       struct qat_request_buffs buf;
+       enum direction dir;
+       int actual_dlen;
+       struct qat_alg_req alg_req;
+       struct work_struct resubmit;
+       struct qat_dst dst;
+};
+
+static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
+                                  struct qat_compression_instance *inst,
+                                  struct crypto_async_request *base)
+{
+       struct qat_alg_req *alg_req = &qat_req->alg_req;
+
+       alg_req->fw_req = (u32 *)&qat_req->req;
+       alg_req->tx_ring = inst->dc_tx;
+       alg_req->base = base;
+       alg_req->backlog = &inst->backlog;
+
+       return qat_alg_send_message(alg_req);
+}
+
+static void qat_comp_resubmit(struct work_struct *work)
+{
+       struct qat_compression_req *qat_req =
+               container_of(work, struct qat_compression_req, resubmit);
+       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+       struct qat_request_buffs *qat_bufs = &qat_req->buf;
+       struct qat_compression_instance *inst = ctx->inst;
+       struct acomp_req *areq = qat_req->acompress_req;
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+       unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
+       u8 *req = qat_req->req;
+       dma_addr_t dfbuf;
+       int ret;
+
+       areq->dlen = dlen;
+
+       dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
+               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+               qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
+
+       ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
+                                        qat_algs_alloc_flags(&areq->base));
+       if (ret)
+               goto err;
+
+       qat_req->dst.resubmitted = true;
+
+       dfbuf = qat_req->buf.bloutp;
+       qat_comp_override_dst(req, dfbuf, dlen);
+
+       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+       if (ret != -ENOSPC)
+               return;
+
+err:
+       qat_bl_free_bufl(accel_dev, qat_bufs);
+       acomp_request_complete(areq, ret);
+}
+
+static int parse_zlib_header(u16 zlib_h)
+{
+       int ret = -EINVAL;
+       __be16 header;
+       u8 *header_p;
+       u8 cmf, flg;
+
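+       /*
+        * Validate the RFC 1950 header: the method must be deflate with a
+        * window size of at most 32K and no preset dictionary.
+        */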
+       header = cpu_to_be16(zlib_h);
+       header_p = (u8 *)&header;
+
+       flg = header_p[0];
+       cmf = header_p[1];
+
+       if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
+               return ret;
+
+       if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
+               return ret;
+
+       if (flg & QAT_RFC_1950_DICT_MASK)
+               return ret;
+
+       return 0;
+}
+
+static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
+                                    void *resp)
+{
+       struct acomp_req *areq = qat_req->acompress_req;
+       enum direction dir = qat_req->dir;
+       __be32 qat_produced_adler;
+
+       qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
+
+       if (dir == COMPRESSION) {
+               __be16 zlib_header;
+
+               zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
+               scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
+               areq->dlen += QAT_RFC_1950_HDR_SIZE;
+
+               scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
+                                        QAT_RFC_1950_FOOTER_SIZE, 1);
+               areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
+       } else {
+               __be32 decomp_adler;
+               int footer_offset;
+               int consumed;
+
+               consumed = qat_comp_get_consumed_ctr(resp);
+               footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
+               if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
+                       return -EBADMSG;
+
+               scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
+                                        QAT_RFC_1950_FOOTER_SIZE, 0);
+
+               if (qat_produced_adler != decomp_adler)
+                       return -EBADMSG;
+       }
+       return 0;
+}
+
+static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
+                                     void *resp)
+{
+       struct acomp_req *areq = qat_req->acompress_req;
+       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
+       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
+       struct qat_compression_instance *inst = ctx->inst;
+       int consumed, produced;
+       s8 cmp_err, xlt_err;
+       int res = -EBADMSG;
+       int status;
+       u8 cnv;
+
+       status = qat_comp_get_cmp_status(resp);
+       status |= qat_comp_get_xlt_status(resp);
+       cmp_err = qat_comp_get_cmp_err(resp);
+       xlt_err = qat_comp_get_xlt_err(resp);
+
+       consumed = qat_comp_get_consumed_ctr(resp);
+       produced = qat_comp_get_produced_ctr(resp);
+
+       dev_dbg(&GET_DEV(accel_dev),
+               "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
+               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
+               qat_req->dir == COMPRESSION ? "comp  " : "decomp",
+               status ? "ERR" : "OK ",
+               areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
+
+       areq->dlen = 0;
+
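+       /*
+        * Requests submitted with a NULL destination use a driver-allocated
+        * buffer. If that buffer overflowed, resubmit once from workqueue
+        * context with a CRYPTO_ACOMP_DST_MAX sized buffer before failing
+        * with -EOVERFLOW.
+        */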
+       if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
+               if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
+                       if (qat_req->dst.resubmitted) {
+                               dev_dbg(&GET_DEV(accel_dev),
+                                       "Output does not fit destination buffer\n");
+                               res = -EOVERFLOW;
+                               goto end;
+                       }
+
+                       INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
+                       adf_misc_wq_queue_work(&qat_req->resubmit);
+                       return;
+               }
+       }
+
+       if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               goto end;
+
+       if (qat_req->dir == COMPRESSION) {
+               cnv = qat_comp_get_cmp_cnv_flag(resp);
+               if (unlikely(!cnv)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Verified compression not supported\n");
+                       goto end;
+               }
+
+               if (unlikely(produced > qat_req->actual_dlen)) {
+                       memset(inst->dc_data->ovf_buff, 0,
+                              inst->dc_data->ovf_buff_sz);
+                       dev_dbg(&GET_DEV(accel_dev),
+                               "Actual buffer overflow: produced=%d, dlen=%d\n",
+                               produced, qat_req->actual_dlen);
+                       goto end;
+               }
+       }
+
+       res = 0;
+       areq->dlen = produced;
+
+       if (ctx->qat_comp_callback)
+               res = ctx->qat_comp_callback(qat_req, resp);
+
+end:
+       qat_bl_free_bufl(accel_dev, &qat_req->buf);
+       acomp_request_complete(areq, res);
+}
+
+void qat_comp_alg_callback(void *resp)
+{
+       struct qat_compression_req *qat_req =
+                       (void *)(__force long)qat_comp_get_opaque(resp);
+       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
+
+       qat_comp_generic_callback(qat_req, resp);
+
+       qat_alg_send_backlog(backlog);
+}
+
+static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_compression_instance *inst;
+       int node;
+
+       if (tfm->node == NUMA_NO_NODE)
+               node = numa_node_id();
+       else
+               node = tfm->node;
+
+       memset(ctx, 0, sizeof(*ctx));
+       inst = qat_compression_get_instance_node(node);
+       if (!inst)
+               return -EINVAL;
+       ctx->inst = inst;
+
+       ctx->inst->build_deflate_ctx(ctx->comp_ctx);
+
+       return 0;
+}
+
+static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
+{
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       qat_compression_put_instance(ctx->inst);
+       memset(ctx, 0, sizeof(*ctx));
+}
+
+static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       ret = qat_comp_alg_init_tfm(acomp_tfm);
+       ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
+
+       return ret;
+}
+
+static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
+                                           unsigned int shdr, unsigned int sftr,
+                                           unsigned int dhdr, unsigned int dftr)
+{
+       struct qat_compression_req *qat_req = acomp_request_ctx(areq);
+       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_compression_instance *inst = ctx->inst;
+       gfp_t f = qat_algs_alloc_flags(&areq->base);
+       struct qat_sgl_to_bufl_params params = {0};
+       int slen = areq->slen - shdr - sftr;
+       int dlen = areq->dlen - dhdr - dftr;
+       dma_addr_t sfbuf, dfbuf;
+       u8 *req = qat_req->req;
+       size_t ovf_buff_sz;
+       int ret;
+
+       params.sskip = shdr;
+       params.dskip = dhdr;
+
+       if (!areq->src || !slen)
+               return -EINVAL;
+
+       if (areq->dst && !dlen)
+               return -EINVAL;
+
+       qat_req->dst.is_null = false;
+
+       /* Handle acomp requests that require the driver to allocate the
+        * destination buffer. The destination is sized at double the source
+        * length (rounded up to a page) to fit either the decompressed
+        * output or any expansion of the data during compression.
+        */
+       if (!areq->dst) {
+               qat_req->dst.is_null = true;
+
+               dlen = round_up(2 * slen, PAGE_SIZE);
+               areq->dst = sgl_alloc(dlen, f, NULL);
+               if (!areq->dst)
+                       return -ENOMEM;
+
+               dlen -= dhdr + dftr;
+               areq->dlen = dlen;
+               qat_req->dst.resubmitted = false;
+       }
+
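+       /*
+        * For compression, append the instance's overflow buffer to the
+        * destination list so the device can spill expanded output; the
+        * completion callback reports any spill as an overflow.
+        */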
+       if (dir == COMPRESSION) {
+               params.extra_dst_buff = inst->dc_data->ovf_buff_p;
+               ovf_buff_sz = inst->dc_data->ovf_buff_sz;
+               params.sz_extra_dst_buff = ovf_buff_sz;
+       }
+
+       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
+                                &qat_req->buf, &params, f);
+       if (unlikely(ret))
+               return ret;
+
+       sfbuf = qat_req->buf.blp;
+       dfbuf = qat_req->buf.bloutp;
+       qat_req->qat_compression_ctx = ctx;
+       qat_req->acompress_req = areq;
+       qat_req->dir = dir;
+
+       if (dir == COMPRESSION) {
+               qat_req->actual_dlen = dlen;
+               dlen += ovf_buff_sz;
+               qat_comp_create_compression_req(ctx->comp_ctx, req,
+                                               (u64)(__force long)sfbuf, slen,
+                                               (u64)(__force long)dfbuf, dlen,
+                                               (u64)(__force long)qat_req);
+       } else {
+               qat_comp_create_decompression_req(ctx->comp_ctx, req,
+                                                 (u64)(__force long)sfbuf, slen,
+                                                 (u64)(__force long)dfbuf, dlen,
+                                                 (u64)(__force long)qat_req);
+       }
+
+       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
+       if (ret == -ENOSPC)
+               qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
+
+       return ret;
+}
+
+static int qat_comp_alg_compress(struct acomp_req *req)
+{
+       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_decompress(struct acomp_req *req)
+{
+       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
+{
+       if (!req->dst && req->dlen != 0)
+               return -EINVAL;
+
+       if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+               return -EINVAL;
+
+       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
+                                               QAT_RFC_1950_HDR_SIZE,
+                                               QAT_RFC_1950_FOOTER_SIZE);
+}
+
+static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
+{
+       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+       u16 zlib_header;
+       int ret;
+
+       if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+               return -EBADMSG;
+
+       scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
+
+       ret = parse_zlib_header(zlib_header);
+       if (ret) {
+               dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
+               return ret;
+       }
+
+       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
+                                               QAT_RFC_1950_FOOTER_SIZE, 0, 0);
+}
+
+static struct acomp_alg qat_acomp[] = { {
+       .base = {
+               .cra_name = "deflate",
+               .cra_driver_name = "qat_deflate",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
+               .cra_ctxsize = sizeof(struct qat_compression_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_comp_alg_init_tfm,
+       .exit = qat_comp_alg_exit_tfm,
+       .compress = qat_comp_alg_compress,
+       .decompress = qat_comp_alg_decompress,
+       .dst_free = sgl_free,
+       .reqsize = sizeof(struct qat_compression_req),
+}, {
+       .base = {
+               .cra_name = "zlib-deflate",
+               .cra_driver_name = "qat_zlib_deflate",
+               .cra_priority = 4001,
+               .cra_flags = CRYPTO_ALG_ASYNC,
+               .cra_ctxsize = sizeof(struct qat_compression_ctx),
+               .cra_module = THIS_MODULE,
+       },
+       .init = qat_comp_alg_rfc1950_init_tfm,
+       .exit = qat_comp_alg_exit_tfm,
+       .compress = qat_comp_alg_rfc1950_compress,
+       .decompress = qat_comp_alg_rfc1950_decompress,
+       .dst_free = sgl_free,
+       .reqsize = sizeof(struct qat_compression_req),
+} };
+
+int qat_comp_algs_register(void)
+{
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1)
+               ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+       mutex_unlock(&algs_lock);
+       return ret;
+}
+
+void qat_comp_algs_unregister(void)
+{
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+       mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
new file mode 100644 (file)
index 0000000..404e32c
--- /dev/null
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMP_REQ_H_
+#define _QAT_COMP_REQ_H_
+
+#include "icp_qat_fw_comp.h"
+
+#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req))
+#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2)
+
+static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
+                                      u64 dst, u32 dlen, u64 opaque)
+{
+       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+       struct icp_qat_fw_comp_req *fw_req = req;
+       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+       memcpy(fw_req, fw_tmpl, sizeof(*fw_req));
+       fw_req->comn_mid.src_data_addr = src;
+       fw_req->comn_mid.src_length = slen;
+       fw_req->comn_mid.dest_data_addr = dst;
+       fw_req->comn_mid.dst_length = dlen;
+       fw_req->comn_mid.opaque_data = opaque;
+       req_pars->comp_len = slen;
+       req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
+{
+       struct icp_qat_fw_comp_req *fw_req = req;
+       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
+
+       fw_req->comn_mid.dest_data_addr = dst;
+       fw_req->comn_mid.dst_length = dlen;
+       req_pars->out_buffer_sz = dlen;
+}
+
+static inline void qat_comp_create_compression_req(void *ctx, void *req,
+                                                  u64 src, u32 slen,
+                                                  u64 dst, u32 dlen,
+                                                  u64 opaque)
+{
+       qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque);
+}
+
+static inline void qat_comp_create_decompression_req(void *ctx, void *req,
+                                                    u64 src, u32 slen,
+                                                    u64 dst, u32 dlen,
+                                                    u64 opaque)
+{
+       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
+
+       fw_tmpl++;
+       qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque);
+}
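
The decompression helper advances past the first template because QAT_COMP_CTX_SIZE reserves room for two prebuilt firmware requests back to back: the compression template at offset 0 and the decompression template immediately after it. A sketch of the equivalent pointer arithmetic, under that layout assumption:

/*
 * ctx layout assumed by the helpers above:
 *
 *   ctx -> | icp_qat_fw_comp_req (compression)   |  template 0
 *          | icp_qat_fw_comp_req (decompression) |  template 1
 */
static inline void example_decomp_req(void *ctx, void *req, u64 src, u32 slen,
				      u64 dst, u32 dlen, u64 opaque)
{
	void *decomp_tmpl = (char *)ctx + QAT_COMP_REQ_SIZE;

	qat_comp_create_req(decomp_tmpl, req, src, slen, dst, dlen, opaque);
}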
+
+static inline u32 qat_comp_get_consumed_ctr(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comp_resp_pars.input_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_ctr(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comp_resp_pars.output_byte_counter;
+}
+
+static inline u32 qat_comp_get_produced_adler32(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32;
+}
+
+static inline u64 qat_comp_get_opaque(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->opaque_data;
+}
+
+static inline s8 qat_comp_get_cmp_err(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comn_resp.comn_error.cmp_err_code;
+}
+
+static inline s8 qat_comp_get_xlt_err(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+
+       return qat_resp->comn_resp.comn_error.xlat_err_code;
+}
+
+static inline s8 qat_comp_get_cmp_status(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+
+       return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_field);
+}
+
+static inline s8 qat_comp_get_xlt_status(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+       u8 stat_field = qat_resp->comn_resp.comn_status;
+
+       return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_field);
+}
+
+static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
+{
+       struct icp_qat_fw_comp_resp *qat_resp = resp;
+       u8 flags = qat_resp->comn_resp.hdr_flags;
+
+       return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
+}
+
+#endif
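
Together these accessors give a completion handler everything it needs from a raw firmware response. A hypothetical consumer sketch (names invented; the real callback lives in qat_comp_algs.c):

/* Hypothetical response handler built on the accessors above. */
static void example_handle_resp(void *resp)
{
	u64 opaque = qat_comp_get_opaque(resp);
	u32 consumed = qat_comp_get_consumed_ctr(resp);
	u32 produced = qat_comp_get_produced_ctr(resp);

	if (qat_comp_get_cmp_status(resp) != ICP_QAT_FW_COMN_STATUS_FLAG_OK)
		pr_err("QAT: request %llx failed, cmp_err %d\n",
		       (unsigned long long)opaque,
		       qat_comp_get_cmp_err(resp));
	else
		pr_debug("QAT: request %llx: %u bytes in, %u bytes out\n",
			 (unsigned long long)opaque, consumed, produced);
}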
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
new file mode 100644 (file)
index 0000000..3f1f352
--- /dev/null
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2022 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_compression.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_compression;
+
+void qat_compression_put_instance(struct qat_compression_instance *inst)
+{
+       atomic_dec(&inst->refctr);
+       adf_dev_put(inst->accel_dev);
+}
+
+static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_compression_instance *inst;
+       struct list_head *list_ptr, *tmp;
+       int i;
+
+       list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
+               inst = list_entry(list_ptr,
+                                 struct qat_compression_instance, list);
+
+               for (i = 0; i < atomic_read(&inst->refctr); i++)
+                       qat_compression_put_instance(inst);
+
+               if (inst->dc_tx)
+                       adf_remove_ring(inst->dc_tx);
+
+               if (inst->dc_rx)
+                       adf_remove_ring(inst->dc_rx);
+
+               list_del(list_ptr);
+               kfree(inst);
+       }
+       return 0;
+}
+
+struct qat_compression_instance *qat_compression_get_instance_node(int node)
+{
+       struct qat_compression_instance *inst = NULL;
+       struct adf_accel_dev *accel_dev = NULL;
+       unsigned long best = ~0;
+       struct list_head *itr;
+
+       list_for_each(itr, adf_devmgr_get_head()) {
+               struct adf_accel_dev *tmp_dev;
+               unsigned long ctr;
+               int tmp_dev_node;
+
+               tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+               tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
+
+               if ((node == tmp_dev_node || tmp_dev_node < 0) &&
+                   adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
+                       ctr = atomic_read(&tmp_dev->ref_count);
+                       if (best > ctr) {
+                               accel_dev = tmp_dev;
+                               best = ctr;
+                       }
+               }
+       }
+
+       if (!accel_dev) {
+               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
+               /* Get any started device */
+               list_for_each(itr, adf_devmgr_get_head()) {
+                       struct adf_accel_dev *tmp_dev;
+
+                       tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+                       if (adf_dev_started(tmp_dev) &&
+                           !list_empty(&tmp_dev->compression_list)) {
+                               accel_dev = tmp_dev;
+                               break;
+                       }
+               }
+       }
+
+       if (!accel_dev)
+               return NULL;
+
+       best = ~0;
+       list_for_each(itr, &accel_dev->compression_list) {
+               struct qat_compression_instance *tmp_inst;
+               unsigned long ctr;
+
+               tmp_inst = list_entry(itr, struct qat_compression_instance, list);
+               ctr = atomic_read(&tmp_inst->refctr);
+               if (best > ctr) {
+                       inst = tmp_inst;
+                       best = ctr;
+               }
+       }
+       if (inst) {
+               if (adf_dev_get(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+                       return NULL;
+               }
+               atomic_inc(&inst->refctr);
+       }
+       return inst;
+}
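
qat_compression_get_instance_node() is thus a two-level least-loaded search: first the least-referenced started device on (or closest to) the requested NUMA node, then the least-referenced instance on that device, taking both a device and an instance reference on success. A minimal, hypothetical usage sketch:

#include <linux/topology.h>

/* Hypothetical user: grab the least-loaded instance near this CPU. */
static int example_use_compression(void)
{
	struct qat_compression_instance *inst;

	inst = qat_compression_get_instance_node(numa_node_id());
	if (!inst)
		return -ENODEV;

	/* ... submit on inst->dc_tx; responses arrive on inst->dc_rx ... */

	qat_compression_put_instance(inst);	/* drops both references */
	return 0;
}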
+
+static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_compression_instance *inst;
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       unsigned long num_inst, num_msg_dc;
+       unsigned long bank;
+       int msg_size;
+       int ret;
+       int i;
+
+       INIT_LIST_HEAD(&accel_dev->compression_list);
+       strscpy(key, ADF_NUM_DC, sizeof(key));
+       ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+       if (ret)
+               return ret;
+
+       ret = kstrtoul(val, 10, &num_inst);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_inst; i++) {
+               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+                                   dev_to_node(&GET_DEV(accel_dev)));
+               if (!inst) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               list_add_tail(&inst->list, &accel_dev->compression_list);
+               inst->id = i;
+               atomic_set(&inst->refctr, 0);
+               inst->accel_dev = accel_dev;
+               inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
+
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &bank);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &num_msg_dc);
+               if (ret)
+                       goto err;
+
+               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+                                     msg_size, key, NULL, 0, &inst->dc_tx);
+               if (ret)
+                       goto err;
+
+               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
+                                     msg_size, key, qat_comp_alg_callback, 0,
+                                     &inst->dc_rx);
+               if (ret)
+                       goto err;
+
+               inst->dc_data = accel_dev->dc_data;
+               INIT_LIST_HEAD(&inst->backlog.list);
+               spin_lock_init(&inst->backlog.lock);
+       }
+       return 0;
+err:
+       qat_compression_free_instances(accel_dev);
+       return ret;
+}
+
+static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
+{
+       struct device *dev = &GET_DEV(accel_dev);
+       dma_addr_t obuff_p = DMA_MAPPING_ERROR;
+       size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
+       struct adf_dc_data *dc_data = NULL;
+       u8 *obuff = NULL;
+
+       dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+       if (!dc_data)
+               goto err;
+
+       obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
+       if (!obuff)
+               goto err;
+
+       obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, obuff_p)))
+               goto err;
+
+       dc_data->ovf_buff = obuff;
+       dc_data->ovf_buff_p = obuff_p;
+       dc_data->ovf_buff_sz = ovf_buff_sz;
+
+       accel_dev->dc_data = dc_data;
+
+       return 0;
+
+err:
+       accel_dev->dc_data = NULL;
+       kfree(obuff);
+       devm_kfree(dev, dc_data);
+       return -ENOMEM;
+}
+
+static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_dc_data *dc_data = accel_dev->dc_data;
+       struct device *dev = &GET_DEV(accel_dev);
+
+       if (!dc_data)
+               return;
+
+       dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
+                        DMA_FROM_DEVICE);
+       memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
+       kfree(dc_data->ovf_buff);
+       devm_kfree(dev, dc_data);
+       accel_dev->dc_data = NULL;
+}
+
+static int qat_compression_init(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = qat_compression_alloc_dc_data(accel_dev);
+       if (ret)
+               return ret;
+
+       ret = qat_compression_create_instances(accel_dev);
+       if (ret)
+               qat_free_dc_data(accel_dev);
+
+       return ret;
+}
+
+static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
+{
+       qat_free_dc_data(accel_dev);
+       return qat_compression_free_instances(accel_dev);
+}
+
+static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
+                                        enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_INIT:
+               ret = qat_compression_init(accel_dev);
+               break;
+       case ADF_EVENT_SHUTDOWN:
+               ret = qat_compression_shutdown(accel_dev);
+               break;
+       case ADF_EVENT_RESTARTING:
+       case ADF_EVENT_RESTARTED:
+       case ADF_EVENT_START:
+       case ADF_EVENT_STOP:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_compression_register(void)
+{
+       memset(&qat_compression, 0, sizeof(qat_compression));
+       qat_compression.event_hld = qat_compression_event_handler;
+       qat_compression.name = "qat_compression";
+       return adf_service_register(&qat_compression);
+}
+
+int qat_compression_unregister(void)
+{
+       return adf_service_unregister(&qat_compression);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
new file mode 100644 (file)
index 0000000..aebac23
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2022 Intel Corporation */
+#ifndef _QAT_COMPRESSION_H_
+#define _QAT_COMPRESSION_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "adf_accel_devices.h"
+#include "qat_algs_send.h"
+
+#define QAT_COMP_MAX_SKID 4096
+
+struct qat_compression_instance {
+       struct adf_etr_ring_data *dc_tx;
+       struct adf_etr_ring_data *dc_rx;
+       struct adf_accel_dev *accel_dev;
+       struct list_head list;
+       unsigned long state;
+       int id;
+       atomic_t refctr;
+       struct qat_instance_backlog backlog;
+       struct adf_dc_data *dc_data;
+       void (*build_deflate_ctx)(void *ctx);
+};
+
+static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 mask = ~hw_device->accel_capabilities_mask;
+
+       if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION)
+               return false;
+
+       return true;
+}
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c
new file mode 100644 (file)
index 0000000..40c8e74
--- /dev/null
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_gen2_hw_data.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+       atomic_dec(&inst->refctr);
+       adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_crypto_instance *inst, *tmp;
+       int i;
+
+       list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
+               for (i = 0; i < atomic_read(&inst->refctr); i++)
+                       qat_crypto_put_instance(inst);
+
+               if (inst->sym_tx)
+                       adf_remove_ring(inst->sym_tx);
+
+               if (inst->sym_rx)
+                       adf_remove_ring(inst->sym_rx);
+
+               if (inst->pke_tx)
+                       adf_remove_ring(inst->pke_tx);
+
+               if (inst->pke_rx)
+                       adf_remove_ring(inst->pke_rx);
+
+               list_del(&inst->list);
+               kfree(inst);
+       }
+       return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+       struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
+       struct qat_crypto_instance *inst = NULL, *tmp_inst;
+       unsigned long best = ~0;
+
+       list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+               unsigned long ctr;
+
+               if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
+                    dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
+                   adf_dev_started(tmp_dev) &&
+                   !list_empty(&tmp_dev->crypto_list)) {
+                       ctr = atomic_read(&tmp_dev->ref_count);
+                       if (best > ctr) {
+                               accel_dev = tmp_dev;
+                               best = ctr;
+                       }
+               }
+       }
+
+       if (!accel_dev) {
+               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
+               /* Get any started device */
+               list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
+                       if (adf_dev_started(tmp_dev) &&
+                           !list_empty(&tmp_dev->crypto_list)) {
+                               accel_dev = tmp_dev;
+                               break;
+                       }
+               }
+       }
+
+       if (!accel_dev)
+               return NULL;
+
+       best = ~0;
+       list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
+               unsigned long ctr;
+
+               ctr = atomic_read(&tmp_inst->refctr);
+               if (best > ctr) {
+                       inst = tmp_inst;
+                       best = ctr;
+               }
+       }
+       if (inst) {
+               if (adf_dev_get(accel_dev)) {
+                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
+                       return NULL;
+               }
+               atomic_inc(&inst->refctr);
+       }
+       return inst;
+}
+
+/**
+ * qat_crypto_vf_dev_config() - create dev config required to create
+ * crypto instances
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create
+ * asym, sym or crypto instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
+{
+       u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
+
+       if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Unsupported ring/service mapping present on PF");
+               return -EFAULT;
+       }
+
+       return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+}
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+       unsigned long num_inst, num_msg_sym, num_msg_asym;
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       unsigned long sym_bank, asym_bank;
+       struct qat_crypto_instance *inst;
+       int msg_size;
+       int ret;
+       int i;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
+       if (ret)
+               return ret;
+
+       ret = kstrtoul(val, 0, &num_inst);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_inst; i++) {
+               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+                                   dev_to_node(&GET_DEV(accel_dev)));
+               if (!inst) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               list_add_tail(&inst->list, &accel_dev->crypto_list);
+               inst->id = i;
+               atomic_set(&inst->refctr, 0);
+               inst->accel_dev = accel_dev;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &sym_bank);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &asym_bank);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &num_msg_sym);
+               if (ret)
+                       goto err;
+
+               num_msg_sym = num_msg_sym >> 1;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
+               if (ret)
+                       goto err;
+
+               ret = kstrtoul(val, 10, &num_msg_asym);
+               if (ret)
+                       goto err;
+               num_msg_asym = num_msg_asym >> 1;
+
+               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
+                                     msg_size, key, NULL, 0, &inst->sym_tx);
+               if (ret)
+                       goto err;
+
+               msg_size = msg_size >> 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
+                                     msg_size, key, NULL, 0, &inst->pke_tx);
+               if (ret)
+                       goto err;
+
+               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
+                                     msg_size, key, qat_alg_callback, 0,
+                                     &inst->sym_rx);
+               if (ret)
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
+                                     msg_size, key, qat_alg_asym_callback, 0,
+                                     &inst->pke_rx);
+               if (ret)
+                       goto err;
+
+               INIT_LIST_HEAD(&inst->backlog.list);
+               spin_lock_init(&inst->backlog.lock);
+       }
+       return 0;
+err:
+       qat_crypto_free_instances(accel_dev);
+       return ret;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+       if (qat_crypto_create_instances(accel_dev))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+       return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+                                   enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_INIT:
+               ret = qat_crypto_init(accel_dev);
+               break;
+       case ADF_EVENT_SHUTDOWN:
+               ret = qat_crypto_shutdown(accel_dev);
+               break;
+       case ADF_EVENT_RESTARTING:
+       case ADF_EVENT_RESTARTED:
+       case ADF_EVENT_START:
+       case ADF_EVENT_STOP:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_crypto_register(void)
+{
+       memset(&qat_crypto, 0, sizeof(qat_crypto));
+       qat_crypto.event_hld = qat_crypto_event_handler;
+       qat_crypto.name = "qat_crypto";
+       return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+       return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.h b/drivers/crypto/intel/qat/qat_common/qat_crypto.h
new file mode 100644 (file)
index 0000000..6a0e961
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <crypto/aes.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+#include "qat_algs_send.h"
+#include "qat_bl.h"
+
+struct qat_crypto_instance {
+       struct adf_etr_ring_data *sym_tx;
+       struct adf_etr_ring_data *sym_rx;
+       struct adf_etr_ring_data *pke_tx;
+       struct adf_etr_ring_data *pke_rx;
+       struct adf_accel_dev *accel_dev;
+       struct list_head list;
+       unsigned long state;
+       int id;
+       atomic_t refctr;
+       struct qat_instance_backlog backlog;
+};
+
+struct qat_crypto_request;
+
+struct qat_crypto_request {
+       struct icp_qat_fw_la_bulk_req req;
+       union {
+               struct qat_alg_aead_ctx *aead_ctx;
+               struct qat_alg_skcipher_ctx *skcipher_ctx;
+       };
+       union {
+               struct aead_request *aead_req;
+               struct skcipher_request *skcipher_req;
+       };
+       struct qat_request_buffs buf;
+       void (*cb)(struct icp_qat_fw_la_resp *resp,
+                  struct qat_crypto_request *req);
+       union {
+               struct {
+                       __be64 iv_hi;
+                       __be64 iv_lo;
+               };
+               u8 iv[AES_BLOCK_SIZE];
+       };
+       bool encryption;
+       struct qat_alg_req alg_req;
+};
+
+static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       u32 mask = ~hw_device->accel_capabilities_mask;
+
+       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
+               return false;
+       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)
+               return false;
+       if (mask & ADF_ACCEL_CAPABILITIES_AUTHENTICATION)
+               return false;
+
+       return true;
+}
+
+#endif
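
Note that adf_hw_dev_has_crypto() inverts the capability mask so that a set bit means a missing capability, and it requires all three of the symmetric, asymmetric and authentication bits. A worked example with hypothetical capability values:

/*
 * Hypothetical example: a device advertising only
 *   ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
 *   ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC
 * leaves ADF_ACCEL_CAPABILITIES_AUTHENTICATION set in the inverted
 * mask, so adf_hw_dev_has_crypto() returns false and the device gets
 * no crypto instances.
 */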
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
new file mode 100644 (file)
index 0000000..cbb946a
--- /dev/null
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci_ids.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR           0xffff
+#define MAX_RETRY_TIMES           10000
+#define INIT_CTX_ARB_VALUE     0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE       0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB           20
+#define RST_CSR_AE_LSB           0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+       (~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00C03FFull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00FFF00ull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) <<  0) & 0x000000FFull))))
+
+#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
+
+static const u64 inst_4b[] = {
+       0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+       0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A021000000ull
+};
+
+static const u64 inst[] = {
+       0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+       0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+       0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+       0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+       0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+       0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+       0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+       0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+       0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+       0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+       0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+       0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+       0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+       0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+       0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+       0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+       0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+       0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+       0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+       0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask)
+{
+       AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+       int value;
+
+       do {
+               value = GET_AE_CSR(handle, ae, csr);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return value;
+       } while (iterations--);
+
+       pr_err("QAT: Read CSR timeout\n");
+       return 0;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr,
+                            unsigned int value)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+
+       do {
+               SET_AE_CSR(handle, ae, csr, value);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return 0;
+       } while (iterations--);
+
+       pr_err("QAT: Write CSR Timeout\n");
+       return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned char ctx,
+                                    unsigned int *events)
+{
+       unsigned int cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int cycles,
+                              int chk_inactive)
+{
+       unsigned int base_cnt = 0, cur_cnt = 0;
+       unsigned int csr = (1 << ACS_ABO_BITPOS);
+       int times = MAX_RETRY_TIMES;
+       int elapsed_cycles = 0;
+
+       base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+       base_cnt &= 0xffff;
+       while ((int)cycles > elapsed_cycles && times--) {
+               if (chk_inactive)
+                       csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+
+               cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+               cur_cnt &= 0xffff;
+               elapsed_cycles = cur_cnt - base_cnt;
+
+               if (elapsed_cycles < 0)
+                       elapsed_cycles += 0x10000;
+
+               /* ensure at least 8 time cycles elapsed in wait_cycles */
+               if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+                       return 0;
+       }
+       if (times < 0) {
+               pr_err("QAT: wait_num_cycles time out\n");
+               return -EFAULT;
+       }
+       return 0;
+}
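
PROFILE_COUNT is treated here as a free-running 16-bit counter, so the elapsed-cycle arithmetic has to tolerate wraparound: for example, base_cnt = 0xfff0 and cur_cnt = 0x0010 give cur_cnt - base_cnt = -0xffe0, which the += 0x10000 correction turns into the true 0x20 elapsed cycles.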
+
+#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
+#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       if (mode != 4 && mode != 8) {
+               pr_err("QAT: bad ctx mode=%d\n", mode);
+               return -EINVAL;
+       }
+
+       /* Set the acceleration engine context mode to either four or eight contexts */
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr = IGNORE_W1C_MASK & csr;
+       new_csr = (mode == 4) ?
+               SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+               CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+
+       new_csr = (mode) ?
+               SET_BIT(csr, CE_NN_MODE_BITPOS) :
+               CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+       return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+       switch (lm_type) {
+       case ICP_LMEM0:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM1:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM2:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM3:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
+               break;
+       default:
+               pr_err("QAT: lmType = 0x%x\n", lm_type);
+               return -EINVAL;
+       }
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       csr &= IGNORE_W1C_MASK;
+       new_csr = (mode) ?
+                 SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
+                 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+                                          unsigned short reg_num)
+{
+       unsigned short reg_addr;
+
+       switch (type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               reg_addr = 0x80 | (reg_num & 0x7f);
+               break;
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               reg_addr = reg_num & 0x1f;
+               break;
+       case ICP_SR_RD_REL:
+       case ICP_SR_WR_REL:
+       case ICP_SR_REL:
+               reg_addr = 0x180 | (reg_num & 0x1f);
+               break;
+       case ICP_SR_ABS:
+               reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_WR_REL:
+       case ICP_DR_REL:
+               reg_addr = 0x1c0 | (reg_num & 0x1f);
+               break;
+       case ICP_DR_ABS:
+               reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_NEIGH_REL:
+               reg_addr = 0x280 | (reg_num & 0x1f);
+               break;
+       case ICP_LMEM0:
+               reg_addr = 0x200;
+               break;
+       case ICP_LMEM1:
+               reg_addr = 0x220;
+               break;
+       case ICP_LMEM2:
+               reg_addr = 0x2c0;
+               break;
+       case ICP_LMEM3:
+               reg_addr = 0x2e0;
+               break;
+       case ICP_NO_DEST:
+               reg_addr = 0x300 | (reg_num & 0xff);
+               break;
+       default:
+               reg_addr = BAD_REGADDR;
+               break;
+       }
+       return reg_addr;
+}
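
A few worked encodings from the switch above, handy when decoding microword operands by hand:

/*
 * Worked examples of the operand encoding above:
 *   ICP_GPA_ABS,   reg 5 -> 0x080 | 5        = 0x085
 *   ICP_SR_RD_REL, reg 9 -> 0x180 | 9        = 0x189
 *   ICP_DR_ABS,    reg 2 -> 0x100 | (2 << 1) = 0x104
 *   ICP_LMEM1            -> 0x220 (fixed; reg_num ignored)
 *   anything else        -> BAD_REGADDR (0xffff)
 */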
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
+       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
+       unsigned int csr_val;
+
+       csr_val = GET_CAP_CSR(handle, reset_csr);
+       csr_val |= reset_mask;
+       SET_CAP_CSR(handle, reset_csr, csr_val);
+}
+
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask,
+                               unsigned int ae_csr, unsigned int csr_val)
+{
+       unsigned int ctx, cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+       }
+
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char ctx,
+                               unsigned int ae_csr)
+{
+       unsigned int cur_ctx, csr_val;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+       return csr_val;
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+                                 unsigned char ae, unsigned int ctx_mask,
+                                 unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned int ctx_mask,
+                                    unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+                                 events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned int base_cnt, cur_cnt;
+       unsigned char ae;
+       int times = MAX_RETRY_TIMES;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+               base_cnt &= 0xffff;
+
+               do {
+                       cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
+                       cur_cnt &= 0xffff;
+               } while (times-- && (cur_cnt == base_cnt));
+
+               if (times < 0) {
+                       pr_err("QAT: AE%d is inactive!!\n", ae);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
+                           unsigned int ae)
+{
+       unsigned int enable = 0, active = 0;
+
+       enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+       if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
+           (active & (1 << ACS_ABO_BITPOS)))
+               return 1;
+       else
+               return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned int misc_ctl_csr, misc_ctl;
+       unsigned char ae;
+
+       misc_ctl_csr = handle->chip_info->misc_ctl_csr;
+       /* stop the timestamp timers */
+       misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
+       if (misc_ctl & MC_TIMESTAMP_ENABLE)
+               SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
+                           (~MC_TIMESTAMP_ENABLE));
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+       }
+       /* start timestamp timers */
+       SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT       BIT(2)
+#define ESRAM_AUTO_TINIT_DONE  BIT(3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+       void __iomem *csr_addr =
+                       (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
+                       ESRAM_AUTO_INIT_CSR_OFFSET);
+       unsigned int csr_val;
+       int times = 30;
+
+       if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
+               return 0;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+               return 0;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       csr_val |= ESRAM_AUTO_TINIT;
+       ADF_CSR_WR(csr_addr, 0, csr_val);
+
+       do {
+               qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+               csr_val = ADF_CSR_RD(csr_addr, 0);
+       } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+       if (times < 0) {
+               pr_err("QAT: Fail to init eSram!\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
+       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
+       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned char ae = 0;
+       unsigned int times = 100;
+       unsigned int csr_val;
+
+       /* write to the reset csr */
+       csr_val = GET_CAP_CSR(handle, reset_csr);
+       csr_val &= ~reset_mask;
+       do {
+               SET_CAP_CSR(handle, reset_csr, csr_val);
+               if (!(times--))
+                       goto out_err;
+               csr_val = GET_CAP_CSR(handle, reset_csr);
+               csr_val &= reset_mask;
+       } while (csr_val);
+       /* enable clock */
+       csr_val = GET_CAP_CSR(handle, clk_csr);
+       csr_val |= reset_mask;
+       SET_CAP_CSR(handle, clk_csr, csr_val);
+       if (qat_hal_check_ae_alive(handle))
+               goto out_err;
+
+       /* Set undefined power-up/reset states to reasonable default values */
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+                                   CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae,
+                                        ICP_QAT_UCLO_AE_ALL_CTX,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae,
+                                     ICP_QAT_UCLO_AE_ALL_CTX,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       if (qat_hal_init_esram(handle))
+               goto out_err;
+       if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+               goto out_err;
+       qat_hal_reset_timestamp(handle);
+
+       return 0;
+out_err:
+       pr_err("QAT: failed to get device out of reset\n");
+       return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx &= IGNORE_W1C_MASK &
+               (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static u64 qat_hal_parity_64bit(u64 word)
+{
+       word ^= word >> 1;
+       word ^= word >> 2;
+       word ^= word >> 4;
+       word ^= word >> 8;
+       word ^= word >> 16;
+       word ^= word >> 32;
+       return word & 1;
+}
+
+static u64 qat_hal_set_uword_ecc(u64 uword)
+{
+       u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+               bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+               bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+               bit6_mask = 0xdaf69a46910ULL;
+
+       /* clear the ecc bits */
+       uword &= ~(0x7fULL << 0x2C);
+       uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+       uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+       uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+       uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+       uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+       uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+       uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+       return uword;
+}
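
qat_hal_parity_64bit() is a standard xor fold: each shift halves the span, and after six steps bit 0 holds the parity of all 64 input bits. qat_hal_set_uword_ecc() computes seven such parities, one per fixed check-bit mask, and stores them in microword bits 0x2c..0x32. A small, hypothetical sanity check of the fold:

/* Hypothetical self-test for the xor fold above. */
static void example_check_parity(void)
{
	WARN_ON(qat_hal_parity_64bit(0x0) != 0);	/* no bits set */
	WARN_ON(qat_hal_parity_64bit(0xb) != 1);	/* 3 bits -> odd */
	WARN_ON(qat_hal_parity_64bit(~0ULL) != 0);	/* 64 bits -> even */
}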
+
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, u64 *uword)
+{
+       unsigned int ustore_addr;
+       unsigned int i;
+
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi;
+               u64 tmp;
+
+               tmp = qat_hal_set_uword_ecc(uword[i]);
+               uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+               uwrd_hi = (unsigned int)(tmp >> 0x20);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx &= IGNORE_W1C_MASK;
+       ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+       ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned char ae;
+       unsigned short reg;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+                                            reg, 0);
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+                                            reg, 0);
+               }
+       }
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned char ae;
+       unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+       int times = MAX_RETRY_TIMES;
+       unsigned int csr_val = 0;
+       unsigned int savctx = 0;
+       int ret = 0;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+               csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+               qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+               csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+               csr_val &= IGNORE_W1C_MASK;
+               if (handle->chip_info->nn)
+                       csr_val |= CE_NN_MODE;
+
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+               qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+                                 (u64 *)inst);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+                                   CTX_SIG_EVENTS_INDIRECT, 0);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+               qat_hal_enable_ctx(handle, ae, ctx_mask);
+       }
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               /* wait for AE to finish */
+               do {
+                       ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+               } while (ret && times--);
+
+               if (times < 0) {
+                       pr_err("QAT: clear GPR of AE %d failed", ae);
+                       return -EINVAL;
+               }
+               qat_hal_disable_ctx(handle, ae, ctx_mask);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae, ctx_mask,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       return 0;
+}
+
+static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
+                            struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+       unsigned int max_en_ae_id = 0;
+       struct adf_bar *sram_bar;
+       unsigned int csr_val = 0;
+       unsigned long ae_mask;
+       unsigned char ae = 0;
+       int ret = 0;
+
+       handle->pci_dev = pci_info->pci_dev;
+       switch (handle->pci_dev->device) {
+       case ADF_4XXX_PCI_DEVICE_ID:
+       case ADF_401XX_PCI_DEVICE_ID:
+       case ADF_402XX_PCI_DEVICE_ID:
+               handle->chip_info->mmp_sram_size = 0;
+               handle->chip_info->nn = false;
+               handle->chip_info->lm2lm3 = true;
+               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
+               handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
+               handle->chip_info->icp_rst_mask = 0x100015;
+               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
+               handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
+               handle->chip_info->wakeup_event_val = 0x80000000;
+               handle->chip_info->fw_auth = true;
+               handle->chip_info->css_3k = true;
+               handle->chip_info->tgroup_share_ustore = true;
+               handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
+               handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
+               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
+               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
+               handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
+               handle->chip_info->fcu_loaded_ae_pos = 0;
+
+               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
+               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
+               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
+               handle->hal_cap_ae_local_csr_addr_v =
+                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+                       + LOCAL_TO_XFER_REG_OFFSET);
+               break;
+       case PCI_DEVICE_ID_INTEL_QAT_C62X:
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               handle->chip_info->mmp_sram_size = 0;
+               handle->chip_info->nn = true;
+               handle->chip_info->lm2lm3 = false;
+               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
+               handle->chip_info->icp_rst_csr = ICP_RESET;
+               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
+                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
+               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
+               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
+               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
+               handle->chip_info->fw_auth = true;
+               handle->chip_info->css_3k = false;
+               handle->chip_info->tgroup_share_ustore = false;
+               handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
+               handle->chip_info->fcu_sts_csr = FCU_STATUS;
+               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
+               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
+               handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
+               handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
+               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
+               handle->hal_cap_ae_local_csr_addr_v =
+                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+                       + LOCAL_TO_XFER_REG_OFFSET);
+               break;
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               handle->chip_info->mmp_sram_size = 0x40000;
+               handle->chip_info->nn = true;
+               handle->chip_info->lm2lm3 = false;
+               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
+               handle->chip_info->icp_rst_csr = ICP_RESET;
+               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
+                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
+               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
+               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
+               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
+               handle->chip_info->fw_auth = false;
+               handle->chip_info->css_3k = false;
+               handle->chip_info->tgroup_share_ustore = false;
+               handle->chip_info->fcu_ctl_csr = 0;
+               handle->chip_info->fcu_sts_csr = 0;
+               handle->chip_info->fcu_dram_addr_hi = 0;
+               handle->chip_info->fcu_dram_addr_lo = 0;
+               handle->chip_info->fcu_loaded_ae_csr = 0;
+               handle->chip_info->fcu_loaded_ae_pos = 0;
+               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
+               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
+               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
+               handle->hal_cap_ae_local_csr_addr_v =
+                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
+                       + LOCAL_TO_XFER_REG_OFFSET);
+               break;
+       default:
+               ret = -EINVAL;
+               goto out_err;
+       }
+
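+       /* Only parts that expose MMP SRAM (size > 0) need the SRAM BAR. */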
+       if (handle->chip_info->mmp_sram_size > 0) {
+               sram_bar =
+                       &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
+               handle->hal_sram_addr_v = sram_bar->virt_addr;
+       }
+       handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+       handle->hal_handle->ae_mask = hw_data->ae_mask;
+       handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
+       handle->hal_handle->slice_mask = hw_data->accel_mask;
+       handle->cfg_ae_mask = ALL_AE_MASK;
+       /* create AE objects */
+       handle->hal_handle->upc_mask = 0x1ffff;
+       handle->hal_handle->max_ustore = 0x4000;
+
+       ae_mask = handle->hal_handle->ae_mask;
+       for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
+               handle->hal_handle->aes[ae].free_addr = 0;
+               handle->hal_handle->aes[ae].free_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].ustore_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].live_ctx_mask =
+                                               ICP_QAT_UCLO_AE_ALL_CTX;
+               max_en_ae_id = ae;
+       }
+       handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+
+       /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
+               csr_val |= 0x1;
+               qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+       }
+out_err:
+       return ret;
+}
+
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+       struct icp_qat_fw_loader_handle *handle;
+       int ret = 0;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+       if (!handle->hal_handle) {
+               ret = -ENOMEM;
+               goto out_hal_handle;
+       }
+
+       handle->chip_info = kzalloc(sizeof(*handle->chip_info), GFP_KERNEL);
+       if (!handle->chip_info) {
+               ret = -ENOMEM;
+               goto out_chip_info;
+       }
+
+       ret = qat_hal_chip_init(handle, accel_dev);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
+               goto out_err;
+       }
+
+       /* take all AEs out of reset */
+       ret = qat_hal_clr_reset(handle);
+       if (ret) {
+               dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
+               goto out_err;
+       }
+
+       qat_hal_clear_xfer(handle);
+       if (!handle->chip_info->fw_auth) {
+               ret = qat_hal_clear_gpr(handle);
+               if (ret)
+                       goto out_err;
+       }
+
+       accel_dev->fw_loader->fw_loader = handle;
+       return 0;
+
+out_err:
+       kfree(handle->chip_info);
+out_chip_info:
+       kfree(handle->hal_handle);
+out_hal_handle:
+       kfree(handle);
+       return ret;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+       if (!handle)
+               return;
+       kfree(handle->chip_info);
+       kfree(handle->hal_handle);
+       kfree(handle);
+}
+
+int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       u32 wakeup_val = handle->chip_info->wakeup_event_val;
+       u32 fcu_ctl_csr, fcu_sts_csr;
+       unsigned int fcu_sts;
+       unsigned char ae;
+       u32 ae_ctr = 0;
+       int retry = 0;
+
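+       /*
+        * Two start paths: parts with firmware authentication kick all
+        * AEs off via the FCU and poll FCU_STS for completion; older
+        * parts wake and enable each context directly.  Both paths
+        * return the number of AEs started (0 on failure).
+        */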
+       if (handle->chip_info->fw_auth) {
+               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+               ae_ctr = hweight32(ae_mask);
+               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
+               do {
+                       msleep(FW_AUTH_WAIT_PERIOD);
+                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+                       if ((fcu_sts >> FCU_STS_DONE_POS) & 0x1)
+                               return ae_ctr;
+               } while (retry++ < FW_AUTH_MAX_RETRY);
+               pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
+               return 0;
+       } else {
+               for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+                       qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
+                       qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
+                       ae_ctr++;
+               }
+               return ae_ctr;
+       }
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask)
+{
+       if (!handle->chip_info->fw_auth)
+               qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+       qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int uaddr,
+                              unsigned int words_num, u64 *uword)
+{
+       unsigned int i, uwrd_lo, uwrd_hi;
+       unsigned int ustore_addr, misc_control;
+
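+       /*
+        * Temporarily clear the shared-control-store bit (bit 2) of
+        * AE_MISC_CONTROL so the reads target this AE's own ustore;
+        * the CSR is restored below.
+        */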
+       misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+                         misc_control & 0xfffffffb);
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr |= UA_ECS;
+       for (i = 0; i < words_num; i++) {
+               qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+               uaddr++;
+               uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+               uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
+               uword[i] = uwrd_hi;
+               uword[i] = (uword[i] << 0x20) | uwrd_lo;
+       }
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned int uaddr,
+                    unsigned int words_num, unsigned int *data)
+{
+       unsigned int i, ustore_addr;
+
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi, tmp;
+
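+               /*
+                * Scatter the 32-bit value across the uword and set the
+                * per-halfword even-parity bits (hweight32() & 1) in bits
+                * 8 and 9 of the upper word.
+                */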
+               uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+                         ((data[i] & 0xff00) << 2) |
+                         (0x3 << 8) | (data[i] & 0xff);
+               uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+               uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+               tmp = ((data[i] >> 0x10) & 0xffff);
+               uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+#define MAX_EXEC_INST 100
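+/*
+ * Temporarily load @micro_inst into the bottom of the AE's ustore, run it
+ * on context @ctx, wait for completion, then restore the saved uwords and
+ * all saved context state.
+ */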
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  u64 *micro_inst, unsigned int inst_num,
+                                  int code_off, unsigned int max_cycle,
+                                  unsigned int *endpc)
+{
+       unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
+       unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
+       unsigned int ind_t_index = 0, ind_t_index_byte = 0;
+       unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
+       unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
+       u64 savuwords[MAX_EXEC_INST];
+       unsigned int ind_cnt_sig;
+       unsigned int ind_sig, act_sig;
+       unsigned int csr_val = 0, newcsr_val;
+       unsigned int savctx;
+       unsigned int savcc, wakeup_events, savpc;
+       unsigned int ctxarb_ctl, ctx_enables;
+
+       if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+               pr_err("QAT: invalid instruction num %d\n", inst_num);
+               return -EINVAL;
+       }
+       /* save current context */
+       ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+       ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+       ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                               INDIRECT_LM_ADDR_0_BYTE_INDEX);
+       ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                               INDIRECT_LM_ADDR_1_BYTE_INDEX);
+       if (handle->chip_info->lm2lm3) {
+               ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                  LM_ADDR_2_INDIRECT);
+               ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                  LM_ADDR_3_INDIRECT);
+               ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                       INDIRECT_LM_ADDR_2_BYTE_INDEX);
+               ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                       INDIRECT_LM_ADDR_3_BYTE_INDEX);
+               ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                 INDIRECT_T_INDEX);
+               ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                      INDIRECT_T_INDEX_BYTE_INDEX);
+       }
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+       savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
+       savpc &= handle->hal_handle->upc_mask;
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx_enables &= IGNORE_W1C_MASK;
+       savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+       ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+       ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                         FUTURE_COUNT_SIGNAL_INDIRECT);
+       ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                     CTX_SIG_EVENTS_INDIRECT);
+       act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
+       /* execute micro codes */
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+       if (code_off)
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+       qat_hal_enable_ctx(handle, ae, (1 << ctx));
+       /* wait for micro codes to finish */
+       if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+               return -EFAULT;
+       if (endpc) {
+               unsigned int ctx_status;
+
+               ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+                                                CTX_STS_INDIRECT);
+               *endpc = ctx_status & handle->hal_handle->upc_mask;
+       }
+       /* restore to saved context */
+       qat_hal_disable_ctx(handle, ae, (1 << ctx));
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & savpc);
+       csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
+       newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+       qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_0_INDIRECT, ind_lm_addr0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_1_INDIRECT, ind_lm_addr1);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+       if (handle->chip_info->lm2lm3) {
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
+                                   ind_lm_addr2);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
+                                   ind_lm_addr3);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_LM_ADDR_2_BYTE_INDEX,
+                                   ind_lm_addr_byte2);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_LM_ADDR_3_BYTE_INDEX,
+                                   ind_lm_addr_byte3);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_T_INDEX, ind_t_index);
+               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
+                                   INDIRECT_T_INDEX_BYTE_INDEX,
+                                   ind_t_index_byte);
+       }
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           CTX_SIG_EVENTS_INDIRECT, ind_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return 0;
+}
+
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int *data)
+{
+       unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+       unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+       unsigned short reg_addr;
+       int status = 0;
+       u64 insts, savuword;
+
+       reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (reg_addr == BAD_REGADDR) {
+               pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+               return -EINVAL;
+       }
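+       /*
+        * Encode a single ALU instruction that routes the target register
+        * through the ALU; its value is then read back from the ALU_OUT
+        * CSR after the instruction executes.
+        */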
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts = 0xA070000000ull | (reg_addr & 0x3ff);
+               break;
+       default:
+               insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+               break;
+       }
+       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+       ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx_enables &= IGNORE_W1C_MASK;
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 ctx & ACS_ACNO);
+       qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
+       uaddr = UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       insts = qat_hal_set_uword_ecc(insts);
+       uwrd_lo = (unsigned int)(insts & 0xffffffff);
+       uwrd_hi = (unsigned int)(insts >> 0x20);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       /* delay for at least 8 cycles */
+       qat_hal_wait_cycles(handle, ae, 0x8, 0);
+       /*
+        * read ALU output
+        * the instruction should have been executed
+        * prior to clearing the ECS in putUwords
+        */
+       *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+       qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return status;
+}
+
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int data)
+{
+       unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+       u64 insts[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+       const int imm_w1 = 0, imm_w0 = 1;
+
+       dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (dest_addr == BAD_REGADDR) {
+               pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+               return -EINVAL;
+       }
+
+       data16lo = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                         (0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                          (0xff & data16lo));
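+       /*
+        * Patch the two immed uwords with the high and low 16-bit halves
+        * of @data; the source/destination operand fields are swapped for
+        * GPA-relative destinations.
+        */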
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               break;
+       default:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+               break;
+       }
+
+       return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+                                      code_off, num_inst * 0x5, NULL);
+}
+
+int qat_hal_get_ins_num(void)
+{
+       return ARRAY_SIZE(inst_4b);
+}
+
+static int qat_hal_concat_micro_code(u64 *micro_inst,
+                                    unsigned int inst_num, unsigned int size,
+                                    unsigned int addr, unsigned int *value)
+{
+       int i;
+       unsigned int cur_value;
+       const u64 *inst_arr;
+       int fixup_offset;
+       int usize = 0;
+       int orig_num;
+
+       orig_num = inst_num;
+       cur_value = value[0];
+       inst_arr = inst_4b;
+       usize = ARRAY_SIZE(inst_4b);
+       fixup_offset = inst_num;
+       for (i = 0; i < usize; i++)
+               micro_inst[inst_num++] = inst_arr[i];
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+       fixup_offset++;
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+       return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned char ctx,
+                                     int *pfirst_exec, u64 *micro_inst,
+                                     unsigned int inst_num)
+{
+       int stat = 0;
+       unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+       unsigned int gprb0 = 0, gprb1 = 0;
+
+       if (*pfirst_exec) {
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+               *pfirst_exec = 0;
+       }
+       stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+                                      inst_num * 0x5, NULL);
+       if (stat != 0)
+               return -EFAULT;
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+       return 0;
+}
+
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header)
+{
+       struct icp_qat_uof_batch_init *plm_init;
+       u64 *micro_inst_arry;
+       int micro_inst_num;
+       int alloc_inst_size;
+       int first_exec = 1;
+       int stat = 0;
+
+       plm_init = lm_init_header->next;
+       alloc_inst_size = lm_init_header->size;
+       if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+               alloc_inst_size = handle->hal_handle->max_ustore;
+       micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
+                                       GFP_KERNEL);
+       if (!micro_inst_arry)
+               return -ENOMEM;
+       micro_inst_num = 0;
+       while (plm_init) {
+               unsigned int addr, *value, size;
+
+               ae = plm_init->ae;
+               addr = plm_init->addr;
+               value = plm_init->value;
+               size = plm_init->size;
+               micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+                                                           micro_inst_num,
+                                                           size, addr, value);
+               plm_init = plm_init->next;
+       }
+       /* exec micro codes */
+       if (micro_inst_arry && micro_inst_num > 0) {
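+               /*
+                * Append a terminating uword; it matches the final
+                * instruction of the fixed micro sequences in this file.
+                */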
+               micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+               stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+                                                 micro_inst_arry,
+                                                 micro_inst_num);
+       }
+       kfree(micro_inst_arry);
+       return stat;
+}
+
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int val)
+{
+       int status = 0;
+       unsigned int reg_addr;
+       unsigned int ctx_enables;
+       unsigned short mask;
+       unsigned short dr_offset = 0x10;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               mask = 0x1f;
+               dr_offset = 0x20;
+       } else {
+               mask = 0x0f;
+       }
+       if (reg_num & ~mask)
+               return -EINVAL;
+       reg_addr = reg_num + (ctx << 0x5);
+       switch (reg_type) {
+       case ICP_SR_RD_REL:
+       case ICP_SR_REL:
+               SET_AE_XFER(handle, ae, reg_addr, val);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_REL:
+               SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+               break;
+       default:
+               status = -EINVAL;
+               break;
+       }
+       return status;
+}
+
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int data)
+{
+       unsigned int gprval, ctx_enables;
+       unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+           data16low;
+       unsigned short reg_mask;
+       int status = 0;
+       u64 micro_inst[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0A000000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+       const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               reg_mask = (unsigned short)~0x1f;
+       } else {
+               reg_mask = (unsigned short)~0xf;
+       }
+       if (reg_num & reg_mask)
+               return -EINVAL;
+       xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (xfr_addr == BAD_REGADDR) {
+               pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+               return -EINVAL;
+       }
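+       /*
+        * A GPB register stages the data: save its current value, move
+        * the immediate through it into the transfer register via the
+        * micro sequence, then restore it below.
+        */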
+       status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+       if (status) {
+               pr_err("QAT: failed to read register");
+               return status;
+       }
+       gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+       data16low = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                         (unsigned short)(0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                          (unsigned short)(0xff & data16low));
+       micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+       micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+       micro_inst[0x2] = micro_inst[0x2] |
+           ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+       status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+                                        code_off, dly, NULL);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+       return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             unsigned short nn, unsigned int val)
+{
+       unsigned int ctx_enables;
+       int stat = 0;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       ctx_enables &= IGNORE_W1C_MASK;
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+       stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       return stat;
+}
+
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+                                     *handle, unsigned char ae,
+                                     unsigned short absreg_num,
+                                     unsigned short *relreg,
+                                     unsigned char *ctx)
+{
+       unsigned int ctx_enables;
+
+       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+       if (ctx_enables & CE_INUSE_CONTEXTS) {
+               /* 4-ctx mode */
+               *relreg = absreg_num & 0x1F;
+               *ctx = (absreg_num >> 0x4) & 0x6;
+       } else {
+               /* 8-ctx mode */
+               *relreg = absreg_num & 0x0F;
+               *ctx = (absreg_num >> 0x4) & 0x7;
+       }
+       return 0;
+}
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned long ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 1;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, &ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+               if (stat) {
+                       pr_err("QAT: write gpr fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, &ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write wr xfer fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned long ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, &ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write rd xfer fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned long ctx_mask,
+                   unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned char ctx;
+
+       if (!handle->chip_info->nn) {
+               dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
+                       handle->pci_dev->device);
+               return -EINVAL;
+       }
+
+       if (ctx_mask == 0)
+               return -EINVAL;
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!test_bit(ctx, &ctx_mask))
+                       continue;
+               stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+               if (stat) {
+                       pr_err("QAT: write neigh error\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
new file mode 100644 (file)
index 0000000..3ba8ca2
--- /dev/null
@@ -0,0 +1,2133 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci_ids.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+                                unsigned int ae, unsigned int image_num)
+{
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_encapme *encap_image;
+       struct icp_qat_uclo_page *page = NULL;
+       struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+       ae_data = &obj_handle->ae_data[ae];
+       encap_image = &obj_handle->ae_uimage[image_num];
+       ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+       ae_slice->encap_image = encap_image;
+
+       if (encap_image->img_ptr) {
+               ae_slice->ctx_mask_assigned =
+                                       encap_image->img_ptr->ctx_assigned;
+               ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+       } else {
+               ae_slice->ctx_mask_assigned = 0;
+       }
+       ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+       if (!ae_slice->region)
+               return -ENOMEM;
+       ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+       if (!ae_slice->page)
+               goto out_err;
+       page = ae_slice->page;
+       page->encap_page = encap_image->page;
+       ae_slice->page->region = ae_slice->region;
+       ae_data->slice_num++;
+       return 0;
+out_err:
+       kfree(ae_slice->region);
+       ae_slice->region = NULL;
+       return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+       unsigned int i;
+
+       if (!ae_data) {
+               pr_err("QAT: bad argument, ae_data is NULL\n ");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ae_data->slice_num; i++) {
+               kfree(ae_data->ae_slices[i].region);
+               ae_data->ae_slices[i].region = NULL;
+               kfree(ae_data->ae_slices[i].page);
+               ae_data->ae_slices[i].page = NULL;
+       }
+       return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+                                unsigned int str_offset)
+{
+       if (!str_table->table_len || str_offset > str_table->table_len)
+               return NULL;
+       return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
+{
+       int maj = hdr->maj_ver & 0xff;
+       int min = hdr->min_ver & 0xff;
+
+       if (hdr->file_id != ICP_QAT_UOF_FID) {
+               pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+               return -EINVAL;
+       }
+       if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+               pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
+{
+       int maj = suof_hdr->maj_ver & 0xff;
+       int min = suof_hdr->min_ver & 0xff;
+
+       if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
+               pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+               return -EINVAL;
+       }
+       if (suof_hdr->fw_type != 0) {
+               pr_err("QAT: unsupported firmware type\n");
+               return -EINVAL;
+       }
+       if (suof_hdr->num_chunks <= 0x1) {
+               pr_err("QAT: SUOF chunk amount is incorrect\n");
+               return -EINVAL;
+       }
+       if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
+               pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned int addr, unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
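+       /* @num_in_bytes is expected to be a multiple of 4 */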
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               SRAM_WRITE(handle, addr, outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+               addr += 4;
+       }
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned int addr,
+                                     unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       addr >>= 0x2; /* convert to uword address */
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+       }
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae,
+                                  struct icp_qat_uof_batch_init
+                                  *umem_init_header)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       if (!umem_init_header)
+               return;
+       umem_init = umem_init_header->next;
+       while (umem_init) {
+               unsigned int addr, *value, size;
+
+               ae = umem_init->ae;
+               addr = umem_init->addr;
+               value = umem_init->value;
+               size = umem_init->size;
+               qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+               umem_init = umem_init->next;
+       }
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+                                struct icp_qat_uof_batch_init **base)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       umem_init = *base;
+       while (umem_init) {
+               struct icp_qat_uof_batch_init *pre;
+
+               pre = umem_init;
+               umem_init = umem_init->next;
+               kfree(pre);
+       }
+       *base = NULL;
+}
+
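+/* Parse the leading decimal digits of @str into @num. */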
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+       char buf[16] = {0};
+       unsigned long ae = 0;
+       int i;
+
+       strncpy(buf, str, 15);
+       for (i = 0; i < 16; i++) {
+               if (!isdigit(buf[i])) {
+                       buf[i] = '\0';
+                       break;
+               }
+       }
+       if (kstrtoul(buf, 10, &ae))
+               return -EFAULT;
+
+       *num = (unsigned int)ae;
+       return 0;
+}
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_uof_initmem *init_mem,
+                                    unsigned int size_range, unsigned int *ae)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       char *str;
+
+       if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+               pr_err("QAT: initmem is out of range");
+               return -EINVAL;
+       }
+       if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+               pr_err("QAT: Memory scope for init_mem error\n");
+               return -EINVAL;
+       }
+       str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+       if (!str) {
+               pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+               return -EINVAL;
+       }
+       if (qat_uclo_parse_num(str, ae)) {
+               pr_err("QAT: Parse num for AE number failed\n");
+               return -EINVAL;
+       }
+       if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+               pr_err("QAT: ae %d out of range\n", *ae);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+                                          *handle, struct icp_qat_uof_initmem
+                                          *init_mem, unsigned int ae,
+                                          struct icp_qat_uof_batch_init
+                                          **init_tab_base)
+{
+       struct icp_qat_uof_batch_init *init_header, *tail;
+       struct icp_qat_uof_batch_init *mem_init, *tail_old;
+       struct icp_qat_uof_memvar_attr *mem_val_attr;
+       unsigned int i, flag = 0;
+
+       mem_val_attr =
+               (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
+               sizeof(struct icp_qat_uof_initmem));
+
+       init_header = *init_tab_base;
+       if (!init_header) {
+               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+               if (!init_header)
+                       return -ENOMEM;
+               init_header->size = 1;
+               *init_tab_base = init_header;
+               flag = 1;
+       }
+       tail_old = init_header;
+       while (tail_old->next)
+               tail_old = tail_old->next;
+       tail = tail_old;
+       for (i = 0; i < init_mem->val_attr_num; i++) {
+               mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+               if (!mem_init)
+                       goto out_err;
+               mem_init->ae = ae;
+               mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+               mem_init->value = &mem_val_attr->value;
+               mem_init->size = 4;
+               mem_init->next = NULL;
+               tail->next = mem_init;
+               tail = mem_init;
+               init_header->size += qat_hal_get_ins_num();
+               mem_val_attr++;
+       }
+       return 0;
+out_err:
+       /* Do not free the list head unless we allocated it. */
+       tail_old = tail_old->next;
+       if (flag) {
+               kfree(*init_tab_base);
+               *init_tab_base = NULL;
+       }
+
+       while (tail_old) {
+               mem_init = tail_old->next;
+               kfree(tail_old);
+               tail_old = mem_init;
+       }
+       return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+                                     handle->chip_info->lm_size, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->lm_init_tab[ae]))
+               return -EINVAL;
+       return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae, ustore_size, uaddr, i;
+       struct icp_qat_uclo_aedata *aed;
+
+       ustore_size = obj_handle->ustore_phy_size;
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->umem_init_tab[ae]))
+               return -EINVAL;
+       /* set the highest ustore address referenced */
+       uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+       aed = &obj_handle->ae_data[ae];
+       for (i = 0; i < aed->slice_num; i++) {
+               if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
+                       aed->ae_slices[i].encap_image->uwords_num = uaddr;
+       }
+       return 0;
+}
+
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+                                  struct icp_qat_uof_initmem *init_mem)
+{
+       switch (init_mem->region) {
+       case ICP_QAT_UOF_LMEM_REGION:
+               if (qat_uclo_init_lmem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       case ICP_QAT_UOF_UMEM_REGION:
+               if (qat_uclo_init_umem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       default:
+               pr_err("QAT: initmem region error. region type=0x%x\n",
+                      init_mem->region);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+                               struct icp_qat_uclo_encapme *image)
+{
+       unsigned int i;
+       struct icp_qat_uclo_encap_page *page;
+       struct icp_qat_uof_image *uof_image;
+       unsigned char ae;
+       unsigned int ustore_size;
+       unsigned int patt_pos;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+       u64 *fill_data;
+
+       uof_image = image->img_ptr;
+       fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
+                           GFP_KERNEL);
+       if (!fill_data)
+               return -ENOMEM;
+       for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+               memcpy(&fill_data[i], &uof_image->fill_pattern,
+                      sizeof(u64));
+       page = image->page;
+
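+       /*
+        * Pad the control store outside the loaded page (below its start
+        * address and after its last micro word) with the image's fill
+        * pattern.
+        */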
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               unsigned long ae_assigned = uof_image->ae_assigned;
+
+               if (!test_bit(ae, &ae_assigned))
+                       continue;
+
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+               patt_pos = page->beg_addr_p + page->micro_words_num;
+
+               qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+                                 page->beg_addr_p, &fill_data[0]);
+               qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+                                 ustore_size - patt_pos + 1,
+                                 &fill_data[page->beg_addr_p]);
+       }
+       kfree(fill_data);
+       return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+       int i, ae;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+
+       for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+               if (initmem->num_in_bytes) {
+                       if (qat_uclo_init_ae_memory(handle, initmem))
+                               return -EINVAL;
+               }
+               initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
+                       (uintptr_t)initmem +
+                       sizeof(struct icp_qat_uof_initmem)) +
+                       (sizeof(struct icp_qat_uof_memvar_attr) *
+                       initmem->val_attr_num));
+       }
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (qat_hal_batch_wr_lm(handle, ae,
+                                       obj_handle->lm_init_tab[ae])) {
+                       pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+                       return -EINVAL;
+               }
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->lm_init_tab[ae]);
+               qat_uclo_batch_wr_umem(handle, ae,
+                                      obj_handle->umem_init_tab[ae]);
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->
+                                                umem_init_tab[ae]);
+       }
+       return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+                                char *chunk_id, void *cur)
+{
+       int i;
+       struct icp_qat_uof_chunkhdr *chunk_hdr =
+           (struct icp_qat_uof_chunkhdr *)
+           ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+       for (i = 0; i < obj_hdr->num_chunks; i++) {
+               if ((cur < (void *)&chunk_hdr[i]) &&
+                   !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       return &chunk_hdr[i];
+               }
+       }
+       return NULL;
+}
+
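+/* One step of a CRC-16/CCITT (polynomial 0x1021) over a single byte. */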
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+       int i;
+       unsigned int topbit = 1 << 0xF;
+       unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+       reg ^= inbyte << 0x8;
+       for (i = 0; i < 0x8; i++) {
+               if (reg & topbit)
+                       reg = (reg << 1) ^ 0x1021;
+               else
+                       reg <<= 1;
+       }
+       return reg & 0xFFFF;
+}
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+       unsigned int chksum = 0;
+
+       if (ptr)
+               while (num--)
+                       chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+       return chksum;
+}
+
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+                  char *chunk_id)
+{
+       struct icp_qat_uof_filechunkhdr *file_chunk;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       char *chunk;
+       int i;
+
+       file_chunk = (struct icp_qat_uof_filechunkhdr *)
+               (buf + sizeof(struct icp_qat_uof_filehdr));
+       for (i = 0; i < file_hdr->num_chunks; i++) {
+               if (!strncmp(file_chunk->chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       chunk = buf + file_chunk->offset;
+                       if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+                               chunk, file_chunk->size))
+                               break;
+                       obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+                       if (!obj_hdr)
+                               break;
+                       obj_hdr->file_buff = chunk;
+                       obj_hdr->checksum = file_chunk->checksum;
+                       obj_hdr->size = file_chunk->size;
+                       return obj_hdr;
+               }
+               file_chunk++;
+       }
+       return NULL;
+}
+
+static int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                           struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+       struct icp_qat_uof_objtable *neigh_reg_tab;
+       struct icp_qat_uof_code_page *code_page;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)image + sizeof(struct icp_qat_uof_image));
+       uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                    code_page->uc_var_tab_offset);
+       imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                     code_page->imp_var_tab_offset);
+       imp_expr_tab = (struct icp_qat_uof_objtable *)
+                      (encap_uof_obj->beg_uof +
+                      code_page->imp_expr_tab_offset);
+       if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+           imp_expr_tab->entry_num) {
+               pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+               return -EINVAL;
+       }
+       neigh_reg_tab = (struct icp_qat_uof_objtable *)
+                       (encap_uof_obj->beg_uof +
+                       code_page->neigh_reg_tab_offset);
+       if (neigh_reg_tab->entry_num) {
+               pr_err("QAT: UOF can't contain neighbor register table\n");
+               return -EINVAL;
+       }
+       if (image->numpages > 1) {
+               pr_err("QAT: UOF can't contain multiple pages\n");
+               return -EINVAL;
+       }
+       if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use shared control store feature\n");
+               return -EFAULT;
+       }
+       if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use reloadable feature\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                                    struct icp_qat_uof_image *img,
+                                    struct icp_qat_uclo_encap_page *page)
+{
+       struct icp_qat_uof_code_page *code_page;
+       struct icp_qat_uof_code_area *code_area;
+       struct icp_qat_uof_objtable *uword_block_tab;
+       struct icp_qat_uof_uword_block *uwblock;
+       int i;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)img + sizeof(struct icp_qat_uof_image));
+       page->def_page = code_page->def_page;
+       page->page_region = code_page->page_region;
+       page->beg_addr_v = code_page->beg_addr_v;
+       page->beg_addr_p = code_page->beg_addr_p;
+       code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+                                               code_page->code_area_offset);
+       page->micro_words_num = code_area->micro_words_num;
+       uword_block_tab = (struct icp_qat_uof_objtable *)
+                         (encap_uof_obj->beg_uof +
+                         code_area->uword_block_tab);
+       page->uwblock_num = uword_block_tab->entry_num;
+       uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+                       sizeof(struct icp_qat_uof_objtable));
+       page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+       for (i = 0; i < uword_block_tab->entry_num; i++)
+               page->uwblock[i].micro_words =
+               (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+                              struct icp_qat_uclo_encapme *ae_uimage,
+                              int max_image)
+{
+       int i, j;
+       struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+       struct icp_qat_uof_image *image;
+       struct icp_qat_uof_objtable *ae_regtab;
+       struct icp_qat_uof_objtable *init_reg_sym_tab;
+       struct icp_qat_uof_objtable *sbreak_tab;
+       struct icp_qat_uof_encap_obj *encap_uof_obj =
+                                       &obj_handle->encap_uof_obj;
+
+       for (j = 0; j < max_image; j++) {
+               chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                               ICP_QAT_UOF_IMAG, chunk_hdr);
+               if (!chunk_hdr)
+                       break;
+               image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+                                                    chunk_hdr->offset);
+               ae_regtab = (struct icp_qat_uof_objtable *)
+                          (image->reg_tab_offset +
+                          obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+               ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+                       (((char *)ae_regtab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+                                  (image->init_reg_sym_tab +
+                                  obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+               ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+                       (((char *)init_reg_sym_tab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               sbreak_tab = (struct icp_qat_uof_objtable *)
+                       (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+               ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+                                     (((char *)sbreak_tab) +
+                                     sizeof(struct icp_qat_uof_objtable));
+               ae_uimage[j].img_ptr = image;
+               if (qat_uclo_check_image_compat(encap_uof_obj, image))
+                       goto out_err;
+               ae_uimage[j].page = kzalloc(sizeof(*ae_uimage[j].page),
+                                           GFP_KERNEL);
+               if (!ae_uimage[j].page)
+                       goto out_err;
+               qat_uclo_map_image_page(encap_uof_obj, image,
+                                       ae_uimage[j].page);
+       }
+       return j;
+out_err:
+       for (i = 0; i < j; i++)
+               kfree(ae_uimage[i].page);
+       return 0;
+}
+
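+/*
+ * For each enabled and configured AE, attach every image assigned to it;
+ * fail if no image targets any such AE.
+ */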
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+       int i, ae;
+       int mflag = 0;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+
+       for_each_set_bit(ae, &ae_mask, max_ae) {
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               for (i = 0; i < obj_handle->uimage_num; i++) {
+                       unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
+
+                       if (!test_bit(ae, &ae_assigned))
+                               continue;
+                       mflag = 1;
+                       if (qat_uclo_init_ae_data(obj_handle, ae, i))
+                               return -EINVAL;
+               }
+       }
+       if (!mflag) {
+               pr_err("QAT: uimage uses AE not set\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+                      char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+                                       obj_hdr->file_buff, tab_name, NULL);
+       if (chunk_hdr) {
+               int hdr_size;
+
+               memcpy(&str_table->table_len, obj_hdr->file_buff +
+                      chunk_hdr->offset, sizeof(str_table->table_len));
+               hdr_size = (char *)&str_table->strings - (char *)str_table;
+               str_table->strings = (uintptr_t)obj_hdr->file_buff +
+                                       chunk_hdr->offset + hdr_size;
+               return str_table;
+       }
+       return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                          struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                       ICP_QAT_UOF_IMEM, NULL);
+       if (chunk_hdr) {
+               memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+                       chunk_hdr->offset, sizeof(unsigned int));
+               init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+               (encap_uof_obj->beg_uof + chunk_hdr->offset +
+               sizeof(unsigned int));
+       }
+}
+
+static unsigned int
+qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
+{
+       switch (handle->pci_dev->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               return ICP_QAT_AC_895XCC_DEV_TYPE;
+       case PCI_DEVICE_ID_INTEL_QAT_C62X:
+               return ICP_QAT_AC_C62X_DEV_TYPE;
+       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
+               return ICP_QAT_AC_C3XXX_DEV_TYPE;
+       case ADF_4XXX_PCI_DEVICE_ID:
+       case ADF_401XX_PCI_DEVICE_ID:
+       case ADF_402XX_PCI_DEVICE_ID:
+               return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+       default:
+               pr_err("QAT: unsupported device 0x%x\n",
+                      handle->pci_dev->device);
+               return 0;
+       }
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+       unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+       if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
+               pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+                      obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
+                      prod_type);
+               return -EINVAL;
+       }
+       maj_ver = obj_handle->prod_rev & 0xff;
+       if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
+           obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
+               pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+               return -EINVAL;
+       }
+       return 0;
+}
+
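+/*
+ * Absolute register types are per-AE, so the context mask is cleared
+ * before writing them; relative types are written per context.
+ */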
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned char ctx_mask,
+                            enum icp_qat_uof_regtype reg_type,
+                            unsigned short reg_addr, unsigned int value)
+{
+       switch (reg_type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               ctx_mask = 0;
+               fallthrough;
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+                                       reg_addr, value);
+       case ICP_SR_ABS:
+       case ICP_DR_ABS:
+       case ICP_SR_RD_ABS:
+       case ICP_DR_RD_ABS:
+               ctx_mask = 0;
+               fallthrough;
+       case ICP_SR_REL:
+       case ICP_DR_REL:
+       case ICP_SR_RD_REL:
+       case ICP_DR_RD_REL:
+               return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_SR_WR_ABS:
+       case ICP_DR_WR_ABS:
+               ctx_mask = 0;
+               fallthrough;
+       case ICP_SR_WR_REL:
+       case ICP_DR_WR_REL:
+               return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_NEIGH_REL:
+               return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+       default:
+               pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+               return -EFAULT;
+       }
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+                                unsigned int ae,
+                                struct icp_qat_uclo_encapme *encap_ae)
+{
+       unsigned int i;
+       unsigned char ctx_mask;
+       struct icp_qat_uof_init_regsym *init_regsym;
+
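+       /* All eight contexts in 8-ctx mode, even contexts only otherwise */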
+       if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+           ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+
+       for (i = 0; i < encap_ae->init_regsym_num; i++) {
+               unsigned int exp_res;
+
+               init_regsym = &encap_ae->init_regsym[i];
+               exp_res = init_regsym->value;
+               switch (init_regsym->init_type) {
+               case ICP_QAT_UOF_INIT_REG:
+                       qat_uclo_init_reg(handle, ae, ctx_mask,
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_REG_CTX:
+                       /* check if ctx is appropriate for the ctxMode */
+                       if (!((1 << init_regsym->ctx) & ctx_mask)) {
+                               pr_err("QAT: invalid ctx num = 0x%x\n",
+                                      init_regsym->ctx);
+                               return -EINVAL;
+                       }
+                       qat_uclo_init_reg(handle, ae,
+                                         (unsigned char)
+                                         (1 << init_regsym->ctx),
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_EXPR:
+                       pr_err("QAT: INIT_EXPR feature not supported\n");
+                       return -EINVAL;
+               case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+                       pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+                       return -EINVAL;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       struct icp_qat_uclo_aedata *aed;
+       unsigned int s, ae;
+
+       if (obj_handle->global_inited)
+               return 0;
+       if (obj_handle->init_mem_tab.entry_num) {
+               if (qat_uclo_init_memory(handle)) {
+                       pr_err("QAT: initialize memory failed\n");
+                       return -EINVAL;
+               }
+       }
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               aed = &obj_handle->ae_data[ae];
+               for (s = 0; s < aed->slice_num; s++) {
+                       if (!aed->ae_slices[s].encap_image)
+                               continue;
+                       if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
+                               return -EINVAL;
+               }
+       }
+       obj_handle->global_inited = 1;
+       return 0;
+}
+
+static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
+                            struct icp_qat_uclo_objhandle *obj_handle,
+                            unsigned char ae,
+                            struct icp_qat_uof_image *uof_image)
+{
+       unsigned char mode;
+       int ret;
+
+       mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
+       ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
+       if (ret) {
+               pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+               return ret;
+       }
+       if (handle->chip_info->nn) {
+               mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+               ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
+               if (ret) {
+                       pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+                       return ret;
+               }
+       }
+       mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
+       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
+       if (ret) {
+               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+               return ret;
+       }
+       mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
+       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
+       if (ret) {
+               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+               return ret;
+       }
+       if (handle->chip_info->lm2lm3) {
+               mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
+               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
+               if (ret) {
+                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+                       return ret;
+               }
+               mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
+               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
+               if (ret) {
+                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+                       return ret;
+               }
+               mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
+               qat_hal_set_ae_tindex_mode(handle, ae, mode);
+       }
+       return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uof_image *uof_image;
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+       unsigned char ae, s;
+       int error;
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               ae_data = &obj_handle->ae_data[ae];
+               for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+                                     ICP_QAT_UCLO_MAX_CTX); s++) {
+                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+                               continue;
+                       uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+                       error = qat_hal_set_modes(handle, obj_handle, ae,
+                                                 uof_image);
+                       if (error)
+                               return error;
+               }
+       }
+       return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uclo_encapme *image;
+       int a;
+
+       for (a = 0; a < obj_handle->uimage_num; a++) {
+               image = &obj_handle->ae_uimage[a];
+               image->uwords_num = image->page->beg_addr_p +
+                                       image->page->micro_words_num;
+       }
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+       obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+                                            obj_handle->obj_hdr->file_buff;
+       obj_handle->uword_in_bytes = 6;
+       obj_handle->prod_type = qat_uclo_get_dev_type(handle);
+       obj_handle->prod_rev = PID_MAJOR_REV |
+                       (PID_MINOR_REV & handle->hal_handle->revision_id);
+       if (qat_uclo_check_uof_compat(obj_handle)) {
+               pr_err("QAT: UOF incompatible\n");
+               return -EINVAL;
+       }
+       obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
+                                       GFP_KERNEL);
+       if (!obj_handle->uword_buf)
+               return -ENOMEM;
+       obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+       if (!obj_handle->obj_hdr->file_buff ||
+           !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+                                   &obj_handle->str_table)) {
+               pr_err("QAT: UOF doesn't have effective images\n");
+               goto out_err;
+       }
+       obj_handle->uimage_num =
+               qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+                                   ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+       if (!obj_handle->uimage_num)
+               goto out_err;
+       if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+               pr_err("QAT: Bad object\n");
+               goto out_check_uof_aemask_err;
+       }
+       qat_uclo_init_uword_num(handle);
+       qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+                                  &obj_handle->init_mem_tab);
+       if (qat_uclo_set_ae_mode(handle))
+               goto out_check_uof_aemask_err;
+       return 0;
+out_check_uof_aemask_err:
+       for (ae = 0; ae < obj_handle->uimage_num; ae++)
+               kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+       kfree(obj_handle->uword_buf);
+       return -EFAULT;
+}
+
+static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+                                     struct icp_qat_suof_filehdr *suof_ptr,
+                                     int suof_size)
+{
+       unsigned int check_sum = 0;
+       unsigned int min_ver_offset = 0;
+       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+
+       suof_handle->file_id = ICP_QAT_SUOF_FID;
+       suof_handle->suof_buf = (char *)suof_ptr;
+       suof_handle->suof_size = suof_size;
+       min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
+                                             min_ver);
+       check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
+                                              min_ver_offset);
+       if (check_sum != suof_ptr->check_sum) {
+               pr_err("QAT: incorrect SUOF checksum\n");
+               return -EINVAL;
+       }
+       suof_handle->check_sum = suof_ptr->check_sum;
+       suof_handle->min_ver = suof_ptr->min_ver;
+       suof_handle->maj_ver = suof_ptr->maj_ver;
+       suof_handle->fw_type = suof_ptr->fw_type;
+       return 0;
+}
+
+static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
+                             struct icp_qat_suof_img_hdr *suof_img_hdr,
+                             struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+       struct icp_qat_simg_ae_mode *ae_mode;
+       struct icp_qat_suof_objhdr *suof_objhdr;
+
+       suof_img_hdr->simg_buf = (suof_handle->suof_buf +
+                                 suof_chunk_hdr->offset +
+                                 sizeof(*suof_objhdr));
+       suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
+                                 (suof_handle->suof_buf +
+                                  suof_chunk_hdr->offset))->img_length;
+
+       suof_img_hdr->css_header = suof_img_hdr->simg_buf;
+       suof_img_hdr->css_key = (suof_img_hdr->css_header +
+                                sizeof(struct icp_qat_css_hdr));
+       suof_img_hdr->css_signature = suof_img_hdr->css_key +
+                                     ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+                                     ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+       suof_img_hdr->css_simg = suof_img_hdr->css_signature +
+                                ICP_QAT_CSS_SIGNATURE_LEN(handle);
+
+       ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
+       suof_img_hdr->ae_mask = ae_mode->ae_mask;
+       suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
+       suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
+       suof_img_hdr->fw_type = ae_mode->fw_type;
+}
+
+static void
+qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
+                         struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
+{
+       char **sym_str = (char **)&suof_handle->sym_str;
+       unsigned int *sym_size = &suof_handle->sym_size;
+       struct icp_qat_suof_strtable *str_table_obj;
+
+       *sym_size = *(unsigned int *)(uintptr_t)
+                  (suof_chunk_hdr->offset + suof_handle->suof_buf);
+       *sym_str = (char *)(uintptr_t)
+                  (suof_handle->suof_buf + suof_chunk_hdr->offset +
+                  sizeof(str_table_obj->tab_length));
+}
+
+static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
+                                     struct icp_qat_suof_img_hdr *img_hdr)
+{
+       struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
+       unsigned int prod_rev, maj_ver, prod_type;
+
+       prod_type = qat_uclo_get_dev_type(handle);
+       img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
+       prod_rev = PID_MAJOR_REV |
+                        (PID_MINOR_REV & handle->hal_handle->revision_id);
+       if (img_ae_mode->dev_type != prod_type) {
+               pr_err("QAT: incompatible product type %x\n",
+                      img_ae_mode->dev_type);
+               return -EINVAL;
+       }
+       maj_ver = prod_rev & 0xff;
+       if (maj_ver > img_ae_mode->devmax_ver ||
+           maj_ver < img_ae_mode->devmin_ver) {
+               pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+
+       kfree(sobj_handle->img_table.simg_hdr);
+       sobj_handle->img_table.simg_hdr = NULL;
+       kfree(handle->sobj_handle);
+       handle->sobj_handle = NULL;
+}
+
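+/* Swap the image at @img_id with the last entry so it is loaded last */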
+static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
+                             unsigned int img_id, unsigned int num_simgs)
+{
+       struct icp_qat_suof_img_hdr img_header;
+
+       if (img_id != num_simgs - 1) {
+               memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
+                      sizeof(*suof_img_hdr));
+               memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
+                      sizeof(*suof_img_hdr));
+               memcpy(&suof_img_hdr[img_id], &img_header,
+                      sizeof(*suof_img_hdr));
+       }
+}
+
+static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
+                            struct icp_qat_suof_filehdr *suof_ptr,
+                            int suof_size)
+{
+       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
+       struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
+       struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
+       int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
+       unsigned int i = 0;
+       struct icp_qat_suof_img_hdr img_header;
+
+       if (!suof_ptr || suof_size == 0) {
+               pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+               return -EINVAL;
+       }
+       if (qat_uclo_check_suof_format(suof_ptr))
+               return -EINVAL;
+       ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
+       if (ret)
+               return ret;
+       suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
+                        ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
+
+       qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
+       suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
+
+       if (suof_handle->img_table.num_simgs != 0) {
+               suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
+                                      sizeof(img_header),
+                                      GFP_KERNEL);
+               if (!suof_img_hdr)
+                       return -ENOMEM;
+               suof_handle->img_table.simg_hdr = suof_img_hdr;
+
+               for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
+                       qat_uclo_map_simg(handle, &suof_img_hdr[i],
+                                         &suof_chunk_hdr[1 + i]);
+                       ret = qat_uclo_check_simg_compat(handle,
+                                                        &suof_img_hdr[i]);
+                       if (ret)
+                               return ret;
+                       suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
+                       if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
+                               ae0_img = i;
+               }
+
+               if (!handle->chip_info->tgroup_share_ustore) {
+                       qat_uclo_tail_img(suof_img_hdr, ae0_img,
+                                         suof_handle->img_table.num_simgs);
+               }
+       }
+       return 0;
+}
+
+#define ADD_ADDR(high, low)  ((((u64)high) << 32) + low)
+#define BITS_IN_DWORD 32
+
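+/*
+ * Hand the DMA'ed image to the FCU for signature verification: program
+ * the descriptor bus address, issue the AUTH command and poll the status
+ * CSR until verification completes, fails or the retry budget runs out.
+ */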
+static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
+                           struct icp_qat_fw_auth_desc *desc)
+{
+       u32 fcu_sts, retry = 0;
+       u32 fcu_ctl_csr, fcu_sts_csr;
+       u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
+       u64 bus_addr;
+
+       bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
+                          - sizeof(struct icp_qat_auth_chunk);
+
+       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+       fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
+       fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
+
+       SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+       SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
+       SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
+
+       do {
+               msleep(FW_AUTH_WAIT_PERIOD);
+               fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+               if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
+                       goto auth_fail;
+               if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
+                       if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
+                               return 0;
+       } while (retry++ < FW_AUTH_MAX_RETRY);
+auth_fail:
+       pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+              fcu_sts & FCU_AUTH_STS_MASK, retry);
+       return -EINVAL;
+}
+
+static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
+                                 int imgid)
+{
+       struct icp_qat_suof_handle *sobj_handle;
+
+       if (!handle->chip_info->tgroup_share_ustore)
+               return false;
+
+       sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
+       if (handle->hal_handle->admin_ae_mask &
+           sobj_handle->img_table.simg_hdr[imgid].ae_mask)
+               return false;
+
+       return true;
+}
+
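+/*
+ * Load one authenticated image into all targeted AEs at once: all enabled
+ * AEs must be idle, then a single FCU LOAD with the broadcast mask is
+ * issued and polled until every masked AE reports loaded.
+ */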
+static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
+                                     struct icp_qat_fw_auth_desc *desc)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long desc_ae_mask = desc->ae_mask;
+       u32 fcu_sts, ae_broadcast_mask = 0;
+       u32 fcu_loaded_csr, ae_loaded;
+       u32 fcu_sts_csr, fcu_ctl_csr;
+       unsigned int ae, retry = 0;
+
+       if (handle->chip_info->tgroup_share_ustore) {
+               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+               fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
+       } else {
+               pr_err("Chip 0x%x doesn't support broadcast load\n",
+                      handle->pci_dev->device);
+               return -EINVAL;
+       }
+
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
+                       pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+                       return -EINVAL;
+               }
+
+               if (test_bit(ae, &desc_ae_mask))
+                       ae_broadcast_mask |= 1 << ae;
+       }
+
+       if (ae_broadcast_mask) {
+               SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
+                           ae_broadcast_mask);
+
+               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
+
+               do {
+                       msleep(FW_AUTH_WAIT_PERIOD);
+                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+                       fcu_sts &= FCU_AUTH_STS_MASK;
+
+                       if (fcu_sts == FCU_STS_LOAD_FAIL) {
+                               pr_err("Broadcast load failed: 0x%x)\n", fcu_sts);
+                               return -EINVAL;
+                       } else if (fcu_sts == FCU_STS_LOAD_DONE) {
+                               ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
+                               ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
+
+                               if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
+                                       break;
+                       }
+               } while (retry++ < FW_AUTH_MAX_RETRY);
+
+               if (retry > FW_AUTH_MAX_RETRY) {
+                       pr_err("QAT: broadcast load failed timeout %d\n", retry);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
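+/* Allocate DMA-coherent memory for a signed image and record its addresses */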
+static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
+                              struct icp_firml_dram_desc *dram_desc,
+                              unsigned int size)
+{
+       void *vptr;
+       dma_addr_t ptr;
+
+       vptr = dma_alloc_coherent(&handle->pci_dev->dev,
+                                 size, &ptr, GFP_KERNEL);
+       if (!vptr)
+               return -ENOMEM;
+       dram_desc->dram_base_addr_v = vptr;
+       dram_desc->dram_bus_addr = ptr;
+       dram_desc->dram_size = size;
+       return 0;
+}
+
+static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
+                              struct icp_firml_dram_desc *dram_desc)
+{
+       if (handle && dram_desc && dram_desc->dram_base_addr_v) {
+               dma_free_coherent(&handle->pci_dev->dev,
+                                 (size_t)(dram_desc->dram_size),
+                                 dram_desc->dram_base_addr_v,
+                                 dram_desc->dram_bus_addr);
+       }
+
+       if (dram_desc)
+               memset(dram_desc, 0, sizeof(*dram_desc));
+}
+
+static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
+                                  struct icp_qat_fw_auth_desc **desc)
+{
+       struct icp_firml_dram_desc dram_desc;
+
+       if (*desc) {
+               dram_desc.dram_base_addr_v = *desc;
+               dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
+                                          (*desc))->chunk_bus_addr;
+               dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
+                                      (*desc))->chunk_size;
+               qat_uclo_simg_free(handle, &dram_desc);
+       }
+}
+
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+                               char *image, unsigned int size,
+                               unsigned int fw_type)
+{
+       char *fw_type_name = fw_type ? "MMP" : "AE";
+       unsigned int css_dword_size = sizeof(u32);
+
+       if (handle->chip_info->fw_auth) {
+               struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+               unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+               if ((css_hdr->header_len * css_dword_size) != header_len)
+                       goto err;
+               if ((css_hdr->size * css_dword_size) != size)
+                       goto err;
+               if (fw_type != css_hdr->fw_type)
+                       goto err;
+               if (size <= header_len)
+                       goto err;
+               size -= header_len;
+       }
+
+       if (fw_type == CSS_AE_FIRMWARE) {
+               if (size < sizeof(struct icp_qat_simg_ae_mode) +
+                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+                       goto err;
+               if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+                       goto err;
+       } else if (fw_type == CSS_MMP_FIRMWARE) {
+               if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+                       goto err;
+       } else {
+               pr_err("QAT: Unsupported firmware type\n");
+               return -EINVAL;
+       }
+       return 0;
+
+err:
+       pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+       return -EINVAL;
+}
+
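+/*
+ * Lay the image out in DMA memory as the FCU expects it:
+ * CSS header | FWSK modulus | padding | exponent | signature | image body,
+ * recording the bus address of each region in the auth descriptor.
+ */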
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+                               char *image, unsigned int size,
+                               struct icp_qat_fw_auth_desc **desc)
+{
+       struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+       struct icp_qat_fw_auth_desc *auth_desc;
+       struct icp_qat_auth_chunk *auth_chunk;
+       u64 virt_addr,  bus_addr, virt_base;
+       unsigned int length, simg_offset = sizeof(*auth_chunk);
+       struct icp_qat_simg_ae_mode *simg_ae_mode;
+       struct icp_firml_dram_desc img_desc;
+
+       if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
+               pr_err("QAT: error, input image size overflow %d\n", size);
+               return -EINVAL;
+       }
+       length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
+                ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
+                size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
+       if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
+               pr_err("QAT: error, allocate continuous dram fail\n");
+               return -ENOMEM;
+       }
+
+       auth_chunk = img_desc.dram_base_addr_v;
+       auth_chunk->chunk_size = img_desc.dram_size;
+       auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+       virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
+       bus_addr  = img_desc.dram_bus_addr + simg_offset;
+       auth_desc = img_desc.dram_base_addr_v;
+       auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->css_hdr_low = (unsigned int)bus_addr;
+       virt_addr = virt_base;
+
+       memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+       /* pub key */
+       bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
+                          sizeof(*css_hdr);
+       virt_addr = virt_addr + sizeof(*css_hdr);
+
+       auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+
+       memcpy((void *)(uintptr_t)virt_addr,
+              (void *)(image + sizeof(*css_hdr)),
+              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+       /* padding */
+       memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
+              0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+       /* exponent */
+       memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+              ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
+              (void *)(image + sizeof(*css_hdr) +
+                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
+              sizeof(unsigned int));
+
+       /* signature */
+       bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
+                           auth_desc->fwsk_pub_low) +
+                  ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+       virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+       auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->signature_low = (unsigned int)bus_addr;
+
+       memcpy((void *)(uintptr_t)virt_addr,
+              (void *)(image + sizeof(*css_hdr) +
+              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+              ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
+              ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+       bus_addr = ADD_ADDR(auth_desc->signature_high,
+                           auth_desc->signature_low) +
+                  ICP_QAT_CSS_SIGNATURE_LEN(handle);
+       virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+
+       auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
+       auth_desc->img_low = (unsigned int)bus_addr;
+       auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
+       memcpy((void *)(uintptr_t)virt_addr,
+              (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
+              auth_desc->img_len);
+       virt_addr = virt_base;
+       /* AE firmware */
+       if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
+           CSS_AE_FIRMWARE) {
+               auth_desc->img_ae_mode_data_high = auth_desc->img_high;
+               auth_desc->img_ae_mode_data_low = auth_desc->img_low;
+               bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
+                                   auth_desc->img_ae_mode_data_low) +
+                          sizeof(struct icp_qat_simg_ae_mode);
+
+               auth_desc->img_ae_init_data_high = (unsigned int)
+                                                (bus_addr >> BITS_IN_DWORD);
+               auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+               bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+               auth_desc->img_ae_insts_high = (unsigned int)
+                                            (bus_addr >> BITS_IN_DWORD);
+               auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+               virt_addr += sizeof(struct icp_qat_css_hdr);
+               virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
+               virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+               simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
+               auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+       } else {
+               auth_desc->img_ae_insts_high = auth_desc->img_high;
+               auth_desc->img_ae_insts_low = auth_desc->img_low;
+       }
+       *desc = auth_desc;
+       return 0;
+}
+
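+/*
+ * Load the authenticated image into each AE in the descriptor mask, one
+ * FCU LOAD command per AE, polling until the loaded-AE CSR reflects it.
+ */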
+static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
+                           struct icp_qat_fw_auth_desc *desc)
+{
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       u32 fcu_sts_csr, fcu_ctl_csr;
+       u32 loaded_aes, loaded_csr;
+       unsigned int i;
+       u32 fcu_sts;
+
+       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
+       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
+       loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
+
+       for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
+               int retry = 0;
+
+               if (!((desc->ae_mask >> i) & 0x1))
+                       continue;
+               if (qat_hal_check_ae_active(handle, i)) {
+                       pr_err("QAT: AE %d is active\n", i);
+                       return -EINVAL;
+               }
+               SET_CAP_CSR(handle, fcu_ctl_csr,
+                           (FCU_CTRL_CMD_LOAD |
+                           (1 << FCU_CTRL_BROADCAST_POS) |
+                           (i << FCU_CTRL_AE_POS)));
+
+               do {
+                       msleep(FW_AUTH_WAIT_PERIOD);
+                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+                       if ((fcu_sts & FCU_AUTH_STS_MASK) ==
+                           FCU_STS_LOAD_DONE) {
+                               loaded_aes = GET_CAP_CSR(handle, loaded_csr);
+                               loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
+                               if (loaded_aes & (1 << i))
+                                       break;
+                       }
+               } while (retry++ < FW_AUTH_MAX_RETRY);
+               if (retry > FW_AUTH_MAX_RETRY) {
+                       pr_err("QAT: firmware load failed timeout %x\n", retry);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
+                                void *addr_ptr, int mem_size)
+{
+       struct icp_qat_suof_handle *suof_handle;
+
+       suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
+       if (!suof_handle)
+               return -ENOMEM;
+       handle->sobj_handle = suof_handle;
+       if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
+               qat_uclo_del_suof(handle);
+               pr_err("QAT: map SUOF failed\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+                      void *addr_ptr, int mem_size)
+{
+       struct icp_qat_fw_auth_desc *desc = NULL;
+       int status = 0;
+       int ret;
+
+       ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+       if (ret)
+               return ret;
+
+       if (handle->chip_info->fw_auth) {
+               status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
+               if (!status)
+                       status = qat_uclo_auth_fw(handle, desc);
+               qat_uclo_ummap_auth_fw(handle, &desc);
+       } else {
+               if (handle->chip_info->mmp_sram_size < mem_size) {
+                       pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+                       return -EFBIG;
+               }
+               qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
+       }
+       return status;
+}
+
+static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+                               void *addr_ptr, int mem_size)
+{
+       struct icp_qat_uof_filehdr *filehdr;
+       struct icp_qat_uclo_objhandle *objhdl;
+
+       objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+       if (!objhdl)
+               return -ENOMEM;
+       objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+       if (!objhdl->obj_buf)
+               goto out_objbuf_err;
+       filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+       if (qat_uclo_check_uof_format(filehdr))
+               goto out_objhdr_err;
+       objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+                                            ICP_QAT_UOF_OBJS);
+       if (!objhdl->obj_hdr) {
+               pr_err("QAT: object file chunk is null\n");
+               goto out_objhdr_err;
+       }
+       handle->obj_handle = objhdl;
+       if (qat_uclo_parse_uof_obj(handle))
+               goto out_overlay_obj_err;
+       return 0;
+
+out_overlay_obj_err:
+       handle->obj_handle = NULL;
+       kfree(objhdl->obj_hdr);
+out_objhdr_err:
+       kfree(objhdl->obj_buf);
+out_objbuf_err:
+       kfree(objhdl);
+       return -ENOMEM;
+}
+
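+/* Validate the MOF checksum, computed from the min_ver field to end of file */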
+static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_mof_file_hdr *mof_ptr,
+                                    u32 mof_size)
+{
+       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
+       unsigned int min_ver_offset;
+       unsigned int checksum;
+
+       mobj_handle->file_id = ICP_QAT_MOF_FID;
+       mobj_handle->mof_buf = (char *)mof_ptr;
+       mobj_handle->mof_size = mof_size;
+
+       min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
+                                            min_ver);
+       checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
+                                             min_ver_offset);
+       if (checksum != mof_ptr->checksum) {
+               pr_err("QAT: incorrect MOF checksum\n");
+               return -EINVAL;
+       }
+
+       mobj_handle->checksum = mof_ptr->checksum;
+       mobj_handle->min_ver = mof_ptr->min_ver;
+       mobj_handle->maj_ver = mof_ptr->maj_ver;
+       return 0;
+}
+
+static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
+
+       kfree(mobj_handle->obj_table.obj_hdr);
+       mobj_handle->obj_table.obj_hdr = NULL;
+       kfree(handle->mobj_handle);
+       handle->mobj_handle = NULL;
+}
+
+static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
+                                       char *obj_name, char **obj_ptr,
+                                       unsigned int *obj_size)
+{
+       struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
+       unsigned int i;
+
+       for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
+               if (!strncmp(obj_hdr[i].obj_name, obj_name,
+                            ICP_QAT_SUOF_OBJ_NAME_LEN)) {
+                       *obj_ptr  = obj_hdr[i].obj_buf;
+                       *obj_size = obj_hdr[i].obj_size;
+                       return 0;
+               }
+       }
+
+       pr_err("QAT: object %s is not found inside MOF\n", obj_name);
+       return -EINVAL;
+}
+
+static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
+                                    struct icp_qat_mof_objhdr *mobj_hdr,
+                                    struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
+{
+       u8 *obj;
+
+       if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
+                    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
+               obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
+       } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
+                           ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
+               obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
+       } else {
+               pr_err("QAT: unsupported chunk id\n");
+               return -EINVAL;
+       }
+       mobj_hdr->obj_buf = obj;
+       mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
+       mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
+       return 0;
+}
+
+static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
+{
+       struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
+       struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
+       struct icp_qat_mof_obj_hdr *uobj_hdr;
+       struct icp_qat_mof_obj_hdr *sobj_hdr;
+       struct icp_qat_mof_objhdr *mobj_hdr;
+       unsigned int uobj_chunk_num = 0;
+       unsigned int sobj_chunk_num = 0;
+       unsigned int *valid_chunk;
+       int ret, i;
+
+       uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
+       sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
+       if (uobj_hdr)
+               uobj_chunk_num = uobj_hdr->num_chunks;
+       if (sobj_hdr)
+               sobj_chunk_num = sobj_hdr->num_chunks;
+
+       mobj_hdr = kcalloc(uobj_chunk_num + sobj_chunk_num,
+                          sizeof(*mobj_hdr), GFP_KERNEL);
+       if (!mobj_hdr)
+               return -ENOMEM;
+
+       mobj_handle->obj_table.obj_hdr = mobj_hdr;
+       valid_chunk = &mobj_handle->obj_table.num_objs;
+       uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
+                        ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
+       sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
+                       ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
+
+       /* map uof objects */
+       for (i = 0; i < uobj_chunk_num; i++) {
+               ret = qat_uclo_map_obj_from_mof(mobj_handle,
+                                               &mobj_hdr[*valid_chunk],
+                                               &uobj_chunkhdr[i]);
+               if (ret)
+                       return ret;
+               (*valid_chunk)++;
+       }
+
+       /* map suof objects */
+       for (i = 0; i < sobj_chunk_num; i++) {
+               ret = qat_uclo_map_obj_from_mof(mobj_handle,
+                                               &mobj_hdr[*valid_chunk],
+                                               &sobj_chunkhdr[i]);
+               if (ret)
+                       return ret;
+               (*valid_chunk)++;
+       }
+
+       if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
+               pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
+                                    struct icp_qat_mof_chunkhdr *mof_chunkhdr)
+{
+       char **sym_str = (char **)&mobj_handle->sym_str;
+       unsigned int *sym_size = &mobj_handle->sym_size;
+       struct icp_qat_mof_str_table *str_table_obj;
+
+       *sym_size = *(unsigned int *)(uintptr_t)
+                   (mof_chunkhdr->offset + mobj_handle->mof_buf);
+       *sym_str = (char *)(uintptr_t)
+                  (mobj_handle->mof_buf + mof_chunkhdr->offset +
+                   sizeof(str_table_obj->tab_len));
+}
+
+static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
+                                  struct icp_qat_mof_chunkhdr *mof_chunkhdr)
+{
+       char *chunk_id = mof_chunkhdr->chunk_id;
+
+       if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+               qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
+       else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+               mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
+                                        mof_chunkhdr->offset;
+       else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
+               mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
+                                        mof_chunkhdr->offset;
+}
+
+static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
+{
+       int maj = mof_hdr->maj_ver & 0xff;
+       int min = mof_hdr->min_ver & 0xff;
+
+       if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
+               pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+               return -EINVAL;
+       }
+
+       if (mof_hdr->num_chunks <= 0x1) {
+               pr_err("QAT: MOF chunk amount is incorrect\n");
+               return -EINVAL;
+       }
+       if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
+               pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
+                               struct icp_qat_mof_file_hdr *mof_ptr,
+                               u32 mof_size, char *obj_name, char **obj_ptr,
+                               unsigned int *obj_size)
+{
+       struct icp_qat_mof_chunkhdr *mof_chunkhdr;
+       unsigned int file_id = mof_ptr->file_id;
+       struct icp_qat_mof_handle *mobj_handle;
+       unsigned short chunks_num;
+       unsigned int i;
+       int ret;
+
+       if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
+               if (obj_ptr)
+                       *obj_ptr = (char *)mof_ptr;
+               if (obj_size)
+                       *obj_size = mof_size;
+               return 0;
+       }
+       if (qat_uclo_check_mof_format(mof_ptr))
+               return -EINVAL;
+
+       mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
+       if (!mobj_handle)
+               return -ENOMEM;
+
+       handle->mobj_handle = mobj_handle;
+       ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
+       if (ret)
+               return ret;
+
+       mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
+       chunks_num = mof_ptr->num_chunks;
+
+       /* Parse MOF file chunks */
+       for (i = 0; i < chunks_num; i++)
+               qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
+
+       /* Symbol table and at least one of uobjs/sobjs must be present */
+       if (!mobj_handle->sym_str ||
+           (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
+               return -EINVAL;
+
+       ret = qat_uclo_map_objs_from_mof(mobj_handle);
+       if (ret)
+               return ret;
+
+       /* Seek specified uof object in MOF */
+       return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
+                                           obj_ptr, obj_size);
+}
+
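+/*
+ * Top-level mapper: resolve @obj_name inside a MOF container if one is
+ * given, then map it as SUOF on parts with FW authentication, or as
+ * plain UOF otherwise.
+ */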
+int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
+                    void *addr_ptr, u32 mem_size, char *obj_name)
+{
+       char *obj_addr;
+       u32 obj_size;
+       int ret;
+
+       BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+                    (sizeof(handle->hal_handle->ae_mask) * 8));
+
+       if (!handle || !addr_ptr || mem_size < 24)
+               return -EINVAL;
+
+       if (obj_name) {
+               ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
+                                          &obj_addr, &obj_size);
+               if (ret)
+                       return ret;
+       } else {
+               obj_addr = addr_ptr;
+               obj_size = mem_size;
+       }
+
+       return (handle->chip_info->fw_auth) ?
+                       qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
+                       qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
+}
+
+void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int a;
+
+       if (handle->mobj_handle)
+               qat_uclo_del_mof(handle);
+       if (handle->sobj_handle)
+               qat_uclo_del_suof(handle);
+       if (!obj_handle)
+               return;
+
+       kfree(obj_handle->uword_buf);
+       for (a = 0; a < obj_handle->uimage_num; a++)
+               kfree(obj_handle->ae_uimage[a].page);
+
+       for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+               qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+       kfree(obj_handle->obj_hdr);
+       kfree(obj_handle->obj_buf);
+       kfree(obj_handle);
+       handle->obj_handle = NULL;
+}
+
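+/*
+ * Fetch the 44-bit microword for @addr_p (or @raddr on relocatable pages)
+ * from the page's uword blocks; invalid words are replaced by @fill.
+ */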
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+                                struct icp_qat_uclo_encap_page *encap_page,
+                                u64 *uword, unsigned int addr_p,
+                                unsigned int raddr, u64 fill)
+{
+       unsigned int i, addr;
+       u64 uwrd = 0;
+
+       if (!encap_page) {
+               *uword = fill;
+               return;
+       }
+       addr = (encap_page->page_region) ? raddr : addr_p;
+       for (i = 0; i < encap_page->uwblock_num; i++) {
+               if (addr >= encap_page->uwblock[i].start_addr &&
+                   addr <= encap_page->uwblock[i].start_addr +
+                   encap_page->uwblock[i].words_num - 1) {
+                       addr -= encap_page->uwblock[i].start_addr;
+                       addr *= obj_handle->uword_in_bytes;
+                       memcpy(&uwrd, (void *)(((uintptr_t)
+                              encap_page->uwblock[i].micro_words) + addr),
+                              obj_handle->uword_in_bytes);
+                       uwrd = uwrd & GENMASK_ULL(43, 0);
+               }
+       }
+       *uword = uwrd;
+       if (*uword == INVLD_UWORD)
+               *uword = fill;
+}
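
The helper above resolves one ustore address to a microword: it scans the page's uwblocks, converts a hit to a byte offset, copies uword_in_bytes out of the block, and masks the value to the low 44 bits (GENMASK_ULL(43, 0)) that an AE microword occupies; an INVLD_UWORD result falls back to the fill pattern. A standalone sketch of that lookup, assuming 8-byte microword slots (the driver's uword_in_bytes may differ):

#include <stdint.h>
#include <string.h>

#define UWORD_MASK 0xFFFFFFFFFFFULL	/* GENMASK_ULL(43, 0) */

/* Sketch of the uwblock lookup in qat_uclo_fill_uwords(); layout is a
 * simplified sample, not the driver's exact structures. */
static uint64_t lookup_uword(const uint64_t *micro_words,
			     unsigned int start_addr, unsigned int words_num,
			     unsigned int addr, uint64_t fill)
{
	uint64_t uwrd;

	/* Address not backed by this block: keep the fill pattern */
	if (addr < start_addr || addr >= start_addr + words_num)
		return fill;

	memcpy(&uwrd, &micro_words[addr - start_addr], sizeof(uwrd));
	return uwrd & UWORD_MASK;	/* only 44 bits are valid */
}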
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+                                       struct icp_qat_uclo_encap_page *encap_page,
+                                       unsigned int ae)
+{
+       unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       u64 fill_pat;
+
+       /* load the page starting at appropriate ustore address */
+       /* get fill-pattern from an image -- they are all the same */
+       memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+              sizeof(u64));
+       uw_physical_addr = encap_page->beg_addr_p;
+       uw_relative_addr = 0;
+       words_num = encap_page->micro_words_num;
+       while (words_num) {
+               if (words_num < UWORD_CPYBUF_SIZE)
+                       cpylen = words_num;
+               else
+                       cpylen = UWORD_CPYBUF_SIZE;
+
+               /* load the buffer */
+               for (i = 0; i < cpylen; i++)
+                       qat_uclo_fill_uwords(obj_handle, encap_page,
+                                            &obj_handle->uword_buf[i],
+                                            uw_physical_addr + i,
+                                            uw_relative_addr + i, fill_pat);
+
+               /* copy the buffer to ustore */
+               qat_hal_wr_uwords(handle, (unsigned char)ae,
+                                 uw_physical_addr, cpylen,
+                                 obj_handle->uword_buf);
+
+               uw_physical_addr += cpylen;
+               uw_relative_addr += cpylen;
+               words_num -= cpylen;
+       }
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+                                   struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned long ae_mask = handle->hal_handle->ae_mask;
+       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
+       unsigned long ae_assigned = image->ae_assigned;
+       struct icp_qat_uclo_aedata *aed;
+       unsigned int ctx_mask, s;
+       struct icp_qat_uclo_page *page;
+       unsigned char ae;
+       int ctx;
+
+       if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+       /*
+        * Load the default page and set assigned CTX PC
+        * to the entrypoint address.
+        */
+       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
+               if (!test_bit(ae, &cfg_ae_mask))
+                       continue;
+
+               if (!test_bit(ae, &ae_assigned))
+                       continue;
+
+               aed = &obj_handle->ae_data[ae];
+               /* find the slice to which this image is assigned */
+               for (s = 0; s < aed->slice_num; s++) {
+                       if (image->ctx_assigned &
+                           aed->ae_slices[s].ctx_mask_assigned)
+                               break;
+               }
+               if (s >= aed->slice_num)
+                       continue;
+               page = aed->ae_slices[s].page;
+               if (!page->encap_page->def_page)
+                       continue;
+               qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+               for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+                       aed->ae_slices[s].cur_page[ctx] =
+                                       (ctx_mask & (1 << ctx)) ? page : NULL;
+               qat_hal_set_live_ctx(handle, (unsigned char)ae,
+                                    image->ctx_assigned);
+               qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+                              image->entry_address);
+       }
+}
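
The ctx_mask picked at the top of this function encodes the context mode: eight-context images drive every context (0xff), while four-context images run only the even ones, so 0x55 (binary 01010101) selects contexts 0, 2, 4 and 6 in the cur_page loop above. The selection reduced to a predicate (illustrative helper, not part of the driver):

static int ctx_is_live(unsigned int ctx, bool four_ctx_mode)
{
	/* 0x55 == 0b01010101: contexts 0, 2, 4 and 6 in four-ctx mode */
	unsigned int ctx_mask = four_ctx_mode ? 0x55 : 0xff;

	return !!(ctx_mask & (1 << ctx));
}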
+
+static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int i;
+       struct icp_qat_fw_auth_desc *desc = NULL;
+       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
+       struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+       int ret;
+
+       for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+               ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+                                          simg_hdr[i].simg_len,
+                                          CSS_AE_FIRMWARE);
+               if (ret)
+                       return ret;
+
+               if (qat_uclo_map_auth_fw(handle,
+                                        (char *)simg_hdr[i].simg_buf,
+                                        (unsigned int)simg_hdr[i].simg_len,
+                                        &desc))
+                       goto wr_err;
+               if (qat_uclo_auth_fw(handle, desc))
+                       goto wr_err;
+               if (qat_uclo_is_broadcast(handle, i)) {
+                       if (qat_uclo_broadcast_load_fw(handle, desc))
+                               goto wr_err;
+               } else {
+                       if (qat_uclo_load_fw(handle, desc))
+                               goto wr_err;
+               }
+               qat_uclo_ummap_auth_fw(handle, &desc);
+       }
+       return 0;
+wr_err:
+       qat_uclo_ummap_auth_fw(handle, &desc);
+       return -EINVAL;
+}
+
+static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int i;
+
+       if (qat_uclo_init_globals(handle))
+               return -EINVAL;
+       for (i = 0; i < obj_handle->uimage_num; i++) {
+               if (!obj_handle->ae_uimage[i].img_ptr)
+                       return -EINVAL;
+               if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+                       return -EINVAL;
+               qat_uclo_wr_uimage_page(handle,
+                                       obj_handle->ae_uimage[i].img_ptr);
+       }
+       return 0;
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+       return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
+                                             qat_uclo_wr_uof_img(handle);
+}
+
+int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
+                            unsigned int cfg_ae_mask)
+{
+       if (!cfg_ae_mask)
+               return -EINVAL;
+
+       handle->cfg_ae_mask = cfg_ae_mask;
+       return 0;
+}
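
qat_uclo_map_obj(), qat_uclo_wr_all_uimage() and qat_uclo_del_obj() form the loader's map/write/release lifecycle. A compressed, hypothetical view of the calls made around them; the handle and firmware image are assumed to be set up elsewhere in the driver, and error handling is trimmed:

/* Hypothetical wrapper; in the driver these calls are spread across
 * the firmware load and release paths rather than one function. */
static int load_fw_image(struct icp_qat_fw_loader_handle *handle,
			 void *fw_addr, u32 fw_size, char *obj_name)
{
	int ret;

	/* Parse UOF/SUOF/MOF and select the named object (NULL: whole image) */
	ret = qat_uclo_map_obj(handle, fw_addr, fw_size, obj_name);
	if (ret)
		return ret;

	/* fw_auth devices authenticate SUOF images; others write raw UOF pages */
	ret = qat_uclo_wr_all_uimage(handle);

	/* Release parser state on both the success and error paths */
	qat_uclo_del_obj(handle);
	return ret;
}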
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
new file mode 100644 (file)
index 0000000..38d6f8e
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644 (file)
index 0000000..1ebe0b3
--- /dev/null
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "icp_qat_hw.h"
+
+#define ADF_DH895XCC_VF_MSK    0xFFFFFFFF
+
+/* Worker thread to service arbiter mappings */
+static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+       .name = ADF_DH895XCC_DEVICE_NAME,
+       .type = DEV_DH895XCC,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       u32 fuses = self->fuses;
+
+       return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+                        ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       u32 fuses = self->fuses;
+
+       return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_SRAM_BAR;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+       u32 capabilities;
+       u32 legfuses;
+
+       capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+                      ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+                      ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+                      ICP_ACCEL_CAPABILITIES_CIPHER |
+                      ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
+
+       /* A set bit in legfuses means the feature is OFF in this SKU */
+       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+       }
+       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
+               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+
+       return capabilities;
+}
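
Capability derivation here is purely subtractive: start from the full set, then clear a bit for every slice the legfuses readout reports as fused off. One hypothetical fuse value worked through; note the code above also drops the cipher capability when the auth slice is gone, presumably because chained cipher/auth operation needs both slices:

/* Worked example with a hypothetical fuse readout: auth slice fused off */
static u32 caps_example(void)
{
	u32 legfuses = ICP_ACCEL_MASK_AUTH_SLICE;
	u32 caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
		   ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
		   ICP_ACCEL_CAPABILITIES_CIPHER;

	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
		caps &= ~(ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_CIPHER);

	return caps;	/* CRYPTO_SYMMETRIC is all that remains */
}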
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+           >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+       switch (sku) {
+       case ADF_DH895XCC_FUSECTL_SKU_1:
+               return DEV_SKU_1;
+       case ADF_DH895XCC_FUSECTL_SKU_2:
+               return DEV_SKU_2;
+       case ADF_DH895XCC_FUSECTL_SKU_3:
+               return DEV_SKU_3;
+       case ADF_DH895XCC_FUSECTL_SKU_4:
+               return DEV_SKU_4;
+       default:
+               return DEV_SKU_UNKNOWN;
+       }
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+       return thrd_to_arb_map;
+}
+
+static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+       if (vf_mask & 0xFFFF) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+       }
+
+       /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
+       if (vf_mask >> 16) {
+               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+       }
+}
+
+static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 val;
+
+       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+             | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+
+       /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
+       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+             | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+}
+
+static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+       u32 sources, pending, disabled;
+       u32 errsou3, errmsk3;
+       u32 errsou5, errmsk5;
+
+       /* Get the interrupt sources triggered by VFs */
+       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+       errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
+       sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
+                 | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+
+       if (!sources)
+               return 0;
+
+       /* Get the already disabled interrupts */
+       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+       errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
+       disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
+                  | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+       pending = sources & ~disabled;
+       if (!pending)
+               return 0;
+
+       /* Due to HW limitations, when disabling the interrupts, we can't
+        * just disable the requested sources, as this would lead to missed
+        * interrupts if the sources change just before writing to ERRMSK3 and
+        * ERRMSK5.
+        * To work around it, disable all and re-enable only the sources that
+        * are not in vf_mask and were not already disabled. Re-enabling will
+        * trigger a new interrupt for the sources that have changed in the
+        * meantime, if any.
+        */
+       errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+       errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+       errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+       errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+       /* Return the sources of the (new) interrupt(s) */
+       return pending;
+}
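
The workaround comment above is easiest to follow with concrete numbers. Suppose, hypothetically, that VFs 2 and 5 have signalled and VF 5 was already masked; only VF 2 counts as newly pending, and after the blanket disable exactly sources | disabled stays masked, so every other VF is re-armed:

/* Hypothetical snapshot of the lower 16 VFs; illustrative only */
static u32 pending_example(void)
{
	u32 sources  = BIT(2) | BIT(5);		/* VFs 2 and 5 signalled */
	u32 disabled = BIT(5);			/* VF 5 was already masked */

	/* Keep sources | disabled masked after the disable-all write;
	 * the re-enable re-triggers anything that fired in the window. */
	return sources & ~disabled;		/* BIT(2): only VF 2 is new */
}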
+
+static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
+{
+       adf_gen2_cfg_iov_thds(accel_dev, enable,
+                             ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
+                             ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &dh895xcc_class;
+       hw_data->instance_id = dh895xcc_class.instances++;
+       hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_accel_cap = get_accel_cap;
+       hw_data->get_num_accels = adf_gen2_get_num_accels;
+       hw_data->get_num_aes = adf_gen2_get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_admin_info = adf_gen2_get_admin_info;
+       hw_data->get_arb_info = adf_gen2_get_arb_info;
+       hw_data->get_sram_bar_id = get_sram_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_DH895XCC_FW;
+       hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
+       hw_data->init_admin_comms = adf_init_admin_comms;
+       hw_data->exit_admin_comms = adf_exit_admin_comms;
+       hw_data->configure_iov_threads = configure_iov_threads;
+       hw_data->send_admin_init = adf_send_admin_init;
+       hw_data->init_arb = adf_init_arb;
+       hw_data->exit_arb = adf_exit_arb;
+       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+       hw_data->enable_ints = adf_gen2_enable_ints;
+       hw_data->reset_device = adf_reset_sbr;
+       hw_data->disable_iov = adf_disable_sriov;
+       hw_data->dev_config = adf_gen2_dev_config;
+
+       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+       hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+       hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
+       hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644 (file)
index 0000000..7b674bb
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+
+/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src)   (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask)  (((vf_mask) & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src)   (((vf_src) & 0x0000FFFF) << 16)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask)  ((vf_mask) >> 16)
+
+/* AE to function mapping */
+#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
+#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
+
+/* FW names */
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
new file mode 100644 (file)
index 0000000..e18860a
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xcc_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_DH895XCC_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+       .sriov_configure = adf_sriov_configure,
+       .err_handler = &adf_err_handler,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+                       adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+               /*
+                * If the accelerator is connected to a node with no memory
+                * there is no point in using the accelerator since the remote
+                * memory transaction will be very slow.
+                */
+               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+               return -EINVAL;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /*
+        * Add accel device to accel table.
+        * This should be called before adf_cleanup_accel() is called.
+        */
+       if (adf_devmgr_add_dev(accel_dev, NULL)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       pcie_set_readrq(pdev, 1024);
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set dma identifier */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Get accelerator capabilities mask */
+       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+
+       /* Find and map all the device's BARS */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err_free_reg;
+       }
+
+       ret = adf_dev_up(accel_dev, true);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
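
adf_probe() follows the standard kernel goto-unwind ladder: every acquired resource has a label that releases it, and a failure jumps to the label covering everything obtained so far, so teardown runs in reverse order of setup. The shape in miniature, with stand-in helpers (not driver functions):

/* acquire_a/acquire_b stand in for calls such as pci_enable_device()
 * and pci_request_regions(); all names here are illustrative. */
static int acquire_a(struct pci_dev *pdev) { return 0; }
static void release_a(struct pci_dev *pdev) { }
static int acquire_b(struct pci_dev *pdev) { return 0; }

static int probe_skeleton(struct pci_dev *pdev)
{
	int ret;

	ret = acquire_a(pdev);
	if (ret)
		goto out_err;

	ret = acquire_b(pdev);
	if (ret)
		goto out_err_release_a;

	return 0;

out_err_release_a:
	release_a(pdev);
out_err:
	return ret;
}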
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_DH895XCC_FW);
+MODULE_FIRMWARE(ADF_DH895XCC_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
new file mode 100644 (file)
index 0000000..0153c85
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644 (file)
index 0000000..70e56cc
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2021 Intel Corporation */
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_gen2_config.h>
+#include <adf_gen2_dc.h>
+#include <adf_gen2_hw_data.h>
+#include <adf_gen2_pfvf.h>
+#include <adf_pfvf_vf_msg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+       .name = ADF_DH895XCCVF_DEVICE_NAME,
+       .type = DEV_DH895XCCVF,
+       .instances = 0
+};
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       return DEV_SKU_VF;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &dh895xcciov_class;
+       hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
+       hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
+       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+       hw_data->free_irq = adf_vf_isr_resource_free;
+       hw_data->enable_error_correction = adf_vf_void_noop;
+       hw_data->init_admin_comms = adf_vf_int_noop;
+       hw_data->exit_admin_comms = adf_vf_void_noop;
+       hw_data->send_admin_init = adf_vf2pf_notify_init;
+       hw_data->init_arb = adf_vf_int_noop;
+       hw_data->exit_arb = adf_vf_void_noop;
+       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->enable_ints = adf_vf_void_noop;
+       hw_data->dev_class->instances++;
+       hw_data->dev_config = adf_gen2_dev_config;
+       adf_devmgr_update_class_index(hw_data);
+       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
+       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+       adf_gen2_init_dc_ops(&hw_data->dc_ops);
+}
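
The VF reuses the common adf_hw_device_data contract by pointing PF-only hooks at typed no-ops (adf_vf_int_noop()/adf_vf_void_noop() above), so the shared init path can call every hook without NULL checks. The pattern in miniature, with an illustrative ops structure that is not the driver's real one:

/* Sketch of the typed-no-op pattern; hw_ops and its members are
 * illustrative names. */
struct hw_ops {
	int  (*init_admin)(struct adf_accel_dev *accel_dev);
	void (*exit_admin)(struct adf_accel_dev *accel_dev);
};

static int int_noop(struct adf_accel_dev *accel_dev) { return 0; }
static void void_noop(struct adf_accel_dev *accel_dev) { }

static const struct hw_ops vf_ops = {
	.init_admin = int_noop,		/* nothing to set up on a VF */
	.exit_admin = void_noop,
};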
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+       adf_devmgr_update_class_index(hw_data);
+}
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644 (file)
index 0000000..6973fa9
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+#endif
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644 (file)
index 0000000..96854a1
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include "adf_dh895xccvf_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
+       { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = ADF_DH895XCCVF_DEVICE_NAME,
+       .probe = adf_probe,
+       .remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       struct adf_accel_dev *pf;
+       int i;
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_pci_dev->pci_dev->device) {
+               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+                       adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+               accel_dev->hw_device = NULL;
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+       adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_dev *pf;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       unsigned long bar_mask;
+       int ret;
+
+       switch (ent->device) {
+       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+                                dev_to_node(&pdev->dev));
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->is_vf = true;
+       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       accel_pci_dev->pci_dev = pdev;
+
+       /* Add accel device to accel table */
+       if (adf_devmgr_add_dev(accel_dev, pf)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+                              dev_to_node(&pdev->dev));
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       accel_dev->hw_device = hw_data;
+       adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, pci_name(pdev));
+
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set dma identifier */
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+       if (ret) {
+               dev_err(&pdev->dev, "No usable DMA configuration\n");
+               goto out_err_disable;
+       }
+
+       if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
+               ret = -EFAULT;
+               goto out_err_disable;
+       }
+
+       /* Find and map all the device's BARS */
+       i = 0;
+       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+                       ret = -EFAULT;
+                       goto out_err_free_reg;
+               }
+       }
+       pci_set_master(pdev);
+       /* Completion for VF2PF request/response message exchange */
+       init_completion(&accel_dev->vf.msg_received);
+
+       ret = adf_dev_up(accel_dev, false);
+       if (ret)
+               goto out_err_dev_stop;
+
+       return ret;
+
+out_err_dev_stop:
+       adf_dev_down(accel_dev, false);
+out_err_free_reg:
+       pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+       pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+       adf_cleanup_accel(accel_dev);
+       kfree(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       adf_flush_vf_wq(accel_dev);
+       adf_dev_down(accel_dev, false);
+       adf_cleanup_accel(accel_dev);
+       adf_cleanup_pci_dev(accel_dev);
+       kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
deleted file mode 100644 (file)
index 1220cc8..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_QAT
-       tristate
-       select CRYPTO_AEAD
-       select CRYPTO_AUTHENC
-       select CRYPTO_SKCIPHER
-       select CRYPTO_AKCIPHER
-       select CRYPTO_DH
-       select CRYPTO_HMAC
-       select CRYPTO_RSA
-       select CRYPTO_SHA1
-       select CRYPTO_SHA256
-       select CRYPTO_SHA512
-       select CRYPTO_LIB_AES
-       select FW_LOADER
-       select CRC8
-
-config CRYPTO_DEV_QAT_DH895xCC
-       tristate "Support for Intel(R) DH895xCC"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_dh895xcc.
-
-config CRYPTO_DEV_QAT_C3XXX
-       tristate "Support for Intel(R) C3XXX"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c3xxx.
-
-config CRYPTO_DEV_QAT_C62X
-       tristate "Support for Intel(R) C62X"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c62x.
-
-config CRYPTO_DEV_QAT_4XXX
-       tristate "Support for Intel(R) QAT_4XXX"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) QuickAssist Technology QAT_4xxx
-         for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_4xxx.
-
-config CRYPTO_DEV_QAT_DH895xCCVF
-       tristate "Support for Intel(R) DH895xCC Virtual Function"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select PCI_IOV
-       select CRYPTO_DEV_QAT
-
-       help
-         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
-         Virtual Function for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_dh895xccvf.
-
-config CRYPTO_DEV_QAT_C3XXXVF
-       tristate "Support for Intel(R) C3XXX Virtual Function"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select PCI_IOV
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology
-         Virtual Function for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c3xxxvf.
-
-config CRYPTO_DEV_QAT_C62XVF
-       tristate "Support for Intel(R) C62X Virtual Function"
-       depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
-       select PCI_IOV
-       select CRYPTO_DEV_QAT
-       help
-         Support for Intel(R) C62x with Intel(R) QuickAssist Technology
-         Virtual Function for accelerating crypto and compression workloads.
-
-         To compile this as a module, choose M here: the module
-         will be called qat_c62xvf.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644 (file)
index 258c8a6..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
-obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/qat/qat_4xxx/Makefile b/drivers/crypto/qat/qat_4xxx/Makefile
deleted file mode 100644 (file)
index ff9c8b5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
-qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
deleted file mode 100644 (file)
index 7324b86..0000000
+++ /dev/null
@@ -1,417 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 - 2021 Intel Corporation */
-#include <linux/iopoll.h>
-#include <adf_accel_devices.h>
-#include <adf_cfg.h>
-#include <adf_common_drv.h>
-#include <adf_gen4_dc.h>
-#include <adf_gen4_hw_data.h>
-#include <adf_gen4_pfvf.h>
-#include <adf_gen4_pm.h>
-#include "adf_4xxx_hw_data.h"
-#include "icp_qat_hw.h"
-
-struct adf_fw_config {
-       u32 ae_mask;
-       char *obj_name;
-};
-
-static struct adf_fw_config adf_4xxx_fw_cy_config[] = {
-       {0xF0, ADF_4XXX_SYM_OBJ},
-       {0xF, ADF_4XXX_ASYM_OBJ},
-       {0x100, ADF_4XXX_ADMIN_OBJ},
-};
-
-static struct adf_fw_config adf_4xxx_fw_dc_config[] = {
-       {0xF0, ADF_4XXX_DC_OBJ},
-       {0xF, ADF_4XXX_DC_OBJ},
-       {0x100, ADF_4XXX_ADMIN_OBJ},
-};
-
-static struct adf_fw_config adf_402xx_fw_cy_config[] = {
-       {0xF0, ADF_402XX_SYM_OBJ},
-       {0xF, ADF_402XX_ASYM_OBJ},
-       {0x100, ADF_402XX_ADMIN_OBJ},
-};
-
-static struct adf_fw_config adf_402xx_fw_dc_config[] = {
-       {0xF0, ADF_402XX_DC_OBJ},
-       {0xF, ADF_402XX_DC_OBJ},
-       {0x100, ADF_402XX_ADMIN_OBJ},
-};
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map_cy[ADF_4XXX_MAX_ACCELENGINES] = {
-       0x5555555, 0x5555555, 0x5555555, 0x5555555,
-       0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
-       0x0
-};
-
-static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
-       0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
-       0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
-       0x0
-};
-
-static struct adf_hw_device_class adf_4xxx_class = {
-       .name = ADF_4XXX_DEVICE_NAME,
-       .type = DEV_4XXX,
-       .instances = 0,
-};
-
-enum dev_services {
-       SVC_CY = 0,
-       SVC_DC,
-};
-
-static const char *const dev_cfg_services[] = {
-       [SVC_CY] = ADF_CFG_CY,
-       [SVC_DC] = ADF_CFG_DC,
-};
-
-static int get_service_enabled(struct adf_accel_dev *accel_dev)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       int ret;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       ADF_SERVICES_ENABLED " param not found\n");
-               return ret;
-       }
-
-       ret = match_string(dev_cfg_services, ARRAY_SIZE(dev_cfg_services),
-                          services);
-       if (ret < 0)
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
-                       services);
-
-       return ret;
-}
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 me_disable = self->fuses;
-
-       return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       if (!self || !self->ae_mask)
-               return 0;
-
-       return hweight32(self->ae_mask);
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_4XXX_SRAM_BAR;
-}
-
-/*
- * The vector routing table is used to select the MSI-X entry to use for each
- * interrupt source.
- * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
- * The final entry corresponds to VF2PF or error interrupts.
- * This vector table could be used to configure one MSI-X entry to be shared
- * between multiple interrupt sources.
- *
- * The default routing is set to have a one to one correspondence between the
- * interrupt source and the MSI-X entry used.
- */
-static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *csr;
-       int i;
-
-       csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-       for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
-               ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
-}
-
-static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
-       u32 capabilities_cy, capabilities_dc;
-       u32 fusectl1;
-
-       /* Read accelerator capabilities mask */
-       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
-
-       capabilities_cy = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
-                         ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
-                         ICP_ACCEL_CAPABILITIES_CIPHER |
-                         ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
-                         ICP_ACCEL_CAPABILITIES_SHA3 |
-                         ICP_ACCEL_CAPABILITIES_SHA3_EXT |
-                         ICP_ACCEL_CAPABILITIES_HKDF |
-                         ICP_ACCEL_CAPABILITIES_ECEDMONT |
-                         ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
-                         ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
-                         ICP_ACCEL_CAPABILITIES_AES_V2;
-
-       /* A set bit in fusectl1 means the feature is OFF in this SKU */
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_HKDF;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-               capabilities_cy &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
-       }
-
-       capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
-                         ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
-                         ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
-                         ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
-
-       if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
-               capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
-       }
-
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return capabilities_cy;
-       case SVC_DC:
-               return capabilities_dc;
-       }
-
-       return 0;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_1;
-}
-
-static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
-{
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return thrd_to_arb_map_cy;
-       case SVC_DC:
-               return thrd_to_arb_map_dc;
-       }
-
-       return NULL;
-}
-
-static void get_arb_info(struct arb_info *arb_info)
-{
-       arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
-       arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
-       arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
-}
-
-static void get_admin_info(struct admin_info *admin_csrs_info)
-{
-       admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
-       admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
-       admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
-       struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
-       void __iomem *csr = misc_bar->virt_addr;
-
-       /* Enable all in errsou3 except VFLR notification on host */
-       ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
-}
-
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *addr;
-
-       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
-       /* Enable bundle interrupts */
-       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
-       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
-
-       /* Enable misc interrupts */
-       ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
-}
-
-static int adf_init_device(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *addr;
-       u32 status;
-       u32 csr;
-       int ret;
-
-       addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
-       /* Temporarily mask PM interrupt */
-       csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
-       csr |= ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
-
-       /* Set DRV_ACTIVE bit to power up the device */
-       ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
-
-       /* Poll status register to make sure the device is powered up */
-       ret = read_poll_timeout(ADF_CSR_RD, status,
-                               status & ADF_GEN4_PM_INIT_STATE,
-                               ADF_GEN4_PM_POLL_DELAY_US,
-                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
-                               ADF_GEN4_PM_STATUS);
-       if (ret)
-               dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
-
-       return ret;
-}
-
-static u32 uof_get_num_objs(void)
-{
-       BUILD_BUG_ON_MSG(ARRAY_SIZE(adf_4xxx_fw_cy_config) !=
-                        ARRAY_SIZE(adf_4xxx_fw_dc_config),
-                        "Size mismatch between adf_4xxx_fw_*_config arrays");
-
-       return ARRAY_SIZE(adf_4xxx_fw_cy_config);
-}
-
-static char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
-{
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return adf_4xxx_fw_cy_config[obj_num].obj_name;
-       case SVC_DC:
-               return adf_4xxx_fw_dc_config[obj_num].obj_name;
-       }
-
-       return NULL;
-}
-
-static char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
-{
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return adf_402xx_fw_cy_config[obj_num].obj_name;
-       case SVC_DC:
-               return adf_402xx_fw_dc_config[obj_num].obj_name;
-       }
-
-       return NULL;
-}
-
-static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
-{
-       switch (get_service_enabled(accel_dev)) {
-       case SVC_CY:
-               return adf_4xxx_fw_cy_config[obj_num].ae_mask;
-       case SVC_DC:
-               return adf_4xxx_fw_dc_config[obj_num].ae_mask;
-       }
-
-       return 0;
-}
-
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
-{
-       hw_data->dev_class = &adf_4xxx_class;
-       hw_data->instance_id = adf_4xxx_class.instances++;
-       hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
-       hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
-       hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
-       hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
-       hw_data->num_logical_accel = 1;
-       hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_arb_info = get_arb_info;
-       hw_data->get_admin_info = get_admin_info;
-       hw_data->get_accel_cap = get_accel_cap;
-       hw_data->get_sku = get_sku;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_enable_ints;
-       hw_data->init_device = adf_init_device;
-       hw_data->reset_device = adf_reset_flr;
-       hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
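-       /*
-        * 402xx parts load their own firmware images; all other device IDs
-        * share the common 4xxx set.
-        */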
-       switch (dev_id) {
-       case ADF_402XX_PCI_DEVICE_ID:
-               hw_data->fw_name = ADF_402XX_FW;
-               hw_data->fw_mmp_name = ADF_402XX_MMP;
-               hw_data->uof_get_name = uof_get_name_402xx;
-               break;
-
-       default:
-               hw_data->fw_name = ADF_4XXX_FW;
-               hw_data->fw_mmp_name = ADF_4XXX_MMP;
-               hw_data->uof_get_name = uof_get_name_4xxx;
-       }
-       hw_data->uof_get_num_objs = uof_get_num_objs;
-       hw_data->uof_get_ae_mask = uof_get_ae_mask;
-       hw_data->set_msix_rttable = set_msix_default_rttable;
-       hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
-       hw_data->enable_pm = adf_gen4_enable_pm;
-       hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
-       hw_data->dev_config = adf_gen4_dev_config;
-
-       adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen4_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
deleted file mode 100644 (file)
index 085e259..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_4XXX_HW_DATA_H_
-#define ADF_4XXX_HW_DATA_H_
-
-#include <adf_accel_devices.h>
-
-/* PCIe configuration space */
-#define ADF_4XXX_SRAM_BAR              0
-#define ADF_4XXX_PMISC_BAR             1
-#define ADF_4XXX_ETR_BAR               2
-#define ADF_4XXX_RX_RINGS_OFFSET       1
-#define ADF_4XXX_TX_RINGS_MASK         0x1
-#define ADF_4XXX_MAX_ACCELERATORS      1
-#define ADF_4XXX_MAX_ACCELENGINES      9
-#define ADF_4XXX_BAR_MASK              (BIT(0) | BIT(2) | BIT(4))
-
-/* Physical function fuses */
-#define ADF_4XXX_FUSECTL0_OFFSET       (0x2C8)
-#define ADF_4XXX_FUSECTL1_OFFSET       (0x2CC)
-#define ADF_4XXX_FUSECTL2_OFFSET       (0x2D0)
-#define ADF_4XXX_FUSECTL3_OFFSET       (0x2D4)
-#define ADF_4XXX_FUSECTL4_OFFSET       (0x2D8)
-#define ADF_4XXX_FUSECTL5_OFFSET       (0x2DC)
-
-#define ADF_4XXX_ACCELERATORS_MASK     (0x1)
-#define ADF_4XXX_ACCELENGINES_MASK     (0x1FF)
-#define ADF_4XXX_ADMIN_AE_MASK         (0x100)
-
-#define ADF_4XXX_ETR_MAX_BANKS         64
-
-/* MSIX interrupt */
-#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET      (0x41A040)
-#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET      (0x41A044)
-#define ADF_4XXX_SMIAPF_MASK_OFFSET            (0x41A084)
-#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i)                (0x409000 + ((i) * 0x04))
-
-/* Bank and ring configuration */
-#define ADF_4XXX_NUM_RINGS_PER_BANK    2
-#define ADF_4XXX_NUM_BANKS_PER_VF      4
-
-/* Arbiter configuration */
-#define ADF_4XXX_ARB_CONFIG                    (BIT(31) | BIT(6) | BIT(0))
-#define ADF_4XXX_ARB_OFFSET                    (0x0)
-#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET      (0x400)
-
-/* Admin Interface Reg Offset */
-#define ADF_4XXX_ADMINMSGUR_OFFSET     (0x500574)
-#define ADF_4XXX_ADMINMSGLR_OFFSET     (0x500578)
-#define ADF_4XXX_MAILBOX_BASE_OFFSET   (0x600970)
-
-/* Firmware Binaries */
-#define ADF_4XXX_FW            "qat_4xxx.bin"
-#define ADF_4XXX_MMP           "qat_4xxx_mmp.bin"
-#define ADF_4XXX_SYM_OBJ       "qat_4xxx_sym.bin"
-#define ADF_4XXX_DC_OBJ                "qat_4xxx_dc.bin"
-#define ADF_4XXX_ASYM_OBJ      "qat_4xxx_asym.bin"
-#define ADF_4XXX_ADMIN_OBJ     "qat_4xxx_admin.bin"
-/* Firmware for 402XXX */
-#define ADF_402XX_FW           "qat_402xx.bin"
-#define ADF_402XX_MMP          "qat_402xx_mmp.bin"
-#define ADF_402XX_SYM_OBJ      "qat_402xx_sym.bin"
-#define ADF_402XX_DC_OBJ       "qat_402xx_dc.bin"
-#define ADF_402XX_ASYM_OBJ     "qat_402xx_asym.bin"
-#define ADF_402XX_ADMIN_OBJ    "qat_402xx_admin.bin"
-
-/* qat_4xxx fuse bits are different from old GENs, redefine them */
-enum icp_qat_4xxx_slice_mask {
-       ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
-       ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
-       ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
-       ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
-       ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
-       ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
-       ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
-};
-
-void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
-void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
-
-#endif
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c
deleted file mode 100644 (file)
index ceb8732..0000000
+++ /dev/null
@@ -1,459 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <adf_accel_devices.h>
-#include <adf_cfg.h>
-#include <adf_common_drv.h>
-
-#include "adf_4xxx_hw_data.h"
-#include "qat_compression.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
-       { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
-       { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-enum configs {
-       DEV_CFG_CY = 0,
-       DEV_CFG_DC,
-};
-
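-/*
- * Keep this table in the same order as enum configs;
- * sysfs_match_string() returns the matching index.
- */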
-static const char * const services_operations[] = {
-       ADF_CFG_CY,
-       ADF_CFG_DC,
-};
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       if (accel_dev->hw_device) {
-               adf_clean_hw_data_4xxx(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
-       const char *config;
-       int ret;
-
-       /*
-        * Default configuration is crypto only for even devices
-        * and compression only for odd devices.
-        */
-       config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
-
-       ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
-       if (ret)
-               return ret;
-
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
-                                         ADF_SERVICES_ENABLED, config,
-                                         ADF_STR);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long bank, val;
-       int instances;
-       int ret;
-       int i;
-
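-       /*
-        * Each crypto instance claims a pair of banks: asym rings on the even
-        * bank and sym rings on the odd one, hence banks / 2 instances.
-        */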
-       if (adf_hw_dev_has_crypto(accel_dev))
-               instances = min(cpus, banks / 2);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               bank = i * 2;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &bank, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               bank += 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &bank, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
-                        i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-               val = 128;
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = ADF_COALESCING_DEF_TIME;
-               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       val = 0;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
-       return ret;
-}
-
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long val;
-       int instances;
-       int ret;
-       int i;
-
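-       /* Each compression instance needs only a single ring bank */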
-       if (adf_hw_dev_has_compression(accel_dev))
-               instances = min(cpus, banks);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 1;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = ADF_COALESCING_DEF_TIME;
-               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       val = 0;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
-       return ret;
-}
-
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       int ret;
-
-       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
-       if (ret)
-               goto err;
-
-       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
-       if (ret)
-               goto err;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-       if (ret)
-               goto err;
-
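-       /* Map the configured services string onto a DEV_CFG_* index */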
-       ret = sysfs_match_string(services_operations, services);
-       if (ret < 0)
-               goto err;
-
-       switch (ret) {
-       case DEV_CFG_CY:
-               ret = adf_crypto_dev_config(accel_dev);
-               break;
-       case DEV_CFG_DC:
-               ret = adf_comp_dev_config(accel_dev);
-               break;
-       }
-
-       if (ret)
-               goto err;
-
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
-       return ret;
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       struct adf_bar *bar;
-       int ret;
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /*
-                * If the accelerator is connected to a node with no memory
-                * there is no point in using the accelerator since the remote
-                * memory transaction will be very slow.
-                */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /*
-        * Add accel device to accel table
-        * This should be called before adf_cleanup_accel is called
-        */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and initialise device hardware meta-data structure */
-       hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
-
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /*
-        * If the device has no acceleration engines, or engine 0 is
-        * disabled, then ignore it.
-        */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           (~hw_data->ae_mask & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found.\n");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* Enable PCI device */
-       ret = pcim_enable_device(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Can't enable PCI device.\n");
-               goto out_err;
-       }
-
-       /* Set DMA identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration.\n");
-               goto out_err;
-       }
-
-       ret = adf_cfg_dev_init(accel_dev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to initialize configuration.\n");
-               goto out_err;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-       if (!hw_data->accel_capabilities_mask) {
-               dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
-               ret = -EINVAL;
-               goto out_err;
-       }
-
-       /* Find and map all the device's BARS */
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
-
-       ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to map pci regions.\n");
-               goto out_err;
-       }
-
-       i = 0;
-       for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
-               bar = &accel_pci_dev->pci_bars[i++];
-               bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
-       }
-
-       pci_set_master(pdev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state.\n");
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       ret = adf_dev_up(accel_dev, true);
-       if (ret)
-               goto out_err_dev_stop;
-
-       ret = adf_sysfs_init(accel_dev);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-}
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_4XXX_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-module_pci_driver(adf_driver);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_4XXX_FW);
-MODULE_FIRMWARE(ADF_4XXX_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile
deleted file mode 100644 (file)
index 92ef416..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
-qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
deleted file mode 100644 (file)
index 4756436..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include "adf_c3xxx_hw_data.h"
-#include "icp_qat_hw.h"
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
-       0x12222AAA, 0x11222AAA, 0x12222AAA,
-       0x11222AAA, 0x12222AAA, 0x11222AAA
-};
-
-static struct adf_hw_device_class c3xxx_class = {
-       .name = ADF_C3XXX_DEVICE_NAME,
-       .type = DEV_C3XXX,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       u32 accel;
-
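-       /* Fuse/strap bits mark disabled units; invert for the enabled set */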
-       accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET;
-       accel &= ADF_C3XXX_ACCELERATORS_MASK;
-
-       return accel;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       unsigned long disabled;
-       u32 ae_disable;
-       int accel;
-
-       /* If an accel is disabled, then disable the corresponding two AEs */
-       disabled = ~get_accel_mask(self) & ADF_C3XXX_ACCELERATORS_MASK;
-       ae_disable = BIT(1) | BIT(0);
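-       /* Two AEs per accelerator, so shift the disable pair by 2 * accel */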
-       for_each_set_bit(accel, &disabled, ADF_C3XXX_MAX_ACCELERATORS)
-               straps |= ae_disable << (accel << 1);
-
-       return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXX_SRAM_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       int aes = self->get_num_aes(self);
-
-       if (aes == 6)
-               return DEV_SKU_4;
-
-       return DEV_SKU_UNKNOWN;
-}
-
-static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
-{
-       return thrd_to_arb_map;
-}
-
-static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
-{
-       adf_gen2_cfg_iov_thds(accel_dev, enable,
-                             ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
-                             ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
-}
-
-void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c3xxx_class;
-       hw_data->instance_id = c3xxx_class.instances++;
-       hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
-       hw_data->get_num_accels = adf_gen2_get_num_accels;
-       hw_data->get_num_aes = adf_gen2_get_num_aes;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_admin_info = adf_gen2_get_admin_info;
-       hw_data->get_arb_info = adf_gen2_get_arb_info;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_C3XXX_FW;
-       hw_data->fw_mmp_name = ADF_C3XXX_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_gen2_enable_ints;
-       hw_data->reset_device = adf_reset_flr;
-       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->dev_config = adf_gen2_dev_config;
-
-       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
deleted file mode 100644 (file)
index 336a06f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_C3XXX_HW_DATA_H_
-#define ADF_C3XXX_HW_DATA_H_
-
-/* PCIe configuration space */
-#define ADF_C3XXX_PMISC_BAR 0
-#define ADF_C3XXX_ETR_BAR 1
-#define ADF_C3XXX_SRAM_BAR 0
-#define ADF_C3XXX_MAX_ACCELERATORS 3
-#define ADF_C3XXX_MAX_ACCELENGINES 6
-#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
-#define ADF_C3XXX_ACCELERATORS_MASK 0x7
-#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
-#define ADF_C3XXX_ETR_MAX_BANKS 16
-#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
-
-/* AE to function mapping */
-#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
-#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
-
-/* Firmware Binary */
-#define ADF_C3XXX_FW "qat_c3xxx.bin"
-#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
-
-void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data);
-#endif
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
deleted file mode 100644 (file)
index bb4dca7..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c3xxx_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C3XXX_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-                       adf_clean_hw_data_c3xxx(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /*
-                * If the accelerator is connected to a node with no memory
-                * there is no point in using the accelerator since the remote
-                * memory transaction will be very slow.
-                */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /*
-        * Add accel device to accel table.
-        * This should be called before adf_cleanup_accel is called.
-        */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c3xxx(accel_dev->hw_device);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
-                             &hw_data->fuses);
-       pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET,
-                             &hw_data->straps);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it. */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           ((~hw_data->ae_mask) & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-
-       /* Find and map all the device's BARS */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
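-       /* Each 64-bit BAR occupies two register slots, hence the doubled bound */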
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state\n");
-               ret = -ENOMEM;
-               goto out_err_free_reg;
-       }
-
-       ret = adf_dev_up(accel_dev, true);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_C3XXX_FW);
-MODULE_FIRMWARE(ADF_C3XXX_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile
deleted file mode 100644 (file)
index b6d7682..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
-qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
deleted file mode 100644 (file)
index 84d9486..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include <adf_pfvf_vf_msg.h>
-#include "adf_c3xxxvf_hw_data.h"
-
-static struct adf_hw_device_class c3xxxiov_class = {
-       .name = ADF_C3XXXVF_DEVICE_NAME,
-       .type = DEV_C3XXXVF,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_MAX_ACCELENGINES;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C3XXXIOV_ETR_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_VF;
-}
-
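-/*
- * No-op stubs: on a VF these duties are either unnecessary or are
- * handled by the PF, so there is nothing to do here.
- */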
-static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
-{
-}
-
-void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c3xxxiov_class;
-       hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
-       hw_data->free_irq = adf_vf_isr_resource_free;
-       hw_data->enable_error_correction = adf_vf_void_noop;
-       hw_data->init_admin_comms = adf_vf_int_noop;
-       hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_notify_init;
-       hw_data->init_arb = adf_vf_int_noop;
-       hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->dev_class->instances++;
-       hw_data->dev_config = adf_gen2_dev_config;
-       adf_devmgr_update_class_index(hw_data);
-       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-       adf_devmgr_update_class_index(hw_data);
-}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
deleted file mode 100644 (file)
index 6b4bf18..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_C3XXXVF_HW_DATA_H_
-#define ADF_C3XXXVF_HW_DATA_H_
-
-#define ADF_C3XXXIOV_PMISC_BAR 1
-#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1
-#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1
-#define ADF_C3XXXIOV_MAX_ACCELERATORS 1
-#define ADF_C3XXXIOV_MAX_ACCELENGINES 1
-#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8
-#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF
-#define ADF_C3XXXIOV_ETR_BAR 0
-#define ADF_C3XXXIOV_ETR_MAX_BANKS 1
-
-void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data);
-#endif
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
deleted file mode 100644 (file)
index e8cc10f..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c3xxxvf_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C3XXXVF_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       struct adf_accel_dev *pf;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
-                       adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
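-       /* Find the parent PF so the VF can be unlinked from the device table */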
-       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
-       adf_devmgr_rm_dev(accel_dev, pf);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_dev *pf;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       accel_dev->is_vf = true;
-       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table */
-       if (adf_devmgr_add_dev(accel_dev, pf)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c3xxxiov(accel_dev->hw_device);
-
-       /* Get accelerator and accelerator engine masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Find and map all the device's BARS */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-       /* Completion for VF2PF request/response message exchange */
-       init_completion(&accel_dev->vf.msg_received);
-
-       ret = adf_dev_up(accel_dev, false);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_flush_vf_wq(accel_dev);
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-       adf_clean_vf_map(true);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile
deleted file mode 100644 (file)
index d581f7c..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
-qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
deleted file mode 100644 (file)
index e142707..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include "adf_c62x_hw_data.h"
-#include "icp_qat_hw.h"
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
-       0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
-       0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
-};
-
-static struct adf_hw_device_class c62x_class = {
-       .name = ADF_C62X_DEVICE_NAME,
-       .type = DEV_C62X,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       u32 accel;
-
-       accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET;
-       accel &= ADF_C62X_ACCELERATORS_MASK;
-
-       return accel;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 straps = self->straps;
-       u32 fuses = self->fuses;
-       unsigned long disabled;
-       u32 ae_disable;
-       int accel;
-
-       /* If an accel is disabled, then disable the corresponding two AEs */
-       disabled = ~get_accel_mask(self) & ADF_C62X_ACCELERATORS_MASK;
-       ae_disable = BIT(1) | BIT(0);
-       for_each_set_bit(accel, &disabled, ADF_C62X_MAX_ACCELERATORS)
-               straps |= ae_disable << (accel << 1);
-
-       return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62X_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62X_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62X_SRAM_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       int aes = self->get_num_aes(self);
-
-       if (aes == 8)
-               return DEV_SKU_2;
-       else if (aes == 10)
-               return DEV_SKU_4;
-
-       return DEV_SKU_UNKNOWN;
-}
-
-static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
-{
-       return thrd_to_arb_map;
-}
-
-static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
-{
-       adf_gen2_cfg_iov_thds(accel_dev, enable,
-                             ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
-                             ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
-}
-
-void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c62x_class;
-       hw_data->instance_id = c62x_class.instances++;
-       hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_accel_cap = adf_gen2_get_accel_cap;
-       hw_data->get_num_accels = adf_gen2_get_num_accels;
-       hw_data->get_num_aes = adf_gen2_get_num_aes;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_admin_info = adf_gen2_get_admin_info;
-       hw_data->get_arb_info = adf_gen2_get_arb_info;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_C62X_FW;
-       hw_data->fw_mmp_name = ADF_C62X_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_gen2_enable_ints;
-       hw_data->reset_device = adf_reset_flr;
-       hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->dev_config = adf_gen2_dev_config;
-
-       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
deleted file mode 100644 (file)
index 008c0a3..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_C62X_HW_DATA_H_
-#define ADF_C62X_HW_DATA_H_
-
-/* PCIe configuration space */
-#define ADF_C62X_SRAM_BAR 0
-#define ADF_C62X_PMISC_BAR 1
-#define ADF_C62X_ETR_BAR 2
-#define ADF_C62X_MAX_ACCELERATORS 5
-#define ADF_C62X_MAX_ACCELENGINES 10
-#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
-#define ADF_C62X_ACCELERATORS_MASK 0x1F
-#define ADF_C62X_ACCELENGINES_MASK 0x3FF
-#define ADF_C62X_ETR_MAX_BANKS 16
-#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
-
-/* AE to function mapping */
-#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
-#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
-
-/* Firmware Binary */
-#define ADF_C62X_FW "qat_c62x.bin"
-#define ADF_C62X_MMP "qat_c62x_mmp.bin"
-
-void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data);
-#endif
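
The SKU reported by get_sku() above is derived purely from the count of enabled engines. A sketch of that mapping, counting bits in an ae_mask (uses the GCC/Clang __builtin_popcount extension; the mask values are illustrative):

#include <stdio.h>

/* Illustrative mirror of the C62x SKU table: 8 AEs -> SKU2, 10 AEs -> SKU4. */
static const char *c62x_sku(unsigned int ae_mask)
{
	int aes = __builtin_popcount(ae_mask);

	if (aes == 8)
		return "SKU2";
	if (aes == 10)
		return "SKU4";
	return "unknown SKU";
}

int main(void)
{
	printf("0x0ff -> %s\n", c62x_sku(0x0ff));	/* 8 engines */
	printf("0x3ff -> %s\n", c62x_sku(0x3ff));	/* 10 engines */
	return 0;
}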
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
deleted file mode 100644 (file)
index ca18ae1..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c62x_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C62X_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C62X:
-                       adf_clean_hw_data_c62x(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C62X:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /* If the accelerator is connected to a node with no memory,
-                * there is no point in using the accelerator since the remote
-                * memory transactions will be very slow. */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table.
-        * This should be called before adf_cleanup_accel() is called. */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c62x(accel_dev->hw_device);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
-                             &hw_data->fuses);
-       pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET,
-                             &hw_data->straps);
-
-       /* Get Accelerators and Accelerators Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it. */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           ((~hw_data->ae_mask) & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found\n");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-
-       /* Find and map all the device's BARS */
-       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state\n");
-               ret = -ENOMEM;
-               goto out_err_free_reg;
-       }
-
-       ret = adf_dev_up(accel_dev, true);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_C62X_FW);
-MODULE_FIRMWARE(ADF_C62X_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
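
One subtle point in the adf_probe() above is the starting index of the BAR walk: when the FUSECTL bit reports the SRAM BAR as unavailable, mapping begins at slot 1 so the remaining BARs land in their expected positions. A standalone sketch of that indexing, with invented values:

#include <stdio.h>
#include <stdint.h>

#define FUSECTL_MASK	0x80000000u
#define PCI_NUM_BARS	6

int main(void)
{
	uint32_t fuses = FUSECTL_MASK;	/* invented: SRAM BAR fused out */
	unsigned long bar_mask = 0x15;	/* invented: BARs 0, 2, 4 are MMIO */
	int i = (fuses & FUSECTL_MASK) ? 1 : 0;
	int bar_nr;

	for (bar_nr = 0; bar_nr < PCI_NUM_BARS; bar_nr++) {
		if (!(bar_mask & (1UL << bar_nr)))
			continue;
		printf("PCI BAR %d -> driver slot %d\n", bar_nr, i++);
	}
	return 0;
}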
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile
deleted file mode 100644 (file)
index 446c3d6..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
-qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
deleted file mode 100644 (file)
index 751d7aa..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include <adf_pfvf_vf_msg.h>
-#include "adf_c62xvf_hw_data.h"
-
-static struct adf_hw_device_class c62xiov_class = {
-       .name = ADF_C62XVF_DEVICE_NAME,
-       .type = DEV_C62XVF,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_MAX_ACCELENGINES;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_C62XIOV_ETR_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_VF;
-}
-
-static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
-{
-}
-
-void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &c62xiov_class;
-       hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
-       hw_data->free_irq = adf_vf_isr_resource_free;
-       hw_data->enable_error_correction = adf_vf_void_noop;
-       hw_data->init_admin_comms = adf_vf_int_noop;
-       hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_notify_init;
-       hw_data->init_arb = adf_vf_int_noop;
-       hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->dev_class->instances++;
-       hw_data->dev_config = adf_gen2_dev_config;
-       adf_devmgr_update_class_index(hw_data);
-       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-       adf_devmgr_update_class_index(hw_data);
-}
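
The VF variant above fills PF-only hooks (arbiter, admin comms, error correction) with adf_vf_int_noop()/adf_vf_void_noop(), so the shared init path can invoke every callback unconditionally. A minimal sketch of that pattern, with invented names:

#include <stdio.h>

struct dev_ops {
	int (*init_arb)(void);
	void (*exit_arb)(void);
};

static int noop_int(void)
{
	return 0;
}

static void noop_void(void)
{
}

int main(void)
{
	struct dev_ops vf_ops = {
		.init_arb = noop_int,
		.exit_arb = noop_void,
	};

	if (vf_ops.init_arb())	/* always succeeds on a VF */
		printf("arbiter init failed\n");
	vf_ops.exit_arb();	/* safe to call unconditionally */
	return 0;
}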
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
deleted file mode 100644 (file)
index a1a62c0..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_C62XVF_HW_DATA_H_
-#define ADF_C62XVF_HW_DATA_H_
-
-#define ADF_C62XIOV_PMISC_BAR 1
-#define ADF_C62XIOV_ACCELERATORS_MASK 0x1
-#define ADF_C62XIOV_ACCELENGINES_MASK 0x1
-#define ADF_C62XIOV_MAX_ACCELERATORS 1
-#define ADF_C62XIOV_MAX_ACCELENGINES 1
-#define ADF_C62XIOV_RX_RINGS_OFFSET 8
-#define ADF_C62XIOV_TX_RINGS_MASK 0xFF
-#define ADF_C62XIOV_ETR_BAR 0
-#define ADF_C62XIOV_ETR_MAX_BANKS 1
-
-void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data);
-#endif
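
The ring constants above encode the GEN2 VF transport layout: with an RX offset of 8 and a TX mask of 0xFF, rings 0-7 carry requests and each response arrives on the ring 8 positions higher. A small sketch of the pairing:

#include <stdio.h>

#define RX_RINGS_OFFSET	8
#define TX_RINGS_MASK	0xFFu
#define RINGS_PER_BANK	16

int main(void)
{
	unsigned int ring;

	for (ring = 0; ring < RINGS_PER_BANK; ring++) {
		if (TX_RINGS_MASK & (1u << ring))
			printf("ring %2u: TX, responses on ring %u\n",
			       ring, ring + RX_RINGS_OFFSET);
		else
			printf("ring %2u: RX\n", ring);
	}
	return 0;
}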
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
deleted file mode 100644 (file)
index 3756630..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_c62xvf_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_C62XVF_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       struct adf_accel_dev *pf;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
-                       adf_clean_hw_data_c62xiov(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
-       adf_devmgr_rm_dev(accel_dev, pf);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_dev *pf;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       accel_dev->is_vf = true;
-       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table */
-       if (adf_devmgr_add_dev(accel_dev, pf)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_c62xiov(accel_dev->hw_device);
-
-       /* Get Accelerators and Accelerators Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Find and map all the device's BARS */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-       /* Completion for VF2PF request/response message exchange */
-       init_completion(&accel_dev->vf.msg_received);
-
-       ret = adf_dev_up(accel_dev, false);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_flush_vf_wq(accel_dev);
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-       adf_clean_vf_map(true);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
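
The VF adf_remove() above drains the VF-to-PF workqueue before bringing the device down, so no late message handler can touch state that the subsequent teardown frees. A sketch of that ordering, with all functions invented stand-ins:

#include <stdio.h>

static void flush_vf_wq(void)	{ printf("1: drain pending PF/VF work\n"); }
static void dev_down(void)	{ printf("2: stop rings and services\n"); }
static void cleanup_accel(void)	{ printf("3: unmap BARs, drop cfg tables\n"); }
static void cleanup_pci(void)	{ printf("4: release regions, disable device\n"); }

int main(void)
{
	/* Mirrors the remove path: quiesce, stop, then free, in that order. */
	flush_vf_wq();
	dev_down();
	cleanup_accel();
	cleanup_pci();
	return 0;
}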
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
deleted file mode 100644 (file)
index 1fb8d50..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
-intel_qat-objs := adf_cfg.o \
-       adf_isr.o \
-       adf_ctl_drv.o \
-       adf_dev_mgr.o \
-       adf_init.o \
-       adf_accel_engine.o \
-       adf_aer.o \
-       adf_transport.o \
-       adf_admin.o \
-       adf_hw_arbiter.o \
-       adf_sysfs.o \
-       adf_gen2_hw_data.o \
-       adf_gen2_config.o \
-       adf_gen4_hw_data.o \
-       adf_gen4_pm.o \
-       adf_gen2_dc.o \
-       adf_gen4_dc.o \
-       qat_crypto.o \
-       qat_compression.o \
-       qat_comp_algs.o \
-       qat_algs.o \
-       qat_asym_algs.o \
-       qat_algs_send.o \
-       qat_uclo.o \
-       qat_hal.o \
-       qat_bl.o
-
-intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
-                              adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
-                              adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
-                              adf_gen2_pfvf.o adf_gen4_pfvf.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
deleted file mode 100644 (file)
index bd19e64..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_ACCEL_DEVICES_H_
-#define ADF_ACCEL_DEVICES_H_
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/ratelimit.h>
-#include "adf_cfg_common.h"
-#include "adf_pfvf_msg.h"
-
-#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
-#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c6xx"
-#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
-#define ADF_C3XXX_DEVICE_NAME "c3xxx"
-#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
-#define ADF_4XXX_DEVICE_NAME "4xxx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_402XX_PCI_DEVICE_ID 0x4944
-#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
-#define ADF_DEVICE_FUSECTL_OFFSET 0x40
-#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
-#define ADF_DEVICE_FUSECTL_MASK 0x80000000
-#define ADF_PCI_MAX_BARS 3
-#define ADF_DEVICE_NAME_LENGTH 32
-#define ADF_ETR_MAX_RINGS_PER_BANK 16
-#define ADF_MAX_MSIX_VECTOR_NAME 16
-#define ADF_DEVICE_NAME_PREFIX "qat_"
-
-enum adf_accel_capabilities {
-       ADF_ACCEL_CAPABILITIES_NULL = 0,
-       ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
-       ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
-       ADF_ACCEL_CAPABILITIES_CIPHER = 4,
-       ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
-       ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
-       ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
-       ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
-};
-
-struct adf_bar {
-       resource_size_t base_addr;
-       void __iomem *virt_addr;
-       resource_size_t size;
-};
-
-struct adf_irq {
-       bool enabled;
-       char name[ADF_MAX_MSIX_VECTOR_NAME];
-};
-
-struct adf_accel_msix {
-       struct adf_irq *irqs;
-       u32 num_entries;
-};
-
-struct adf_accel_pci {
-       struct pci_dev *pci_dev;
-       struct adf_accel_msix msix_entries;
-       struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
-       u8 revid;
-       u8 sku;
-};
-
-enum dev_state {
-       DEV_DOWN = 0,
-       DEV_UP
-};
-
-enum dev_sku_info {
-       DEV_SKU_1 = 0,
-       DEV_SKU_2,
-       DEV_SKU_3,
-       DEV_SKU_4,
-       DEV_SKU_VF,
-       DEV_SKU_UNKNOWN,
-};
-
-static inline const char *get_sku_info(enum dev_sku_info info)
-{
-       switch (info) {
-       case DEV_SKU_1:
-               return "SKU1";
-       case DEV_SKU_2:
-               return "SKU2";
-       case DEV_SKU_3:
-               return "SKU3";
-       case DEV_SKU_4:
-               return "SKU4";
-       case DEV_SKU_VF:
-               return "SKUVF";
-       case DEV_SKU_UNKNOWN:
-       default:
-               break;
-       }
-       return "Unknown SKU";
-}
-
-struct adf_hw_device_class {
-       const char *name;
-       const enum adf_device_type type;
-       u32 instances;
-};
-
-struct arb_info {
-       u32 arb_cfg;
-       u32 arb_offset;
-       u32 wt2sam_offset;
-};
-
-struct admin_info {
-       u32 admin_msg_ur;
-       u32 admin_msg_lr;
-       u32 mailbox_offset;
-};
-
-struct adf_hw_csr_ops {
-       u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
-       u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
-                                 u32 ring);
-       void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
-                                   u32 ring, u32 value);
-       u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
-                                 u32 ring);
-       void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
-                                   u32 ring, u32 value);
-       u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
-       void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
-                                     u32 ring, u32 value);
-       void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
-                                   u32 ring, dma_addr_t addr);
-       void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
-                                  u32 value);
-       void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
-       void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
-                                    u32 value);
-       void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
-                                     u32 value);
-       void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
-                                          u32 bank, u32 value);
-       void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
-                                         u32 value);
-};
-
-struct adf_cfg_device_data;
-struct adf_accel_dev;
-struct adf_etr_data;
-struct adf_etr_ring_data;
-
-struct adf_pfvf_ops {
-       int (*enable_comms)(struct adf_accel_dev *accel_dev);
-       u32 (*get_pf2vf_offset)(u32 i);
-       u32 (*get_vf2pf_offset)(u32 i);
-       void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
-       void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
-       u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
-       int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                       u32 pfvf_offset, struct mutex *csr_lock);
-       struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
-                                       u32 pfvf_offset, u8 compat_ver);
-};
-
-struct adf_dc_ops {
-       void (*build_deflate_ctx)(void *ctx);
-};
-
-struct adf_hw_device_data {
-       struct adf_hw_device_class *dev_class;
-       u32 (*get_accel_mask)(struct adf_hw_device_data *self);
-       u32 (*get_ae_mask)(struct adf_hw_device_data *self);
-       u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
-       u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
-       u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
-       u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
-       u32 (*get_num_aes)(struct adf_hw_device_data *self);
-       u32 (*get_num_accels)(struct adf_hw_device_data *self);
-       void (*get_arb_info)(struct arb_info *arb_csrs_info);
-       void (*get_admin_info)(struct admin_info *admin_csrs_info);
-       enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
-       int (*alloc_irq)(struct adf_accel_dev *accel_dev);
-       void (*free_irq)(struct adf_accel_dev *accel_dev);
-       void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
-       int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
-       void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
-       int (*send_admin_init)(struct adf_accel_dev *accel_dev);
-       int (*init_arb)(struct adf_accel_dev *accel_dev);
-       void (*exit_arb)(struct adf_accel_dev *accel_dev);
-       const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
-       int (*init_device)(struct adf_accel_dev *accel_dev);
-       int (*enable_pm)(struct adf_accel_dev *accel_dev);
-       bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
-       void (*disable_iov)(struct adf_accel_dev *accel_dev);
-       void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
-                                     bool enable);
-       void (*enable_ints)(struct adf_accel_dev *accel_dev);
-       void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
-       int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
-       void (*reset_device)(struct adf_accel_dev *accel_dev);
-       void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
-       char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
-       u32 (*uof_get_num_objs)(void);
-       u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
-       int (*dev_config)(struct adf_accel_dev *accel_dev);
-       struct adf_pfvf_ops pfvf_ops;
-       struct adf_hw_csr_ops csr_ops;
-       struct adf_dc_ops dc_ops;
-       const char *fw_name;
-       const char *fw_mmp_name;
-       u32 fuses;
-       u32 straps;
-       u32 accel_capabilities_mask;
-       u32 extended_dc_capabilities;
-       u32 clock_frequency;
-       u32 instance_id;
-       u16 accel_mask;
-       u32 ae_mask;
-       u32 admin_ae_mask;
-       u16 tx_rings_mask;
-       u16 ring_to_svc_map;
-       u8 tx_rx_gap;
-       u8 num_banks;
-       u16 num_banks_per_vf;
-       u8 num_rings_per_bank;
-       u8 num_accel;
-       u8 num_logical_accel;
-       u8 num_engines;
-};
-
-/* CSR write macro */
-#define ADF_CSR_WR(csr_base, csr_offset, val) \
-       __raw_writel(val, csr_base + csr_offset)
-
-/* CSR read macro */
-#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
-
-#define ADF_CFG_NUM_SERVICES   4
-#define ADF_SRV_TYPE_BIT_LEN   3
-#define ADF_SRV_TYPE_MASK      0x7
-
-#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
-#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
-#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
-#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
-#define GET_NUM_RINGS_PER_BANK(accel_dev) \
-       GET_HW_DATA(accel_dev)->num_rings_per_bank
-#define GET_SRV_TYPE(accel_dev, idx) \
-       (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
-       & ADF_SRV_TYPE_MASK)
-#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
-#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
-#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
-#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
-#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
-
-struct adf_admin_comms;
-struct icp_qat_fw_loader_handle;
-struct adf_fw_loader_data {
-       struct icp_qat_fw_loader_handle *fw_loader;
-       const struct firmware *uof_fw;
-       const struct firmware *mmp_fw;
-};
-
-struct adf_accel_vf_info {
-       struct adf_accel_dev *accel_dev;
-       struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
-       struct ratelimit_state vf2pf_ratelimit;
-       u32 vf_nr;
-       bool init;
-       u8 vf_compat_ver;
-};
-
-struct adf_dc_data {
-       u8 *ovf_buff;
-       size_t ovf_buff_sz;
-       dma_addr_t ovf_buff_p;
-};
-
-struct adf_accel_dev {
-       struct adf_etr_data *transport;
-       struct adf_hw_device_data *hw_device;
-       struct adf_cfg_device_data *cfg;
-       struct adf_fw_loader_data *fw_loader;
-       struct adf_admin_comms *admin;
-       struct adf_dc_data *dc_data;
-       struct list_head crypto_list;
-       struct list_head compression_list;
-       unsigned long status;
-       atomic_t ref_count;
-       struct dentry *debugfs_dir;
-       struct list_head list;
-       struct module *owner;
-       struct adf_accel_pci accel_pci_dev;
-       union {
-               struct {
-                       /* protects VF2PF interrupts access */
-                       spinlock_t vf2pf_ints_lock;
-                       /* vf_info is non-zero when SR-IOV is init'ed */
-                       struct adf_accel_vf_info *vf_info;
-               } pf;
-               struct {
-                       bool irq_enabled;
-                       char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
-                       struct tasklet_struct pf2vf_bh_tasklet;
-                       struct mutex vf2pf_lock; /* protect CSR access */
-                       struct completion msg_received;
-                       struct pfvf_message response; /* temp field holding pf2vf response */
-                       u8 pf_compat_ver;
-               } vf;
-       };
-       struct mutex state_lock; /* protect state of the device */
-       bool is_vf;
-       u32 accel_id;
-};
-#endif
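
GET_SRV_TYPE() above unpacks ring_to_svc_map as four 3-bit service selectors in a 16-bit field. A sketch of the decode, with an invented map value:

#include <stdio.h>
#include <stdint.h>

#define NUM_SERVICES	4
#define SRV_TYPE_BITS	3
#define SRV_TYPE_MASK	0x7

int main(void)
{
	uint16_t ring_to_svc_map = 0x18A5;	/* invented packed map */
	int idx;

	for (idx = 0; idx < NUM_SERVICES; idx++) {
		unsigned int svc = (ring_to_svc_map >>
				    (SRV_TYPE_BITS * idx)) & SRV_TYPE_MASK;

		printf("ring-pair group %d -> service type %u\n", idx, svc);
	}
	return 0;
}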
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
deleted file mode 100644 (file)
index 4ce2b66..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/firmware.h>
-#include <linux/pci.h>
-#include "adf_cfg.h"
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_uclo.h"
-
-static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
-                                u32 fw_size)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct icp_qat_fw_loader_handle *loader;
-       char *obj_name;
-       u32 num_objs;
-       u32 ae_mask;
-       int i;
-
-       loader = loader_data->fw_loader;
-       num_objs = hw_device->uof_get_num_objs();
-
-       for (i = 0; i < num_objs; i++) {
-               obj_name = hw_device->uof_get_name(accel_dev, i);
-               ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);
-               if (!obj_name || !ae_mask) {
-                       dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");
-                       goto out_err;
-               }
-
-               if (qat_uclo_set_cfg_ae_mask(loader, ae_mask)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Invalid mask for UOF image\n");
-                       goto out_err;
-               }
-               if (qat_uclo_map_obj(loader, fw_addr, fw_size, obj_name)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to map UOF firmware\n");
-                       goto out_err;
-               }
-               if (qat_uclo_wr_all_uimage(loader)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to load UOF firmware\n");
-                       goto out_err;
-               }
-               qat_uclo_del_obj(loader);
-       }
-
-       return 0;
-
-out_err:
-       adf_ae_fw_release(accel_dev);
-       return -EFAULT;
-}
-
-int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       void *fw_addr, *mmp_addr;
-       u32 fw_size, mmp_size;
-
-       if (!hw_device->fw_name)
-               return 0;
-
-       if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
-                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
-                       hw_device->fw_mmp_name);
-               return -EFAULT;
-       }
-       if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
-                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
-                       hw_device->fw_name);
-               goto out_err;
-       }
-
-       fw_size = loader_data->uof_fw->size;
-       fw_addr = (void *)loader_data->uof_fw->data;
-       mmp_size = loader_data->mmp_fw->size;
-       mmp_addr = (void *)loader_data->mmp_fw->data;
-
-       if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n");
-               goto out_err;
-       }
-
-       if (hw_device->uof_get_num_objs)
-               return adf_ae_fw_load_images(accel_dev, fw_addr, fw_size);
-
-       if (qat_uclo_map_obj(loader_data->fw_loader, fw_addr, fw_size, NULL)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to map FW\n");
-               goto out_err;
-       }
-       if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
-               goto out_err;
-       }
-       return 0;
-
-out_err:
-       adf_ae_fw_release(accel_dev);
-       return -EFAULT;
-}
-
-void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-
-       if (!hw_device->fw_name)
-               return;
-
-       qat_uclo_del_obj(loader_data->fw_loader);
-       qat_hal_deinit(loader_data->fw_loader);
-       release_firmware(loader_data->uof_fw);
-       release_firmware(loader_data->mmp_fw);
-       loader_data->uof_fw = NULL;
-       loader_data->mmp_fw = NULL;
-       loader_data->fw_loader = NULL;
-}
-
-int adf_ae_start(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 ae_ctr;
-
-       if (!hw_data->fw_name)
-               return 0;
-
-       ae_ctr = qat_hal_start(loader_data->fw_loader);
-       dev_info(&GET_DEV(accel_dev),
-                "qat_dev%d started %d acceleration engines\n",
-                accel_dev->accel_id, ae_ctr);
-       return 0;
-}
-
-int adf_ae_stop(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
-
-       if (!hw_data->fw_name)
-               return 0;
-
-       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
-               if (hw_data->ae_mask & (1 << ae)) {
-                       qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
-                       ae_ctr++;
-               }
-       }
-       dev_info(&GET_DEV(accel_dev),
-                "qat_dev%d stopped %d acceleration engines\n",
-                accel_dev->accel_id, ae_ctr);
-       return 0;
-}
-
-static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-
-       qat_hal_reset(loader_data->fw_loader);
-       if (qat_hal_clr_reset(loader_data->fw_loader))
-               return -EFAULT;
-
-       return 0;
-}
-
-int adf_ae_init(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-
-       if (!hw_device->fw_name)
-               return 0;
-
-       loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
-       if (!loader_data)
-               return -ENOMEM;
-
-       accel_dev->fw_loader = loader_data;
-       if (qat_hal_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
-               kfree(loader_data);
-               return -EFAULT;
-       }
-       if (adf_ae_reset(accel_dev, 0)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
-               qat_hal_deinit(loader_data->fw_loader);
-               kfree(loader_data);
-               return -EFAULT;
-       }
-       return 0;
-}
-
-int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
-{
-       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-
-       if (!hw_device->fw_name)
-               return 0;
-
-       qat_hal_deinit(loader_data->fw_loader);
-       kfree(accel_dev->fw_loader);
-       accel_dev->fw_loader = NULL;
-       return 0;
-}
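
adf_ae_fw_load() above is an acquire-then-commit ladder: both firmware images are requested up front, the MMP image is written before the UOF, and every failure funnels through a single release path. A stand-in sketch of that shape (the loader API here is invented, not the driver's):

#include <stdio.h>

static int get_image(const char *name)
{
	printf("request %s\n", name);
	return 0;
}

static int commit_image(const char *name)
{
	printf("commit %s\n", name);
	return 0;
}

static void release_images(void)
{
	printf("release images\n");
}

int main(void)
{
	if (get_image("mmp.bin") || get_image("uof.bin"))
		goto out_err;
	if (commit_image("mmp.bin"))	/* MMP must land before the UOF */
		goto out_err;
	if (commit_image("uof.bin"))
		goto out_err;
	return 0;

out_err:
	release_images();
	return 1;
}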
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
deleted file mode 100644 (file)
index 3b6184c..0000000
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/iopoll.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_fw_init_admin.h"
-
-#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
-#define ADF_ADMINMSG_LEN 32
-#define ADF_CONST_TABLE_SIZE 1024
-#define ADF_ADMIN_POLL_DELAY_US 20
-#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
-
-static const u8 const_tab[1024] __aligned(1024) = {
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
-0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
-0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
-0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
-0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
-0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
-0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
-0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
-0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
-0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
-0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
-0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
-0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
-0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
-0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
-0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
-0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
-0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
-0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
-0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
-0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x2B, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-struct adf_admin_comms {
-       dma_addr_t phy_addr;
-       dma_addr_t const_tbl_addr;
-       void *virt_addr;
-       void *virt_tbl_addr;
-       void __iomem *mailbox_addr;
-       struct mutex lock;      /* protects adf_admin_comms struct */
-};
-
-static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
-                                 void *in, void *out)
-{
-       int ret;
-       u32 status;
-       struct adf_admin_comms *admin = accel_dev->admin;
-       int offset = ae * ADF_ADMINMSG_LEN * 2;
-       void __iomem *mailbox = admin->mailbox_addr;
-       int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
-       struct icp_qat_fw_init_admin_req *request = in;
-
-       mutex_lock(&admin->lock);
-
-       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
-               mutex_unlock(&admin->lock);
-               return -EAGAIN;
-       }
-
-       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
-       ADF_CSR_WR(mailbox, mb_offset, 1);
-
-       ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
-                               ADF_ADMIN_POLL_DELAY_US,
-                               ADF_ADMIN_POLL_TIMEOUT_US, true,
-                               mailbox, mb_offset);
-       if (ret < 0) {
-               /* Response timeout */
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send admin msg %d to accelerator %d\n",
-                       request->cmd_id, ae);
-       } else {
-               /* Response received from the admin message; make the
-                * response data available in the "out" parameter.
-                */
-               memcpy(out, admin->virt_addr + offset +
-                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
-       }
-
-       mutex_unlock(&admin->lock);
-       return ret;
-}
-
-static int adf_send_admin(struct adf_accel_dev *accel_dev,
-                         struct icp_qat_fw_init_admin_req *req,
-                         struct icp_qat_fw_init_admin_resp *resp,
-                         const unsigned long ae_mask)
-{
-       u32 ae;
-
-       for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
-               if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
-                   resp->status)
-                       return -EFAULT;
-
-       return 0;
-}
-
-static int adf_init_ae(struct adf_accel_dev *accel_dev)
-{
-       struct icp_qat_fw_init_admin_req req;
-       struct icp_qat_fw_init_admin_resp resp;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 ae_mask = hw_device->ae_mask;
-
-       memset(&req, 0, sizeof(req));
-       memset(&resp, 0, sizeof(resp));
-       req.cmd_id = ICP_QAT_FW_INIT_AE;
-
-       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-}
-
-static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
-{
-       struct icp_qat_fw_init_admin_req req;
-       struct icp_qat_fw_init_admin_resp resp;
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
-
-       memset(&req, 0, sizeof(req));
-       memset(&resp, 0, sizeof(resp));
-       req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
-
-       req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
-       req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
-
-       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-}
-
-static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
-                                  u32 *capabilities)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct icp_qat_fw_init_admin_resp resp;
-       struct icp_qat_fw_init_admin_req req;
-       unsigned long ae_mask;
-       unsigned long ae;
-       int ret;
-
-       /* Target only service accelerator engines */
-       ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
-
-       memset(&req, 0, sizeof(req));
-       memset(&resp, 0, sizeof(resp));
-       req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
-
-       *capabilities = 0;
-       for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
-               ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
-               if (ret)
-                       return ret;
-
-               *capabilities |= resp.extended_features;
-       }
-
-       return 0;
-}
-
-/**
- * adf_send_admin_init() - Function sends init message to FW
- * @accel_dev: Pointer to acceleration device.
- *
- * Function sends admin init message to the FW
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_admin_init(struct adf_accel_dev *accel_dev)
-{
-       u32 dc_capabilities = 0;
-       int ret;
-
-       ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
-               return ret;
-       }
-       accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
-
-       ret = adf_set_fw_constants(accel_dev);
-       if (ret)
-               return ret;
-
-       return adf_init_ae(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_send_admin_init);
-
-/**
- * adf_init_admin_pm() - Function sends PM init message to FW
- * @accel_dev: Pointer to acceleration device.
- * @idle_delay: QAT HW idle time before power gating is initiated.
- *             000 - 64us
- *             001 - 128us
- *             010 - 256us
- *             011 - 512us
- *             100 - 1ms
- *             101 - 2ms
- *             110 - 4ms
- *             111 - 8ms
- *
- * Function sends to the FW the admin init message for the PM state
- * configuration.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct icp_qat_fw_init_admin_resp resp = {0};
-       struct icp_qat_fw_init_admin_req req = {0};
-       u32 ae_mask = hw_data->admin_ae_mask;
-
-       if (!accel_dev->admin) {
-               dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
-               return -EFAULT;
-       }
-
-       req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
-       req.idle_filter = idle_delay;
-
-       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
-}
-EXPORT_SYMBOL_GPL(adf_init_admin_pm);
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
-       struct adf_admin_comms *admin;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       struct admin_info admin_csrs_info;
-       u32 mailbox_offset, adminmsg_u, adminmsg_l;
-       void __iomem *mailbox;
-       u64 reg_val;
-
-       admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-                            dev_to_node(&GET_DEV(accel_dev)));
-       if (!admin)
-               return -ENOMEM;
-       admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                             &admin->phy_addr, GFP_KERNEL);
-       if (!admin->virt_addr) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
-               kfree(admin);
-               return -ENOMEM;
-       }
-
-       admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
-                                                 PAGE_SIZE,
-                                                 &admin->const_tbl_addr,
-                                                 GFP_KERNEL);
-       if (!admin->virt_tbl_addr) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_addr, admin->phy_addr);
-               kfree(admin);
-               return -ENOMEM;
-       }
-
-       memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
-       hw_data->get_admin_info(&admin_csrs_info);
-
-       mailbox_offset = admin_csrs_info.mailbox_offset;
-       mailbox = pmisc_addr + mailbox_offset;
-       adminmsg_u = admin_csrs_info.admin_msg_ur;
-       adminmsg_l = admin_csrs_info.admin_msg_lr;
-
-       reg_val = (u64)admin->phy_addr;
-       ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
-       ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
-
-       mutex_init(&admin->lock);
-       admin->mailbox_addr = mailbox;
-       accel_dev->admin = admin;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_init_admin_comms);
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
-       struct adf_admin_comms *admin = accel_dev->admin;
-
-       if (!admin)
-               return;
-
-       if (admin->virt_addr)
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_addr, admin->phy_addr);
-       if (admin->virt_tbl_addr)
-               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-                                 admin->virt_tbl_addr, admin->const_tbl_addr);
-
-       mutex_destroy(&admin->lock);
-       kfree(admin);
-       accel_dev->admin = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
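
For context while reviewing the move: device drivers do not call this admin API
directly but reach it through per-device callbacks. A minimal sketch of the
expected wiring, assuming the init_admin_comms/exit_admin_comms/send_admin_init
hooks declared in adf_accel_devices.h (the function name is illustrative):

/* Sketch only: illustrative hw_data wiring, not part of this patch. */
static void example_init_hw_data(struct adf_hw_device_data *hw_data)
{
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
}
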
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
deleted file mode 100644 (file)
index 04af32a..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-
-static struct workqueue_struct *device_reset_wq;
-
-static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
-                                          pci_channel_state_t state)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       dev_info(&pdev->dev, "Acceleration driver hardware error detected.\n");
-       if (!accel_dev) {
-               dev_err(&pdev->dev, "Can't find acceleration device\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-
-       if (state == pci_channel_io_perm_failure) {
-               dev_err(&pdev->dev, "Can't recover from device error\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-
-       return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/* reset dev data */
-struct adf_reset_dev_data {
-       int mode;
-       struct adf_accel_dev *accel_dev;
-       struct completion compl;
-       struct work_struct reset_work;
-};
-
-void adf_reset_sbr(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-       struct pci_dev *parent = pdev->bus->self;
-       u16 bridge_ctl = 0;
-
-       if (!parent)
-               parent = pdev;
-
-       if (!pci_wait_for_pending_transaction(pdev))
-               dev_info(&GET_DEV(accel_dev),
-                        "Transaction still in progress. Proceeding\n");
-
-       dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
-
-       pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
-       bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
-       msleep(100);
-       bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
-       msleep(100);
-}
-EXPORT_SYMBOL_GPL(adf_reset_sbr);
-
-void adf_reset_flr(struct adf_accel_dev *accel_dev)
-{
-       pcie_flr(accel_to_pci_dev(accel_dev));
-}
-EXPORT_SYMBOL_GPL(adf_reset_flr);
-
-void adf_dev_restore(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       if (hw_device->reset_device) {
-               dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
-                        accel_dev->accel_id);
-               hw_device->reset_device(accel_dev);
-               pci_restore_state(pdev);
-               pci_save_state(pdev);
-       }
-}
-
-static void adf_device_reset_worker(struct work_struct *work)
-{
-       struct adf_reset_dev_data *reset_data =
-                 container_of(work, struct adf_reset_dev_data, reset_work);
-       struct adf_accel_dev *accel_dev = reset_data->accel_dev;
-
-       adf_dev_restarting_notify(accel_dev);
-       if (adf_dev_restart(accel_dev)) {
-               /* The device hung and we can't restart it, so stop here */
-               dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
-               kfree(reset_data);
-               WARN(1, "QAT: device restart failed. Device is unusable\n");
-               return;
-       }
-       adf_dev_restarted_notify(accel_dev);
-       clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
-
-       /* The dev is back alive. Notify the caller if in sync mode */
-       if (reset_data->mode == ADF_DEV_RESET_SYNC)
-               complete(&reset_data->compl);
-       else
-               kfree(reset_data);
-}
-
-static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
-                                     enum adf_dev_reset_mode mode)
-{
-       struct adf_reset_dev_data *reset_data;
-
-       if (!adf_dev_started(accel_dev) ||
-           test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
-               return 0;
-
-       set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
-       reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
-       if (!reset_data)
-               return -ENOMEM;
-       reset_data->accel_dev = accel_dev;
-       init_completion(&reset_data->compl);
-       reset_data->mode = mode;
-       INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
-       queue_work(device_reset_wq, &reset_data->reset_work);
-
-       /* If in sync mode wait for the result */
-       if (mode == ADF_DEV_RESET_SYNC) {
-               int ret = 0;
-               /* Maximum device reset time is 10 seconds */
-               unsigned long wait_jiffies = msecs_to_jiffies(10000);
-               unsigned long timeout = wait_for_completion_timeout(
-                                  &reset_data->compl, wait_jiffies);
-               if (!timeout) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Reset device timeout expired\n");
-                       ret = -EFAULT;
-               }
-               kfree(reset_data);
-               return ret;
-       }
-       return 0;
-}
-
-static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Can't find acceleration device\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       }
-       if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
-               return PCI_ERS_RESULT_DISCONNECT;
-
-       return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void adf_resume(struct pci_dev *pdev)
-{
-       dev_info(&pdev->dev, "Acceleration driver reset completed\n");
-       dev_info(&pdev->dev, "Device is up and running\n");
-}
-
-const struct pci_error_handlers adf_err_handler = {
-       .error_detected = adf_error_detected,
-       .slot_reset = adf_slot_reset,
-       .resume = adf_resume,
-};
-EXPORT_SYMBOL_GPL(adf_err_handler);
-
-int adf_init_aer(void)
-{
-       device_reset_wq = alloc_workqueue("qat_device_reset_wq",
-                                         WQ_MEM_RECLAIM, 0);
-       return !device_reset_wq ? -EFAULT : 0;
-}
-
-void adf_exit_aer(void)
-{
-       if (device_reset_wq)
-               destroy_workqueue(device_reset_wq);
-       device_reset_wq = NULL;
-}
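
The reset helpers and adf_err_handler exported above are consumed by each
device driver's pci_driver. A hedged sketch of that hookup; the driver struct
and its probe/remove/ID-table symbols are illustrative:

/* Sketch: wiring the shared AER handlers into a PF driver. */
static struct pci_driver example_qat_driver = {
	.id_table = example_pci_tbl,		/* assumed PCI ID table */
	.name = "example_qat",
	.probe = example_probe,			/* assumed probe/remove */
	.remove = example_remove,
	.err_handler = &adf_err_handler,
	.sriov_configure = adf_sriov_configure,
};
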
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
deleted file mode 100644 (file)
index 1931e5b..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static DEFINE_MUTEX(qat_cfg_read_lock);
-
-static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
-{
-       struct adf_cfg_device_data *dev_cfg = sfile->private;
-
-       mutex_lock(&qat_cfg_read_lock);
-       return seq_list_start(&dev_cfg->sec_list, *pos);
-}
-
-static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
-{
-       struct list_head *list;
-       struct adf_cfg_section *sec =
-                               list_entry(v, struct adf_cfg_section, list);
-
-       seq_printf(sfile, "[%s]\n", sec->name);
-       list_for_each(list, &sec->param_head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list, struct adf_cfg_key_val, list);
-               seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
-       }
-       return 0;
-}
-
-static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
-{
-       struct adf_cfg_device_data *dev_cfg = sfile->private;
-
-       return seq_list_next(v, &dev_cfg->sec_list, pos);
-}
-
-static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
-{
-       mutex_unlock(&qat_cfg_read_lock);
-}
-
-static const struct seq_operations qat_dev_cfg_sops = {
-       .start = qat_dev_cfg_start,
-       .next = qat_dev_cfg_next,
-       .stop = qat_dev_cfg_stop,
-       .show = qat_dev_cfg_show
-};
-
-DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
-
-/**
- * adf_cfg_dev_add() - Create an acceleration device configuration table.
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function creates a configuration table for the given acceleration device.
- * The table stores device specific config values.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
-{
-       struct adf_cfg_device_data *dev_cfg_data;
-
-       dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
-       if (!dev_cfg_data)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&dev_cfg_data->sec_list);
-       init_rwsem(&dev_cfg_data->lock);
-       accel_dev->cfg = dev_cfg_data;
-
-       /* accel_dev->debugfs_dir should always be non-NULL here */
-       dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
-                                                 accel_dev->debugfs_dir,
-                                                 dev_cfg_data,
-                                                 &qat_dev_cfg_fops);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
-
-static void adf_cfg_section_del_all(struct list_head *head);
-
-void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
-{
-       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
-
-       down_write(&dev_cfg_data->lock);
-       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
-       up_write(&dev_cfg_data->lock);
-       clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-}
-
-/**
- * adf_cfg_dev_remove() - Clears acceleration device configuration table.
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function removes the configuration table from the given acceleration device
- * and frees all allocated memory.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
-{
-       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
-
-       if (!dev_cfg_data)
-               return;
-
-       down_write(&dev_cfg_data->lock);
-       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
-       up_write(&dev_cfg_data->lock);
-       debugfs_remove(dev_cfg_data->debug);
-       kfree(dev_cfg_data);
-       accel_dev->cfg = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
-
-static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
-                              struct adf_cfg_section *sec)
-{
-       list_add_tail(&new->list, &sec->param_head);
-}
-
-static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec)
-{
-       struct list_head *head = &sec->param_head;
-       struct list_head *list_ptr, *tmp;
-
-       list_for_each_prev_safe(list_ptr, tmp, head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list_ptr, struct adf_cfg_key_val, list);
-
-               if (strncmp(ptr->key, key, sizeof(ptr->key)))
-                       continue;
-
-               list_del(list_ptr);
-               kfree(ptr);
-               break;
-       }
-}
-
-static void adf_cfg_keyval_del_all(struct list_head *head)
-{
-       struct list_head *list_ptr, *tmp;
-
-       list_for_each_prev_safe(list_ptr, tmp, head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list_ptr, struct adf_cfg_key_val, list);
-               list_del(list_ptr);
-               kfree(ptr);
-       }
-}
-
-static void adf_cfg_section_del_all(struct list_head *head)
-{
-       struct adf_cfg_section *ptr;
-       struct list_head *list, *tmp;
-
-       list_for_each_prev_safe(list, tmp, head) {
-               ptr = list_entry(list, struct adf_cfg_section, list);
-               adf_cfg_keyval_del_all(&ptr->param_head);
-               list_del(list);
-               kfree(ptr);
-       }
-}
-
-static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
-                                                     const char *key)
-{
-       struct list_head *list;
-
-       list_for_each(list, &s->param_head) {
-               struct adf_cfg_key_val *ptr =
-                       list_entry(list, struct adf_cfg_key_val, list);
-               if (!strcmp(ptr->key, key))
-                       return ptr;
-       }
-       return NULL;
-}
-
-static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
-                                               const char *sec_name)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       struct list_head *list;
-
-       list_for_each(list, &cfg->sec_list) {
-               struct adf_cfg_section *ptr =
-                       list_entry(list, struct adf_cfg_section, list);
-               if (!strcmp(ptr->name, sec_name))
-                       return ptr;
-       }
-       return NULL;
-}
-
-static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
-                              const char *sec_name,
-                              const char *key_name,
-                              char *val)
-{
-       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
-       struct adf_cfg_key_val *keyval = NULL;
-
-       if (sec)
-               keyval = adf_cfg_key_value_find(sec, key_name);
-       if (keyval) {
-               memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
-               return 0;
-       }
-       return -ENODATA;
-}
-
-/**
- * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
- * @accel_dev:  Pointer to acceleration device.
- * @section_name: Name of the section where the param will be added
- * @key: The key string
- * @val: Value for the given @key
- * @type: Type - string (ADF_STR), decimal (ADF_DEC) or hex (ADF_HEX)
- *
- * Function adds a configuration key-value entry in the appropriate section
- * of the given acceleration device. If the key already exists, the value
- * is updated.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
-                               const char *section_name,
-                               const char *key, const void *val,
-                               enum adf_cfg_val_type type)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       struct adf_cfg_key_val *key_val;
-       struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
-                                                          section_name);
-       char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-
-       if (!section)
-               return -EFAULT;
-
-       key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
-       if (!key_val)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&key_val->list);
-       strscpy(key_val->key, key, sizeof(key_val->key));
-
-       if (type == ADF_DEC) {
-               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
-                        "%ld", (*((long *)val)));
-       } else if (type == ADF_STR) {
-               strscpy(key_val->val, (char *)val, sizeof(key_val->val));
-       } else if (type == ADF_HEX) {
-               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
-                        "0x%lx", (unsigned long)val);
-       } else {
-               dev_err(&GET_DEV(accel_dev), "Unknown type given.\n");
-               kfree(key_val);
-               return -EINVAL;
-       }
-       key_val->type = type;
-
-       /* Add the key-value pair according to the following policy:
-        * 1. if the key doesn't exist, add it;
-        * 2. if the key already exists with a different value then update it
-        *    to the new value (the key is deleted and the newly created
-        *    key_val containing the new value is added to the database);
-        * 3. if the key exists with the same value, then return without doing
-        *    anything (the newly created key_val is freed).
-        */
-       if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
-               if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
-                       adf_cfg_keyval_remove(key, section);
-               } else {
-                       kfree(key_val);
-                       return 0;
-               }
-       }
-
-       down_write(&cfg->lock);
-       adf_cfg_keyval_add(key_val, section);
-       up_write(&cfg->lock);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
-
-/**
- * adf_cfg_section_add() - Add config section entry to config table.
- * @accel_dev:  Pointer to acceleration device.
- * @name: Name of the section
- *
- * Function adds a configuration section where key-value entries
- * will be stored.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
-
-       if (sec)
-               return 0;
-
-       sec = kzalloc(sizeof(*sec), GFP_KERNEL);
-       if (!sec)
-               return -ENOMEM;
-
-       strscpy(sec->name, name, sizeof(sec->name));
-       INIT_LIST_HEAD(&sec->param_head);
-       down_write(&cfg->lock);
-       list_add_tail(&sec->list, &cfg->sec_list);
-       up_write(&cfg->lock);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_section_add);
-
-int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
-                           const char *section, const char *name,
-                           char *value)
-{
-       struct adf_cfg_device_data *cfg = accel_dev->cfg;
-       int ret;
-
-       down_read(&cfg->lock);
-       ret = adf_cfg_key_val_get(accel_dev, section, name, value);
-       up_read(&cfg->lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_cfg_get_param_value);
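
One calling-convention detail in adf_cfg_add_key_value_param() is easy to miss:
ADF_DEC and ADF_STR expect @val to point at the data, while ADF_HEX carries the
value in the pointer itself (the function formats "0x%lx" from the cast
pointer, not from what it points at). A sketch of the producer and consumer
sides, assuming an accel_dev whose table was set up by adf_cfg_dev_add(); the
hex key name is hypothetical:

/* Sketch of typical config-table usage; error handling abridged. */
static int example_populate_cfg(struct adf_accel_dev *accel_dev)
{
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long num_cy = 1;
	int ret;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		return ret;

	/* ADF_DEC/ADF_STR: pass a pointer to the value */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					  ADF_NUM_CY, &num_cy, ADF_DEC);
	if (ret)
		return ret;

	/* ADF_HEX: the value itself travels as the pointer */
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					  "ExampleHexKey", (void *)0x1234UL,
					  ADF_HEX);
	if (ret)
		return ret;

	/* read-back goes through the rwsem-protected getter */
	return adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
				       ADF_NUM_CY, val);
}
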
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
deleted file mode 100644 (file)
index 376cde6..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_H_
-#define ADF_CFG_H_
-
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/debugfs.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg_common.h"
-#include "adf_cfg_strings.h"
-
-struct adf_cfg_key_val {
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       enum adf_cfg_val_type type;
-       struct list_head list;
-};
-
-struct adf_cfg_section {
-       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
-       struct list_head list;
-       struct list_head param_head;
-};
-
-struct adf_cfg_device_data {
-       struct list_head sec_list;
-       struct dentry *debug;
-       struct rw_semaphore lock;
-};
-
-int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
-void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
-int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
-void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
-int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
-                               const char *section_name,
-                               const char *key, const void *val,
-                               enum adf_cfg_val_type type);
-int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
-                           const char *section, const char *name, char *value);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
deleted file mode 100644 (file)
index 6e5de1d..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_COMMON_H_
-#define ADF_CFG_COMMON_H_
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define ADF_CFG_MAX_STR_LEN 64
-#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
-#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
-#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
-#define ADF_CFG_BASE_DEC 10
-#define ADF_CFG_BASE_HEX 16
-#define ADF_CFG_ALL_DEVICES 0xFE
-#define ADF_CFG_NO_DEVICE 0xFF
-#define ADF_CFG_AFFINITY_WHATEVER 0xFF
-#define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES (32 * 32)
-#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
-
-#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
-#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
-#define ADF_CFG_SERV_RING_PAIR_2_SHIFT 6
-#define ADF_CFG_SERV_RING_PAIR_3_SHIFT 9
-enum adf_cfg_service_type {
-       UNUSED = 0,
-       CRYPTO,
-       COMP,
-       SYM,
-       ASYM,
-       USED
-};
-
-enum adf_cfg_val_type {
-       ADF_DEC,
-       ADF_HEX,
-       ADF_STR
-};
-
-enum adf_device_type {
-       DEV_UNKNOWN = 0,
-       DEV_DH895XCC,
-       DEV_DH895XCCVF,
-       DEV_C62X,
-       DEV_C62XVF,
-       DEV_C3XXX,
-       DEV_C3XXXVF,
-       DEV_4XXX,
-};
-
-struct adf_dev_status_info {
-       enum adf_device_type type;
-       __u32 accel_id;
-       __u32 instance_id;
-       __u8 num_ae;
-       __u8 num_accel;
-       __u8 num_logical_accel;
-       __u8 banks_per_accel;
-       __u8 state;
-       __u8 bus;
-       __u8 dev;
-       __u8 fun;
-       char name[MAX_DEVICE_NAME_SIZE];
-};
-
-#define ADF_CTL_IOC_MAGIC 'a'
-#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
-               struct adf_user_cfg_ctl_data)
-#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
-               struct adf_user_cfg_ctl_data)
-#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
-               struct adf_user_cfg_ctl_data)
-#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
-#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
-#endif
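
The ring-pair shifts above reserve three bits per ring pair, so a single word
can describe the service assigned to each of four pairs. A sketch of how such
a map can be composed from adf_cfg_service_type values; the macro name is
illustrative:

/* Illustrative only: SYM/ASYM are adf_cfg_service_type values. */
#define EXAMPLE_RING_TO_SRV_MAP \
	(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
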
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
deleted file mode 100644 (file)
index 5d8c3bd..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_STRINGS_H_
-#define ADF_CFG_STRINGS_H_
-
-#define ADF_GENERAL_SEC "GENERAL"
-#define ADF_KERNEL_SEC "KERNEL"
-#define ADF_ACCEL_SEC "Accelerator"
-#define ADF_NUM_CY "NumberCyInstances"
-#define ADF_NUM_DC "NumberDcInstances"
-#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
-#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
-#define ADF_RING_DC_SIZE "NumConcurrentRequests"
-#define ADF_RING_ASYM_TX "RingAsymTx"
-#define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_ASYM_RX "RingAsymRx"
-#define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_DC_TX "RingTx"
-#define ADF_RING_DC_RX "RingRx"
-#define ADF_ETRMGR_BANK "Bank"
-#define ADF_RING_SYM_BANK_NUM "BankSymNumber"
-#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
-#define ADF_RING_DC_BANK_NUM "BankDcNumber"
-#define ADF_CY "Cy"
-#define ADF_DC "Dc"
-#define ADF_CFG_DC "dc"
-#define ADF_CFG_CY "sym;asym"
-#define ADF_SERVICES_ENABLED "ServicesEnabled"
-#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
-#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_ENABLED
-#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
-#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCE_TIMER
-#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
-#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_COALESCING_MSG_ENABLED
-#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
-#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
-       ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
-#define ADF_ACCEL_STR "Accelerator%d"
-#endif
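
The *_FORMAT strings above rely on compile-time concatenation with the
"Bank%d" prefix, so a per-bank key is produced with a single snprintf().
A minimal sketch:

/* Sketch: deriving a per-bank key name from the format strings. */
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int bank = 2;

snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, bank);
/* key now holds "Bank2InterruptCoalescingTimerNs" */
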
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
deleted file mode 100644 (file)
index 421f4fb..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_CFG_USER_H_
-#define ADF_CFG_USER_H_
-
-#include "adf_cfg_common.h"
-#include "adf_cfg_strings.h"
-
-struct adf_user_cfg_key_val {
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       union {
-               struct adf_user_cfg_key_val *next;
-               __u64 padding3;
-       };
-       enum adf_cfg_val_type type;
-} __packed;
-
-struct adf_user_cfg_section {
-       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
-       union {
-               struct adf_user_cfg_key_val *params;
-               __u64 padding1;
-       };
-       union {
-               struct adf_user_cfg_section *next;
-               __u64 padding3;
-       };
-} __packed;
-
-struct adf_user_cfg_ctl_data {
-       union {
-               struct adf_user_cfg_section *config_section;
-               __u64 padding;
-       };
-       __u8 device_id;
-} __packed;
-#endif
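
These __packed structures, with their pointer/__u64 unions keeping the layout
stable across 32- and 64-bit userspace, form the ABI for the control ioctls
handled in adf_ctl_drv.c below. A hedged userspace sketch of submitting one
section with one key; it assumes this header and adf_cfg_common.h are
available to the program, and error handling is elided:

/* Userspace sketch, not kernel code. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>

int example_configure(void)
{
	struct adf_user_cfg_key_val kv = { .type = ADF_STR };
	struct adf_user_cfg_section sec = { .params = &kv };
	struct adf_user_cfg_ctl_data data = {
		.config_section = &sec,
		.device_id = 0,
	};
	int fd = open("/dev/qat_adf_ctl", O_RDWR);

	if (fd < 0)
		return -1;

	strcpy(kv.key, ADF_SERVICES_ENABLED);
	strcpy(kv.val, ADF_CFG_CY);
	strcpy(sec.name, ADF_GENERAL_SEC);

	return ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &data);
}
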
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
deleted file mode 100644 (file)
index db79759..0000000
+++ /dev/null
@@ -1,249 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#ifndef ADF_DRV_H
-#define ADF_DRV_H
-
-#include <linux/list.h>
-#include <linux/pci.h>
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_loader_handle.h"
-#include "icp_qat_hal.h"
-
-#define ADF_MAJOR_VERSION      0
-#define ADF_MINOR_VERSION      6
-#define ADF_BUILD_VERSION      0
-#define ADF_DRV_VERSION                __stringify(ADF_MAJOR_VERSION) "." \
-                               __stringify(ADF_MINOR_VERSION) "." \
-                               __stringify(ADF_BUILD_VERSION)
-
-#define ADF_STATUS_RESTARTING 0
-#define ADF_STATUS_STARTING 1
-#define ADF_STATUS_CONFIGURED 2
-#define ADF_STATUS_STARTED 3
-#define ADF_STATUS_AE_INITIALISED 4
-#define ADF_STATUS_AE_UCODE_LOADED 5
-#define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_PF_RUNNING 7
-#define ADF_STATUS_IRQ_ALLOCATED 8
-
-enum adf_dev_reset_mode {
-       ADF_DEV_RESET_ASYNC = 0,
-       ADF_DEV_RESET_SYNC
-};
-
-enum adf_event {
-       ADF_EVENT_INIT = 0,
-       ADF_EVENT_START,
-       ADF_EVENT_STOP,
-       ADF_EVENT_SHUTDOWN,
-       ADF_EVENT_RESTARTING,
-       ADF_EVENT_RESTARTED,
-};
-
-struct service_hndl {
-       int (*event_hld)(struct adf_accel_dev *accel_dev,
-                        enum adf_event event);
-       unsigned long init_status[ADF_DEVS_ARRAY_SIZE];
-       unsigned long start_status[ADF_DEVS_ARRAY_SIZE];
-       char *name;
-       struct list_head list;
-};
-
-int adf_service_register(struct service_hndl *service);
-int adf_service_unregister(struct service_hndl *service);
-
-int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
-int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
-int adf_dev_restart(struct adf_accel_dev *accel_dev);
-
-void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
-void adf_clean_vf_map(bool);
-
-int adf_ctl_dev_register(void);
-void adf_ctl_dev_unregister(void);
-int adf_processes_dev_register(void);
-void adf_processes_dev_unregister(void);
-
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf);
-struct list_head *adf_devmgr_get_head(void);
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
-struct adf_accel_dev *adf_devmgr_get_first(void);
-struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
-int adf_devmgr_verify_id(u32 id);
-void adf_devmgr_get_num_dev(u32 *num);
-int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
-int adf_dev_started(struct adf_accel_dev *accel_dev);
-int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
-int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
-int adf_ae_init(struct adf_accel_dev *accel_dev);
-int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
-int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
-void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
-int adf_ae_start(struct adf_accel_dev *accel_dev);
-int adf_ae_stop(struct adf_accel_dev *accel_dev);
-
-extern const struct pci_error_handlers adf_err_handler;
-void adf_reset_sbr(struct adf_accel_dev *accel_dev);
-void adf_reset_flr(struct adf_accel_dev *accel_dev);
-void adf_dev_restore(struct adf_accel_dev *accel_dev);
-int adf_init_aer(void);
-void adf_exit_aer(void);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_send_admin_init(struct adf_accel_dev *accel_dev);
-int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb(struct adf_etr_ring_data *ring);
-
-int adf_dev_get(struct adf_accel_dev *accel_dev);
-void adf_dev_put(struct adf_accel_dev *accel_dev);
-int adf_dev_in_use(struct adf_accel_dev *accel_dev);
-int adf_init_etr_data(struct adf_accel_dev *accel_dev);
-void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
-int qat_crypto_register(void);
-int qat_crypto_unregister(void);
-int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev);
-struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
-void qat_crypto_put_instance(struct qat_crypto_instance *inst);
-void qat_alg_callback(void *resp);
-void qat_alg_asym_callback(void *resp);
-int qat_algs_register(void);
-void qat_algs_unregister(void);
-int qat_asym_algs_register(void);
-void qat_asym_algs_unregister(void);
-
-struct qat_compression_instance *qat_compression_get_instance_node(int node);
-void qat_compression_put_instance(struct qat_compression_instance *inst);
-int qat_compression_register(void);
-int qat_compression_unregister(void);
-int qat_comp_algs_register(void);
-void qat_comp_algs_unregister(void);
-void qat_comp_alg_callback(void *resp);
-
-int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
-void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
-void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
-
-int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
-
-int adf_sysfs_init(struct adf_accel_dev *accel_dev);
-
-int qat_hal_init(struct adf_accel_dev *accel_dev);
-void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
-int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
-void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
-                 unsigned int ctx_mask);
-void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
-int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
-void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
-                         unsigned char ae, unsigned int ctx_mask);
-int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
-                           unsigned int ae);
-int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
-                          unsigned char mode);
-int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
-                           unsigned char ae, unsigned char mode);
-int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, unsigned char mode);
-void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned int ctx_mask, unsigned int upc);
-void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
-                      unsigned char ae, unsigned int uaddr,
-                      unsigned int words_num, u64 *uword);
-void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
-                    unsigned int uword_addr, unsigned int words_num,
-                    unsigned int *data);
-int qat_hal_get_ins_num(void);
-int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                       unsigned char ae,
-                       struct icp_qat_uof_batch_init *lm_init_header);
-int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
-                    unsigned char ae, unsigned long ctx_mask,
-                    enum icp_qat_uof_regtype reg_type,
-                    unsigned short reg_num, unsigned int regdata);
-int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata);
-int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata);
-int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned long ctx_mask,
-                   unsigned short reg_num, unsigned int regdata);
-int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                 unsigned char ae, unsigned short lm_addr, unsigned int value);
-void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned char mode);
-int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
-void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle);
-int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr,
-                      int mem_size);
-int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
-                    void *addr_ptr, u32 mem_size, char *obj_name);
-int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
-                            unsigned int cfg_ae_mask);
-int adf_init_misc_wq(void);
-void adf_exit_misc_wq(void);
-bool adf_misc_wq_queue_work(struct work_struct *work);
-#if defined(CONFIG_PCI_IOV)
-int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
-void adf_disable_sriov(struct adf_accel_dev *accel_dev);
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask);
-void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev);
-bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
-bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr);
-int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
-void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-int adf_init_pf_wq(void);
-void adf_exit_pf_wq(void);
-int adf_init_vf_wq(void);
-void adf_exit_vf_wq(void);
-void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
-#else
-#define adf_sriov_configure NULL
-
-static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
-{
-}
-
-static inline int adf_init_pf_wq(void)
-{
-       return 0;
-}
-
-static inline void adf_exit_pf_wq(void)
-{
-}
-
-static inline int adf_init_vf_wq(void)
-{
-       return 0;
-}
-
-static inline void adf_exit_vf_wq(void)
-{
-}
-
-#endif
-
-static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_bar *pmisc;
-
-       pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-
-       return pmisc->virt_addr;
-}
-
-#endif
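
Subsystems subscribe to the lifecycle events enumerated above through
adf_service_register(); qat_crypto and qat_compression are the in-tree users.
A condensed, illustrative subscriber (all names hypothetical):

/* Sketch of an adf_event subscriber; handler body abridged. */
static int example_event_handler(struct adf_accel_dev *accel_dev,
				 enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_START:
		/* bring the service up on this device */
		break;
	case ADF_EVENT_STOP:
		/* quiesce the service */
		break;
	default:
		break;
	}
	return 0;
}

static struct service_hndl example_service = {
	.event_hld = example_event_handler,
	.name = "example_service",
};

static int __init example_init(void)
{
	return adf_service_register(&example_service);
}
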
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
deleted file mode 100644 (file)
index 88c41d6..0000000
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-
-#include <crypto/algapi.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/bitops.h>
-#include <linux/pci.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_cfg_common.h"
-#include "adf_cfg_user.h"
-
-#define ADF_CFG_MAX_SECTION 512
-#define ADF_CFG_MAX_KEY_VAL 256
-
-#define DEVICE_NAME "qat_adf_ctl"
-
-static DEFINE_MUTEX(adf_ctl_lock);
-static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-
-static const struct file_operations adf_ctl_ops = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = adf_ctl_ioctl,
-       .compat_ioctl = compat_ptr_ioctl,
-};
-
-struct adf_ctl_drv_info {
-       unsigned int major;
-       struct cdev drv_cdev;
-       struct class *drv_class;
-};
-
-static struct adf_ctl_drv_info adf_ctl_drv;
-
-static void adf_chr_drv_destroy(void)
-{
-       device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
-       cdev_del(&adf_ctl_drv.drv_cdev);
-       class_destroy(adf_ctl_drv.drv_class);
-       unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
-}
-
-static int adf_chr_drv_create(void)
-{
-       dev_t dev_id;
-       struct device *drv_device;
-
-       if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
-               pr_err("QAT: unable to allocate chrdev region\n");
-               return -EFAULT;
-       }
-
-       adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
-       if (IS_ERR(adf_ctl_drv.drv_class)) {
-               pr_err("QAT: class_create failed for adf_ctl\n");
-               goto err_chrdev_unreg;
-       }
-       adf_ctl_drv.major = MAJOR(dev_id);
-       cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
-       if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
-               pr_err("QAT: cdev add failed\n");
-               goto err_class_destr;
-       }
-
-       drv_device = device_create(adf_ctl_drv.drv_class, NULL,
-                                  MKDEV(adf_ctl_drv.major, 0),
-                                  NULL, DEVICE_NAME);
-       if (IS_ERR(drv_device)) {
-               pr_err("QAT: failed to create device\n");
-               goto err_cdev_del;
-       }
-       return 0;
-err_cdev_del:
-       cdev_del(&adf_ctl_drv.drv_cdev);
-err_class_destr:
-       class_destroy(adf_ctl_drv.drv_class);
-err_chrdev_unreg:
-       unregister_chrdev_region(dev_id, 1);
-       return -EFAULT;
-}
-
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
-                                  unsigned long arg)
-{
-       struct adf_user_cfg_ctl_data *cfg_data;
-
-       cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
-       if (!cfg_data)
-               return -ENOMEM;
-
-       /* Initialize device id to NO DEVICE as 0 is a valid device id */
-       cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
-       if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
-               pr_err("QAT: failed to copy from user cfg_data.\n");
-               kfree(cfg_data);
-               return -EIO;
-       }
-
-       *ctl_data = cfg_data;
-       return 0;
-}
-
-static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
-                                 const char *section,
-                                 const struct adf_user_cfg_key_val *key_val)
-{
-       if (key_val->type == ADF_HEX) {
-               long *ptr = (long *)key_val->val;
-               long val = *ptr;
-
-               if (adf_cfg_add_key_value_param(accel_dev, section,
-                                               key_val->key, (void *)val,
-                                               key_val->type)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to add hex keyvalue.\n");
-                       return -EFAULT;
-               }
-       } else {
-               if (adf_cfg_add_key_value_param(accel_dev, section,
-                                               key_val->key, key_val->val,
-                                               key_val->type)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to add keyvalue.\n");
-                       return -EFAULT;
-               }
-       }
-       return 0;
-}
-
-static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
-                                  struct adf_user_cfg_ctl_data *ctl_data)
-{
-       struct adf_user_cfg_key_val key_val;
-       struct adf_user_cfg_key_val *params_head;
-       struct adf_user_cfg_section section, *section_head;
-       int i, j;
-
-       section_head = ctl_data->config_section;
-
-       for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
-               if (copy_from_user(&section, (void __user *)section_head,
-                                  sizeof(*section_head))) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to copy section info\n");
-                       goto out_err;
-               }
-
-               if (adf_cfg_section_add(accel_dev, section.name)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "failed to add section.\n");
-                       goto out_err;
-               }
-
-               params_head = section.params;
-
-               for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
-                       if (copy_from_user(&key_val, (void __user *)params_head,
-                                          sizeof(key_val))) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Failed to copy keyvalue.\n");
-                               goto out_err;
-                       }
-                       if (adf_add_key_value_data(accel_dev, section.name,
-                                                  &key_val)) {
-                               goto out_err;
-                       }
-                       params_head = key_val.next;
-               }
-               section_head = section.next;
-       }
-       return 0;
-out_err:
-       adf_cfg_del_all(accel_dev);
-       return -EFAULT;
-}
-
-static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
-                                   unsigned long arg)
-{
-       int ret;
-       struct adf_user_cfg_ctl_data *ctl_data;
-       struct adf_accel_dev *accel_dev;
-
-       ret = adf_ctl_alloc_resources(&ctl_data, arg);
-       if (ret)
-               return ret;
-
-       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
-       if (!accel_dev) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       if (adf_dev_started(accel_dev)) {
-               ret = -EFAULT;
-               goto out;
-       }
-
-       if (adf_copy_key_value_data(accel_dev, ctl_data)) {
-               ret = -EFAULT;
-               goto out;
-       }
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-out:
-       kfree(ctl_data);
-       return ret;
-}
-
-static int adf_ctl_is_device_in_use(int id)
-{
-       struct adf_accel_dev *dev;
-
-       list_for_each_entry(dev, adf_devmgr_get_head(), list) {
-               if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-                       if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
-                               dev_info(&GET_DEV(dev),
-                                        "device qat_dev%d is busy\n",
-                                        dev->accel_id);
-                               return -EBUSY;
-                       }
-               }
-       }
-       return 0;
-}
-
-static void adf_ctl_stop_devices(u32 id)
-{
-       struct adf_accel_dev *accel_dev;
-
-       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
-               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-                       if (!adf_dev_started(accel_dev))
-                               continue;
-
-                       /* First stop all VFs */
-                       if (!accel_dev->is_vf)
-                               continue;
-
-                       adf_dev_down(accel_dev, false);
-               }
-       }
-
-       list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
-               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-                       if (!adf_dev_started(accel_dev))
-                               continue;
-
-                       adf_dev_down(accel_dev, false);
-               }
-       }
-}
-
-static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
-                                 unsigned long arg)
-{
-       int ret;
-       struct adf_user_cfg_ctl_data *ctl_data;
-
-       ret = adf_ctl_alloc_resources(&ctl_data, arg);
-       if (ret)
-               return ret;
-
-       if (adf_devmgr_verify_id(ctl_data->device_id)) {
-               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
-               ret = -ENODEV;
-               goto out;
-       }
-
-       ret = adf_ctl_is_device_in_use(ctl_data->device_id);
-       if (ret)
-               goto out;
-
-       if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
-               pr_info("QAT: Stopping all acceleration devices.\n");
-       else
-               pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
-                       ctl_data->device_id);
-
-       adf_ctl_stop_devices(ctl_data->device_id);
-
-out:
-       kfree(ctl_data);
-       return ret;
-}
-
-static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
-                                  unsigned long arg)
-{
-       int ret;
-       struct adf_user_cfg_ctl_data *ctl_data;
-       struct adf_accel_dev *accel_dev;
-
-       ret = adf_ctl_alloc_resources(&ctl_data, arg);
-       if (ret)
-               return ret;
-
-       ret = -ENODEV;
-       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
-       if (!accel_dev)
-               goto out;
-
-       dev_info(&GET_DEV(accel_dev),
-                "Starting acceleration device qat_dev%d.\n",
-                ctl_data->device_id);
-
-       ret = adf_dev_up(accel_dev, false);
-
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
-                       ctl_data->device_id);
-               adf_dev_down(accel_dev, false);
-       }
-out:
-       kfree(ctl_data);
-       return ret;
-}
-
-static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
-                                        unsigned long arg)
-{
-       u32 num_devices = 0;
-
-       adf_devmgr_get_num_dev(&num_devices);
-       if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
-                                   unsigned long arg)
-{
-       struct adf_hw_device_data *hw_data;
-       struct adf_dev_status_info dev_info;
-       struct adf_accel_dev *accel_dev;
-
-       if (copy_from_user(&dev_info, (void __user *)arg,
-                          sizeof(struct adf_dev_status_info))) {
-               pr_err("QAT: failed to copy from user.\n");
-               return -EFAULT;
-       }
-
-       accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
-       if (!accel_dev)
-               return -ENODEV;
-
-       hw_data = accel_dev->hw_device;
-       dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
-       dev_info.num_ae = hw_data->get_num_aes(hw_data);
-       dev_info.num_accel = hw_data->get_num_accels(hw_data);
-       dev_info.num_logical_accel = hw_data->num_logical_accel;
-       dev_info.banks_per_accel = hw_data->num_banks
-                                       / hw_data->num_logical_accel;
-       strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
-       dev_info.instance_id = hw_data->instance_id;
-       dev_info.type = hw_data->dev_class->type;
-       dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
-       dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
-       dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
-
-       if (copy_to_user((void __user *)arg, &dev_info,
-                        sizeof(struct adf_dev_status_info))) {
-               dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
-       int ret;
-
-       if (mutex_lock_interruptible(&adf_ctl_lock))
-               return -EFAULT;
-
-       switch (cmd) {
-       case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
-               ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
-               break;
-
-       case IOCTL_STOP_ACCEL_DEV:
-               ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
-               break;
-
-       case IOCTL_START_ACCEL_DEV:
-               ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
-               break;
-
-       case IOCTL_GET_NUM_DEVICES:
-               ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
-               break;
-
-       case IOCTL_STATUS_ACCEL_DEV:
-               ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
-               break;
-       default:
-               pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
-               ret = -EFAULT;
-               break;
-       }
-       mutex_unlock(&adf_ctl_lock);
-       return ret;
-}
-
-static int __init adf_register_ctl_device_driver(void)
-{
-       if (adf_chr_drv_create())
-               goto err_chr_dev;
-
-       if (adf_init_misc_wq())
-               goto err_misc_wq;
-
-       if (adf_init_aer())
-               goto err_aer;
-
-       if (adf_init_pf_wq())
-               goto err_pf_wq;
-
-       if (adf_init_vf_wq())
-               goto err_vf_wq;
-
-       if (qat_crypto_register())
-               goto err_crypto_register;
-
-       if (qat_compression_register())
-               goto err_compression_register;
-
-       return 0;
-
-err_compression_register:
-       qat_crypto_unregister();
-err_crypto_register:
-       adf_exit_vf_wq();
-err_vf_wq:
-       adf_exit_pf_wq();
-err_pf_wq:
-       adf_exit_aer();
-err_aer:
-       adf_exit_misc_wq();
-err_misc_wq:
-       adf_chr_drv_destroy();
-err_chr_dev:
-       mutex_destroy(&adf_ctl_lock);
-       return -EFAULT;
-}
-
-static void __exit adf_unregister_ctl_device_driver(void)
-{
-       adf_chr_drv_destroy();
-       adf_exit_misc_wq();
-       adf_exit_aer();
-       adf_exit_vf_wq();
-       adf_exit_pf_wq();
-       qat_crypto_unregister();
-       qat_compression_unregister();
-       adf_clean_vf_map(false);
-       mutex_destroy(&adf_ctl_lock);
-}
-
-module_init(adf_register_ctl_device_driver);
-module_exit(adf_unregister_ctl_device_driver);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_ALIAS_CRYPTO("intel_qat");
-MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
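
From userspace, the ioctls registered above are reached through the
/dev/qat_adf_ctl node created in adf_chr_drv_create(). A hedged sketch of the
two read-only queries; it assumes adf_cfg_common.h is available to the program
and elides error handling and close():

/* Userspace sketch: device count and status queries. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>

void example_query(void)
{
	struct adf_dev_status_info info = { .accel_id = 0 };
	__u32 num_devices = 0;
	int fd = open("/dev/qat_adf_ctl", O_RDWR);

	if (fd < 0)
		return;

	ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices);
	ioctl(fd, IOCTL_STATUS_ACCEL_DEV, &info);
	/* info.state now reflects the DEV_UP/DEV_DOWN values used by
	 * adf_ctl_ioctl_get_status(); name and bus/dev/fun are filled in.
	 */
}
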
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
deleted file mode 100644 (file)
index 86ee36f..0000000
+++ /dev/null
@@ -1,452 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static LIST_HEAD(accel_table);
-static LIST_HEAD(vfs_table);
-static DEFINE_MUTEX(table_lock);
-static u32 num_devices;
-static u8 id_map[ADF_MAX_DEVICES];
-
-struct vf_id_map {
-       u32 bdf;
-       u32 id;
-       u32 fake_id;
-       bool attached;
-       struct list_head list;
-};
-
-static int adf_get_vf_id(struct adf_accel_dev *vf)
-{
-       return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
-               PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
-               (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
-}
-
-static int adf_get_vf_num(struct adf_accel_dev *vf)
-{
-       return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
-}
-
-static struct vf_id_map *adf_find_vf(u32 bdf)
-{
-       struct list_head *itr;
-
-       list_for_each(itr, &vfs_table) {
-               struct vf_id_map *ptr =
-                       list_entry(itr, struct vf_id_map, list);
-
-               if (ptr->bdf == bdf)
-                       return ptr;
-       }
-       return NULL;
-}
-
-static int adf_get_vf_real_id(u32 fake)
-{
-       struct list_head *itr;
-
-       list_for_each(itr, &vfs_table) {
-               struct vf_id_map *ptr =
-                       list_entry(itr, struct vf_id_map, list);
-               if (ptr->fake_id == fake)
-                       return ptr->id;
-       }
-       return -1;
-}
-
-/**
- * adf_clean_vf_map() - Cleans VF id mappings
- * @vf: flag indicating whether mappings are cleaned
- *     for VFs only or for both VFs and PFs
- *
- * Function cleans internal ids for virtual functions.
- */
-void adf_clean_vf_map(bool vf)
-{
-       struct vf_id_map *map;
-       struct list_head *ptr, *tmp;
-
-       mutex_lock(&table_lock);
-       list_for_each_safe(ptr, tmp, &vfs_table) {
-               map = list_entry(ptr, struct vf_id_map, list);
-               if (map->bdf != -1) {
-                       id_map[map->id] = 0;
-                       num_devices--;
-               }
-
-               if (vf && map->bdf == -1)
-                       continue;
-
-               list_del(ptr);
-               kfree(map);
-       }
-       mutex_unlock(&table_lock);
-}
-EXPORT_SYMBOL_GPL(adf_clean_vf_map);
-
-/**
- * adf_devmgr_update_class_index() - Update internal index
- * @hw_data:  Pointer to internal device data.
- *
- * Function updates internal dev index for VFs
- */
-void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
-{
-       struct adf_hw_device_class *class = hw_data->dev_class;
-       struct list_head *itr;
-       int i = 0;
-
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-
-               if (ptr->hw_device->dev_class == class)
-                       ptr->hw_device->instance_id = i++;
-
-               if (i == class->instances)
-                       break;
-       }
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
-
-static unsigned int adf_find_free_id(void)
-{
-       unsigned int i;
-
-       for (i = 0; i < ADF_MAX_DEVICES; i++) {
-               if (!id_map[i]) {
-                       id_map[i] = 1;
-                       return i;
-               }
-       }
-       return ADF_MAX_DEVICES + 1;
-}
-
-/**
- * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
- * @accel_dev:  Pointer to acceleration device.
- * @pf:                Corresponding PF if the accel_dev is a VF
- *
- * Function adds acceleration device to the acceleration framework.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf)
-{
-       struct list_head *itr;
-       int ret = 0;
-
-       if (num_devices == ADF_MAX_DEVICES) {
-               dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
-                       ADF_MAX_DEVICES);
-               return -EFAULT;
-       }
-
-       mutex_lock(&table_lock);
-       atomic_set(&accel_dev->ref_count, 0);
-
-       /* PF on host or VF on guest - optimized to remove redundant is_vf */
-       if (!accel_dev->is_vf || !pf) {
-               struct vf_id_map *map;
-
-               list_for_each(itr, &accel_table) {
-                       struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-
-                       if (ptr == accel_dev) {
-                               ret = -EEXIST;
-                               goto unlock;
-                       }
-               }
-
-               list_add_tail(&accel_dev->list, &accel_table);
-               accel_dev->accel_id = adf_find_free_id();
-               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
-                       ret = -EFAULT;
-                       goto unlock;
-               }
-               num_devices++;
-               map = kzalloc(sizeof(*map), GFP_KERNEL);
-               if (!map) {
-                       ret = -ENOMEM;
-                       goto unlock;
-               }
-               map->bdf = ~0;
-               map->id = accel_dev->accel_id;
-               map->fake_id = map->id;
-               map->attached = true;
-               list_add_tail(&map->list, &vfs_table);
-       } else if (accel_dev->is_vf && pf) {
-               /* VF on host */
-               struct vf_id_map *map;
-
-               map = adf_find_vf(adf_get_vf_num(accel_dev));
-               if (map) {
-                       struct vf_id_map *next;
-
-                       accel_dev->accel_id = map->id;
-                       list_add_tail(&accel_dev->list, &accel_table);
-                       map->fake_id++;
-                       map->attached = true;
-                       next = list_next_entry(map, list);
-                       while (next && &next->list != &vfs_table) {
-                               next->fake_id++;
-                               next = list_next_entry(next, list);
-                       }
-
-                       ret = 0;
-                       goto unlock;
-               }
-
-               map = kzalloc(sizeof(*map), GFP_KERNEL);
-               if (!map) {
-                       ret = -ENOMEM;
-                       goto unlock;
-               }
-               accel_dev->accel_id = adf_find_free_id();
-               if (accel_dev->accel_id > ADF_MAX_DEVICES) {
-                       kfree(map);
-                       ret = -EFAULT;
-                       goto unlock;
-               }
-               num_devices++;
-               list_add_tail(&accel_dev->list, &accel_table);
-               map->bdf = adf_get_vf_num(accel_dev);
-               map->id = accel_dev->accel_id;
-               map->fake_id = map->id;
-               map->attached = true;
-               list_add_tail(&map->list, &vfs_table);
-       }
-       mutex_init(&accel_dev->state_lock);
-unlock:
-       mutex_unlock(&table_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
-
-struct list_head *adf_devmgr_get_head(void)
-{
-       return &accel_table;
-}
-
-/**
- * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
- * @accel_dev:  Pointer to acceleration device.
- * @pf:                Corresponding PF if the accel_dev is a VF
- *
- * Function removes acceleration device from the acceleration framework.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
-                      struct adf_accel_dev *pf)
-{
-       mutex_lock(&table_lock);
-       /* PF on host or VF on guest - optimized to remove redundant is_vf */
-       if (!accel_dev->is_vf || !pf) {
-               id_map[accel_dev->accel_id] = 0;
-               num_devices--;
-       } else if (accel_dev->is_vf && pf) {
-               struct vf_id_map *map, *next;
-
-               map = adf_find_vf(adf_get_vf_num(accel_dev));
-               if (!map) {
-                       dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
-                       goto unlock;
-               }
-               map->fake_id--;
-               map->attached = false;
-               next = list_next_entry(map, list);
-               while (next && &next->list != &vfs_table) {
-                       next->fake_id--;
-                       next = list_next_entry(next, list);
-               }
-       }
-unlock:
-       mutex_destroy(&accel_dev->state_lock);
-       list_del(&accel_dev->list);
-       mutex_unlock(&table_lock);
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
-
-struct adf_accel_dev *adf_devmgr_get_first(void)
-{
-       struct adf_accel_dev *dev = NULL;
-
-       if (!list_empty(&accel_table))
-               dev = list_first_entry(&accel_table, struct adf_accel_dev,
-                                      list);
-       return dev;
-}
-
-/**
- * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
- * @pci_dev:  Pointer to PCI device.
- *
- * Function returns acceleration device associated with the given PCI device.
- * To be used by QAT device specific drivers.
- *
- * Return: pointer to accel_dev or NULL if not found.
- */
-struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
-{
-       struct list_head *itr;
-
-       mutex_lock(&table_lock);
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-
-               if (ptr->accel_pci_dev.pci_dev == pci_dev) {
-                       mutex_unlock(&table_lock);
-                       return ptr;
-               }
-       }
-       mutex_unlock(&table_lock);
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
-
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
-{
-       struct list_head *itr;
-       int real_id;
-
-       mutex_lock(&table_lock);
-       real_id = adf_get_vf_real_id(id);
-       if (real_id < 0)
-               goto unlock;
-
-       id = real_id;
-
-       list_for_each(itr, &accel_table) {
-               struct adf_accel_dev *ptr =
-                               list_entry(itr, struct adf_accel_dev, list);
-               if (ptr->accel_id == id) {
-                       mutex_unlock(&table_lock);
-                       return ptr;
-               }
-       }
-unlock:
-       mutex_unlock(&table_lock);
-       return NULL;
-}
-
-int adf_devmgr_verify_id(u32 id)
-{
-       if (id == ADF_CFG_ALL_DEVICES)
-               return 0;
-
-       if (adf_devmgr_get_dev_by_id(id))
-               return 0;
-
-       return -ENODEV;
-}
-
-static int adf_get_num_detached_vfs(void)
-{
-       struct list_head *itr;
-       int vfs = 0;
-
-       mutex_lock(&table_lock);
-       list_for_each(itr, &vfs_table) {
-               struct vf_id_map *ptr =
-                       list_entry(itr, struct vf_id_map, list);
-               if (ptr->bdf != ~0 && !ptr->attached)
-                       vfs++;
-       }
-       mutex_unlock(&table_lock);
-       return vfs;
-}
-
-void adf_devmgr_get_num_dev(u32 *num)
-{
-       *num = num_devices - adf_get_num_detached_vfs();
-}
-
-/**
- * adf_dev_in_use() - Check whether accel_dev is currently in use
- * @accel_dev: Pointer to acceleration device.
- *
- * To be used by QAT device specific drivers.
- *
- * Return: 1 when device is in use, 0 otherwise.
- */
-int adf_dev_in_use(struct adf_accel_dev *accel_dev)
-{
-       return atomic_read(&accel_dev->ref_count) != 0;
-}
-EXPORT_SYMBOL_GPL(adf_dev_in_use);
-
-/**
- * adf_dev_get() - Increment accel_dev reference count
- * @accel_dev: Pointer to acceleration device.
- *
- * Increment the accel_dev refcount. If this takes the first reference
- * of the current usage period (i.e. the device goes from unused to in
- * use), take a reference on the owning module too.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 when successful, -EFAULT when the module refcount cannot be taken
- */
-int adf_dev_get(struct adf_accel_dev *accel_dev)
-{
-       if (atomic_add_return(1, &accel_dev->ref_count) == 1)
-               if (!try_module_get(accel_dev->owner))
-                       return -EFAULT;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_dev_get);
-
-/**
- * adf_dev_put() - Decrement accel_dev reference count
- * @accel_dev: Pointer to acceleration device.
- *
- * Decrement the accel_dev refcount. If this drops the last reference
- * of the current usage period (i.e. the device becomes unused), drop
- * the reference on the owning module too.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_dev_put(struct adf_accel_dev *accel_dev)
-{
-       if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
-               module_put(accel_dev->owner);
-}
-EXPORT_SYMBOL_GPL(adf_dev_put);
-
-/**
- * adf_devmgr_in_reset() - Check whether device is in reset
- * @accel_dev: Pointer to acceleration device.
- *
- * To be used by QAT device specific drivers.
- *
- * Return: 1 when the device is being reset, 0 otherwise.
- */
-int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
-{
-       return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
-}
-EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
-
-/**
- * adf_dev_started() - Check whether device has started
- * @accel_dev: Pointer to acceleration device.
- *
- * To be used by QAT device specific drivers.
- *
- * Return: 1 when the device has started, 0 otherwise
- */
-int adf_dev_started(struct adf_accel_dev *accel_dev)
-{
-       return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
-}
-EXPORT_SYMBOL_GPL(adf_dev_started);
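
adf_dev_get()/adf_dev_put() above tie the device refcount to the owning module: the first reference of a usage period pins the module, the last put releases it. A standalone model of that semantic using C11 atomics; in the real code the module pin can fail (try_module_get() returning false yields -EFAULT), a path elided here:

#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-ins for struct module and struct adf_accel_dev. */
struct module_model { atomic_int refs; };
struct device_model { atomic_int ref_count; struct module_model *owner; };

static int dev_get(struct device_model *d)
{
        /* First user of the device also pins the owning module. */
        if (atomic_fetch_add(&d->ref_count, 1) + 1 == 1)
                atomic_fetch_add(&d->owner->refs, 1);
        return 0;
}

static void dev_put(struct device_model *d)
{
        /* Last user drops the module reference. */
        if (atomic_fetch_sub(&d->ref_count, 1) - 1 == 0)
                atomic_fetch_sub(&d->owner->refs, 1);
}

int main(void)
{
        struct module_model m = { 0 };
        struct device_model d = { 0, &m };

        dev_get(&d);    /* module pinned here */
        dev_get(&d);    /* refcount only */
        dev_put(&d);
        dev_put(&d);    /* module unpinned here */
        printf("module refs after last put: %d\n", atomic_load(&m.refs));
        return 0;
}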
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.c b/drivers/crypto/qat/qat_common/adf_gen2_config.c
deleted file mode 100644 (file)
index eeb30da..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_gen2_config.h"
-#include "adf_common_drv.h"
-#include "qat_crypto.h"
-#include "qat_compression.h"
-#include "adf_transport_access_macros.h"
-
-static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long val;
-       int instances;
-       int ret;
-       int i;
-
-       if (adf_hw_dev_has_crypto(accel_dev))
-               instances = min(cpus, banks);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
-                        i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-               val = 128;
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 0;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 2;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 8;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 10;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = ADF_COALESCING_DEF_TIME;
-               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
-                                         &val, ADF_DEC);
-       if (ret)
-               goto err;
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
-       return ret;
-}
-
-static int adf_gen2_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       int banks = GET_MAX_BANKS(accel_dev);
-       int cpus = num_online_cpus();
-       unsigned long val;
-       int instances;
-       int ret;
-       int i;
-
-       if (adf_hw_dev_has_compression(accel_dev))
-               instances = min(cpus, banks);
-       else
-               instances = 0;
-
-       for (i = 0; i < instances; i++) {
-               val = i;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 512;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 6;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-
-               val = 14;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                                 key, &val, ADF_DEC);
-               if (ret)
-                       goto err;
-       }
-
-       val = i;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               return ret;
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
-       return ret;
-}
-
-/**
- * adf_gen2_dev_config() - create dev config required to create instances
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * Function creates device configuration required to create instances
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
-       if (ret)
-               goto err;
-
-       ret = adf_cfg_section_add(accel_dev, "Accelerator0");
-       if (ret)
-               goto err;
-
-       ret = adf_gen2_crypto_dev_config(accel_dev);
-       if (ret)
-               goto err;
-
-       ret = adf_gen2_comp_dev_config(accel_dev);
-       if (ret)
-               goto err;
-
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
-       return ret;
-
-err:
-       dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_dev_config);
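
Each instance loop above builds string keys by concatenating a section prefix, the instance number and a parameter suffix. A small userspace sketch of the same key construction; the macro values below are representative placeholders for illustration, not necessarily the strings defined in adf_cfg_strings.h:

#include <stdio.h>

/* Placeholder values -- the real strings live in adf_cfg_strings.h. */
#define ADF_CY                 "Cy"
#define ADF_RING_ASYM_BANK_NUM "BankAsymNumber"
#define ADF_RING_SYM_BANK_NUM  "BankSymNumber"

int main(void)
{
        char key[64];
        int i;

        /* Mirrors the key construction in adf_gen2_crypto_dev_config()
         * for two instances; the value stored is the bank number. */
        for (i = 0; i < 2; i++) {
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
                printf("%s = %d\n", key, i);
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
                printf("%s = %d\n", key, i);
        }
        return 0;
}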
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_config.h b/drivers/crypto/qat/qat_common/adf_gen2_config.h
deleted file mode 100644 (file)
index 4bf9da2..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_CONFIG_H_
-#define ADF_GEN2_CONFIG_H_
-
-#include "adf_accel_devices.h"
-
-int adf_gen2_dev_config(struct adf_accel_dev *accel_dev);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/qat/qat_common/adf_gen2_dc.c
deleted file mode 100644 (file)
index 47261b1..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_gen2_dc.h"
-#include "icp_qat_fw_comp.h"
-
-static void qat_comp_build_deflate_ctx(void *ctx)
-{
-       struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
-       struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
-
-       memset(req_tmpl, 0, sizeof(*req_tmpl));
-       header->hdr_flags =
-               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
-       header->comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
-                                           QAT_COMN_PTR_TYPE_SGL);
-       header->serv_specif_flags =
-               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
-                                           ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
-       cd_pars->u.sl.comp_slice_cfg_word[0] =
-               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
-                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
-                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
-                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
-                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
-       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
-       req_pars->req_par_flags =
-               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
-                                                     ICP_QAT_FW_COMP_EOP,
-                                                     ICP_QAT_FW_COMP_BFINAL,
-                                                     ICP_QAT_FW_COMP_CNV,
-                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
-                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
-                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
-                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
-                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
-                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
-                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-       ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
-
-       /* Fill second half of the template for decompression */
-       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
-       req_tmpl++;
-       header = &req_tmpl->comn_hdr;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
-       cd_pars = &req_tmpl->cd_pars;
-       cd_pars->u.sl.comp_slice_cfg_word[0] =
-               ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
-                                                   ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
-                                                   ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
-                                                   ICP_QAT_HW_COMPRESSION_DEPTH_1,
-                                                   ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-}
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
-       dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
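
qat_comp_build_deflate_ctx() relies on the caller handing it room for two request templates back to back: it fills the compression template, memcpy()s it into the second slot, then patches only the direction-specific fields. A simplified standalone sketch of that layout trick:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct icp_qat_fw_comp_req. */
struct fw_req { int cmd; int cfg; };

enum { CMD_COMPRESS = 1, CMD_DECOMPRESS = 2 };

/* Same pattern as qat_comp_build_deflate_ctx(): ctx points at an array
 * of two templates; fill the first, copy it, patch the second. */
static void build_ctx(void *ctx)
{
        struct fw_req *tmpl = ctx;

        memset(tmpl, 0, sizeof(*tmpl));
        tmpl->cmd = CMD_COMPRESS;
        tmpl->cfg = 0xC0;

        memcpy(tmpl + 1, tmpl, sizeof(*tmpl));
        tmpl[1].cmd = CMD_DECOMPRESS;   /* only the direction differs */
}

int main(void)
{
        struct fw_req ctx[2];

        build_ctx(ctx);
        printf("comp cmd=%d, decomp cmd=%d\n", ctx[0].cmd, ctx[1].cmd);
        return 0;
}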
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/qat/qat_common/adf_gen2_dc.h
deleted file mode 100644 (file)
index 6eae023..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_DC_H
-#define ADF_GEN2_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
deleted file mode 100644 (file)
index d188454..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
-#include "adf_common_drv.h"
-#include "adf_gen2_hw_data.h"
-#include "icp_qat_hw.h"
-#include <linux/pci.h>
-
-u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
-{
-       if (!self || !self->accel_mask)
-               return 0;
-
-       return hweight16(self->accel_mask);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
-
-u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
-{
-       if (!self || !self->ae_mask)
-               return 0;
-
-       return hweight32(self->ae_mask);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
-
-void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned long accel_mask = hw_data->accel_mask;
-       unsigned long ae_mask = hw_data->ae_mask;
-       unsigned int val, i;
-
-       /* Enable Accel Engine error detection & correction */
-       for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
-               val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
-               val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
-       }
-
-       /* Enable shared memory error detection & correction */
-       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
-               val |= ADF_GEN2_ERRSSMSH_EN;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
-               val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
-               val |= ADF_GEN2_ERRSSMSH_EN;
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
-
-void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
-                          int num_a_regs, int num_b_regs)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 reg;
-       int i;
-
-       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
-       for (i = 0; i < num_a_regs; i++) {
-               reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
-               if (enable)
-                       reg |= AE2FUNCTION_MAP_VALID;
-               else
-                       reg &= ~AE2FUNCTION_MAP_VALID;
-               WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
-       }
-
-       /* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
-       for (i = 0; i < num_b_regs; i++) {
-               reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
-               if (enable)
-                       reg |= AE2FUNCTION_MAP_VALID;
-               else
-                       reg &= ~AE2FUNCTION_MAP_VALID;
-               WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
-
-void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
-{
-       admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
-       admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
-       admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
-
-void adf_gen2_get_arb_info(struct arb_info *arb_info)
-{
-       arb_info->arb_cfg = ADF_ARB_CONFIG;
-       arb_info->arb_offset = ADF_ARB_OFFSET;
-       arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
-
-void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *addr = adf_get_pmisc_base(accel_dev);
-       u32 val;
-
-       val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
-
-       /* Enable bundle and misc interrupts */
-       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
-       ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
-}
-EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
-
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
-       return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
-       return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
-                                 u32 ring, u32 value)
-{
-       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               dma_addr_t addr)
-{
-       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
-       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
-       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
-                                u32 value)
-{
-       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
-                                 u32 value)
-{
-       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
-                                      u32 value)
-{
-       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
-                                     u32 value)
-{
-       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
-       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
-       csr_ops->read_csr_ring_head = read_csr_ring_head;
-       csr_ops->write_csr_ring_head = write_csr_ring_head;
-       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
-       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
-       csr_ops->read_csr_e_stat = read_csr_e_stat;
-       csr_ops->write_csr_ring_config = write_csr_ring_config;
-       csr_ops->write_csr_ring_base = write_csr_ring_base;
-       csr_ops->write_csr_int_flag = write_csr_int_flag;
-       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
-       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
-       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
-       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
-       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
-
-u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
-       u32 straps = hw_data->straps;
-       u32 fuses = hw_data->fuses;
-       u32 legfuses;
-       u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
-                          ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
-                          ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
-                          ICP_ACCEL_CAPABILITIES_CIPHER |
-                          ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       /* Read accelerator capabilities mask */
-       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
-
-       /* A set bit in legfuses means the feature is OFF in this SKU */
-       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       if ((straps | fuses) & ADF_POWERGATE_PKE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-
-       if ((straps | fuses) & ADF_POWERGATE_DC)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       return capabilities;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
-
-void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
-       u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
-       unsigned long accel_mask = hw_data->accel_mask;
-       u32 i = 0;
-
-       /* Configures WDT timers */
-       for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
-               /* Enable WDT for sym and dc */
-               ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
-               /* Enable WDT for pke */
-               ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
-       }
-}
-EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
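
The thin wrappers above exist so generation-specific CSR access can sit behind a function-pointer table: transport code calls adf_hw_csr_ops members and never touches the GEN2 macros directly. A minimal sketch of the ops-table pattern, with the MMIO access faked for illustration:

#include <stdio.h>

/* Hardware generations fill the same struct with their own accessors;
 * common code only ever calls through the pointers. */
struct hw_csr_ops {
        unsigned int (*read_ring_head)(unsigned int bank, unsigned int ring);
};

static unsigned int gen2_read_ring_head(unsigned int bank, unsigned int ring)
{
        /* A real implementation would issue an MMIO read here. */
        return bank * 16 + ring;
}

static void gen2_init_ops(struct hw_csr_ops *ops)
{
        ops->read_ring_head = gen2_read_ring_head;
}

int main(void)
{
        struct hw_csr_ops ops;

        gen2_init_ops(&ops);
        printf("head = %u\n", ops.read_ring_head(1, 3));
        return 0;
}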
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
deleted file mode 100644 (file)
index e4bc075..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN2_HW_DATA_H_
-#define ADF_GEN2_HW_DATA_H_
-
-#include "adf_accel_devices.h"
-#include "adf_cfg_common.h"
-
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK_0    0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X    0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG       0x000
-#define ADF_RING_CSR_RING_LBASE                0x040
-#define ADF_RING_CSR_RING_UBASE                0x080
-#define ADF_RING_CSR_RING_HEAD         0x0C0
-#define ADF_RING_CSR_RING_TAIL         0x100
-#define ADF_RING_CSR_E_STAT            0x14C
-#define ADF_RING_CSR_INT_FLAG          0x170
-#define ADF_RING_CSR_INT_SRCSEL                0x174
-#define ADF_RING_CSR_INT_SRCSEL_2      0x178
-#define ADF_RING_CSR_INT_COL_EN                0x17C
-#define ADF_RING_CSR_INT_COL_CTL       0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
-#define ADF_RING_BUNDLE_SIZE           0x1000
-#define ADF_GEN2_RX_RINGS_OFFSET       8
-#define ADF_GEN2_TX_RINGS_MASK         0xFF
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
-       (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
-       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
-       u32 l_base = 0, u_base = 0; \
-       l_base = (u32)((value) & 0xFFFFFFFF); \
-       u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_FLAG, value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_COL_CTL, \
-                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
-       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
-                  ADF_RING_CSR_INT_FLAG_AND_COL, value)
-
-/* AE to function map */
-#define AE2FUNCTION_MAP_A_OFFSET       (0x3A400 + 0x190)
-#define AE2FUNCTION_MAP_B_OFFSET       (0x3A400 + 0x310)
-#define AE2FUNCTION_MAP_REG_SIZE       4
-#define AE2FUNCTION_MAP_VALID          BIT(7)
-
-#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
-       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index))
-#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
-       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
-#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
-       ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index))
-#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
-       ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
-                  AE2FUNCTION_MAP_REG_SIZE * (index), value)
-
-/* Admin Interface Offsets */
-#define ADF_ADMINMSGUR_OFFSET  (0x3A000 + 0x574)
-#define ADF_ADMINMSGLR_OFFSET  (0x3A000 + 0x578)
-#define ADF_MAILBOX_BASE_OFFSET        0x20970
-
-/* Arbiter configuration */
-#define ADF_ARB_OFFSET                 0x30000
-#define ADF_ARB_WRK_2_SER_MAP_OFFSET   0x180
-#define ADF_ARB_CONFIG                 (BIT(31) | BIT(6) | BIT(0))
-#define ADF_ARB_REG_SLOT               0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET    0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
-       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
-       (ADF_ARB_REG_SLOT * (index)), value)
-
-/* Power gating */
-#define ADF_POWERGATE_DC               BIT(23)
-#define ADF_POWERGATE_PKE              BIT(24)
-
-/* Default ring mapping */
-#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
-       (CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
-        CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
-        UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
-          COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
-
-/* WDT timers
- *
- * Timeout is in cycles. Clock speed may vary across products, but these
- * values should correspond to a few milliseconds.
- */
-#define ADF_SSM_WDT_DEFAULT_VALUE      0x200000
-#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x2000000
-#define ADF_SSMWDT_OFFSET              0x54
-#define ADF_SSMWDTPKE_OFFSET           0x58
-#define ADF_SSMWDT(i)          (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
-#define ADF_SSMWDTPKE(i)       (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
-
-/* Error detection and correction */
-#define ADF_GEN2_AE_CTX_ENABLES(i)     ((i) * 0x1000 + 0x20818)
-#define ADF_GEN2_AE_MISC_CONTROL(i)    ((i) * 0x1000 + 0x20960)
-#define ADF_GEN2_ENABLE_AE_ECC_ERR     BIT(28)
-#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR     (BIT(24) | BIT(12))
-#define ADF_GEN2_UERRSSMSH(i)          ((i) * 0x4000 + 0x18)
-#define ADF_GEN2_CERRSSMSH(i)          ((i) * 0x4000 + 0x10)
-#define ADF_GEN2_ERRSSMSH_EN           BIT(3)
-
-/* Interrupts */
-#define ADF_GEN2_SMIAPF0_MASK_OFFSET    (0x3A000 + 0x28)
-#define ADF_GEN2_SMIAPF1_MASK_OFFSET    (0x3A000 + 0x30)
-#define ADF_GEN2_SMIA1_MASK             0x1
-
-u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
-u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
-void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
-void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
-                          int num_a_regs, int num_b_regs);
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
-void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
-void adf_gen2_get_arb_info(struct arb_info *arb_info);
-void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
-u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
-void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
-
-#endif
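
As a worked example of the ring CSR addressing used by these macros: each bank owns a 4 KB window (ADF_RING_BUNDLE_SIZE) and each ring register is a 32-bit slot inside it, so READ_CSR_RING_HEAD resolves to base + 0x1000 * bank + 0xC0 + 4 * ring:

#include <stdio.h>

#define ADF_RING_BUNDLE_SIZE   0x1000
#define ADF_RING_CSR_RING_HEAD 0x0C0

int main(void)
{
        unsigned int bank = 2, ring = 5;

        /* Same arithmetic as READ_CSR_RING_HEAD(). */
        unsigned int off = ADF_RING_BUNDLE_SIZE * bank +
                           ADF_RING_CSR_RING_HEAD + (ring << 2);

        printf("bank %u ring %u: head CSR at 0x%X\n", bank, ring, off); /* 0x20D4 */
        return 0;
}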
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c
deleted file mode 100644 (file)
index 70ef119..0000000
+++ /dev/null
@@ -1,399 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2021 Intel Corporation */
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen2_pfvf.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_pf_proto.h"
-#include "adf_pfvf_vf_proto.h"
-#include "adf_pfvf_utils.h"
-
-/* VF2PF interrupts */
-#define ADF_GEN2_VF_MSK                        0xFFFF
-#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask)        (((vf_mask) & ADF_GEN2_VF_MSK) << 9)
-
-#define ADF_GEN2_PF_PF2VF_OFFSET(i)    (0x3A000 + 0x280 + ((i) * 0x04))
-#define ADF_GEN2_VF_PF2VF_OFFSET       0x200
-
-#define ADF_GEN2_CSR_IN_USE            0x6AC2
-#define ADF_GEN2_CSR_IN_USE_MASK       0xFFFE
-
-enum gen2_csr_pos {
-       ADF_GEN2_CSR_PF2VF_OFFSET       =  0,
-       ADF_GEN2_CSR_VF2PF_OFFSET       = 16,
-};
-
-#define ADF_PFVF_GEN2_MSGTYPE_SHIFT    2
-#define ADF_PFVF_GEN2_MSGTYPE_MASK     0x0F
-#define ADF_PFVF_GEN2_MSGDATA_SHIFT    6
-#define ADF_PFVF_GEN2_MSGDATA_MASK     0x3FF
-
-static const struct pfvf_csr_format csr_gen2_fmt = {
-       { ADF_PFVF_GEN2_MSGTYPE_SHIFT, ADF_PFVF_GEN2_MSGTYPE_MASK },
-       { ADF_PFVF_GEN2_MSGDATA_SHIFT, ADF_PFVF_GEN2_MSGDATA_MASK },
-};
-
-#define ADF_PFVF_MSG_RETRY_DELAY       5
-#define ADF_PFVF_MSG_MAX_RETRIES       3
-
-static u32 adf_gen2_pf_get_pfvf_offset(u32 i)
-{
-       return ADF_GEN2_PF_PF2VF_OFFSET(i);
-}
-
-static u32 adf_gen2_vf_get_pfvf_offset(u32 i)
-{
-       return ADF_GEN2_VF_PF2VF_OFFSET;
-}
-
-static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
-       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
-       if (vf_mask & ADF_GEN2_VF_MSK) {
-               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-                         & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-       }
-}
-
-static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
-       u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-                 | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-}
-
-static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 sources, disabled, pending;
-       u32 errsou3, errmsk3;
-
-       /* Get the interrupt sources triggered by VFs */
-       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
-       sources = ADF_GEN2_ERR_REG_VF2PF(errsou3);
-
-       if (!sources)
-               return 0;
-
-       /* Get the already disabled interrupts */
-       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
-       disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3);
-
-       pending = sources & ~disabled;
-       if (!pending)
-               return 0;
-
-       /* Due to HW limitations, when disabling the interrupts, we can't
-        * just disable the requested sources, as this would lead to missed
-        * interrupts if ERRSOU3 changes just before writing to ERRMSK3.
-        * To work around it, disable all and re-enable only the sources that
-        * are not in vf_mask and were not already disabled. Re-enabling will
-        * trigger a new interrupt for the sources that have changed in the
-        * meantime, if any.
-        */
-       errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-
-       errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-
-       /* Return the sources of the (new) interrupt(s) */
-       return pending;
-}
-
-static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset)
-{
-       return ADF_PFVF_INT << offset;
-}
-
-static u32 gen2_csr_msg_to_position(u32 csr_msg, enum gen2_csr_pos offset)
-{
-       return (csr_msg & 0xFFFF) << offset;
-}
-
-static u32 gen2_csr_msg_from_position(u32 csr_val, enum gen2_csr_pos offset)
-{
-       return (csr_val >> offset) & 0xFFFF;
-}
-
-static bool gen2_csr_is_in_use(u32 msg, enum gen2_csr_pos offset)
-{
-       return ((msg >> offset) & ADF_GEN2_CSR_IN_USE_MASK) == ADF_GEN2_CSR_IN_USE;
-}
-
-static void gen2_csr_clear_in_use(u32 *msg, enum gen2_csr_pos offset)
-{
-       *msg &= ~(ADF_GEN2_CSR_IN_USE_MASK << offset);
-}
-
-static void gen2_csr_set_in_use(u32 *msg, enum gen2_csr_pos offset)
-{
-       *msg |= (ADF_GEN2_CSR_IN_USE << offset);
-}
-
-static bool is_legacy_user_pfvf_message(u32 msg)
-{
-       return !(msg & ADF_PFVF_MSGORIGIN_SYSTEM);
-}
-
-static bool is_pf2vf_notification(u8 msg_type)
-{
-       switch (msg_type) {
-       case ADF_PF2VF_MSGTYPE_RESTARTING:
-               return true;
-       default:
-               return false;
-       }
-}
-
-static bool is_vf2pf_notification(u8 msg_type)
-{
-       switch (msg_type) {
-       case ADF_VF2PF_MSGTYPE_INIT:
-       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
-               return true;
-       default:
-               return false;
-       }
-}
-
-struct pfvf_gen2_params {
-       u32 pfvf_offset;
-       struct mutex *csr_lock; /* lock preventing concurrent access of CSR */
-       enum gen2_csr_pos local_offset;
-       enum gen2_csr_pos remote_offset;
-       bool (*is_notification_message)(u8 msg_type);
-       u8 compat_ver;
-};
-
-static int adf_gen2_pfvf_send(struct adf_accel_dev *accel_dev,
-                             struct pfvf_message msg,
-                             struct pfvf_gen2_params *params)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       enum gen2_csr_pos remote_offset = params->remote_offset;
-       enum gen2_csr_pos local_offset = params->local_offset;
-       unsigned int retries = ADF_PFVF_MSG_MAX_RETRIES;
-       struct mutex *lock = params->csr_lock;
-       u32 pfvf_offset = params->pfvf_offset;
-       u32 int_bit;
-       u32 csr_val;
-       u32 csr_msg;
-       int ret;
-
-       /* Gen2 messages, both PF->VF and VF->PF, are all 16 bits long. This
-        * allows us to build and read messages as if they were all 0 based.
-        * However, send and receive share a single 32-bit register,
-        * so we need to shift and/or mask the message half before decoding
-        * it and after encoding it. Which one to shift depends on the
-        * direction.
-        */
-
-       int_bit = gen2_csr_get_int_bit(local_offset);
-
-       csr_msg = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen2_fmt);
-       if (unlikely(!csr_msg))
-               return -EINVAL;
-
-       /* Prepare for CSR format, shifting the wire message in place and
-        * setting the in use pattern
-        */
-       csr_msg = gen2_csr_msg_to_position(csr_msg, local_offset);
-       gen2_csr_set_in_use(&csr_msg, remote_offset);
-
-       mutex_lock(lock);
-
-start:
-       /* Check if the PFVF CSR is in use by remote function */
-       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
-       if (gen2_csr_is_in_use(csr_val, local_offset)) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "PFVF CSR in use by remote function\n");
-               goto retry;
-       }
-
-       /* Attempt to get ownership of the PFVF CSR */
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_msg | int_bit);
-
-       /* Wait for confirmation from remote func it received the message */
-       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & int_bit),
-                               ADF_PFVF_MSG_ACK_DELAY_US,
-                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
-                               true, pmisc_addr, pfvf_offset);
-       if (unlikely(ret < 0)) {
-               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
-               csr_val &= ~int_bit;
-       }
-
-       /* For fire-and-forget notifications, the receiver does not clear
-        * the in-use pattern. This is used to detect collisions.
-        */
-       if (params->is_notification_message(msg.type) && csr_val != csr_msg) {
-               /* Collision must have overwritten the message */
-               dev_err(&GET_DEV(accel_dev),
-                       "Collision on notification - PFVF CSR overwritten by remote function\n");
-               goto retry;
-       }
-
-       /* If the far side did not clear the in-use pattern it is either
-        * 1) Notification - message left intact to detect collision
-        * 2) Older protocol (compatibility version < 3) on the far side
-        *    where the sender is responsible for clearing the in-use
-        *    pattern after the receiver has acknowledged receipt.
-        * In either case, clear the in-use pattern now.
-        */
-       if (gen2_csr_is_in_use(csr_val, remote_offset)) {
-               gen2_csr_clear_in_use(&csr_val, remote_offset);
-               ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
-       }
-
-out:
-       mutex_unlock(lock);
-       return ret;
-
-retry:
-       if (--retries) {
-               msleep(ADF_PFVF_MSG_RETRY_DELAY);
-               goto start;
-       } else {
-               ret = -EBUSY;
-               goto out;
-       }
-}
-
-static struct pfvf_message adf_gen2_pfvf_recv(struct adf_accel_dev *accel_dev,
-                                             struct pfvf_gen2_params *params)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       enum gen2_csr_pos remote_offset = params->remote_offset;
-       enum gen2_csr_pos local_offset = params->local_offset;
-       u32 pfvf_offset = params->pfvf_offset;
-       struct pfvf_message msg = { 0 };
-       u32 int_bit;
-       u32 csr_val;
-       u16 csr_msg;
-
-       int_bit = gen2_csr_get_int_bit(local_offset);
-
-       /* Read message */
-       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
-       if (!(csr_val & int_bit)) {
-               dev_info(&GET_DEV(accel_dev),
-                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
-               return msg;
-       }
-
-       /* Extract the message from the CSR */
-       csr_msg = gen2_csr_msg_from_position(csr_val, local_offset);
-
-       /* Ignore legacy non-system (non-kernel) messages */
-       if (unlikely(is_legacy_user_pfvf_message(csr_msg))) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Ignored non-system message (0x%.8x);\n", csr_val);
-               /* Because this must be a legacy message, the far side is
-                * responsible for clearing the in-use pattern, so don't
-                * clear it here.
-                */
-               return msg;
-       }
-
-       /* Return the pfvf_message format */
-       msg = adf_pfvf_message_of(accel_dev, csr_msg, &csr_gen2_fmt);
-
-       /* The in-use pattern is not cleared for notifications (so that
-        * it can be used for collision detection) or older implementations
-        */
-       if (params->compat_ver >= ADF_PFVF_COMPAT_FAST_ACK &&
-           !params->is_notification_message(msg.type))
-               gen2_csr_clear_in_use(&csr_val, remote_offset);
-
-       /* To ACK, clear the INT bit */
-       csr_val &= ~int_bit;
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val);
-
-       return msg;
-}
-
-static int adf_gen2_pf2vf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                              u32 pfvf_offset, struct mutex *csr_lock)
-{
-       struct pfvf_gen2_params params = {
-               .csr_lock = csr_lock,
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .is_notification_message = is_pf2vf_notification,
-       };
-
-       return adf_gen2_pfvf_send(accel_dev, msg, &params);
-}
-
-static int adf_gen2_vf2pf_send(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                              u32 pfvf_offset, struct mutex *csr_lock)
-{
-       struct pfvf_gen2_params params = {
-               .csr_lock = csr_lock,
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .is_notification_message = is_vf2pf_notification,
-       };
-
-       return adf_gen2_pfvf_send(accel_dev, msg, &params);
-}
-
-static struct pfvf_message adf_gen2_pf2vf_recv(struct adf_accel_dev *accel_dev,
-                                              u32 pfvf_offset, u8 compat_ver)
-{
-       struct pfvf_gen2_params params = {
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .is_notification_message = is_pf2vf_notification,
-               .compat_ver = compat_ver,
-       };
-
-       return adf_gen2_pfvf_recv(accel_dev, &params);
-}
-
-static struct pfvf_message adf_gen2_vf2pf_recv(struct adf_accel_dev *accel_dev,
-                                              u32 pfvf_offset, u8 compat_ver)
-{
-       struct pfvf_gen2_params params = {
-               .pfvf_offset = pfvf_offset,
-               .local_offset = ADF_GEN2_CSR_VF2PF_OFFSET,
-               .remote_offset = ADF_GEN2_CSR_PF2VF_OFFSET,
-               .is_notification_message = is_vf2pf_notification,
-               .compat_ver = compat_ver,
-       };
-
-       return adf_gen2_pfvf_recv(accel_dev, &params);
-}
-
-void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
-       pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset;
-       pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset;
-       pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
-       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts;
-       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts;
-       pfvf_ops->send_msg = adf_gen2_pf2vf_send;
-       pfvf_ops->recv_msg = adf_gen2_vf2pf_recv;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_pf_pfvf_ops);
-
-void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
-       pfvf_ops->get_pf2vf_offset = adf_gen2_vf_get_pfvf_offset;
-       pfvf_ops->get_vf2pf_offset = adf_gen2_vf_get_pfvf_offset;
-       pfvf_ops->send_msg = adf_gen2_vf2pf_send;
-       pfvf_ops->recv_msg = adf_gen2_pf2vf_recv;
-}
-EXPORT_SYMBOL_GPL(adf_gen2_init_vf_pfvf_ops);
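
The wrappers above capture the gen2 PF/VF symmetry: PF->VF and VF->PF traffic share one generic send engine and one generic receive engine, and the only difference between the two directions is which half of the shared CSR is treated as local and which as remote; the offsets are simply swapped. A minimal user-space sketch of that parameterization (the struct, names and offsets below are illustrative, not the driver's):

  #include <stdio.h>

  struct link_params {
          unsigned int local_off;         /* CSR half this side writes */
          unsigned int remote_off;        /* CSR half the peer writes */
  };

  /* One engine serves both directions; only the params differ */
  static void send_generic(const struct link_params *p, unsigned int msg)
  {
          printf("msg 0x%x: write at 0x%x, check 0x%x for collisions\n",
                 msg, p->local_off, p->remote_off);
  }

  int main(void)
  {
          struct link_params pf2vf = { .local_off = 0x0, .remote_off = 0x4 };
          struct link_params vf2pf = { .local_off = 0x4, .remote_off = 0x0 };

          send_generic(&pf2vf, 0x1);
          send_generic(&vf2pf, 0x2);
          return 0;
  }
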
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.h
deleted file mode 100644 (file)
index a716545..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_GEN2_PFVF_H
-#define ADF_GEN2_PFVF_H
-
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-
-#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
-
-#if defined(CONFIG_PCI_IOV)
-void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
-void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
-#else
-static inline void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
-}
-
-static inline void adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
-}
-#endif
-
-#endif /* ADF_GEN2_PFVF_H */
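
The #else branch above is the usual pattern for compiling out SR-IOV support: the stubs still populate enable_comms, pointing it at adf_pfvf_comms_disabled (which simply returns 0), so callers can invoke the ops unconditionally, with no #ifdef CONFIG_PCI_IOV of their own. The same idea reduced to a standalone sketch with made-up names:

  #include <stdio.h>

  struct ops { int (*enable_comms)(void); };

  static int comms_disabled(void) { return 0; }   /* no-op fallback */
  static int comms_real(void)     { return 1; }   /* full implementation */

  #ifdef HAVE_IOV
  static void init_ops(struct ops *o) { o->enable_comms = comms_real; }
  #else
  static void init_ops(struct ops *o) { o->enable_comms = comms_disabled; }
  #endif

  int main(void)
  {
          struct ops o;

          init_ops(&o);
          /* The caller never needs an #ifdef of its own */
          printf("enable_comms -> %d\n", o.enable_comms());
          return 0;
  }
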
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/qat/qat_common/adf_gen4_dc.c
deleted file mode 100644 (file)
index 5859238..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_comp.h"
-#include "icp_qat_hw_20_comp.h"
-#include "adf_gen4_dc.h"
-
-static void qat_comp_build_deflate(void *ctx)
-{
-       struct icp_qat_fw_comp_req *req_tmpl =
-                               (struct icp_qat_fw_comp_req *)ctx;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
-       struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
-       struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
-       struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
-       u32 upper_val;
-       u32 lower_val;
-
-       memset(req_tmpl, 0, sizeof(*req_tmpl));
-       header->hdr_flags =
-               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
-       header->comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
-                                           QAT_COMN_PTR_TYPE_SGL);
-       header->serv_specif_flags =
-               ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
-                                           ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-                                           ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
-       hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
-       hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
-       hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
-       hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
-       hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
-       hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
-       hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
-       hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
-
-       upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
-       lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
-
-       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
-       cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
-
-       req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
-       req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
-       req_pars->req_par_flags =
-               ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
-                                                     ICP_QAT_FW_COMP_EOP,
-                                                     ICP_QAT_FW_COMP_BFINAL,
-                                                     ICP_QAT_FW_COMP_CNV,
-                                                     ICP_QAT_FW_COMP_CNV_RECOVERY,
-                                                     ICP_QAT_FW_COMP_NO_CNV_DFX,
-                                                     ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
-                                                     ICP_QAT_FW_COMP_NO_XXHASH_ACC,
-                                                     ICP_QAT_FW_COMP_CNV_ERROR_NONE,
-                                                     ICP_QAT_FW_COMP_NO_APPEND_CRC,
-                                                     ICP_QAT_FW_COMP_NO_DROP_DATA);
-
-       /* Fill second half of the template for decompression */
-       memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
-       req_tmpl++;
-       header = &req_tmpl->comn_hdr;
-       header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
-       cd_pars = &req_tmpl->cd_pars;
-
-       hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
-       lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
-
-       cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
-       cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
-}
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
-       dc_ops->build_deflate_ctx = qat_comp_build_deflate;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
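
Worth noting in qat_comp_build_deflate(): the memcpy(req_tmpl + 1, ...) at the end means the ctx buffer must have room for two request templates laid out back to back, compression in slot 0 and decompression in slot 1, with only the differing fields patched after the copy. The shape of that pattern as a standalone sketch (the struct and field values are illustrative, not the firmware layout):

  #include <stdio.h>
  #include <string.h>

  struct req { int cmd; int cfg; };

  enum { CMD_COMPRESS = 1, CMD_DECOMPRESS = 2 };

  /* ctx must point at room for TWO templates: slot 0 compression,
   * slot 1 decompression */
  static void build_templates(void *ctx)
  {
          struct req *t = ctx;

          memset(t, 0, sizeof(*t));
          t->cmd = CMD_COMPRESS;
          t->cfg = 0xc0;

          /* Copy slot 0 into slot 1, then patch the fields that differ */
          memcpy(t + 1, t, sizeof(*t));
          t++;
          t->cmd = CMD_DECOMPRESS;
          t->cfg = 0xd0;
  }

  int main(void)
  {
          struct req tmpl[2];

          build_templates(tmpl);
          printf("comp cmd=%d, decomp cmd=%d\n", tmpl[0].cmd, tmpl[1].cmd);
          return 0;
  }
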
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/qat/qat_common/adf_gen4_dc.h
deleted file mode 100644 (file)
index 0b1a677..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_DC_H
-#define ADF_GEN4_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
deleted file mode 100644 (file)
index 3148a62..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2020 Intel Corporation */
-#include <linux/iopoll.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_hw_data.h"
-
-static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
-{
-       return BUILD_RING_BASE_ADDR(addr, size);
-}
-
-static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
-{
-       return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
-}
-
-static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               u32 value)
-{
-       WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
-}
-
-static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
-{
-       return READ_CSR_E_STAT(csr_base_addr, bank);
-}
-
-static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                                 u32 value)
-{
-       WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
-}
-
-static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
-                               dma_addr_t addr)
-{
-       WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
-}
-
-static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
-                              u32 value)
-{
-       WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
-{
-       WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
-}
-
-static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
-{
-       WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
-                                 u32 value)
-{
-       WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
-}
-
-static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
-                                      u32 value)
-{
-       WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
-}
-
-static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
-                                     u32 value)
-{
-       WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
-}
-
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
-{
-       csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
-       csr_ops->read_csr_ring_head = read_csr_ring_head;
-       csr_ops->write_csr_ring_head = write_csr_ring_head;
-       csr_ops->read_csr_ring_tail = read_csr_ring_tail;
-       csr_ops->write_csr_ring_tail = write_csr_ring_tail;
-       csr_ops->read_csr_e_stat = read_csr_e_stat;
-       csr_ops->write_csr_ring_config = write_csr_ring_config;
-       csr_ops->write_csr_ring_base = write_csr_ring_base;
-       csr_ops->write_csr_int_flag = write_csr_int_flag;
-       csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
-       csr_ops->write_csr_int_col_en = write_csr_int_col_en;
-       csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
-       csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
-       csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
-
-static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
-                                              u32 *lower)
-{
-       *lower = lower_32_bits(value);
-       *upper = upper_32_bits(value);
-}
-
-void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
-       u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
-       u32 ssm_wdt_pke_high = 0;
-       u32 ssm_wdt_pke_low = 0;
-       u32 ssm_wdt_high = 0;
-       u32 ssm_wdt_low = 0;
-
-       /* Convert the 64-bit WDT timer value into two 32-bit values for
-        * MMIO writes to the 32-bit CSRs.
-        */
-       adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
-       adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
-                                   &ssm_wdt_pke_low);
-
-       /* Enable WDT for sym and dc */
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
-       /* Enable WDT for pke */
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
-       ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
-}
-EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
-
-int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);
-
-static int reset_ring_pair(void __iomem *csr, u32 bank_number)
-{
-       u32 status;
-       int ret;
-
-       /* Request the ring pair reset by writing 1 to BIT(0) of rpresetctl.
-        * Since the rpresetctl registers have no RW fields, there is no need
-        * to preserve the values of other bits; just write directly.
-        */
-       ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
-                  ADF_WQM_CSR_RPRESETCTL_RESET);
-
-       /* Read rpresetsts register and wait for rp reset to complete */
-       ret = read_poll_timeout(ADF_CSR_RD, status,
-                               status & ADF_WQM_CSR_RPRESETSTS_STATUS,
-                               ADF_RPRESET_POLL_DELAY_US,
-                               ADF_RPRESET_POLL_TIMEOUT_US, true,
-                               csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
-       if (!ret) {
-               /* When rp reset is done, clear rpresetsts */
-               ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
-                          ADF_WQM_CSR_RPRESETSTS_STATUS);
-       }
-
-       return ret;
-}
-
-int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
-       void __iomem *csr;
-       int ret;
-
-       if (bank_number >= hw_data->num_banks)
-               return -EINVAL;
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "ring pair reset for bank:%d\n", bank_number);
-
-       csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
-       ret = reset_ring_pair(csr, bank_number);
-       if (ret)
-               dev_err(&GET_DEV(accel_dev),
-                       "ring pair reset failed (timeout)\n");
-       else
-               dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
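
reset_ring_pair() is a request/poll/acknowledge handshake: write BIT(0) of rpresetctl to request the reset, poll rpresetsts until hardware reports completion, then write the status bit back to clear it (write-one-to-clear semantics, as the code above suggests). The same control flow as a standalone sketch, with a toy register pair standing in for the CSRs and a bounded loop standing in for read_poll_timeout():

  #include <stdio.h>

  static unsigned int rpresetctl, rpresetsts;

  /* Toy model: "hardware" completes the reset on the third poll */
  static unsigned int poll_sts(int tries)
  {
          if (tries >= 3)
                  rpresetsts |= 1u;
          return rpresetsts;
  }

  static int reset_ring_pair_sketch(void)
  {
          int tries;

          rpresetctl = 1u;                        /* request reset (BIT(0)) */

          for (tries = 0; tries < 1000; tries++) {
                  if (poll_sts(tries) & 1u) {
                          rpresetsts &= ~1u;      /* write-1-to-clear, modeled */
                          return 0;
                  }
          }
          return -1;                              /* timeout */
  }

  int main(void)
  {
          printf("reset %s\n", reset_ring_pair_sketch() ? "timed out" : "done");
          return 0;
  }
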
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
deleted file mode 100644 (file)
index 4fb4b3d..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2020 Intel Corporation */
-#ifndef ADF_GEN4_HW_CSR_DATA_H_
-#define ADF_GEN4_HW_CSR_DATA_H_
-
-#include "adf_accel_devices.h"
-#include "adf_cfg_common.h"
-
-/* Transport access */
-#define ADF_BANK_INT_SRC_SEL_MASK      0x44UL
-#define ADF_RING_CSR_RING_CONFIG       0x1000
-#define ADF_RING_CSR_RING_LBASE                0x1040
-#define ADF_RING_CSR_RING_UBASE                0x1080
-#define ADF_RING_CSR_RING_HEAD         0x0C0
-#define ADF_RING_CSR_RING_TAIL         0x100
-#define ADF_RING_CSR_E_STAT            0x14C
-#define ADF_RING_CSR_INT_FLAG          0x170
-#define ADF_RING_CSR_INT_SRCSEL                0x174
-#define ADF_RING_CSR_INT_COL_CTL       0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL  0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
-#define ADF_RING_CSR_INT_COL_EN                0x17C
-#define ADF_RING_CSR_ADDR_OFFSET       0x100000
-#define ADF_RING_BUNDLE_SIZE           0x2000
-
-#define BUILD_RING_BASE_ADDR(addr, size) \
-       ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
-       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
-       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
-       ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value)  \
-do { \
-       void __iomem *_csr_base_addr = csr_base_addr; \
-       u32 _bank = bank;                                               \
-       u32 _ring = ring;                                               \
-       dma_addr_t _value = value;                                      \
-       u32 l_base = 0, u_base = 0;                                     \
-       l_base = lower_32_bits(_value);                                 \
-       u_base = upper_32_bits(_value);                                 \
-       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
-                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
-                  ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base);   \
-       ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET,         \
-                  ADF_RING_BUNDLE_SIZE * (_bank) +                     \
-                  ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base);   \
-} while (0)
-
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_HEAD + ((ring) << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
-#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_FLAG, (value))
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_COL_EN, (value))
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_COL_CTL, \
-                  ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_INT_FLAG_AND_COL, (value))
-
-/* Arbiter configuration */
-#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
-
-#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
-       ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
-                  ADF_RING_BUNDLE_SIZE * (bank) + \
-                  ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-
-/* Default ring mapping */
-#define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \
-       (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
-         SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
-        ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
-         SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
-
-/* WDT timers
- *
- * The timeout is expressed in clock cycles. The clock speed may vary
- * across products, but this value should correspond to a few milliseconds.
- */
-#define ADF_SSM_WDT_DEFAULT_VALUE      0x7000000ULL
-#define ADF_SSM_WDT_PKE_DEFAULT_VALUE  0x8000000
-#define ADF_SSMWDTL_OFFSET             0x54
-#define ADF_SSMWDTH_OFFSET             0x5C
-#define ADF_SSMWDTPKEL_OFFSET          0x58
-#define ADF_SSMWDTPKEH_OFFSET          0x60
-
-/* Ring reset */
-#define ADF_RPRESET_POLL_TIMEOUT_US    (5 * USEC_PER_SEC)
-#define ADF_RPRESET_POLL_DELAY_US      20
-#define ADF_WQM_CSR_RPRESETCTL_RESET   BIT(0)
-#define ADF_WQM_CSR_RPRESETCTL(bank)   (0x6000 + ((bank) << 3))
-#define ADF_WQM_CSR_RPRESETSTS_STATUS  BIT(0)
-#define ADF_WQM_CSR_RPRESETSTS(bank)   (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
-
-/* Error source registers */
-#define ADF_GEN4_ERRSOU0       (0x41A200)
-#define ADF_GEN4_ERRSOU1       (0x41A204)
-#define ADF_GEN4_ERRSOU2       (0x41A208)
-#define ADF_GEN4_ERRSOU3       (0x41A20C)
-
-/* Error source mask registers */
-#define ADF_GEN4_ERRMSK0       (0x41A210)
-#define ADF_GEN4_ERRMSK1       (0x41A214)
-#define ADF_GEN4_ERRMSK2       (0x41A218)
-#define ADF_GEN4_ERRMSK3       (0x41A21C)
-
-#define ADF_GEN4_VFLNOTIFY     BIT(7)
-
-void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
-int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
-#endif
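
All of the accessor macros above compute the same base expression: ADF_RING_CSR_ADDR_OFFSET (0x100000), plus ADF_RING_BUNDLE_SIZE (0x2000) per bank, plus the per-register offset, plus the ring number scaled to 4-byte registers. A worked example of the address arithmetic in plain C:

  #include <stdio.h>

  #define RING_CSR_ADDR_OFFSET    0x100000u
  #define RING_BUNDLE_SIZE        0x2000u
  #define RING_CSR_RING_HEAD      0x0C0u

  /* Byte offset of the HEAD CSR for (bank, ring), following the
   * READ_CSR_RING_HEAD() arithmetic above */
  static unsigned int ring_head_offset(unsigned int bank, unsigned int ring)
  {
          return RING_CSR_ADDR_OFFSET + RING_BUNDLE_SIZE * bank +
                 RING_CSR_RING_HEAD + (ring << 2);
  }

  int main(void)
  {
          /* bank 2, ring 3: 0x100000 + 0x4000 + 0xC0 + 0xC = 0x1040CC */
          printf("0x%x\n", ring_head_offset(2, 3));
          return 0;
  }
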
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c
deleted file mode 100644 (file)
index 8e8efe9..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2021 Intel Corporation */
-#include <linux/iopoll.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_pfvf.h"
-#include "adf_pfvf_pf_proto.h"
-#include "adf_pfvf_utils.h"
-
-#define ADF_4XXX_PF2VM_OFFSET(i)       (0x40B010 + ((i) * 0x20))
-#define ADF_4XXX_VM2PF_OFFSET(i)       (0x40B014 + ((i) * 0x20))
-
-/* VF2PF interrupt source registers */
-#define ADF_4XXX_VM2PF_SOU             0x41A180
-#define ADF_4XXX_VM2PF_MSK             0x41A1C0
-#define ADF_GEN4_VF_MSK                        0xFFFF
-
-#define ADF_PFVF_GEN4_MSGTYPE_SHIFT    2
-#define ADF_PFVF_GEN4_MSGTYPE_MASK     0x3F
-#define ADF_PFVF_GEN4_MSGDATA_SHIFT    8
-#define ADF_PFVF_GEN4_MSGDATA_MASK     0xFFFFFF
-
-static const struct pfvf_csr_format csr_gen4_fmt = {
-       { ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
-       { ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
-};
-
-static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
-{
-       return ADF_4XXX_PF2VM_OFFSET(i);
-}
-
-static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
-{
-       return ADF_4XXX_VM2PF_OFFSET(i);
-}
-
-static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
-       u32 val;
-
-       val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask;
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val);
-}
-
-static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
-}
-
-static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 sources, disabled, pending;
-
-       /* Get the interrupt sources triggered by VFs */
-       sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU);
-       if (!sources)
-               return 0;
-
-       /* Get the already disabled interrupts */
-       disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK);
-
-       pending = sources & ~disabled;
-       if (!pending)
-               return 0;
-
-       /* Due to HW limitations, when disabling the interrupts, we can't
-        * just disable the requested sources, as this would lead to missed
-        * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK.
-        * To work around it, disable all and re-enable only the sources that
-        * are not in vf_mask and were not already disabled. Re-enabling will
-        * trigger a new interrupt for the sources that have changed in the
-        * meantime, if any.
-        */
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources);
-
-       /* Return the sources of the (new) interrupt(s) */
-       return pending;
-}
-
-static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
-                             struct pfvf_message msg, u32 pfvf_offset,
-                             struct mutex *csr_lock)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 csr_val;
-       int ret;
-
-       csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
-       if (unlikely(!csr_val))
-               return -EINVAL;
-
-       mutex_lock(csr_lock);
-
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
-
-       /* Wait for confirmation from remote that it received the message */
-       ret = read_poll_timeout(ADF_CSR_RD, csr_val, !(csr_val & ADF_PFVF_INT),
-                               ADF_PFVF_MSG_ACK_DELAY_US,
-                               ADF_PFVF_MSG_ACK_MAX_DELAY_US,
-                               true, pmisc_addr, pfvf_offset);
-       if (ret < 0)
-               dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
-
-       mutex_unlock(csr_lock);
-       return ret;
-}
-
-static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
-                                             u32 pfvf_offset, u8 compat_ver)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       struct pfvf_message msg = { 0 };
-       u32 csr_val;
-
-       /* Read message from the CSR */
-       csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
-       if (!(csr_val & ADF_PFVF_INT)) {
-               dev_info(&GET_DEV(accel_dev),
-                        "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val);
-               return msg;
-       }
-
-       /* We can now acknowledge the message reception by clearing the
-        * interrupt bit
-        */
-       ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
-
-       /* Return the pfvf_message format */
-       return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
-}
-
-void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_enable_pf2vf_comms;
-       pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset;
-       pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset;
-       pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts;
-       pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts;
-       pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts;
-       pfvf_ops->send_msg = adf_gen4_pfvf_send;
-       pfvf_ops->recv_msg = adf_gen4_pfvf_recv;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_pf_pfvf_ops);
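
With csr_gen4_fmt, a message is a 6-bit type at bit 2 plus a 24-bit payload at bit 8, and the sender ORs in the interrupt bit to kick the peer, then polls for that bit to clear as the ACK. The pack/unpack arithmetic as a standalone example; ADF_PFVF_INT is not defined in this file, so BIT(0) is assumed below purely for illustration:

  #include <stdio.h>

  #define MSGTYPE_SHIFT   2
  #define MSGTYPE_MASK    0x3Fu
  #define MSGDATA_SHIFT   8
  #define MSGDATA_MASK    0xFFFFFFu
  #define PFVF_INT        0x1u            /* assumed BIT(0), for illustration */

  static unsigned int pack(unsigned int type, unsigned int data)
  {
          return ((type & MSGTYPE_MASK) << MSGTYPE_SHIFT) |
                 ((data & MSGDATA_MASK) << MSGDATA_SHIFT);
  }

  int main(void)
  {
          unsigned int csr = pack(0x05, 0xABCDEF) | PFVF_INT;

          printf("csr=0x%08x type=0x%02x data=0x%06x\n", csr,
                 (csr >> MSGTYPE_SHIFT) & MSGTYPE_MASK,
                 (csr >> MSGDATA_SHIFT) & MSGDATA_MASK);
          return 0;
  }
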
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.h
deleted file mode 100644 (file)
index 17d1b77..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_GEN4_PFVF_H
-#define ADF_GEN4_PFVF_H
-
-#include "adf_accel_devices.h"
-
-#ifdef CONFIG_PCI_IOV
-void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
-#else
-static inline void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
-{
-       pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
-}
-#endif
-
-#endif /* ADF_GEN4_PFVF_H */
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/qat/qat_common/adf_gen4_pm.c
deleted file mode 100644 (file)
index 7037c08..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/bitfield.h>
-#include <linux/iopoll.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_gen4_pm.h"
-#include "adf_cfg_strings.h"
-#include "icp_qat_fw_init_admin.h"
-#include "adf_gen4_hw_data.h"
-#include "adf_cfg.h"
-
-enum qat_pm_host_msg {
-       PM_NO_CHANGE = 0,
-       PM_SET_MIN,
-};
-
-struct adf_gen4_pm_data {
-       struct work_struct pm_irq_work;
-       struct adf_accel_dev *accel_dev;
-       u32 pm_int_sts;
-};
-
-static int send_host_msg(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       u32 msg;
-
-       msg = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
-       if (msg & ADF_GEN4_PM_MSG_PENDING)
-               return -EBUSY;
-
-       /* Send HOST_MSG */
-       msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, PM_SET_MIN);
-       msg |= ADF_GEN4_PM_MSG_PENDING;
-       ADF_CSR_WR(pmisc, ADF_GEN4_PM_HOST_MSG, msg);
-
-       /* Poll status register to make sure the HOST_MSG has been processed */
-       return read_poll_timeout(ADF_CSR_RD, msg,
-                               !(msg & ADF_GEN4_PM_MSG_PENDING),
-                               ADF_GEN4_PM_MSG_POLL_DELAY_US,
-                               ADF_GEN4_PM_POLL_TIMEOUT_US, true, pmisc,
-                               ADF_GEN4_PM_HOST_MSG);
-}
-
-static void pm_bh_handler(struct work_struct *work)
-{
-       struct adf_gen4_pm_data *pm_data =
-               container_of(work, struct adf_gen4_pm_data, pm_irq_work);
-       struct adf_accel_dev *accel_dev = pm_data->accel_dev;
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       u32 pm_int_sts = pm_data->pm_int_sts;
-       u32 val;
-
-       /* PM Idle interrupt */
-       if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
-               /* Issue host message to FW */
-               if (send_host_msg(accel_dev))
-                       dev_warn_ratelimited(&GET_DEV(accel_dev),
-                                            "Failed to send host msg to FW\n");
-       }
-
-       /* Clear interrupt status */
-       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
-
-       /* Reenable PM interrupt */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       val &= ~ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
-
-       kfree(pm_data);
-}
-
-bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       struct adf_gen4_pm_data *pm_data = NULL;
-       u32 errsou2;
-       u32 errmsk2;
-       u32 val;
-
-       /* Only handle the interrupt triggered by PM */
-       errmsk2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       if (errmsk2 & ADF_GEN4_PM_SOU)
-               return false;
-
-       errsou2 = ADF_CSR_RD(pmisc, ADF_GEN4_ERRSOU2);
-       if (!(errsou2 & ADF_GEN4_PM_SOU))
-               return false;
-
-       /* Disable interrupt */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       val |= ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
-
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
-
-       pm_data = kzalloc(sizeof(*pm_data), GFP_ATOMIC);
-       if (!pm_data)
-               return false;
-
-       pm_data->pm_int_sts = val;
-       pm_data->accel_dev = accel_dev;
-
-       INIT_WORK(&pm_data->pm_irq_work, pm_bh_handler);
-       adf_misc_wq_queue_work(&pm_data->pm_irq_work);
-
-       return true;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_handle_pm_interrupt);
-
-int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
-       int ret;
-       u32 val;
-
-       ret = adf_init_admin_pm(accel_dev, ADF_GEN4_PM_DEFAULT_IDLE_FILTER);
-       if (ret)
-               return ret;
-
-       /* Enable default PM interrupts: IDLE, THROTTLE */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
-       val |= ADF_GEN4_PM_INT_EN_DEFAULT;
-
-       /* Clear interrupt status */
-       val |= ADF_GEN4_PM_INT_STS_MASK;
-       ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, val);
-
-       /* Unmask PM Interrupt */
-       val = ADF_CSR_RD(pmisc, ADF_GEN4_ERRMSK2);
-       val &= ~ADF_GEN4_PM_SOU;
-       ADF_CSR_WR(pmisc, ADF_GEN4_ERRMSK2, val);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_enable_pm);
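
send_host_msg() is a single-slot mailbox: the driver may only write HOST_MSG while PENDING is clear, it sets the payload via FIELD_PREP() together with the PENDING bit, and firmware acknowledges by clearing PENDING, which the driver polls for. A standalone sketch of the bit packing; field_prep() below is a hand-rolled stand-in for the kernel's FIELD_PREP():

  #include <stdio.h>

  #define PM_MSG_PENDING          0x1u            /* BIT(0) */
  #define PM_MSG_PAYLOAD_MASK     0x1FFFFFFEu     /* GENMASK(28, 1) */
  #define PM_SET_MIN              1u

  /* Shift val into the mask's field position, like FIELD_PREP() */
  static unsigned int field_prep(unsigned int mask, unsigned int val)
  {
          return (val * (mask & -mask)) & mask;
  }

  int main(void)
  {
          unsigned int msg = field_prep(PM_MSG_PAYLOAD_MASK, PM_SET_MIN) |
                             PM_MSG_PENDING;

          /* Firmware clears PENDING once processed; the driver polls
           * for that with read_poll_timeout() */
          printf("HOST_MSG write: 0x%08x\n", msg);
          return 0;
  }
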
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/qat/qat_common/adf_gen4_pm.h
deleted file mode 100644 (file)
index f8f8a9e..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_PM_H
-#define ADF_GEN4_PM_H
-
-#include "adf_accel_devices.h"
-
-/* Power management registers */
-#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
-
-/* Power management */
-#define ADF_GEN4_PM_POLL_DELAY_US      20
-#define ADF_GEN4_PM_POLL_TIMEOUT_US    USEC_PER_SEC
-#define ADF_GEN4_PM_MSG_POLL_DELAY_US  (10 * USEC_PER_MSEC)
-#define ADF_GEN4_PM_STATUS             (0x50A00C)
-#define ADF_GEN4_PM_INTERRUPT          (0x50A028)
-
-/* Power management source in ERRSOU2 and ERRMSK2 */
-#define ADF_GEN4_PM_SOU                        BIT(18)
-
-#define ADF_GEN4_PM_IDLE_INT_EN                BIT(18)
-#define ADF_GEN4_PM_THROTTLE_INT_EN    BIT(19)
-#define ADF_GEN4_PM_DRV_ACTIVE         BIT(20)
-#define ADF_GEN4_PM_INIT_STATE         BIT(21)
-#define ADF_GEN4_PM_INT_EN_DEFAULT     (ADF_GEN4_PM_IDLE_INT_EN | \
-                                       ADF_GEN4_PM_THROTTLE_INT_EN)
-
-#define ADF_GEN4_PM_THR_STS    BIT(0)
-#define ADF_GEN4_PM_IDLE_STS   BIT(1)
-#define ADF_GEN4_PM_FW_INT_STS BIT(2)
-#define ADF_GEN4_PM_INT_STS_MASK (ADF_GEN4_PM_THR_STS | \
-                                ADF_GEN4_PM_IDLE_STS | \
-                                ADF_GEN4_PM_FW_INT_STS)
-
-#define ADF_GEN4_PM_MSG_PENDING                        BIT(0)
-#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK       GENMASK(28, 1)
-
-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER                (0x0)
-#define ADF_GEN4_PM_MAX_IDLE_FILTER            (0x7)
-
-int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
-bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
-
-#endif
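
One detail worth calling out: ADF_GEN4_PM_SOU indexes both ERRSOU2 (the source/status register) and ERRMSK2 (the mask register), and ERRMSK2 has mask semantics, so a set bit disables the source and a cleared bit enables it; hence the |= to disable and the &= ~ to re-enable in adf_gen4_pm.c above. In miniature:

  #include <stdio.h>

  #define PM_SOU  (1u << 18)      /* PM source bit in ERRSOU2/ERRMSK2 */

  static unsigned int errmsk2;

  /* Mask register semantics: a set bit disables (masks) the source */
  static void pm_irq_disable(void) { errmsk2 |=  PM_SOU; }
  static void pm_irq_enable(void)  { errmsk2 &= ~PM_SOU; }

  int main(void)
  {
          pm_irq_disable();
          printf("masked:   0x%08x\n", errmsk2);
          pm_irq_enable();
          printf("unmasked: 0x%08x\n", errmsk2);
          return 0;
  }
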
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
deleted file mode 100644 (file)
index da69566..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_transport_internal.h"
-
-#define ADF_ARB_NUM 4
-#define ADF_ARB_REG_SIZE 0x4
-
-#define WRITE_CSR_ARB_SARCONFIG(csr_addr, arb_offset, index, value) \
-       ADF_CSR_WR(csr_addr, (arb_offset) + \
-       (ADF_ARB_REG_SIZE * (index)), value)
-
-#define WRITE_CSR_ARB_WT2SAM(csr_addr, arb_offset, wt_offset, index, value) \
-       ADF_CSR_WR(csr_addr, ((arb_offset) + (wt_offset)) + \
-       (ADF_ARB_REG_SIZE * (index)), value)
-
-int adf_init_arb(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
-       unsigned long ae_mask = hw_data->ae_mask;
-       u32 arb_off, wt_off, arb_cfg;
-       const u32 *thd_2_arb_cfg;
-       struct arb_info info;
-       int arb, i;
-
-       hw_data->get_arb_info(&info);
-       arb_cfg = info.arb_cfg;
-       arb_off = info.arb_offset;
-       wt_off = info.wt2sam_offset;
-
-       /* Service arbiter configured for 32-byte responses and with the
-        * ring flow-control check enabled. */
-       for (arb = 0; arb < ADF_ARB_NUM; arb++)
-               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, arb, arb_cfg);
-
-       /* Map worker threads to service arbiters */
-       thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev);
-
-       for_each_set_bit(i, &ae_mask, hw_data->num_engines)
-               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, thd_2_arb_cfg[i]);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_init_arb);
-
-void adf_update_ring_arb(struct adf_etr_ring_data *ring)
-{
-       struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       u32 tx_ring_mask = hw_data->tx_rings_mask;
-       u32 shift = hw_data->tx_rx_gap;
-       u32 arben, arben_tx, arben_rx;
-       u32 rx_ring_mask;
-
-       /*
-        * Enable arbitration on a ring only if the TX half of the ring mask
-        * matches the RX part. This results in writes to CSR on both TX and
-        * RX update - only one is necessary, but both are done for
-        * simplicity.
-        */
-       rx_ring_mask = tx_ring_mask << shift;
-       arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
-       arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
-       arben = arben_tx & arben_rx;
-
-       csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
-                                          ring->bank->bank_number, arben);
-}
-
-void adf_exit_arb(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       u32 arb_off, wt_off;
-       struct arb_info info;
-       void __iomem *csr;
-       unsigned int i;
-
-       hw_data->get_arb_info(&info);
-       arb_off = info.arb_offset;
-       wt_off = info.wt2sam_offset;
-
-       if (!accel_dev->transport)
-               return;
-
-       csr = accel_dev->transport->banks[0].csr_addr;
-
-       hw_data->get_arb_info(&info);
-
-       /* Reset arbiter configuration */
-       for (i = 0; i < ADF_ARB_NUM; i++)
-               WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
-
-       /* Unmap worker threads from service arbiters */
-       for (i = 0; i < hw_data->num_engines; i++)
-               WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
-
-       /* Disable arbitration on all rings */
-       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
-               csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
-}
-EXPORT_SYMBOL_GPL(adf_exit_arb);
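
The enable computation in adf_update_ring_arb() intersects the two halves of the bank's ring mask: a ring pair gets arbitration only when both its TX bit and the partner RX bit, tx_rx_gap positions higher, are set. A worked example with illustrative mask values:

  #include <stdio.h>

  int main(void)
  {
          /* Illustrative bank: 8 TX rings in the low byte, their RX
           * partners tx_rx_gap (8 here) bits higher */
          unsigned int tx_ring_mask = 0x00FFu;
          unsigned int shift = 8;
          unsigned int rx_ring_mask = tx_ring_mask << shift;

          /* TX enabled on rings 0-2, RX enabled on rings 0 and 2 only */
          unsigned int ring_mask = 0x0007u | (0x0005u << shift);

          unsigned int arben_tx = ring_mask & tx_ring_mask;
          unsigned int arben_rx = (ring_mask & rx_ring_mask) >> shift;
          unsigned int arben = arben_tx & arben_rx;

          /* Only rings with both halves active get arbitration: 0x05 */
          printf("arben = 0x%02x\n", arben);
          return 0;
  }
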
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
deleted file mode 100644 (file)
index 0985f64..0000000
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static LIST_HEAD(service_table);
-static DEFINE_MUTEX(service_lock);
-
-static void adf_service_add(struct service_hndl *service)
-{
-       mutex_lock(&service_lock);
-       list_add(&service->list, &service_table);
-       mutex_unlock(&service_lock);
-}
-
-int adf_service_register(struct service_hndl *service)
-{
-       memset(service->init_status, 0, sizeof(service->init_status));
-       memset(service->start_status, 0, sizeof(service->start_status));
-       adf_service_add(service);
-       return 0;
-}
-
-static void adf_service_remove(struct service_hndl *service)
-{
-       mutex_lock(&service_lock);
-       list_del(&service->list);
-       mutex_unlock(&service_lock);
-}
-
-int adf_service_unregister(struct service_hndl *service)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
-               if (service->init_status[i] || service->start_status[i]) {
-                       pr_err("QAT: Could not remove active service\n");
-                       return -EFAULT;
-               }
-       }
-       adf_service_remove(service);
-       return 0;
-}
-
-/**
- * adf_dev_init() - Init data structures and services for the given accel device
- * @accel_dev: Pointer to acceleration device.
- *
- * Initialize the ring data structures and the admin comms and arbitration
- * services.
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_dev_init(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int ret;
-
-       if (!hw_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to init device - hw_data not set\n");
-               return -EFAULT;
-       }
-
-       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
-           !accel_dev->is_vf) {
-               dev_err(&GET_DEV(accel_dev), "Device not configured\n");
-               return -EFAULT;
-       }
-
-       if (adf_init_etr_data(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to initialize etr\n");
-               return -EFAULT;
-       }
-
-       if (hw_data->init_device && hw_data->init_device(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
-               return -EFAULT;
-       }
-
-       if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to initialize admin comms\n");
-               return -EFAULT;
-       }
-
-       if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to initialize hw arbiter\n");
-               return -EFAULT;
-       }
-
-       if (adf_ae_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to initialise Acceleration Engine\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
-
-       if (adf_ae_fw_load(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to load acceleration FW\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
-
-       if (hw_data->alloc_irq(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
-
-       hw_data->enable_ints(accel_dev);
-       hw_data->enable_error_correction(accel_dev);
-
-       ret = hw_data->pfvf_ops.enable_comms(accel_dev);
-       if (ret)
-               return ret;
-
-       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
-           accel_dev->is_vf) {
-               if (qat_crypto_vf_dev_config(accel_dev))
-                       return -EFAULT;
-       }
-
-       /*
-        * Subservice initialisation is divided into two stages: init and start.
-        * This is to facilitate any ordering dependencies between services
-        * prior to starting any of the accelerators.
-        */
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to initialise service %s\n",
-                               service->name);
-                       return -EFAULT;
-               }
-               set_bit(accel_dev->accel_id, service->init_status);
-       }
-
-       return 0;
-}
-
-/**
- * adf_dev_start() - Start acceleration service for the given accel device
- * @accel_dev:    Pointer to acceleration device.
- *
- * Function notifies all the registered services that the acceleration device
- * is ready to be used.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-static int adf_dev_start(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       set_bit(ADF_STATUS_STARTING, &accel_dev->status);
-
-       if (adf_ae_start(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
-
-       if (hw_data->send_admin_init(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
-               return -EFAULT;
-       }
-
-       /* Set ssm watch dog timer */
-       if (hw_data->set_ssm_wdtimer)
-               hw_data->set_ssm_wdtimer(accel_dev);
-
-       /* Enable Power Management */
-       if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
-               return -EFAULT;
-       }
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to start service %s\n",
-                               service->name);
-                       return -EFAULT;
-               }
-               set_bit(accel_dev->accel_id, service->start_status);
-       }
-
-       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
-       set_bit(ADF_STATUS_STARTED, &accel_dev->status);
-
-       if (!list_empty(&accel_dev->crypto_list) &&
-           (qat_algs_register() || qat_asym_algs_register())) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to register crypto algs\n");
-               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
-               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-               return -EFAULT;
-       }
-
-       if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to register compression algs\n");
-               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
-               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-               return -EFAULT;
-       }
-       return 0;
-}
-
-/**
- * adf_dev_stop() - Stop acceleration service for the given accel device
- * @accel_dev:    Pointer to acceleration device.
- *
- * Function notifies all the registered services that the acceleration device
- * is shutting down.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-static void adf_dev_stop(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-       bool wait = false;
-       int ret;
-
-       if (!adf_dev_started(accel_dev) &&
-           !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
-               return;
-
-       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
-       clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
-
-       if (!list_empty(&accel_dev->crypto_list)) {
-               qat_algs_unregister();
-               qat_asym_algs_unregister();
-       }
-
-       if (!list_empty(&accel_dev->compression_list))
-               qat_comp_algs_unregister();
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!test_bit(accel_dev->accel_id, service->start_status))
-                       continue;
-               ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
-               if (!ret) {
-                       clear_bit(accel_dev->accel_id, service->start_status);
-               } else if (ret == -EAGAIN) {
-                       wait = true;
-                       clear_bit(accel_dev->accel_id, service->start_status);
-               }
-       }
-
-       if (wait)
-               msleep(100);
-
-       if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
-               if (adf_ae_stop(accel_dev))
-                       dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
-               else
-                       clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
-       }
-}
-
-/**
- * adf_dev_shutdown() - shut down acceleration services and data structures
- * @accel_dev: Pointer to acceleration device
- *
- * Cleanup the ring data structures and the admin comms and arbitration
- * services.
- */
-static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       if (!hw_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "QAT: Failed to shutdown device - hw_data not set\n");
-               return;
-       }
-
-       if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
-               adf_ae_fw_release(accel_dev);
-               clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
-       }
-
-       if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
-               if (adf_ae_shutdown(accel_dev))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to shutdown Accel Engine\n");
-               else
-                       clear_bit(ADF_STATUS_AE_INITIALISED,
-                                 &accel_dev->status);
-       }
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (!test_bit(accel_dev->accel_id, service->init_status))
-                       continue;
-               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to shutdown service %s\n",
-                               service->name);
-               else
-                       clear_bit(accel_dev->accel_id, service->init_status);
-       }
-
-       hw_data->disable_iov(accel_dev);
-
-       if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
-               hw_data->free_irq(accel_dev);
-               clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
-       }
-
-       /* Delete configuration only if not restarting */
-       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
-               adf_cfg_del_all(accel_dev);
-
-       if (hw_data->exit_arb)
-               hw_data->exit_arb(accel_dev);
-
-       if (hw_data->exit_admin_comms)
-               hw_data->exit_admin_comms(accel_dev);
-
-       adf_cleanup_etr_data(accel_dev);
-       adf_dev_restore(accel_dev);
-}
-
-int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to restart service %s.\n",
-                               service->name);
-       }
-       return 0;
-}
-
-int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
-{
-       struct service_hndl *service;
-       struct list_head *list_itr;
-
-       list_for_each(list_itr, &service_table) {
-               service = list_entry(list_itr, struct service_hndl, list);
-               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to restart service %s.\n",
-                               service->name);
-       }
-       return 0;
-}
-
-static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       int ret;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-
-       if (!ret) {
-               ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
-               if (ret)
-                       return ret;
-
-               ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
-                                                 ADF_SERVICES_ENABLED,
-                                                 services, ADF_STR);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
-{
-       int ret = 0;
-
-       if (!accel_dev)
-               return -EINVAL;
-
-       mutex_lock(&accel_dev->state_lock);
-
-       if (!adf_dev_started(accel_dev)) {
-               dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
-                        accel_dev->accel_id);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (reconfig) {
-               ret = adf_dev_shutdown_cache_cfg(accel_dev);
-               goto out;
-       }
-
-       adf_dev_stop(accel_dev);
-       adf_dev_shutdown(accel_dev);
-
-out:
-       mutex_unlock(&accel_dev->state_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_dev_down);
-
-int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
-{
-       int ret = 0;
-
-       if (!accel_dev)
-               return -EINVAL;
-
-       mutex_lock(&accel_dev->state_lock);
-
-       if (adf_dev_started(accel_dev)) {
-               dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
-                        accel_dev->accel_id);
-               ret = -EALREADY;
-               goto out;
-       }
-
-       if (config && GET_HW_DATA(accel_dev)->dev_config) {
-               ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
-               if (unlikely(ret))
-                       goto out;
-       }
-
-       ret = adf_dev_init(accel_dev);
-       if (unlikely(ret))
-               goto out;
-
-       ret = adf_dev_start(accel_dev);
-
-out:
-       mutex_unlock(&accel_dev->state_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_dev_up);
-
-int adf_dev_restart(struct adf_accel_dev *accel_dev)
-{
-       int ret = 0;
-
-       if (!accel_dev)
-               return -EFAULT;
-
-       adf_dev_down(accel_dev, false);
-
-       ret = adf_dev_up(accel_dev, false);
-       /* If the device is already up, return success */
-       if (ret == -EALREADY)
-               return 0;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_dev_restart);
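
Note that adf_dev_restart() deliberately maps -EALREADY from adf_dev_up() to success: if something else brought the device up between our down and up calls, the end state is still up, which is all a restart asks for. The same tolerance as a standalone sketch:

  #include <stdio.h>
  #include <errno.h>

  static int dev_started;

  static int dev_down(void) { dev_started = 0; return 0; }

  static int dev_up(void)
  {
          if (dev_started)
                  return -EALREADY;       /* benign: already running */
          dev_started = 1;
          return 0;
  }

  /* A concurrent bring-up between our down and up is not an error */
  static int dev_restart(void)
  {
          int ret;

          dev_down();
          ret = dev_up();
          return ret == -EALREADY ? 0 : ret;
  }

  int main(void)
  {
          printf("restart -> %d\n", dev_restart());
          return 0;
  }
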
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
deleted file mode 100644 (file)
index ad9e135..0000000
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_cfg_common.h"
-#include "adf_transport_access_macros.h"
-#include "adf_transport_internal.h"
-
-#define ADF_MAX_NUM_VFS        32
-static struct workqueue_struct *adf_misc_wq;
-
-static int adf_enable_msix(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 msix_num_entries = hw_data->num_banks + 1;
-       int ret;
-
-       if (hw_data->set_msix_rttable)
-               hw_data->set_msix_rttable(accel_dev);
-
-       ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
-                                   msix_num_entries, PCI_IRQ_MSIX);
-       if (unlikely(ret < 0)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to allocate %d MSI-X vectors\n",
-                       msix_num_entries);
-               return ret;
-       }
-       return 0;
-}
-
-static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
-{
-       pci_free_irq_vectors(pci_dev_info->pci_dev);
-}
-
-static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
-{
-       struct adf_etr_bank_data *bank = bank_ptr;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
-                                           0);
-       tasklet_hi_schedule(&bank->resp_handler);
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_PCI_IOV
-void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
-       GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
-       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
-       GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
-       spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
-}
-
-static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       u32 pending;
-
-       spin_lock(&accel_dev->pf.vf2pf_ints_lock);
-       pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
-       spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
-
-       return pending;
-}
-
-static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
-{
-       bool irq_handled = false;
-       unsigned long vf_mask;
-
-       /* Get the interrupt sources triggered by VFs, except for those already disabled */
-       vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
-       if (vf_mask) {
-               struct adf_accel_vf_info *vf_info;
-               int i;
-
-               /*
-                * Handle VF2PF interrupt unless the VF is malicious and
-                * is attempting to flood the host OS with VF2PF interrupts.
-                */
-               for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
-                       vf_info = accel_dev->pf.vf_info + i;
-
-                       if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
-                               dev_info(&GET_DEV(accel_dev),
-                                        "Too many ints from VF%d\n",
-                                         vf_info->vf_nr);
-                               continue;
-                       }
-
-                       adf_schedule_vf2pf_handler(vf_info);
-                       irq_handled = true;
-               }
-       }
-       return irq_handled;
-}
-#endif /* CONFIG_PCI_IOV */
-
-static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-
-       if (hw_data->handle_pm_interrupt &&
-           hw_data->handle_pm_interrupt(accel_dev))
-               return true;
-
-       return false;
-}
-
-static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
-{
-       struct adf_accel_dev *accel_dev = dev_ptr;
-
-#ifdef CONFIG_PCI_IOV
-       /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
-       if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
-               return IRQ_HANDLED;
-#endif /* CONFIG_PCI_IOV */
-
-       if (adf_handle_pm_int(accel_dev))
-               return IRQ_HANDLED;
-
-       dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
-               accel_dev->accel_id);
-
-       return IRQ_NONE;
-}
-
-static void adf_free_irqs(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
-       struct adf_etr_data *etr_data = accel_dev->transport;
-       int clust_irq = hw_data->num_banks;
-       int irq, i = 0;
-
-       if (pci_dev_info->msix_entries.num_entries > 1) {
-               for (i = 0; i < hw_data->num_banks; i++) {
-                       if (irqs[i].enabled) {
-                               irq = pci_irq_vector(pci_dev_info->pci_dev, i);
-                               irq_set_affinity_hint(irq, NULL);
-                               free_irq(irq, &etr_data->banks[i]);
-                       }
-               }
-       }
-
-       if (irqs[i].enabled) {
-               irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
-               free_irq(irq, accel_dev);
-       }
-}
-
-static int adf_request_irqs(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
-       struct adf_etr_data *etr_data = accel_dev->transport;
-       int clust_irq = hw_data->num_banks;
-       int ret, irq, i = 0;
-       char *name;
-
-       /* Request msix irq for all banks unless SR-IOV enabled */
-       if (!accel_dev->pf.vf_info) {
-               for (i = 0; i < hw_data->num_banks; i++) {
-                       struct adf_etr_bank_data *bank = &etr_data->banks[i];
-                       unsigned int cpu, cpus = num_online_cpus();
-
-                       name = irqs[i].name;
-                       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
-                                "qat%d-bundle%d", accel_dev->accel_id, i);
-                       irq = pci_irq_vector(pci_dev_info->pci_dev, i);
-                       if (unlikely(irq < 0)) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Failed to get IRQ number of device vector %d - %s\n",
-                                       i, name);
-                               ret = irq;
-                               goto err;
-                       }
-                       ret = request_irq(irq, adf_msix_isr_bundle, 0,
-                                         &name[0], bank);
-                       if (ret) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Failed to allocate IRQ %d for %s\n",
-                                       irq, name);
-                               goto err;
-                       }
-
-                       cpu = ((accel_dev->accel_id * hw_data->num_banks) +
-                              i) % cpus;
-                       irq_set_affinity_hint(irq, get_cpu_mask(cpu));
-                       irqs[i].enabled = true;
-               }
-       }
-
-       /* Request msix irq for AE */
-       name = irqs[i].name;
-       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
-                "qat%d-ae-cluster", accel_dev->accel_id);
-       irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
-       if (unlikely(irq < 0)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to get IRQ number of device vector %d - %s\n",
-                       i, name);
-               ret = irq;
-               goto err;
-       }
-       ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to allocate IRQ %d for %s\n", irq, name);
-               goto err;
-       }
-       irqs[i].enabled = true;
-       return ret;
-err:
-       adf_free_irqs(accel_dev);
-       return ret;
-}
-
-static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u32 msix_num_entries = 1;
-       struct adf_irq *irqs;
-
-       /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
-       if (!accel_dev->pf.vf_info)
-               msix_num_entries += hw_data->num_banks;
-
-       irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
-                           GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
-       if (!irqs)
-               return -ENOMEM;
-
-       accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
-       accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
-       return 0;
-}
-
-static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
-{
-       kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
-       accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
-}
-
-static int adf_setup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int i;
-
-       for (i = 0; i < hw_data->num_banks; i++)
-               tasklet_init(&priv_data->banks[i].resp_handler,
-                            adf_response_handler,
-                            (unsigned long)&priv_data->banks[i]);
-       return 0;
-}
-
-static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int i;
-
-       for (i = 0; i < hw_data->num_banks; i++) {
-               tasklet_disable(&priv_data->banks[i].resp_handler);
-               tasklet_kill(&priv_data->banks[i].resp_handler);
-       }
-}
-
-/**
- * adf_isr_resource_free() - Free IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function frees interrupts for acceleration device.
- */
-void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
-{
-       adf_free_irqs(accel_dev);
-       adf_cleanup_bh(accel_dev);
-       adf_disable_msix(&accel_dev->accel_pci_dev);
-       adf_isr_free_msix_vectors_data(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_isr_resource_free);
-
-/**
- * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function allocates interrupts for acceleration device.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = adf_isr_alloc_msix_vectors_data(accel_dev);
-       if (ret)
-               goto err_out;
-
-       ret = adf_enable_msix(accel_dev);
-       if (ret)
-               goto err_free_msix_table;
-
-       ret = adf_setup_bh(accel_dev);
-       if (ret)
-               goto err_disable_msix;
-
-       ret = adf_request_irqs(accel_dev);
-       if (ret)
-               goto err_cleanup_bh;
-
-       return 0;
-
-err_cleanup_bh:
-       adf_cleanup_bh(accel_dev);
-
-err_disable_msix:
-       adf_disable_msix(&accel_dev->accel_pci_dev);
-
-err_free_msix_table:
-       adf_isr_free_msix_vectors_data(accel_dev);
-
-err_out:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
-
-/**
- * adf_init_misc_wq() - Init misc workqueue
- *
- * Function initializes the workqueue 'qat_misc_wq' for general purpose use.
- *
- * Return: 0 on success, error code otherwise.
- */
-int __init adf_init_misc_wq(void)
-{
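-       /* WQ_MEM_RECLAIM provides a rescuer thread, so queued work can make
-        * forward progress even under memory pressure
-        */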
-       adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
-
-       return !adf_misc_wq ? -ENOMEM : 0;
-}
-
-void adf_exit_misc_wq(void)
-{
-       if (adf_misc_wq)
-               destroy_workqueue(adf_misc_wq);
-
-       adf_misc_wq = NULL;
-}
-
-bool adf_misc_wq_queue_work(struct work_struct *work)
-{
-       return queue_work(adf_misc_wq, work);
-}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h
deleted file mode 100644 (file)
index 204a424..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#ifndef ADF_PFVF_MSG_H
-#define ADF_PFVF_MSG_H
-
-#include <linux/bits.h>
-
-/*
- * PF<->VF Gen2 Messaging format
- *
- * The PF has an array of 32-bit PF2VF registers, one for each VF. The
- * PF can access all these registers while each VF can access only the one
- * register associated with that particular VF.
- *
- * The register is functionally split into two parts:
- * The bottom half is for PF->VF messages. In particular, when the first
- * bit of this register (bit 0) gets set, an interrupt is triggered in the
- * respective VF.
- * The top half is for VF->PF messages. In particular, when the first bit
- * of this half of the register (bit 16) gets set, an interrupt is
- * triggered in the PF.
- *
- * The remaining bits within this register are available to encode messages
- * and to implement a collision control mechanism that prevents concurrent
- * use of the PF2VF register by both the PF and VF.
- *
- *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
- *  _______________________________________________
- * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
- * +-----------------------------------------------+
- *  \___________________________/ \_________/ ^   ^
- *                ^                    ^      |   |
- *                |                    |      |   VF2PF Int
- *                |                    |      Message Origin
- *                |                    Message Type
- *                Message-specific Data/Reserved
- *
- *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
- *  _______________________________________________
- * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
- * +-----------------------------------------------+
- *  \___________________________/ \_________/ ^   ^
- *                ^                    ^      |   |
- *                |                    |      |   PF2VF Int
- *                |                    |      Message Origin
- *                |                    Message Type
- *                Message-specific Data/Reserved
- *
- * Message Origin (Should always be 1)
- * A legacy out-of-tree QAT driver allowed for a set of messages not supported
- * by this driver; these had a Msg Origin of 0 and are ignored.
- *
- * When a PF or VF attempts to send a message in the lower or upper 16 bits,
- * respectively, the other 16 bits are written to first with a defined
- * IN_USE_BY pattern as part of a collision control scheme (see function
- * adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
- *
- *
- * PF<->VF Gen4 Messaging format
- *
- * Similarly to the gen2 messaging format, 32-bit long registers are used for
- * communication between PF and VFs. However, each VF and the PF share a pair
- * of 32-bit registers to avoid collisions: one for PF to VF messages and one
- * for VF to PF messages.
- *
- * Both the Interrupt bit and the Message Origin bit retain the same position
- * and meaning, although non-system messages are now deprecated and not
- * expected.
- *
- *  31 30              9  8  7  6  5  4  3  2  1  0
- *  _______________________________________________
- * |  |  |   . . .   |  |  |  |  |  |  |  |  |  |  |
- * +-----------------------------------------------+
- *  \_____________________/ \_______________/  ^  ^
- *             ^                     ^         |  |
- *             |                     |         |  PF/VF Int
- *             |                     |         Message Origin
- *             |                     Message Type
- *             Message-specific Data/Reserved
- *
- * For both formats, the message reception is acknowledged by lowering the
- * interrupt bit on the register where the message was sent.
- */
-
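-/*
- * Illustrative sketch: encoding a { type, data } pair into a raw CSR word
- * with the generic helpers from adf_pfvf_utils.h. The field offsets and
- * masks below are hypothetical, shown for clarity only; the real
- * per-generation layouts are provided by the gen2/gen4 PFVF ops.
- *
- *	const struct pfvf_csr_format fmt = {
- *		.type = { .offset = 2, .mask = 0xf   },	// e.g. 4-bit Gen2 type
- *		.data = { .offset = 6, .mask = 0x3ff },	// message payload
- *	};
- *	struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
- *	u32 csr = adf_pfvf_csr_msg_of(accel_dev, msg, &fmt);
- *	// csr carries ADF_PFVF_MSGORIGIN_SYSTEM plus the encoded fields;
- *	// the sender then raises the interrupt bit to notify the other side.
- */
-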
-/* PFVF message common bits */
-#define ADF_PFVF_INT                           BIT(0)
-#define ADF_PFVF_MSGORIGIN_SYSTEM              BIT(1)
-
-/* Different generations have different CSR layouts; use this struct
- * to abstract these differences away
- */
-struct pfvf_message {
-       u8 type;
-       u32 data;
-};
-
-/* PF->VF messages */
-enum pf2vf_msgtype {
-       ADF_PF2VF_MSGTYPE_RESTARTING            = 0x01,
-       ADF_PF2VF_MSGTYPE_VERSION_RESP          = 0x02,
-       ADF_PF2VF_MSGTYPE_BLKMSG_RESP           = 0x03,
-/* Values from 0x10 are Gen4 specific, as the message type is only 4 bits wide in Gen2 devices. */
-       ADF_PF2VF_MSGTYPE_RP_RESET_RESP         = 0x10,
-};
-
-/* VF->PF messages */
-enum vf2pf_msgtype {
-       ADF_VF2PF_MSGTYPE_INIT                  = 0x03,
-       ADF_VF2PF_MSGTYPE_SHUTDOWN              = 0x04,
-       ADF_VF2PF_MSGTYPE_VERSION_REQ           = 0x05,
-       ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ        = 0x06,
-       ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ       = 0x07,
-       ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ      = 0x08,
-       ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ       = 0x09,
-/* Values from 0x10 are Gen4 specific, as the message type is only 4 bits wide in Gen2 devices. */
-       ADF_VF2PF_MSGTYPE_RP_RESET              = 0x10,
-};
-
-/* VF/PF compatibility version. */
-enum pfvf_compatibility_version {
-       /* Support for extended capabilities */
-       ADF_PFVF_COMPAT_CAPABILITIES            = 0x02,
-       /* In-use pattern cleared by receiver */
-       ADF_PFVF_COMPAT_FAST_ACK                = 0x03,
-       /* Ring to service mapping support for non-standard mappings */
-       ADF_PFVF_COMPAT_RING_TO_SVC_MAP         = 0x04,
-       /* Reference to the latest version */
-       ADF_PFVF_COMPAT_THIS_VERSION            = 0x04,
-};
-
-/* PF->VF Version Response */
-#define ADF_PF2VF_VERSION_RESP_VERS_MASK       GENMASK(7, 0)
-#define ADF_PF2VF_VERSION_RESP_RESULT_MASK     GENMASK(9, 8)
-
-enum pf2vf_compat_response {
-       ADF_PF2VF_VF_COMPATIBLE                 = 0x01,
-       ADF_PF2VF_VF_INCOMPATIBLE               = 0x02,
-       ADF_PF2VF_VF_COMPAT_UNKNOWN             = 0x03,
-};
-
-enum ring_reset_result {
-       RPRESET_SUCCESS                         = 0x00,
-       RPRESET_NOT_SUPPORTED                   = 0x01,
-       RPRESET_INVAL_BANK                      = 0x02,
-       RPRESET_TIMEOUT                         = 0x03,
-};
-
-#define ADF_VF2PF_RNG_RESET_RP_MASK            GENMASK(1, 0)
-#define ADF_VF2PF_RNG_RESET_RSVD_MASK          GENMASK(25, 2)
-
-/* PF->VF Block Responses */
-#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK                GENMASK(1, 0)
-#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK                GENMASK(9, 2)
-
-enum pf2vf_blkmsg_resp_type {
-       ADF_PF2VF_BLKMSG_RESP_TYPE_DATA         = 0x00,
-       ADF_PF2VF_BLKMSG_RESP_TYPE_CRC          = 0x01,
-       ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR        = 0x02,
-};
-
-/* PF->VF Block Error Code */
-enum pf2vf_blkmsg_error {
-       ADF_PF2VF_INVALID_BLOCK_TYPE            = 0x00,
-       ADF_PF2VF_INVALID_BYTE_NUM_REQ          = 0x01,
-       ADF_PF2VF_PAYLOAD_TRUNCATED             = 0x02,
-       ADF_PF2VF_UNSPECIFIED_ERROR             = 0x03,
-};
-
-/* VF->PF Block Requests */
-#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK                GENMASK(1, 0)
-#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK                GENMASK(8, 2)
-#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK       GENMASK(2, 0)
-#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK       GENMASK(8, 3)
-#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK                GENMASK(3, 0)
-#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK                GENMASK(8, 4)
-#define ADF_VF2PF_BLOCK_CRC_REQ_MASK           BIT(9)
-
-/* VF->PF Block Request Types
- *  0..15 - 32 byte message
- * 16..23 - 64 byte message
- * 24..27 - 128 byte message
- */
-enum vf2pf_blkmsg_req_type {
-       ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY        = 0x02,
-       ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP       = 0x03,
-};
-
-#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
-               (FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
-
-#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
-               (FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
-               ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
-
-#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
-               (FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
-               ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
-
-#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
-               FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
-
-#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
-               FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
-
-#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
-               FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
-
-struct pfvf_blkmsg_header {
-       u8 version;
-       u8 payload_size;
-} __packed;
-
-#define ADF_PFVF_BLKMSG_HEADER_SIZE            (sizeof(struct pfvf_blkmsg_header))
-#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg)   (sizeof(blkmsg) - \
-                                                       ADF_PFVF_BLKMSG_HEADER_SIZE)
-#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg)       (ADF_PFVF_BLKMSG_HEADER_SIZE + \
-                                                       (blkmsg)->hdr.payload_size)
-#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE           128
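-
-/*
- * Worked example: struct capabilities_v2 below is __packed, so its size is
- * 2 (header) + 4 + 4 = 10 bytes; ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct
- * capabilities_v2) therefore evaluates to 8, the value a well-formed
- * message carries in hdr.payload_size.
- */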
-
-/* PF->VF Block message header bytes */
-#define ADF_PFVF_BLKMSG_VER_BYTE               0
-#define ADF_PFVF_BLKMSG_LEN_BYTE               1
-
-/* PF/VF Capabilities message values */
-enum blkmsg_capabilities_versions {
-       ADF_PFVF_CAPABILITIES_V1_VERSION        = 0x01,
-       ADF_PFVF_CAPABILITIES_V2_VERSION        = 0x02,
-       ADF_PFVF_CAPABILITIES_V3_VERSION        = 0x03,
-};
-
-struct capabilities_v1 {
-       struct pfvf_blkmsg_header hdr;
-       u32 ext_dc_caps;
-} __packed;
-
-struct capabilities_v2 {
-       struct pfvf_blkmsg_header hdr;
-       u32 ext_dc_caps;
-       u32 capabilities;
-} __packed;
-
-struct capabilities_v3 {
-       struct pfvf_blkmsg_header hdr;
-       u32 ext_dc_caps;
-       u32 capabilities;
-       u32 frequency;
-} __packed;
-
-/* PF/VF Ring to service mapping values */
-enum blkmsg_ring_to_svc_versions {
-       ADF_PFVF_RING_TO_SVC_VERSION            = 0x01,
-};
-
-struct ring_to_svc_map_v1 {
-       struct pfvf_blkmsg_header hdr;
-       u16 map;
-} __packed;
-
-#endif /* ADF_PFVF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.c
deleted file mode 100644 (file)
index 14c069f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/pci.h>
-#include "adf_accel_devices.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_pf_msg.h"
-#include "adf_pfvf_pf_proto.h"
-
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_vf_info *vf;
-       struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
-       int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
-
-       for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
-               if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to send restarting msg to VF%d\n", i);
-       }
-}
-
-int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
-                                    u8 *buffer, u8 compat)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct capabilities_v2 caps_msg;
-
-       caps_msg.ext_dc_caps = hw_data->extended_dc_capabilities;
-       caps_msg.capabilities = hw_data->accel_capabilities_mask;
-
-       caps_msg.hdr.version = ADF_PFVF_CAPABILITIES_V2_VERSION;
-       caps_msg.hdr.payload_size =
-                       ADF_PFVF_BLKMSG_PAYLOAD_SIZE(struct capabilities_v2);
-
-       memcpy(buffer, &caps_msg, sizeof(caps_msg));
-
-       return 0;
-}
-
-int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
-                                   u8 *buffer, u8 compat)
-{
-       struct ring_to_svc_map_v1 rts_map_msg;
-
-       rts_map_msg.map = accel_dev->hw_device->ring_to_svc_map;
-       rts_map_msg.hdr.version = ADF_PFVF_RING_TO_SVC_VERSION;
-       rts_map_msg.hdr.payload_size = ADF_PFVF_BLKMSG_PAYLOAD_SIZE(rts_map_msg);
-
-       memcpy(buffer, &rts_map_msg, sizeof(rts_map_msg));
-
-       return 0;
-}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_msg.h
deleted file mode 100644 (file)
index e8982d1..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_PF_MSG_H
-#define ADF_PFVF_PF_MSG_H
-
-#include "adf_accel_devices.h"
-
-void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
-
-typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
-                                        u8 *buffer, u8 compat);
-
-int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
-                                    u8 *buffer, u8 compat);
-int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
-                                   u8 *buffer, u8 compat);
-
-#endif /* ADF_PFVF_PF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c
deleted file mode 100644 (file)
index 388e58b..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/bitfield.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_pf_msg.h"
-#include "adf_pfvf_pf_proto.h"
-#include "adf_pfvf_utils.h"
-
-typedef u8 (*pf2vf_blkmsg_data_getter_fn)(u8 const *blkmsg, u8 byte);
-
-static const adf_pf2vf_blkmsg_provider pf2vf_blkmsg_providers[] = {
-       NULL,                             /* no message type defined for value 0 */
-       NULL,                             /* no message type defined for value 1 */
-       adf_pf_capabilities_msg_provider, /* ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY */
-       adf_pf_ring_to_svc_msg_provider,  /* ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP */
-};
-
-/**
- * adf_send_pf2vf_msg() - send PF to VF message
- * @accel_dev: Pointer to acceleration device
- * @vf_nr:     VF number to which the message will be sent
- * @msg:       Message to send
- *
- * This function allows the PF to send a message to a specific VF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)
-{
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
-
-       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
-                                 &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);
-}
-
-/**
- * adf_recv_vf2pf_msg() - receive a VF to PF message
- * @accel_dev: Pointer to acceleration device
- * @vf_nr:     Number of the VF from which the message will be received
- *
- * This function allows the PF to receive a message from a specific VF.
- *
- * Return: a valid message on success, zero otherwise.
- */
-static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)
-{
-       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(vf_nr);
-
-       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);
-}
-
-static adf_pf2vf_blkmsg_provider get_blkmsg_response_provider(u8 type)
-{
-       if (type >= ARRAY_SIZE(pf2vf_blkmsg_providers))
-               return NULL;
-
-       return pf2vf_blkmsg_providers[type];
-}
-
-/* Byte pf2vf_blkmsg_data_getter_fn callback */
-static u8 adf_pf2vf_blkmsg_get_byte(u8 const *blkmsg, u8 index)
-{
-       return blkmsg[index];
-}
-
-/* CRC pf2vf_blkmsg_data_getter_fn callback */
-static u8 adf_pf2vf_blkmsg_get_crc(u8 const *blkmsg, u8 count)
-{
-       /* count is 0-based, turn it into a length */
-       return adf_pfvf_calc_blkmsg_crc(blkmsg, count + 1);
-}
-
-static int adf_pf2vf_blkmsg_get_data(struct adf_accel_vf_info *vf_info,
-                                    u8 type, u8 byte, u8 max_size, u8 *data,
-                                    pf2vf_blkmsg_data_getter_fn data_getter)
-{
-       u8 blkmsg[ADF_PFVF_BLKMSG_MSG_MAX_SIZE] = { 0 };
-       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
-       adf_pf2vf_blkmsg_provider provider;
-       u8 msg_size;
-
-       provider = get_blkmsg_response_provider(type);
-
-       if (unlikely(!provider)) {
-               pr_err("QAT: No registered provider for message %d\n", type);
-               *data = ADF_PF2VF_INVALID_BLOCK_TYPE;
-               return -EINVAL;
-       }
-
-       if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {
-               pr_err("QAT: unknown error from provider for message %d\n", type);
-               *data = ADF_PF2VF_UNSPECIFIED_ERROR;
-               return -EINVAL;
-       }
-
-       msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];
-
-       if (unlikely(msg_size >= max_size)) {
-               pr_err("QAT: Invalid size %d provided for message type %d\n",
-                      msg_size, type);
-               *data = ADF_PF2VF_PAYLOAD_TRUNCATED;
-               return -EINVAL;
-       }
-
-       if (unlikely(byte >= msg_size)) {
-               pr_err("QAT: Out-of-bound byte number %d (msg size %d)\n",
-                      byte, msg_size);
-               *data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
-               return -EINVAL;
-       }
-
-       *data = data_getter(blkmsg, byte);
-       return 0;
-}
-
-static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info,
-                                            struct pfvf_message req)
-{
-       u8 resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR;
-       struct pfvf_message resp = { 0 };
-       u8 resp_data = 0;
-       u8 blk_type;
-       u8 blk_byte;
-       u8 byte_max;
-
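-       /* Only reached for the three block request types dispatched by
-        * adf_handle_vf2pf_msg(), so blk_type/blk_byte/byte_max are always set
-        */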
-       switch (req.type) {
-       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
-               blk_type = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK, req.data)
-                          + ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX + 1;
-               blk_byte = FIELD_GET(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, req.data);
-               byte_max = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
-               break;
-       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
-               blk_type = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK, req.data)
-                          + ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1;
-               blk_byte = FIELD_GET(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, req.data);
-               byte_max = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
-               break;
-       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
-               blk_type = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, req.data);
-               blk_byte = FIELD_GET(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, req.data);
-               byte_max = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
-               break;
-       }
-
-       /* Is this a request for CRC or data? */
-       if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) {
-               dev_dbg(&GET_DEV(vf_info->accel_dev),
-                       "BlockMsg of type %d for CRC over %d bytes received from VF%d\n",
-                       blk_type, blk_byte + 1, vf_info->vf_nr);
-
-               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
-                                              byte_max, &resp_data,
-                                              adf_pf2vf_blkmsg_get_crc))
-                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_CRC;
-       } else {
-               dev_dbg(&GET_DEV(vf_info->accel_dev),
-                       "BlockMsg of type %d for data byte %d received from VF%d\n",
-                       blk_type, blk_byte, vf_info->vf_nr);
-
-               if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte,
-                                              byte_max, &resp_data,
-                                              adf_pf2vf_blkmsg_get_byte))
-                       resp_type = ADF_PF2VF_BLKMSG_RESP_TYPE_DATA;
-       }
-
-       resp.type = ADF_PF2VF_MSGTYPE_BLKMSG_RESP;
-       resp.data = FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp_type) |
-                   FIELD_PREP(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp_data);
-
-       return resp;
-}
-
-static struct pfvf_message handle_rp_reset_req(struct adf_accel_dev *accel_dev, u8 vf_nr,
-                                              struct pfvf_message req)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct pfvf_message resp = {
-               .type = ADF_PF2VF_MSGTYPE_RP_RESET_RESP,
-               .data = RPRESET_SUCCESS
-       };
-       u32 bank_number;
-       u32 rsvd_field;
-
-       bank_number = FIELD_GET(ADF_VF2PF_RNG_RESET_RP_MASK, req.data);
-       rsvd_field = FIELD_GET(ADF_VF2PF_RNG_RESET_RSVD_MASK, req.data);
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "Ring Pair Reset Message received from VF%d for bank 0x%x\n",
-               vf_nr, bank_number);
-
-       if (!hw_data->ring_pair_reset || rsvd_field) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Ring Pair Reset for VF%d is not supported\n", vf_nr);
-               resp.data = RPRESET_NOT_SUPPORTED;
-               goto out;
-       }
-
-       if (bank_number >= hw_data->num_banks_per_vf) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid bank number (0x%x) from VF%d for Ring Reset\n",
-                       bank_number, vf_nr);
-               resp.data = RPRESET_INVAL_BANK;
-               goto out;
-       }
-
-       /* Convert the VF provided value to PF bank number */
-       bank_number = vf_nr * hw_data->num_banks_per_vf + bank_number;
-       if (hw_data->ring_pair_reset(accel_dev, bank_number)) {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Ring pair reset for VF%d failure\n", vf_nr);
-               resp.data = RPRESET_TIMEOUT;
-               goto out;
-       }
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "Ring pair reset for VF%d successfully\n", vf_nr);
-
-out:
-       return resp;
-}
-
-static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr,
-                               struct pfvf_message msg, struct pfvf_message *resp)
-{
-       struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
-
-       switch (msg.type) {
-       case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
-               {
-               u8 vf_compat_ver = msg.data;
-               u8 compat;
-
-               dev_dbg(&GET_DEV(accel_dev),
-                       "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n",
-                       vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
-
-               if (vf_compat_ver == 0)
-                       compat = ADF_PF2VF_VF_INCOMPATIBLE;
-               else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION)
-                       compat = ADF_PF2VF_VF_COMPATIBLE;
-               else
-                       compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
-
-               vf_info->vf_compat_ver = vf_compat_ver;
-
-               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
-               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK,
-                                       ADF_PFVF_COMPAT_THIS_VERSION) |
-                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_VERSION_REQ:
-               {
-               u8 compat;
-
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Legacy VersionRequest received from VF%d to PF (vers 1.1)\n",
-                       vf_nr);
-
-               /* legacy driver, VF compat_ver is 0 */
-               vf_info->vf_compat_ver = 0;
-
-               /* PF always newer than legacy VF */
-               compat = ADF_PF2VF_VF_COMPATIBLE;
-
-               /* Set legacy major and minor version to the latest, 1.1 */
-               resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP;
-               resp->data = FIELD_PREP(ADF_PF2VF_VERSION_RESP_VERS_MASK, 0x11) |
-                            FIELD_PREP(ADF_PF2VF_VERSION_RESP_RESULT_MASK, compat);
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_INIT:
-               {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Init message received from VF%d\n", vf_nr);
-               vf_info->init = true;
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_SHUTDOWN:
-               {
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Shutdown message received from VF%d\n", vf_nr);
-               vf_info->init = false;
-               }
-               break;
-       case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ:
-       case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ:
-       case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ:
-               *resp = handle_blkmsg_req(vf_info, msg);
-               break;
-       case ADF_VF2PF_MSGTYPE_RP_RESET:
-               *resp = handle_rp_reset_req(accel_dev, vf_nr, msg);
-               break;
-       default:
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Unknown message from VF%d (type 0x%.4x, data: 0x%.4x)\n",
-                       vf_nr, msg.type, msg.data);
-               return -ENOMSG;
-       }
-
-       return 0;
-}
-
-bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr)
-{
-       struct pfvf_message req;
-       struct pfvf_message resp = {0};
-
-       req = adf_recv_vf2pf_msg(accel_dev, vf_nr);
-       if (!req.type)  /* Legacy or no message */
-               return true;
-
-       if (adf_handle_vf2pf_msg(accel_dev, vf_nr, req, &resp))
-               return false;
-
-       if (resp.type && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send response to VF%d\n", vf_nr);
-
-       return true;
-}
-
-/**
- * adf_enable_pf2vf_comms() - Function enables communication from PF to VF
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * This function carries out the necessary steps to set up and start the PFVF
- * communication channel, if any.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
-       adf_pfvf_crc_init();
-       spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.h
deleted file mode 100644 (file)
index 165d266..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_PF_PROTO_H
-#define ADF_PFVF_PF_PROTO_H
-
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-
-int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg);
-
-int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
-
-#endif /* ADF_PFVF_PF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.c b/drivers/crypto/qat/qat_common/adf_pfvf_utils.c
deleted file mode 100644 (file)
index c5f6d77..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2021 Intel Corporation */
-#include <linux/crc8.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_utils.h"
-
-/* CRC Calculation */
-DECLARE_CRC8_TABLE(pfvf_crc8_table);
-#define ADF_PFVF_CRC8_POLYNOMIAL 0x97
-
-void adf_pfvf_crc_init(void)
-{
-       crc8_populate_msb(pfvf_crc8_table, ADF_PFVF_CRC8_POLYNOMIAL);
-}
-
-u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
-{
-       return crc8(pfvf_crc8_table, buf, buf_len, CRC8_INIT_VALUE);
-}
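-
-/*
- * Usage sketch (illustrative only): both ends compute the CRC over the
- * header plus payload of a block message and compare results, e.g.
- *
- *	struct capabilities_v1 caps = { .hdr = { .version = 1, .payload_size = 4 } };
- *	u8 crc;
- *
- *	adf_pfvf_crc_init();	// populate the CRC8 table once
- *	crc = adf_pfvf_calc_blkmsg_crc((u8 *)&caps,
- *				       ADF_PFVF_BLKMSG_MSG_SIZE(&caps));
- */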
-
-static bool set_value_on_csr_msg(struct adf_accel_dev *accel_dev, u32 *csr_msg,
-                                u32 value, const struct pfvf_field_format *fmt)
-{
-       if (unlikely((value & fmt->mask) != value)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "PFVF message value 0x%X out of range, %u max allowed\n",
-                       value, fmt->mask);
-               return false;
-       }
-
-       *csr_msg |= value << fmt->offset;
-
-       return true;
-}
-
-u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
-                       struct pfvf_message msg,
-                       const struct pfvf_csr_format *fmt)
-{
-       u32 csr_msg = 0;
-
-       if (!set_value_on_csr_msg(accel_dev, &csr_msg, msg.type, &fmt->type) ||
-           !set_value_on_csr_msg(accel_dev, &csr_msg, msg.data, &fmt->data))
-               return 0;
-
-       return csr_msg | ADF_PFVF_MSGORIGIN_SYSTEM;
-}
-
-struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 csr_msg,
-                                       const struct pfvf_csr_format *fmt)
-{
-       struct pfvf_message msg = { 0 };
-
-       msg.type = (csr_msg >> fmt->type.offset) & fmt->type.mask;
-       msg.data = (csr_msg >> fmt->data.offset) & fmt->data.mask;
-
-       if (unlikely(!msg.type))
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid PFVF msg with no type received\n");
-
-       return msg;
-}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/qat/qat_common/adf_pfvf_utils.h
deleted file mode 100644 (file)
index 2be048e..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_UTILS_H
-#define ADF_PFVF_UTILS_H
-
-#include <linux/types.h>
-#include "adf_pfvf_msg.h"
-
-/* How long to wait for the far side to acknowledge receipt */
-#define ADF_PFVF_MSG_ACK_DELAY_US      4
-#define ADF_PFVF_MSG_ACK_MAX_DELAY_US  (1 * USEC_PER_SEC)
-
-u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
-void adf_pfvf_crc_init(void);
-
-struct pfvf_field_format {
-       u8  offset;
-       u32 mask;
-};
-
-struct pfvf_csr_format {
-       struct pfvf_field_format type;
-       struct pfvf_field_format data;
-};
-
-u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                       const struct pfvf_csr_format *fmt);
-struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg,
-                                       const struct pfvf_csr_format *fmt);
-
-#endif /* ADF_PFVF_UTILS_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.c
deleted file mode 100644 (file)
index 1141258..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/bitfield.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_vf_msg.h"
-#include "adf_pfvf_vf_proto.h"
-
-/**
- * adf_vf2pf_notify_init() - send init msg to PF
- * @accel_dev:  Pointer to acceleration VF device.
- *
- * Function sends an init message from the VF to a PF
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
-       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
-
-       if (adf_send_vf2pf_msg(accel_dev, msg)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send Init event to PF\n");
-               return -EFAULT;
-       }
-       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
-
-/**
- * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
- * @accel_dev:  Pointer to acceleration VF device.
- *
- * Function sends a shutdown message from the VF to a PF
- *
- * Return: void
- */
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
-       struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };
-
-       if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
-               if (adf_send_vf2pf_msg(accel_dev, msg))
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to send Shutdown event to PF\n");
-}
-EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
-
-int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
-{
-       u8 pf_version;
-       int compat;
-       int ret;
-       struct pfvf_message resp;
-       struct pfvf_message msg = {
-               .type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
-               .data = ADF_PFVF_COMPAT_THIS_VERSION,
-       };
-
-       BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
-
-       ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to send Compatibility Version Request.\n");
-               return ret;
-       }
-
-       pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
-       compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
-
-       /* Response from PF received, check compatibility */
-       switch (compat) {
-       case ADF_PF2VF_VF_COMPATIBLE:
-               break;
-       case ADF_PF2VF_VF_COMPAT_UNKNOWN:
-               /* VF is newer than PF - compatible for now */
-               break;
-       case ADF_PF2VF_VF_INCOMPATIBLE:
-               dev_err(&GET_DEV(accel_dev),
-                       "PF (vers %d) and VF (vers %d) are not compatible\n",
-                       pf_version, ADF_PFVF_COMPAT_THIS_VERSION);
-               return -EINVAL;
-       default:
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid response from PF; assume not compatible\n");
-               return -EINVAL;
-       }
-
-       accel_dev->vf.pf_compat_ver = pf_version;
-       return 0;
-}
-
-int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct capabilities_v3 cap_msg = { 0 };
-       unsigned int len = sizeof(cap_msg);
-
-       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
-               /* The PF is too old to support the extended capabilities */
-               return 0;
-
-       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
-                                     (u8 *)&cap_msg, &len)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "QAT: Failed to get block message response\n");
-               return -EFAULT;
-       }
-
-       switch (cap_msg.hdr.version) {
-       default:
-               /* Newer version received, handle only the known parts */
-               fallthrough;
-       case ADF_PFVF_CAPABILITIES_V3_VERSION:
-               if (likely(len >= sizeof(struct capabilities_v3)))
-                       hw_data->clock_frequency = cap_msg.frequency;
-               else
-                       dev_info(&GET_DEV(accel_dev), "Could not get frequency");
-               fallthrough;
-       case ADF_PFVF_CAPABILITIES_V2_VERSION:
-               if (likely(len >= sizeof(struct capabilities_v2)))
-                       hw_data->accel_capabilities_mask = cap_msg.capabilities;
-               else
-                       dev_info(&GET_DEV(accel_dev), "Could not get capabilities");
-               fallthrough;
-       case ADF_PFVF_CAPABILITIES_V1_VERSION:
-               if (likely(len >= sizeof(struct capabilities_v1))) {
-                       hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
-               } else {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Capabilities message truncated to %d bytes\n", len);
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-
-int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
-{
-       struct ring_to_svc_map_v1 rts_map_msg = { 0 };
-       unsigned int len = sizeof(rts_map_msg);
-
-       if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
-               /* Use already set default mappings */
-               return 0;
-
-       if (adf_send_vf2pf_blkmsg_req(accel_dev, ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
-                                     (u8 *)&rts_map_msg, &len)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "QAT: Failed to get block message response\n");
-               return -EFAULT;
-       }
-
-       if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
-               dev_err(&GET_DEV(accel_dev),
-                       "RING_TO_SVC message truncated to %d bytes\n", len);
-               return -EFAULT;
-       }
-
-       /* Only v1 at present */
-       accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
-       return 0;
-}
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_msg.h
deleted file mode 100644 (file)
index 71bc0e3..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_VF_MSG_H
-#define ADF_PFVF_VF_MSG_H
-
-#if defined(CONFIG_PCI_IOV)
-int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
-void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
-int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
-int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
-int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
-#else
-static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
-{
-}
-#endif
-
-#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.c
deleted file mode 100644 (file)
index 1015155..0000000
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/bitfield.h>
-#include <linux/completion.h>
-#include <linux/minmax.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_pfvf_msg.h"
-#include "adf_pfvf_utils.h"
-#include "adf_pfvf_vf_msg.h"
-#include "adf_pfvf_vf_proto.h"
-
-#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY    10
-#define ADF_PFVF_MSG_ACK_DELAY                 2
-#define ADF_PFVF_MSG_ACK_MAX_RETRY             100
-
-/* How many times to retry if there is no response */
-#define ADF_PFVF_MSG_RESP_RETRIES      5
-#define ADF_PFVF_MSG_RESP_TIMEOUT      (ADF_PFVF_MSG_ACK_DELAY * \
-                                        ADF_PFVF_MSG_ACK_MAX_RETRY + \
-                                        ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
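-/* i.e. 2 * 100 + 10 = 210 ms per attempt (interpreted via msecs_to_jiffies()
- * below), retried up to ADF_PFVF_MSG_RESP_RETRIES times
- */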
-
-/**
- * adf_send_vf2pf_msg() - send VF to PF message
- * @accel_dev: Pointer to acceleration device
- * @msg:       Message to send
- *
- * This function allows the VF to send a message to the PF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
-{
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_vf2pf_offset(0);
-
-       return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,
-                                 &accel_dev->vf.vf2pf_lock);
-}
-
-/**
- * adf_recv_pf2vf_msg() - receive a PF to VF message
- * @accel_dev: Pointer to acceleration device
- *
- * This function allows the VF to receive a message from the PF.
- *
- * Return: a valid message on success, zero otherwise.
- */
-static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
-{
-       struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
-       u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(0);
-
-       return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);
-}
-
-/**
- * adf_send_vf2pf_req() - send VF2PF request message
- * @accel_dev: Pointer to acceleration device.
- * @msg:       Request message to send
- * @resp:      Returned PF response
- *
- * This function sends a message that requires a response from the VF to the PF
- * and waits for a reply.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                      struct pfvf_message *resp)
-{
-       unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
-       unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
-       int ret;
-
-       reinit_completion(&accel_dev->vf.msg_received);
-
-       /* Send request from VF to PF */
-       do {
-               ret = adf_send_vf2pf_msg(accel_dev, msg);
-               if (ret) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Failed to send request msg to PF\n");
-                       return ret;
-               }
-
-               /* Wait for response, if it times out retry */
-               ret = wait_for_completion_timeout(&accel_dev->vf.msg_received,
-                                                 timeout);
-               if (ret) {
-                       if (likely(resp))
-                               *resp = accel_dev->vf.response;
-
-                       /* Once copied, set to an invalid value */
-                       accel_dev->vf.response.type = 0;
-
-                       return 0;
-               }
-
-               dev_err(&GET_DEV(accel_dev), "PFVF response message timeout\n");
-       } while (--retries);
-
-       return -EIO;
-}
-
-static int adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev, bool crc,
-                                    u8 *type, u8 *data)
-{
-       struct pfvf_message req = { 0 };
-       struct pfvf_message resp = { 0 };
-       u8 blk_type;
-       u8 blk_byte;
-       u8 msg_type;
-       u8 max_data;
-       int err;
-
-       /* Convert the block type to {small, medium, large} size category */
-       if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
-               msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
-               blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
-               blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
-               max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
-       } else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
-               msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
-               blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
-                                     *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
-               blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
-               max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
-       } else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
-               msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
-               blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
-                                     *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
-               blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
-               max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
-       } else {
-               dev_err(&GET_DEV(accel_dev), "Invalid message type %u\n", *type);
-               return -EINVAL;
-       }
-
-       /* Sanity check */
-       if (*data > max_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid byte %s %u for message type %u\n",
-                       crc ? "count" : "index", *data, *type);
-               return -EINVAL;
-       }
-
-       /* Build the block message */
-       req.type = msg_type;
-       req.data = blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
-
-       err = adf_send_vf2pf_req(accel_dev, req, &resp);
-       if (err)
-               return err;
-
-       *type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
-       *data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
-
-       return 0;
-}
-
-static int adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev, u8 type,
-                                    u8 index, u8 *data)
-{
-       int ret;
-
-       ret = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);
-       if (ret < 0)
-               return ret;
-
-       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Unexpected BLKMSG response type %u, byte 0x%x\n",
-                       type, index);
-               return -EFAULT;
-       }
-
-       *data = index;
-       return 0;
-}
-
-static int adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev, u8 type,
-                                   u8 bytes, u8 *crc)
-{
-       int ret;
-
-       /* The count of bytes refers to a length, so shift it to a 0-based
-        * count to avoid overflows. Thus, a request for 0 bytes is technically
-        * valid.
-        */
-       --bytes;
-
-       ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
-       if (ret < 0)
-               return ret;
-
-       if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
-                       type, bytes);
-               return -EFAULT;
-       }
-
-       *crc = bytes;
-       return 0;
-}
-
-/**
- * adf_send_vf2pf_blkmsg_req() - retrieve block message
- * @accel_dev: Pointer to acceleration VF device.
- * @type:      The block message type, see adf_pfvf_msg.h for allowed values
- * @buffer:    input buffer where to place the received data
- * @buffer_len:        buffer length as input, the number of bytes written on output
- *
- * Request a message of type 'type' over the block message transport.
- * This function sends the required number of block message requests and
- * return the overall content back to the caller through the provided buffer.
- * The buffer should be large enough to contain the requested message type,
- * otherwise the response will be truncated.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
-                             u8 *buffer, unsigned int *buffer_len)
-{
-       unsigned int index;
-       unsigned int msg_len;
-       int ret;
-       u8 remote_crc;
-       u8 local_crc;
-
-       if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
-               dev_err(&GET_DEV(accel_dev), "Invalid block message type %d\n",
-                       type);
-               return -EINVAL;
-       }
-
-       if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Buffer size too small for a block message\n");
-               return -EINVAL;
-       }
-
-       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
-                                       ADF_PFVF_BLKMSG_VER_BYTE,
-                                       &buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
-       if (unlikely(ret))
-               return ret;
-
-       if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid version 0 received for block request %u\n", type);
-               return -EFAULT;
-       }
-
-       ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type,
-                                       ADF_PFVF_BLKMSG_LEN_BYTE,
-                                       &buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
-       if (unlikely(ret))
-               return ret;
-
-       if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid size 0 received for block request %u\n", type);
-               return -EFAULT;
-       }
-
-       /* We need to pick the minimum since there is no way to request a
-        * specific version. As a consequence any scenario is possible:
-        * - PF has a newer (longer) version which doesn't fit in the buffer
-        * - VF expects a newer (longer) version, so we must not ask for
-        *   bytes in excess
-        * - PF and VF share the same version, no problem
-        */
-       msg_len = ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
-       msg_len = min(*buffer_len, msg_len);
-
-       /* Get the payload */
-       for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
-               ret = adf_vf2pf_blkmsg_get_byte(accel_dev, type, index,
-                                               &buffer[index]);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
-       if (unlikely(ret))
-               return ret;
-
-       local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
-       if (unlikely(local_crc != remote_crc)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "CRC error on msg type %d. Local %02X, remote %02X\n",
-                       type, local_crc, remote_crc);
-               return -EIO;
-       }
-
-       *buffer_len = msg_len;
-       return 0;
-}
-
-static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev,
-                                struct pfvf_message msg)
-{
-       switch (msg.type) {
-       case ADF_PF2VF_MSGTYPE_RESTARTING:
-               dev_dbg(&GET_DEV(accel_dev), "Restarting message received from PF\n");
-
-               adf_pf2vf_handle_pf_restarting(accel_dev);
-               return false;
-       case ADF_PF2VF_MSGTYPE_VERSION_RESP:
-       case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
-       case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
-               dev_dbg(&GET_DEV(accel_dev),
-                       "Response Message received from PF (type 0x%.4x, data 0x%.4x)\n",
-                       msg.type, msg.data);
-               accel_dev->vf.response = msg;
-               complete(&accel_dev->vf.msg_received);
-               return true;
-       default:
-               dev_err(&GET_DEV(accel_dev),
-                       "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
-                       msg.type, msg.data);
-       }
-
-       return false;
-}
-
-bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
-{
-       struct pfvf_message msg;
-
-       msg = adf_recv_pf2vf_msg(accel_dev);
-       if (msg.type)
-               return adf_handle_pf2vf_msg(accel_dev, msg);
-
-       /* Invalid or no message; no replies for PF->VF messages at present */
-
-       return true;
-}
-
-/**
- * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
- *
- * @accel_dev: Pointer to acceleration device virtual function.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       adf_pfvf_crc_init();
-       adf_enable_pf2vf_interrupts(accel_dev);
-
-       ret = adf_vf2pf_request_version(accel_dev);
-       if (ret)
-               return ret;
-
-       ret = adf_vf2pf_get_capabilities(accel_dev);
-       if (ret)
-               return ret;
-
-       ret = adf_vf2pf_get_ring_to_svc(accel_dev);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
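
As a usage reference, a minimal sketch of fetching a block message with
adf_send_vf2pf_blkmsg_req(); the 64-byte buffer and the handling of the reply
are illustrative assumptions, not taken from this file:

        /* Sketch only: buffer size and reply policy are hypothetical */
        static int example_get_blkmsg(struct adf_accel_dev *accel_dev, u8 type)
        {
                u8 buffer[64] = { 0 };
                unsigned int len = sizeof(buffer);
                int ret;

                /* len is in/out: buffer size on input, bytes received on output */
                ret = adf_send_vf2pf_blkmsg_req(accel_dev, type, buffer, &len);
                if (ret)
                        return ret;

                /* The header bytes carry the message version and length */
                return buffer[ADF_PFVF_BLKMSG_VER_BYTE] ? 0 : -EFAULT;
        }
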
diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h b/drivers/crypto/qat/qat_common/adf_pfvf_vf_proto.h
deleted file mode 100644 (file)
index f6ee9b3..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2021 Intel Corporation */
-#ifndef ADF_PFVF_VF_PROTO_H
-#define ADF_PFVF_VF_PROTO_H
-
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-
-int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg);
-int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
-                      struct pfvf_message *resp);
-int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev, u8 type,
-                             u8 *buffer, unsigned int *buffer_len);
-
-int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
-
-#endif /* ADF_PFVF_VF_PROTO_H */
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
deleted file mode 100644 (file)
index f44025b..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_pfvf_pf_msg.h"
-
-#define ADF_VF2PF_RATELIMIT_INTERVAL   8
-#define ADF_VF2PF_RATELIMIT_BURST      130
-
-static struct workqueue_struct *pf2vf_resp_wq;
-
-struct adf_pf2vf_resp {
-       struct work_struct pf2vf_resp_work;
-       struct adf_accel_vf_info *vf_info;
-};
-
-static void adf_iov_send_resp(struct work_struct *work)
-{
-       struct adf_pf2vf_resp *pf2vf_resp =
-               container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
-       struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
-       struct adf_accel_dev *accel_dev = vf_info->accel_dev;
-       u32 vf_nr = vf_info->vf_nr;
-       bool ret;
-
-       ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
-       if (ret)
-               /* re-enable interrupt on PF from this VF */
-               adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
-
-       kfree(pf2vf_resp);
-}
-
-void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
-{
-       struct adf_pf2vf_resp *pf2vf_resp;
-
-       pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
-       if (!pf2vf_resp)
-               return;
-
-       pf2vf_resp->vf_info = vf_info;
-       INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
-       queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
-}
-
-static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-       int totalvfs = pci_sriov_get_totalvfs(pdev);
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_accel_vf_info *vf_info;
-       int i;
-
-       for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
-            i++, vf_info++) {
-               /* This pointer will be populated when the VFs are created */
-               vf_info->accel_dev = accel_dev;
-               vf_info->vf_nr = i;
-               vf_info->vf_compat_ver = 0;
-
-               mutex_init(&vf_info->pf2vf_lock);
-               ratelimit_state_init(&vf_info->vf2pf_ratelimit,
-                                    ADF_VF2PF_RATELIMIT_INTERVAL,
-                                    ADF_VF2PF_RATELIMIT_BURST);
-       }
-
-       /* Set Valid bits in AE Thread to PCIe Function Mapping */
-       if (hw_data->configure_iov_threads)
-               hw_data->configure_iov_threads(accel_dev, true);
-
-       /* Enable VF to PF interrupts for all VFs */
-       adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
-
-       /*
-        * Due to the hardware design, when SR-IOV and the ring arbiter
-        * are enabled all the VFs supported in hardware must be enabled in
-        * order for all the hardware resources (i.e. bundles) to be usable.
-        * When SR-IOV is enabled, each of the VFs will own one bundle.
-        */
-       return pci_enable_sriov(pdev, totalvfs);
-}
-
-/**
- * adf_disable_sriov() - Disable SRIOV for the device
- * @accel_dev:  Pointer to accel device.
- *
- * Function disables SRIOV for the accel device.
- *
- * Return: void
- */
-void adf_disable_sriov(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
-       struct adf_accel_vf_info *vf;
-       int i;
-
-       if (!accel_dev->pf.vf_info)
-               return;
-
-       adf_pf2vf_notify_restarting(accel_dev);
-       pci_disable_sriov(accel_to_pci_dev(accel_dev));
-
-       /* Disable VF to PF interrupts */
-       adf_disable_all_vf2pf_interrupts(accel_dev);
-
-       /* Clear Valid bits in AE Thread to PCIe Function Mapping */
-       if (hw_data->configure_iov_threads)
-               hw_data->configure_iov_threads(accel_dev, false);
-
-       for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
-               mutex_destroy(&vf->pf2vf_lock);
-
-       kfree(accel_dev->pf.vf_info);
-       accel_dev->pf.vf_info = NULL;
-}
-EXPORT_SYMBOL_GPL(adf_disable_sriov);
-
-/**
- * adf_sriov_configure() - Enable SRIOV for the device
- * @pdev:  Pointer to PCI device.
- * @numvfs: Number of virtual functions (VFs) to enable.
- *
- * Note that the @numvfs parameter is ignored and all VFs supported by the
- * device are enabled due to the design of the hardware.
- *
- * Function enables SRIOV for the PCI device.
- *
- * Return: number of VFs enabled on success, error code otherwise.
- */
-int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-       int totalvfs = pci_sriov_get_totalvfs(pdev);
-       unsigned long val;
-       int ret;
-
-       if (!accel_dev) {
-               dev_err(&pdev->dev, "Failed to find accel_dev\n");
-               return -EFAULT;
-       }
-
-       if (!device_iommu_mapped(&pdev->dev))
-               dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
-
-       if (accel_dev->pf.vf_info) {
-               dev_info(&pdev->dev, "Already enabled for this device\n");
-               return -EINVAL;
-       }
-
-       if (adf_dev_started(accel_dev)) {
-               if (adf_devmgr_in_reset(accel_dev) ||
-                   adf_dev_in_use(accel_dev)) {
-                       dev_err(&GET_DEV(accel_dev), "Device busy\n");
-                       return -EBUSY;
-               }
-
-               ret = adf_dev_down(accel_dev, true);
-               if (ret)
-                       return ret;
-       }
-
-       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
-               return -EFAULT;
-       val = 0;
-       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
-               return -EFAULT;
-       ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
-                                         &val, ADF_DEC);
-       if (ret)
-               return ret;
-
-       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
-       /* Allocate memory for VF info structs */
-       accel_dev->pf.vf_info = kcalloc(totalvfs,
-                                       sizeof(struct adf_accel_vf_info),
-                                       GFP_KERNEL);
-       if (!accel_dev->pf.vf_info)
-               return -ENOMEM;
-
-       if (adf_dev_up(accel_dev, false)) {
-               dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
-                       accel_dev->accel_id);
-               return -EFAULT;
-       }
-
-       ret = adf_enable_sriov(accel_dev);
-       if (ret)
-               return ret;
-
-       return numvfs;
-}
-EXPORT_SYMBOL_GPL(adf_sriov_configure);
-
-int __init adf_init_pf_wq(void)
-{
-       /* Workqueue for PF2VF responses */
-       pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
-
-       return !pf2vf_resp_wq ? -ENOMEM : 0;
-}
-
-void adf_exit_pf_wq(void)
-{
-       if (pf2vf_resp_wq) {
-               destroy_workqueue(pf2vf_resp_wq);
-               pf2vf_resp_wq = NULL;
-       }
-}
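
For context, adf_sriov_configure() above is designed to be wired into the
standard .sriov_configure hook of a QAT device's pci_driver, so that writing
to the device's sriov_numvfs sysfs attribute enables the VFs. A sketch, with
placeholder names for everything except adf_sriov_configure():

        static struct pci_driver example_qat_driver = {
                .name = "qat_example",                /* placeholder */
                .id_table = example_pci_tbl,          /* placeholder */
                .probe = example_probe,               /* placeholder */
                .remove = example_remove,             /* placeholder */
                .sriov_configure = adf_sriov_configure,
        };
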
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
deleted file mode 100644 (file)
index 3eb6611..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include "adf_accel_devices.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-static const char * const state_operations[] = {
-       [DEV_DOWN] = "down",
-       [DEV_UP] = "up",
-};
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct adf_accel_dev *accel_dev;
-       char *state;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       state = adf_dev_started(accel_dev) ? "up" : "down";
-       return sysfs_emit(buf, "%s\n", state);
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       struct adf_accel_dev *accel_dev;
-       u32 accel_id;
-       int ret;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       accel_id = accel_dev->accel_id;
-
-       if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
-               dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
-               return -EBUSY;
-       }
-
-       ret = sysfs_match_string(state_operations, buf);
-       if (ret < 0)
-               return ret;
-
-       switch (ret) {
-       case DEV_DOWN:
-               dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
-
-               ret = adf_dev_down(accel_dev, true);
-               if (ret < 0)
-                       return -EINVAL;
-
-               break;
-       case DEV_UP:
-               dev_info(dev, "Starting device qat_dev%d\n", accel_id);
-
-               ret = adf_dev_up(accel_dev, true);
-               if (ret < 0) {
-                       dev_err(dev, "Failed to start device qat_dev%d\n",
-                               accel_id);
-                       adf_dev_down(accel_dev, true);
-                       return ret;
-               }
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return count;
-}
-
-static const char * const services_operations[] = {
-       ADF_CFG_CY,
-       ADF_CFG_DC,
-};
-
-static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
-                                char *buf)
-{
-       char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
-       struct adf_accel_dev *accel_dev;
-       int ret;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
-                                     ADF_SERVICES_ENABLED, services);
-       if (ret)
-               return ret;
-
-       return sysfs_emit(buf, "%s\n", services);
-}
-
-static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
-                                      const char *services)
-{
-       return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
-                                          ADF_SERVICES_ENABLED, services,
-                                          ADF_STR);
-}
-
-static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct adf_hw_device_data *hw_data;
-       struct adf_accel_dev *accel_dev;
-       int ret;
-
-       ret = sysfs_match_string(services_operations, buf);
-       if (ret < 0)
-               return ret;
-
-       accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
-       if (!accel_dev)
-               return -EINVAL;
-
-       if (adf_dev_started(accel_dev)) {
-               dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
-                        accel_dev->accel_id);
-               return -EINVAL;
-       }
-
-       ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]);
-       if (ret < 0)
-               return ret;
-
-       hw_data = GET_HW_DATA(accel_dev);
-
-       /* Update the capabilities mask after a change in configuration.
-        * A call to this function is required as capabilities are, at the
-        * moment, tied to configuration.
-        */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-       if (!hw_data->accel_capabilities_mask)
-               return -EINVAL;
-
-       return count;
-}
-
-static DEVICE_ATTR_RW(state);
-static DEVICE_ATTR_RW(cfg_services);
-
-static struct attribute *qat_attrs[] = {
-       &dev_attr_state.attr,
-       &dev_attr_cfg_services.attr,
-       NULL,
-};
-
-static struct attribute_group qat_group = {
-       .attrs = qat_attrs,
-       .name = "qat",
-};
-
-int adf_sysfs_init(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to create qat attribute group: %d\n", ret);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_sysfs_init);
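
The "qat" attribute group registered above shows up under the device's sysfs
node. A typical reconfiguration sequence, sketched as a comment since the PCI
address is illustrative and cfg_services accepts only the values listed in
services_operations:

        /*
         * cat /sys/bus/pci/devices/0000:6b:00.0/qat/state           ("up")
         * echo down > /sys/bus/pci/devices/0000:6b:00.0/qat/state
         * echo dc > /sys/bus/pci/devices/0000:6b:00.0/qat/cfg_services
         * echo up > /sys/bus/pci/devices/0000:6b:00.0/qat/state
         */
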
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
deleted file mode 100644 (file)
index 630d048..0000000
+++ /dev/null
@@ -1,577 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/delay.h>
-#include <linux/nospec.h>
-#include "adf_accel_devices.h"
-#include "adf_transport_internal.h"
-#include "adf_transport_access_macros.h"
-#include "adf_cfg.h"
-#include "adf_common_drv.h"
-
-#define ADF_MAX_RING_THRESHOLD         80
-#define ADF_PERCENT(tot, percent)      (((tot) * (percent)) / 100)
-
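-/* Compute data % (1 << shift) without a divide instruction */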
-static inline u32 adf_modulo(u32 data, u32 shift)
-{
-       u32 div = data >> shift;
-       u32 mult = div << shift;
-
-       return data - mult;
-}
-
-static inline int adf_check_ring_alignment(u64 addr, u64 size)
-{
-       if (((size - 1) & addr) != 0)
-               return -EFAULT;
-       return 0;
-}
-
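-/* Map msg_size * msg_num in bytes to the matching ring size enum */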
-static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
-{
-       int i = ADF_MIN_RING_SIZE;
-
-       for (; i <= ADF_MAX_RING_SIZE; i++)
-               if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
-                       return i;
-
-       return ADF_DEFAULT_RING_SIZE;
-}
-
-static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
-{
-       spin_lock(&bank->lock);
-       if (bank->ring_mask & (1 << ring)) {
-               spin_unlock(&bank->lock);
-               return -EFAULT;
-       }
-       bank->ring_mask |= (1 << ring);
-       spin_unlock(&bank->lock);
-       return 0;
-}
-
-static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
-{
-       spin_lock(&bank->lock);
-       bank->ring_mask &= ~(1 << ring);
-       spin_unlock(&bank->lock);
-}
-
-static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       spin_lock_bh(&bank->lock);
-       bank->irq_mask |= (1 << ring);
-       spin_unlock_bh(&bank->lock);
-       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
-                                     bank->irq_mask);
-       csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
-                                      bank->irq_coalesc_timer);
-}
-
-static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       spin_lock_bh(&bank->lock);
-       bank->irq_mask &= ~(1 << ring);
-       spin_unlock_bh(&bank->lock);
-       csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
-                                     bank->irq_mask);
-}
-
-bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
-{
-       return atomic_read(ring->inflights) > ring->threshold;
-}
-
-int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-
-       if (atomic_add_return(1, ring->inflights) >
-           ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
-               atomic_dec(ring->inflights);
-               return -EAGAIN;
-       }
-       spin_lock_bh(&ring->lock);
-       memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
-              ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
-
-       ring->tail = adf_modulo(ring->tail +
-                               ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
-                               ADF_RING_SIZE_MODULO(ring->ring_size));
-       csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
-                                    ring->bank->bank_number, ring->ring_number,
-                                    ring->tail);
-       spin_unlock_bh(&ring->lock);
-
-       return 0;
-}
-
-static int adf_handle_response(struct adf_etr_ring_data *ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-       u32 msg_counter = 0;
-       u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
-
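-       /* Consume responses until the empty-ring signature is reached */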
-       while (*msg != ADF_RING_EMPTY_SIG) {
-               ring->callback((u32 *)msg);
-               atomic_dec(ring->inflights);
-               *msg = ADF_RING_EMPTY_SIG;
-               ring->head = adf_modulo(ring->head +
-                                       ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
-                                       ADF_RING_SIZE_MODULO(ring->ring_size));
-               msg_counter++;
-               msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
-       }
-       if (msg_counter > 0) {
-               csr_ops->write_csr_ring_head(ring->bank->csr_addr,
-                                            ring->bank->bank_number,
-                                            ring->ring_number, ring->head);
-       }
-       return 0;
-}
-
-static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-       u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
-
-       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
-                                      ring->bank->bank_number,
-                                      ring->ring_number, ring_config);
-}
-
-static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
-       u32 ring_config =
-                       BUILD_RESP_RING_CONFIG(ring->ring_size,
-                                              ADF_RING_NEAR_WATERMARK_512,
-                                              ADF_RING_NEAR_WATERMARK_0);
-
-       csr_ops->write_csr_ring_config(ring->bank->csr_addr,
-                                      ring->bank->bank_number,
-                                      ring->ring_number, ring_config);
-}
-
-static int adf_init_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       u64 ring_base;
-       u32 ring_size_bytes =
-                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
-
-       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
-       ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
-                                            ring_size_bytes, &ring->dma_addr,
-                                            GFP_KERNEL);
-       if (!ring->base_addr)
-               return -ENOMEM;
-
-       memset(ring->base_addr, 0x7F, ring_size_bytes);
-       /* The base_addr has to be aligned to the size of the buffer */
-       if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
-               dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
-               dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
-                                 ring->base_addr, ring->dma_addr);
-               ring->base_addr = NULL;
-               return -EFAULT;
-       }
-
-       if (hw_data->tx_rings_mask & (1 << ring->ring_number))
-               adf_configure_tx_ring(ring);
-       else
-               adf_configure_rx_ring(ring);
-
-       ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
-                                                     ring->ring_size);
-
-       csr_ops->write_csr_ring_base(ring->bank->csr_addr,
-                                    ring->bank->bank_number, ring->ring_number,
-                                    ring_base);
-       spin_lock_init(&ring->lock);
-       return 0;
-}
-
-static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
-{
-       u32 ring_size_bytes =
-                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
-       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
-
-       if (ring->base_addr) {
-               memset(ring->base_addr, 0x7F, ring_size_bytes);
-               dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
-                                 ring_size_bytes, ring->base_addr,
-                                 ring->dma_addr);
-       }
-}
-
-int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
-                   u32 bank_num, u32 num_msgs,
-                   u32 msg_size, const char *ring_name,
-                   adf_callback_fn callback, int poll_mode,
-                   struct adf_etr_ring_data **ring_ptr)
-{
-       struct adf_etr_data *transport_data = accel_dev->transport;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
-       struct adf_etr_bank_data *bank;
-       struct adf_etr_ring_data *ring;
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       int max_inflights;
-       u32 ring_num;
-       int ret;
-
-       if (bank_num >= GET_MAX_BANKS(accel_dev)) {
-               dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
-               return -EFAULT;
-       }
-       if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
-               dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
-               return -EFAULT;
-       }
-       if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
-                             ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Invalid ring size for given msg size\n");
-               return -EFAULT;
-       }
-       if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
-               dev_err(&GET_DEV(accel_dev), "Section %s, no such entry: %s\n",
-                       section, ring_name);
-               return -EFAULT;
-       }
-       if (kstrtouint(val, 10, &ring_num)) {
-               dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
-               return -EFAULT;
-       }
-       if (ring_num >= num_rings_per_bank) {
-               dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
-               return -EFAULT;
-       }
-
-       ring_num = array_index_nospec(ring_num, num_rings_per_bank);
-       bank = &transport_data->banks[bank_num];
-       if (adf_reserve_ring(bank, ring_num)) {
-               dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
-                       ring_num, ring_name);
-               return -EFAULT;
-       }
-       ring = &bank->rings[ring_num];
-       ring->ring_number = ring_num;
-       ring->bank = bank;
-       ring->callback = callback;
-       ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
-       ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
-       ring->head = 0;
-       ring->tail = 0;
-       max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
-       ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
-       atomic_set(ring->inflights, 0);
-       ret = adf_init_ring(ring);
-       if (ret)
-               goto err;
-
-       /* Enable HW arbitration for the given ring */
-       adf_update_ring_arb(ring);
-
-       if (adf_ring_debugfs_add(ring, ring_name)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Couldn't add ring debugfs entry\n");
-               ret = -EFAULT;
-               goto err;
-       }
-
-       /* Enable interrupts if needed */
-       if (callback && (!poll_mode))
-               adf_enable_ring_irq(bank, ring->ring_number);
-       *ring_ptr = ring;
-       return 0;
-err:
-       adf_cleanup_ring(ring);
-       adf_unreserve_ring(bank, ring_num);
-       adf_update_ring_arb(ring);
-       return ret;
-}
-
-void adf_remove_ring(struct adf_etr_ring_data *ring)
-{
-       struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       /* Disable interrupts for the given ring */
-       adf_disable_ring_irq(bank, ring->ring_number);
-
-       /* Clear the ring's config and base CSRs */
-       csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
-                                      ring->ring_number, 0);
-       csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
-                                    ring->ring_number, 0);
-       adf_ring_debugfs_rm(ring);
-       adf_unreserve_ring(bank, ring->ring_number);
-       /* Disable HW arbitration for the given ring */
-       adf_update_ring_arb(ring);
-       adf_cleanup_ring(ring);
-}
-
-static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
-{
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
-       unsigned long empty_rings;
-       int i;
-
-       empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
-                                              bank->bank_number);
-       empty_rings = ~empty_rings & bank->irq_mask;
-
-       for_each_set_bit(i, &empty_rings, num_rings_per_bank)
-               adf_handle_response(&bank->rings[i]);
-}
-
-void adf_response_handler(uintptr_t bank_addr)
-{
-       struct adf_etr_bank_data *bank = (void *)bank_addr;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       /* Handle all the responses and reenable IRQs */
-       adf_ring_response_handler(bank);
-
-       csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
-                                           bank->irq_mask);
-}
-
-static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
-                                 const char *section, const char *format,
-                                 u32 key, u32 *value)
-{
-       char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-
-       snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
-
-       if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
-               return -EFAULT;
-
-       if (kstrtouint(val_buf, 10, value))
-               return -EFAULT;
-       return 0;
-}
-
-static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
-                                 const char *section,
-                                 u32 bank_num_in_accel)
-{
-       if (adf_get_cfg_int(bank->accel_dev, section,
-                           ADF_ETRMGR_COALESCE_TIMER_FORMAT,
-                           bank_num_in_accel, &bank->irq_coalesc_timer))
-               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
-
-       if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
-           ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
-               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
-}
-
-static int adf_init_bank(struct adf_accel_dev *accel_dev,
-                        struct adf_etr_bank_data *bank,
-                        u32 bank_num, void __iomem *csr_addr)
-{
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
-       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
-       u32 irq_mask = BIT(num_rings_per_bank) - 1;
-       struct adf_etr_ring_data *ring;
-       struct adf_etr_ring_data *tx_ring;
-       u32 i, coalesc_enabled = 0;
-       unsigned long ring_mask;
-       int size;
-
-       memset(bank, 0, sizeof(*bank));
-       bank->bank_number = bank_num;
-       bank->csr_addr = csr_addr;
-       bank->accel_dev = accel_dev;
-       spin_lock_init(&bank->lock);
-
-       /* Allocate the rings in the bank */
-       size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
-       bank->rings = kzalloc_node(size, GFP_KERNEL,
-                                  dev_to_node(&GET_DEV(accel_dev)));
-       if (!bank->rings)
-               return -ENOMEM;
-
-       /* Always enable IRQ coalescing. This allows use of the optimised
-        * flag and coalescing register.
-        * If coalescing is disabled in the config file, just use the minimum
-        * time value.
-        */
-       if ((adf_get_cfg_int(accel_dev, "Accelerator0",
-                            ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
-                            &coalesc_enabled) == 0) && coalesc_enabled)
-               adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
-       else
-               bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
-
-       for (i = 0; i < num_rings_per_bank; i++) {
-               csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
-               csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
-
-               ring = &bank->rings[i];
-               if (hw_data->tx_rings_mask & (1 << i)) {
-                       ring->inflights =
-                               kzalloc_node(sizeof(atomic_t),
-                                            GFP_KERNEL,
-                                            dev_to_node(&GET_DEV(accel_dev)));
-                       if (!ring->inflights)
-                               goto err;
-               } else {
-                       if (i < hw_data->tx_rx_gap) {
-                               dev_err(&GET_DEV(accel_dev),
-                                       "Invalid tx rings mask config\n");
-                               goto err;
-                       }
-                       tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
-                       ring->inflights = tx_ring->inflights;
-               }
-       }
-       if (adf_bank_debugfs_add(bank)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to add bank debugfs entry\n");
-               goto err;
-       }
-
-       csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
-       csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
-
-       return 0;
-err:
-       ring_mask = hw_data->tx_rings_mask;
-       for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
-               ring = &bank->rings[i];
-               kfree(ring->inflights);
-               ring->inflights = NULL;
-       }
-       kfree(bank->rings);
-       return -ENOMEM;
-}
-
-/**
- * adf_init_etr_data() - Initialize transport rings for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function initializes the communication channels (rings) of the
- * acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_init_etr_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *etr_data;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *csr_addr;
-       u32 size;
-       u32 num_banks = 0;
-       int i, ret;
-
-       etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-                               dev_to_node(&GET_DEV(accel_dev)));
-       if (!etr_data)
-               return -ENOMEM;
-
-       num_banks = GET_MAX_BANKS(accel_dev);
-       size = num_banks * sizeof(struct adf_etr_bank_data);
-       etr_data->banks = kzalloc_node(size, GFP_KERNEL,
-                                      dev_to_node(&GET_DEV(accel_dev)));
-       if (!etr_data->banks) {
-               ret = -ENOMEM;
-               goto err_bank;
-       }
-
-       accel_dev->transport = etr_data;
-       i = hw_data->get_etr_bar_id(hw_data);
-       csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
-
-       /* accel_dev->debugfs_dir should always be non-NULL here */
-       etr_data->debug = debugfs_create_dir("transport",
-                                            accel_dev->debugfs_dir);
-
-       for (i = 0; i < num_banks; i++) {
-               ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
-                                   csr_addr);
-               if (ret)
-                       goto err_bank_all;
-       }
-
-       return 0;
-
-err_bank_all:
-       debugfs_remove(etr_data->debug);
-       kfree(etr_data->banks);
-err_bank:
-       kfree(etr_data);
-       accel_dev->transport = NULL;
-       return ret;
-}
-EXPORT_SYMBOL_GPL(adf_init_etr_data);
-
-static void cleanup_bank(struct adf_etr_bank_data *bank)
-{
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       u8 num_rings_per_bank = hw_data->num_rings_per_bank;
-       u32 i;
-
-       for (i = 0; i < num_rings_per_bank; i++) {
-               struct adf_etr_ring_data *ring = &bank->rings[i];
-
-               if (bank->ring_mask & (1 << i))
-                       adf_cleanup_ring(ring);
-
-               if (hw_data->tx_rings_mask & (1 << i))
-                       kfree(ring->inflights);
-       }
-       kfree(bank->rings);
-       adf_bank_debugfs_rm(bank);
-       memset(bank, 0, sizeof(*bank));
-}
-
-static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *etr_data = accel_dev->transport;
-       u32 i, num_banks = GET_MAX_BANKS(accel_dev);
-
-       for (i = 0; i < num_banks; i++)
-               cleanup_bank(&etr_data->banks[i]);
-}
-
-/**
- * adf_cleanup_etr_data() - Clear transport rings for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function clears the communication channels (rings) of the
- * acceleration device accel_dev.
- * To be used by QAT device specific drivers.
- *
- * Return: void
- */
-void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *etr_data = accel_dev->transport;
-
-       if (etr_data) {
-               adf_cleanup_etr_handles(accel_dev);
-               debugfs_remove(etr_data->debug);
-               kfree(etr_data->banks->rings);
-               kfree(etr_data->banks);
-               kfree(etr_data);
-               accel_dev->transport = NULL;
-       }
-}
-EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
deleted file mode 100644 (file)
index e6ef6f9..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_TRANSPORT_H
-#define ADF_TRANSPORT_H
-
-#include "adf_accel_devices.h"
-
-struct adf_etr_ring_data;
-
-typedef void (*adf_callback_fn)(void *resp_msg);
-
-int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
-                   u32 bank_num, u32 num_msgs, u32 msg_size,
-                   const char *ring_name, adf_callback_fn callback,
-                   int poll_mode, struct adf_etr_ring_data **ring_ptr);
-
-bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
-int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
-void adf_remove_ring(struct adf_etr_ring_data *ring);
-#endif
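
A rough usage sketch of this API; the configuration section, key name, sizes
and poll mode are illustrative, not mandated by the interface:

        /* Sketch only: "SSL" section and "Cy0RingSym0Tx" key are examples */
        static int example_submit(struct adf_accel_dev *accel_dev, u32 *req)
        {
                struct adf_etr_ring_data *tx_ring;
                int ret;

                /* 512 messages of 64 bytes, no callback, polled mode */
                ret = adf_create_ring(accel_dev, "SSL", 0, 512, 64,
                                      "Cy0RingSym0Tx", NULL, 1, &tx_ring);
                if (ret)
                        return ret;

                if (adf_ring_nearly_full(tx_ring))
                        return -EBUSY;  /* caller backs off and retries */

                return adf_send_message(tx_ring, req); /* -EAGAIN when full */
        }

In a real driver the ring is created once at init time and torn down with
adf_remove_ring() at shutdown; the sequence is compressed here for brevity.
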
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
deleted file mode 100644 (file)
index d3667db..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
-#define ADF_TRANSPORT_ACCESS_MACROS_H
-
-#include "adf_accel_devices.h"
-#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
-#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
-#define ADF_COALESCING_MIN_TIME 0x1FF
-#define ADF_COALESCING_MAX_TIME 0xFFFFF
-#define ADF_COALESCING_DEF_TIME 0x27FF
-#define ADF_RING_NEAR_WATERMARK_512 0x08
-#define ADF_RING_NEAR_WATERMARK_0 0x00
-#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
-
-/* Valid internal ring size values */
-#define ADF_RING_SIZE_128 0x01
-#define ADF_RING_SIZE_256 0x02
-#define ADF_RING_SIZE_512 0x03
-#define ADF_RING_SIZE_4K 0x06
-#define ADF_RING_SIZE_16K 0x08
-#define ADF_RING_SIZE_4M 0x10
-#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
-#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
-#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-
-/* Valid internal msg size values */
-#define ADF_MSG_SIZE_32 0x01
-#define ADF_MSG_SIZE_64 0x02
-#define ADF_MSG_SIZE_128 0x04
-#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
-#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-
-/* Size to bytes conversion macros for ring and msg size values */
-#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
-#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
-#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
-#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-
-/* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
-       ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
-               ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
-#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
-#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
-                               SIZE) & ~0x4)
-/* Max outstanding requests */
-#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
-       ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
-#define BUILD_RING_CONFIG(size)        \
-       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
-       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
-       | size)
-#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
-       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
-       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
-       | size)
-#endif
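
A worked example of the arithmetic above, for a ring configured with
ADF_RING_SIZE_16K (0x08) holding ADF_MSG_SIZE_128 (0x04) messages:

        /*
         * ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x08) = (1 << 7) << 7 = 16384 bytes
         * ADF_MSG_SIZE_TO_BYTES(0x04)          = 0x04 << 5     = 128 bytes
         * ADF_SIZE_TO_POW(0x04)                = 3
         * ADF_MAX_INFLIGHTS(0x08, 0x04)        = (1024 >> 3) - 1 = 127
         *
         * A 16 KB ring holds 128 slots of 128-byte messages; one slot is
         * kept free, so at most 127 requests can be in flight.
         */
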
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
deleted file mode 100644 (file)
index 08bca1c..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include "adf_accel_devices.h"
-#include "adf_transport_internal.h"
-#include "adf_transport_access_macros.h"
-
-static DEFINE_MUTEX(ring_read_lock);
-static DEFINE_MUTEX(bank_read_lock);
-
-static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
-{
-       struct adf_etr_ring_data *ring = sfile->private;
-
-       mutex_lock(&ring_read_lock);
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
-                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
-               return NULL;
-
-       return ring->base_addr +
-               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
-}
-
-static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
-{
-       struct adf_etr_ring_data *ring = sfile->private;
-
-       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
-                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
-               return NULL;
-
-       return ring->base_addr +
-               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
-}
-
-static int adf_ring_show(struct seq_file *sfile, void *v)
-{
-       struct adf_etr_ring_data *ring = sfile->private;
-       struct adf_etr_bank_data *bank = ring->bank;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-       void __iomem *csr = ring->bank->csr_addr;
-
-       if (v == SEQ_START_TOKEN) {
-               int head, tail, empty;
-
-               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
-                                                  ring->ring_number);
-               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
-                                                  ring->ring_number);
-               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
-
-               seq_puts(sfile, "------- Ring configuration -------\n");
-               seq_printf(sfile, "ring name: %s\n",
-                          ring->ring_debug->ring_name);
-               seq_printf(sfile, "ring num %d, bank num %d\n",
-                          ring->ring_number, ring->bank->bank_number);
-               seq_printf(sfile, "head %x, tail %x, empty: %d\n",
-                          head, tail, (empty & 1 << ring->ring_number)
-                          >> ring->ring_number);
-               seq_printf(sfile, "ring size %lld, msg size %d\n",
-                          (long long)ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
-                          ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
-               seq_puts(sfile, "----------- Ring data ------------\n");
-               return 0;
-       }
-       seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
-                    v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
-       return 0;
-}
-
-static void adf_ring_stop(struct seq_file *sfile, void *v)
-{
-       mutex_unlock(&ring_read_lock);
-}
-
-static const struct seq_operations adf_ring_debug_sops = {
-       .start = adf_ring_start,
-       .next = adf_ring_next,
-       .stop = adf_ring_stop,
-       .show = adf_ring_show
-};
-
-DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
-
-int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
-{
-       struct adf_etr_ring_debug_entry *ring_debug;
-       char entry_name[8];
-
-       ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
-       if (!ring_debug)
-               return -ENOMEM;
-
-       strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
-       snprintf(entry_name, sizeof(entry_name), "ring_%02d",
-                ring->ring_number);
-
-       ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
-                                               ring->bank->bank_debug_dir,
-                                               ring, &adf_ring_debug_fops);
-       ring->ring_debug = ring_debug;
-       return 0;
-}
-
-void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
-{
-       if (ring->ring_debug) {
-               debugfs_remove(ring->ring_debug->debug);
-               kfree(ring->ring_debug);
-               ring->ring_debug = NULL;
-       }
-}
-
-static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
-{
-       struct adf_etr_bank_data *bank = sfile->private;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
-
-       mutex_lock(&bank_read_lock);
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
-       if (*pos >= num_rings_per_bank)
-               return NULL;
-
-       return pos;
-}
-
-static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
-{
-       struct adf_etr_bank_data *bank = sfile->private;
-       u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(bank->accel_dev);
-
-       if (++(*pos) >= num_rings_per_bank)
-               return NULL;
-
-       return pos;
-}
-
-static int adf_bank_show(struct seq_file *sfile, void *v)
-{
-       struct adf_etr_bank_data *bank = sfile->private;
-       struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
-
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(sfile, "------- Bank %d configuration -------\n",
-                          bank->bank_number);
-       } else {
-               int ring_id = *((int *)v) - 1;
-               struct adf_etr_ring_data *ring = &bank->rings[ring_id];
-               void __iomem *csr = bank->csr_addr;
-               int head, tail, empty;
-
-               if (!(bank->ring_mask & 1 << ring_id))
-                       return 0;
-
-               head = csr_ops->read_csr_ring_head(csr, bank->bank_number,
-                                                  ring->ring_number);
-               tail = csr_ops->read_csr_ring_tail(csr, bank->bank_number,
-                                                  ring->ring_number);
-               empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
-
-               seq_printf(sfile,
-                          "ring num %02d, head %04x, tail %04x, empty: %d\n",
-                          ring->ring_number, head, tail,
-                          (empty & 1 << ring->ring_number) >>
-                          ring->ring_number);
-       }
-       return 0;
-}
-
-static void adf_bank_stop(struct seq_file *sfile, void *v)
-{
-       mutex_unlock(&bank_read_lock);
-}
-
-static const struct seq_operations adf_bank_debug_sops = {
-       .start = adf_bank_start,
-       .next = adf_bank_next,
-       .stop = adf_bank_stop,
-       .show = adf_bank_show
-};
-
-DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
-
-int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
-{
-       struct adf_accel_dev *accel_dev = bank->accel_dev;
-       struct dentry *parent = accel_dev->transport->debug;
-       char name[8];
-
-       snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
-       bank->bank_debug_dir = debugfs_create_dir(name, parent);
-       bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
-                                                  bank->bank_debug_dir, bank,
-                                                  &adf_bank_debug_fops);
-       return 0;
-}
-
-void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
-{
-       debugfs_remove(bank->bank_debug_cfg);
-       debugfs_remove(bank->bank_debug_dir);
-}
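
The entries created above land under the device's debugfs directory; reading
them dumps bank and ring state. Sketched as a comment because the directory
name depends on the device and on the debugfs mount point:

        /*
         * cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/transport/bank_00/config
         * cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/transport/bank_00/ring_00
         */
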
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
deleted file mode 100644 (file)
index 8b2c92b..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_TRANSPORT_INTRN_H
-#define ADF_TRANSPORT_INTRN_H
-
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include "adf_transport.h"
-
-struct adf_etr_ring_debug_entry {
-       char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       struct dentry *debug;
-};
-
-struct adf_etr_ring_data {
-       void *base_addr;
-       atomic_t *inflights;
-       adf_callback_fn callback;
-       struct adf_etr_bank_data *bank;
-       dma_addr_t dma_addr;
-       struct adf_etr_ring_debug_entry *ring_debug;
-       spinlock_t lock;        /* protects ring data struct */
-       u16 head;
-       u16 tail;
-       u32 threshold;
-       u8 ring_number;
-       u8 ring_size;
-       u8 msg_size;
-};
-
-struct adf_etr_bank_data {
-       struct adf_etr_ring_data *rings;
-       struct tasklet_struct resp_handler;
-       void __iomem *csr_addr;
-       u32 irq_coalesc_timer;
-       u32 bank_number;
-       u16 ring_mask;
-       u16 irq_mask;
-       spinlock_t lock;        /* protects bank data struct */
-       struct adf_accel_dev *accel_dev;
-       struct dentry *bank_debug_dir;
-       struct dentry *bank_debug_cfg;
-};
-
-struct adf_etr_data {
-       struct adf_etr_bank_data *banks;
-       struct dentry *debug;
-};
-
-void adf_response_handler(uintptr_t bank_addr);
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
-void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
-int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
-void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
-#else
-static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
-{
-       return 0;
-}
-
-#define adf_bank_debugfs_rm(bank) do {} while (0)
-
-static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
-                                      const char *name)
-{
-       return 0;
-}
-
-#define adf_ring_debugfs_rm(ring) do {} while (0)
-#endif
-#endif
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
deleted file mode 100644 (file)
index b05c395..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_cfg_common.h"
-#include "adf_transport_access_macros.h"
-#include "adf_transport_internal.h"
-
-#define ADF_VINTSOU_OFFSET     0x204
-#define ADF_VINTMSK_OFFSET     0x208
-#define ADF_VINTSOU_BUN                BIT(0)
-#define ADF_VINTSOU_PF2VF      BIT(1)
-
-static struct workqueue_struct *adf_vf_stop_wq;
-
-struct adf_vf_stop_data {
-       struct adf_accel_dev *accel_dev;
-       struct work_struct work;
-};
-
-void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-
-       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
-}
-
-void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
-{
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-
-       ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
-}
-EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
-
-static int adf_enable_msi(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
-       int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
-                                        PCI_IRQ_MSI);
-       if (unlikely(stat < 0)) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Failed to enable MSI interrupt: %d\n", stat);
-               return stat;
-       }
-
-       return 0;
-}
-
-static void adf_disable_msi(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       pci_free_irq_vectors(pdev);
-}
-
-static void adf_dev_stop_async(struct work_struct *work)
-{
-       struct adf_vf_stop_data *stop_data =
-               container_of(work, struct adf_vf_stop_data, work);
-       struct adf_accel_dev *accel_dev = stop_data->accel_dev;
-
-       adf_dev_restarting_notify(accel_dev);
-       adf_dev_down(accel_dev, false);
-
-       /* Re-enable PF2VF interrupts */
-       adf_enable_pf2vf_interrupts(accel_dev);
-       kfree(stop_data);
-}
-
-int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
-{
-       struct adf_vf_stop_data *stop_data;
-
-       clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-       stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
-       if (!stop_data) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Couldn't schedule stop for vf_%d\n",
-                       accel_dev->accel_id);
-               return -ENOMEM;
-       }
-       stop_data->accel_dev = accel_dev;
-       INIT_WORK(&stop_data->work, adf_dev_stop_async);
-       queue_work(adf_vf_stop_wq, &stop_data->work);
-
-       return 0;
-}
-
-static void adf_pf2vf_bh_handler(void *data)
-{
-       struct adf_accel_dev *accel_dev = data;
-       bool ret;
-
-       ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
-       if (ret)
-               /* Re-enable PF2VF interrupts */
-               adf_enable_pf2vf_interrupts(accel_dev);
-}
-
-static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
-{
-       tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
-                    (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
-
-       mutex_init(&accel_dev->vf.vf2pf_lock);
-       return 0;
-}
-
-static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
-{
-       tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
-       tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
-       mutex_destroy(&accel_dev->vf.vf2pf_lock);
-}
-
-static irqreturn_t adf_isr(int irq, void *privdata)
-{
-       struct adf_accel_dev *accel_dev = privdata;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
-       struct adf_bar *pmisc =
-                       &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
-       void __iomem *pmisc_bar_addr = pmisc->virt_addr;
-       bool handled = false;
-       u32 v_int, v_mask;
-
-       /* Read VF INT source CSR to determine the source of VF interrupt */
-       v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);
-
-       /* Read VF INT mask CSR to determine which sources are masked */
-       v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);
-
-       /*
-        * Recompute v_int ignoring sources that are masked. This is to
-        * avoid rescheduling the tasklet for interrupts already handled
-        */
-       v_int &= ~v_mask;
-
-       /* Check for PF2VF interrupt */
-       if (v_int & ADF_VINTSOU_PF2VF) {
-               /* Disable PF to VF interrupt */
-               adf_disable_pf2vf_interrupts(accel_dev);
-
-               /* Schedule tasklet to handle interrupt BH */
-               tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
-               handled = true;
-       }
-
-       /* Check bundle interrupt */
-       if (v_int & ADF_VINTSOU_BUN) {
-               struct adf_etr_data *etr_data = accel_dev->transport;
-               struct adf_etr_bank_data *bank = &etr_data->banks[0];
-
-               /* Disable Flag and Coalesce Ring Interrupts */
-               csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
-                                                   bank->bank_number, 0);
-               tasklet_hi_schedule(&bank->resp_handler);
-               handled = true;
-       }
-
-       return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-       unsigned int cpu;
-       int ret;
-
-       snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
-                "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
-                PCI_FUNC(pdev->devfn));
-       ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
-                         (void *)accel_dev);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
-                       accel_dev->vf.irq_name);
-               return ret;
-       }
-       cpu = accel_dev->accel_id % num_online_cpus();
-       irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
-       accel_dev->vf.irq_enabled = true;
-
-       return ret;
-}
-
-static int adf_setup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-
-       tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
-                    (unsigned long)priv_data->banks);
-       return 0;
-}
-
-static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
-{
-       struct adf_etr_data *priv_data = accel_dev->transport;
-
-       tasklet_disable(&priv_data->banks[0].resp_handler);
-       tasklet_kill(&priv_data->banks[0].resp_handler);
-}
-
-/**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function frees interrupts for acceleration device virtual function.
- */
-void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
-
-       if (accel_dev->vf.irq_enabled) {
-               irq_set_affinity_hint(pdev->irq, NULL);
-               free_irq(pdev->irq, accel_dev);
-       }
-       adf_cleanup_bh(accel_dev);
-       adf_cleanup_pf2vf_bh(accel_dev);
-       adf_disable_msi(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);
-
-/**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function allocates interrupts for acceleration device virtual function.
- *
- * Return: 0 on success, error code otherwise.
- */
-int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
-{
-       if (adf_enable_msi(accel_dev))
-               goto err_out;
-
-       if (adf_setup_pf2vf_bh(accel_dev))
-               goto err_disable_msi;
-
-       if (adf_setup_bh(accel_dev))
-               goto err_cleanup_pf2vf_bh;
-
-       if (adf_request_msi_irq(accel_dev))
-               goto err_cleanup_bh;
-
-       return 0;
-
-err_cleanup_bh:
-       adf_cleanup_bh(accel_dev);
-
-err_cleanup_pf2vf_bh:
-       adf_cleanup_pf2vf_bh(accel_dev);
-
-err_disable_msi:
-       adf_disable_msi(accel_dev);
-
-err_out:
-       return -EFAULT;
-}
-EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
-
-/**
- * adf_flush_vf_wq() - Flush workqueue for VF
- * @accel_dev:  Pointer to acceleration device.
- *
- * Function disables the PF/VF interrupts on the VF so that no new messages
- * are received and flushes the workqueue 'adf_vf_stop_wq'.
- *
- * Return: void.
- */
-void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
-{
-       adf_disable_pf2vf_interrupts(accel_dev);
-
-       flush_workqueue(adf_vf_stop_wq);
-}
-EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
-
-/**
- * adf_init_vf_wq() - Init workqueue for VF
- *
- * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
- *
- * Return: 0 on success, error code otherwise.
- */
-int __init adf_init_vf_wq(void)
-{
-       adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
-
-       return !adf_vf_stop_wq ? -EFAULT : 0;
-}
-
-void adf_exit_vf_wq(void)
-{
-       if (adf_vf_stop_wq)
-               destroy_workqueue(adf_vf_stop_wq);
-
-       adf_vf_stop_wq = NULL;
-}
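
The VF driver above allocates a single MSI vector, so adf_isr() must demultiplex the two possible interrupt sources (PF-to-VF messages and ring bundle responses) in software before scheduling the matching tasklet. A minimal user-space sketch of that demux step, with the CSR reads replaced by plain variables; the constants mirror ADF_VINTSOU_BUN/ADF_VINTSOU_PF2VF and everything else is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define VINTSOU_BUN   (1u << 0)  /* bundle (ring) interrupt */
    #define VINTSOU_PF2VF (1u << 1)  /* PF-to-VF message interrupt */

    int main(void)
    {
            uint32_t v_int  = VINTSOU_BUN | VINTSOU_PF2VF; /* both asserted */
            uint32_t v_mask = VINTSOU_PF2VF;               /* PF2VF masked */

            /* Drop masked sources so an interrupt that is already being
             * handled is not rescheduled, as in adf_isr() above. */
            v_int &= ~v_mask;

            if (v_int & VINTSOU_PF2VF)
                    printf("schedule pf2vf_bh_tasklet\n");
            if (v_int & VINTSOU_BUN)
                    printf("schedule bank resp_handler\n"); /* only this fires */
            return 0;
    }
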
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
deleted file mode 100644 (file)
index c141160..0000000
+++ /dev/null
@@ -1,298 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_H_
-#define _ICP_QAT_FW_H_
-#include <linux/types.h>
-#include "icp_qat_hw.h"
-
-#define QAT_FIELD_SET(flags, val, bitpos, mask) \
-{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
-               (((val) & (mask)) << (bitpos))) ; }
-
-#define QAT_FIELD_GET(flags, bitpos, mask) \
-       (((flags) >> (bitpos)) & (mask))
-
-#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
-#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
-#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
-#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
-#define ICP_QAT_FW_NUM_LONGWORDS_1 1
-#define ICP_QAT_FW_NUM_LONGWORDS_2 2
-#define ICP_QAT_FW_NUM_LONGWORDS_3 3
-#define ICP_QAT_FW_NUM_LONGWORDS_4 4
-#define ICP_QAT_FW_NUM_LONGWORDS_5 5
-#define ICP_QAT_FW_NUM_LONGWORDS_6 6
-#define ICP_QAT_FW_NUM_LONGWORDS_7 7
-#define ICP_QAT_FW_NUM_LONGWORDS_10 10
-#define ICP_QAT_FW_NUM_LONGWORDS_13 13
-#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
-
-enum icp_qat_fw_comn_resp_serv_id {
-       ICP_QAT_FW_COMN_RESP_SERV_NULL,
-       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
-       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
-};
-
-enum icp_qat_fw_comn_request_id {
-       ICP_QAT_FW_COMN_REQ_NULL = 0,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
-       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
-       ICP_QAT_FW_COMN_REQ_DELIMITER
-};
-
-struct icp_qat_fw_comn_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 serv_specif_fields[4];
-               } s1;
-       } u;
-};
-
-struct icp_qat_fw_comn_req_mid {
-       __u64 opaque_data;
-       __u64 src_data_addr;
-       __u64 dest_data_addr;
-       __u32 src_length;
-       __u32 dst_length;
-};
-
-struct icp_qat_fw_comn_req_cd_ctrl {
-       __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
-};
-
-struct icp_qat_fw_comn_req_hdr {
-       __u8 resrvd1;
-       __u8 service_cmd_id;
-       __u8 service_type;
-       __u8 hdr_flags;
-       __u16 serv_specif_flags;
-       __u16 comn_req_flags;
-};
-
-struct icp_qat_fw_comn_req_rqpars {
-       __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
-};
-
-struct icp_qat_fw_comn_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
-       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-struct icp_qat_fw_comn_error {
-       __u8 xlat_err_code;
-       __u8 cmp_err_code;
-};
-
-struct icp_qat_fw_comn_resp_hdr {
-       __u8 resrvd1;
-       __u8 service_id;
-       __u8 response_type;
-       __u8 hdr_flags;
-       struct icp_qat_fw_comn_error comn_error;
-       __u8 comn_status;
-       __u8 cmd_id;
-};
-
-struct icp_qat_fw_comn_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_hdr;
-       __u64 opaque_data;
-       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
-#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
-#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
-#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
-#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
-#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
-       icp_qat_fw_comn_req_hdr_t.service_type
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
-       icp_qat_fw_comn_req_hdr_t.service_type = val
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
-       icp_qat_fw_comn_req_hdr_t.service_cmd_id
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
-       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
-       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
-
-#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_CNV_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
-       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
-       QAT_FIELD_GET(hdr_flags, \
-       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
-       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
-       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
-       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
-        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
-
-#define QAT_COMN_PTR_TYPE_BITPOS 0
-#define QAT_COMN_PTR_TYPE_MASK 0x1
-#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
-#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
-#define QAT_COMN_PTR_TYPE_FLAT 0x0
-#define QAT_COMN_PTR_TYPE_SGL 0x1
-#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
-#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-
-#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
-       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
-        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
-                       QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
-                       QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
-                       QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
-#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
-#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
-#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
-
-#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
-       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
-
-#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
-
-#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
-#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
-#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
-#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
-#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
-#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
-#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
-       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
-       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
-       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
-       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
-       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
-       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
-       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
-       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
-
-#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
-       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
-       QAT_COMN_RESP_CMP_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
-       QAT_COMN_RESP_XLAT_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
-       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
-       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
-
-#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
-#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
-#define ERR_CODE_NO_ERROR 0
-#define ERR_CODE_INVALID_BLOCK_TYPE -1
-#define ERR_CODE_NO_MATCH_ONES_COMP -2
-#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
-#define ERR_CODE_INCOMPLETE_LEN -4
-#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
-#define ERR_CODE_RPT_GT_SPEC_LEN -6
-#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
-#define ERR_CODE_INV_DIS_CODE_LEN -8
-#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
-#define ERR_CODE_DIS_TOO_FAR_BACK -10
-#define ERR_CODE_OVERFLOW_ERROR -11
-#define ERR_CODE_SOFT_ERROR -12
-#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
-
-enum icp_qat_fw_slice {
-       ICP_QAT_FW_SLICE_NULL = 0,
-       ICP_QAT_FW_SLICE_CIPHER = 1,
-       ICP_QAT_FW_SLICE_AUTH = 2,
-       ICP_QAT_FW_SLICE_DRAM_RD = 3,
-       ICP_QAT_FW_SLICE_DRAM_WR = 4,
-       ICP_QAT_FW_SLICE_COMP = 5,
-       ICP_QAT_FW_SLICE_XLAT = 6,
-       ICP_QAT_FW_SLICE_DELIMITER
-};
-#endif
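
QAT_FIELD_SET() and QAT_FIELD_GET() above are the primitives every other flag accessor in this header is built on: mask selects the field width, bitpos its position, and SET rewrites only that field while preserving the rest of the word. A small self-checking sketch; the two macros are copied verbatim from the header and the 0x55 test value is arbitrary:

    #include <assert.h>
    #include <stdint.h>

    #define QAT_FIELD_SET(flags, val, bitpos, mask) \
    { (flags) = (((flags) & (~((mask) << (bitpos)))) | \
                    (((val) & (mask)) << (bitpos))) ; }

    #define QAT_FIELD_GET(flags, bitpos, mask) \
            (((flags) >> (bitpos)) & (mask))

    int main(void)
    {
            uint8_t hdr_flags = 0x55;

            /* Set the valid flag (bit 7) without touching other bits. */
            QAT_FIELD_SET(hdr_flags, 1, 7, 0x1);
            assert(hdr_flags == 0xd5);
            assert(QAT_FIELD_GET(hdr_flags, 7, 0x1) == 1);

            /* Clearing is the same operation with val == 0. */
            QAT_FIELD_SET(hdr_flags, 0, 7, 0x1);
            assert(hdr_flags == 0x55);
            return 0;
    }
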
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/qat/qat_common/icp_qat_fw_comp.h
deleted file mode 100644 (file)
index a03d43f..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _ICP_QAT_FW_COMP_H_
-#define _ICP_QAT_FW_COMP_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_comp_cmd_id {
-       ICP_QAT_FW_COMP_CMD_STATIC = 0,
-       ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
-       ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
-       ICP_QAT_FW_COMP_CMD_DELIMITER
-};
-
-enum icp_qat_fw_comp_20_cmd_id {
-       ICP_QAT_FW_COMP_20_CMD_LZ4_COMPRESS = 3,
-       ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4,
-       ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5,
-       ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
-       ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
-       ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
-       ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9,
-       ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10,
-       ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11,
-       ICP_QAT_FW_COMP_20_CMD_DELIMITER
-};
-
-#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
-#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
-#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
-#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
-#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
-#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
-#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
-#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
-#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
-#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
-#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
-#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
-#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
-#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
-#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
-#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
-#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
-
-#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
-       ret_uncomp, secure_ram) \
-       ((((sesstype) & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) << \
-       ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
-       (((autoselect) & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) << \
-       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
-       (((enhanced_asb) & ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) << \
-       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
-       (((ret_uncomp) & ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) << \
-       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
-       (((secure_ram) & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) << \
-       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
-
-#define ICP_QAT_FW_COMP_SESSION_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
-       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
-
-#define ICP_QAT_FW_COMP_SESSION_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS, \
-       ICP_QAT_FW_COMP_SESSION_TYPE_MASK)
-
-#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS, \
-       ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK)
-
-#define ICP_QAT_FW_COMP_EN_ASB_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS, \
-       ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK)
-
-#define ICP_QAT_FW_COMP_RET_UNCOMP_GET(flags) \
-       QAT_FIELD_GET(flags, \
-       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS, \
-       ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK)
-
-#define ICP_QAT_FW_COMP_SECURE_RAM_USE_GET(flags) \
-       QAT_FIELD_GET(flags, \
-       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS, \
-       ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK)
-
-struct icp_qat_fw_comp_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
-                       __u32 content_desc_resrvd4;
-               } sl;
-       } u;
-};
-
-struct icp_qat_fw_comp_req_params {
-       __u32 comp_len;
-       __u32 out_buffer_sz;
-       union {
-               struct {
-                       __u32 initial_crc32;
-                       __u32 initial_adler;
-               } legacy;
-               __u64 crc_data_addr;
-       } crc;
-       __u32 req_par_flags;
-       __u32 rsrvd;
-};
-
-#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
-                                             cnvdfx, crc, xxhash_acc, \
-                                             cnv_error_type, append_crc, \
-                                             drop_data) \
-       ((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
-       ICP_QAT_FW_COMP_SOP_BITPOS) | \
-       (((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
-       ICP_QAT_FW_COMP_EOP_BITPOS) | \
-       (((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
-       << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
-       (((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << \
-       ICP_QAT_FW_COMP_CNV_BITPOS) | \
-       (((cnvnr) & ICP_QAT_FW_COMP_CNVNR_MASK) \
-       << ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
-       (((cnvdfx) & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
-       << ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
-       (((crc) & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
-       << ICP_QAT_FW_COMP_CRC_MODE_BITPOS) | \
-       (((xxhash_acc) & ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK) \
-       << ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS) | \
-       (((cnv_error_type) & ICP_QAT_FW_COMP_CNV_ERROR_MASK) \
-       << ICP_QAT_FW_COMP_CNV_ERROR_BITPOS) | \
-       (((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
-       << ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
-       (((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
-       << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
-
-#define ICP_QAT_FW_COMP_NOT_SOP 0
-#define ICP_QAT_FW_COMP_SOP 1
-#define ICP_QAT_FW_COMP_NOT_EOP 0
-#define ICP_QAT_FW_COMP_EOP 1
-#define ICP_QAT_FW_COMP_NOT_BFINAL 0
-#define ICP_QAT_FW_COMP_BFINAL 1
-#define ICP_QAT_FW_COMP_NO_CNV 0
-#define ICP_QAT_FW_COMP_CNV 1
-#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
-#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
-#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
-#define ICP_QAT_FW_COMP_CNV_DFX 1
-#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
-#define ICP_QAT_FW_COMP_CRC_MODE_E2E 1
-#define ICP_QAT_FW_COMP_NO_XXHASH_ACC 0
-#define ICP_QAT_FW_COMP_XXHASH_ACC 1
-#define ICP_QAT_FW_COMP_APPEND_CRC 1
-#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
-#define ICP_QAT_FW_COMP_DROP_DATA 1
-#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
-#define ICP_QAT_FW_COMP_SOP_BITPOS 0
-#define ICP_QAT_FW_COMP_SOP_MASK 0x1
-#define ICP_QAT_FW_COMP_EOP_BITPOS 1
-#define ICP_QAT_FW_COMP_EOP_MASK 0x1
-#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
-#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
-#define ICP_QAT_FW_COMP_CNV_BITPOS 16
-#define ICP_QAT_FW_COMP_CNV_MASK 0x1
-#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
-#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
-#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
-#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
-#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
-#define ICP_QAT_FW_COMP_CRC_MODE_MASK 0x1
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
-#define ICP_QAT_FW_COMP_CNV_ERROR_BITPOS 21
-#define ICP_QAT_FW_COMP_CNV_ERROR_MASK 0b111
-#define ICP_QAT_FW_COMP_CNV_ERROR_NONE 0b000
-#define ICP_QAT_FW_COMP_CNV_ERROR_CHECKSUM 0b001
-#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_OBC_DIFF 0b010
-#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR 0b011
-#define ICP_QAT_FW_COMP_CNV_ERROR_XLT 0b100
-#define ICP_QAT_FW_COMP_CNV_ERROR_DCPR_IBC_DIFF 0b101
-#define ICP_QAT_FW_COMP_APPEND_CRC_BITPOS 24
-#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
-#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
-#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
-
-#define ICP_QAT_FW_COMP_SOP_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
-       ICP_QAT_FW_COMP_SOP_MASK)
-
-#define ICP_QAT_FW_COMP_SOP_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_SOP_BITPOS, \
-       ICP_QAT_FW_COMP_SOP_MASK)
-
-#define ICP_QAT_FW_COMP_EOP_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_EOP_BITPOS, \
-       ICP_QAT_FW_COMP_EOP_MASK)
-
-#define ICP_QAT_FW_COMP_EOP_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_EOP_BITPOS, \
-       ICP_QAT_FW_COMP_EOP_MASK)
-
-#define ICP_QAT_FW_COMP_BFINAL_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
-       ICP_QAT_FW_COMP_BFINAL_MASK)
-
-#define ICP_QAT_FW_COMP_BFINAL_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_BFINAL_BITPOS, \
-       ICP_QAT_FW_COMP_BFINAL_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_MASK)
-
-#define ICP_QAT_FW_COMP_CNVNR_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNVNR_BITPOS, \
-       ICP_QAT_FW_COMP_CNVNR_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_DFX_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_DFX_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_DFX_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_DFX_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_DFX_MASK)
-
-#define ICP_QAT_FW_COMP_CRC_MODE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
-       ICP_QAT_FW_COMP_CRC_MODE_MASK)
-
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
-       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
-
-#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
-       ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_GET(flags) \
-       QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
-
-#define ICP_QAT_FW_COMP_CNV_ERROR_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, ICP_QAT_FW_COMP_CNV_ERROR_BITPOS, \
-       ICP_QAT_FW_COMP_CNV_ERROR_MASK)
-
-struct icp_qat_fw_xlt_req_params {
-       __u64 inter_buff_ptr;
-};
-
-struct icp_qat_fw_comp_cd_hdr {
-       __u16 ram_bank_flags;
-       __u8 comp_cfg_offset;
-       __u8 next_curr_id;
-       __u32 resrvd;
-       __u64 comp_state_addr;
-       __u64 ram_banks_addr;
-};
-
-#define COMP_CPR_INITIAL_CRC 0
-#define COMP_CPR_INITIAL_ADLER 1
-
-struct icp_qat_fw_xlt_cd_hdr {
-       __u16 resrvd1;
-       __u8 resrvd2;
-       __u8 next_curr_id;
-       __u32 resrvd3;
-};
-
-struct icp_qat_fw_comp_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comp_req_params comp_pars;
-       union {
-               struct icp_qat_fw_xlt_req_params xlt_pars;
-               __u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
-       } u1;
-       __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
-       struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
-       union {
-               struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
-               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
-       } u2;
-};
-
-struct icp_qat_fw_resp_comp_pars {
-       __u32 input_byte_counter;
-       __u32 output_byte_counter;
-       union {
-               struct {
-                       __u32 curr_crc32;
-                       __u32 curr_adler_32;
-               } legacy;
-               __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_2];
-       } crc;
-};
-
-struct icp_qat_fw_comp_state {
-       __u32 rd8_counter;
-       __u32 status_flags;
-       __u32 in_counter;
-       __u32 out_counter;
-       __u64 intermediate_state;
-       __u32 lobc;
-       __u32 replaybc;
-       __u64 pcrc64_poly;
-       __u32 crc32;
-       __u32 adler_xxhash32;
-       __u64 pcrc64_xorout;
-       __u32 out_buf_size;
-       __u32 in_buf_size;
-       __u64 in_pcrc64;
-       __u64 out_pcrc64;
-       __u32 lobs;
-       __u32 libc;
-       __u64 reserved;
-       __u32 xxhash_state[4];
-       __u32 cleartext[4];
-};
-
-struct icp_qat_fw_comp_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_resp;
-       __u64 opaque_data;
-       struct icp_qat_fw_resp_comp_pars comp_resp_pars;
-};
-
-#define QAT_FW_COMP_BANK_FLAG_MASK 0x1
-#define QAT_FW_COMP_BANK_I_BITPOS 8
-#define QAT_FW_COMP_BANK_H_BITPOS 7
-#define QAT_FW_COMP_BANK_G_BITPOS 6
-#define QAT_FW_COMP_BANK_F_BITPOS 5
-#define QAT_FW_COMP_BANK_E_BITPOS 4
-#define QAT_FW_COMP_BANK_D_BITPOS 3
-#define QAT_FW_COMP_BANK_C_BITPOS 2
-#define QAT_FW_COMP_BANK_B_BITPOS 1
-#define QAT_FW_COMP_BANK_A_BITPOS 0
-
-enum icp_qat_fw_comp_bank_enabled {
-       ICP_QAT_FW_COMP_BANK_DISABLED = 0,
-       ICP_QAT_FW_COMP_BANK_ENABLED = 1,
-       ICP_QAT_FW_COMP_BANK_DELIMITER = 2
-};
-
-#define ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(bank_i_enable, bank_h_enable, \
-                                       bank_g_enable, bank_f_enable, \
-                                       bank_e_enable, bank_d_enable, \
-                                       bank_c_enable, bank_b_enable, \
-                                       bank_a_enable) \
-       ((((bank_i_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_I_BITPOS) | \
-       (((bank_h_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_H_BITPOS) | \
-       (((bank_g_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_G_BITPOS) | \
-       (((bank_f_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_F_BITPOS) | \
-       (((bank_e_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_E_BITPOS) | \
-       (((bank_d_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_D_BITPOS) | \
-       (((bank_c_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_C_BITPOS) | \
-       (((bank_b_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_B_BITPOS) | \
-       (((bank_a_enable) & QAT_FW_COMP_BANK_FLAG_MASK) << \
-       QAT_FW_COMP_BANK_A_BITPOS))
-
-struct icp_qat_fw_comp_crc_data_struct {
-       __u32 crc32;
-       union {
-               __u32 adler;
-               __u32 xxhash;
-       } adler_xxhash_u;
-       __u32 cpr_in_crc_lo;
-       __u32 cpr_in_crc_hi;
-       __u32 cpr_out_crc_lo;
-       __u32 cpr_out_crc_hi;
-       __u32 xlt_in_crc_lo;
-       __u32 xlt_in_crc_hi;
-       __u32 xlt_out_crc_lo;
-       __u32 xlt_out_crc_hi;
-       __u32 prog_crc_poly_lo;
-       __u32 prog_crc_poly_hi;
-       __u32 xor_out_lo;
-       __u32 xor_out_hi;
-       __u32 append_crc_lo;
-       __u32 append_crc_hi;
-};
-
-struct xxhash_acc_state_buff {
-       __u32 in_counter;
-       __u32 out_counter;
-       __u32 xxhash_state[4];
-       __u32 clear_txt[4];
-};
-
-#endif
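
ICP_QAT_FW_COMP_FLAGS_BUILD above packs the session-level compression options into one flags word at fixed bit positions. A condensed sketch of the same packing; the function and parameter names are shortened for illustration, and the shift amounts match the *_BITPOS defines in this header:

    #include <stdio.h>
    #include <stdint.h>

    /* Shortened stand-in for ICP_QAT_FW_COMP_FLAGS_BUILD. */
    static uint32_t comp_flags_build(uint32_t sesstype, uint32_t asb,
                                     uint32_t enh_asb, uint32_t ret_uncomp,
                                     uint32_t secure_ram)
    {
            return ((sesstype & 0x1) << 2) |   /* SESSION_TYPE         */
                   ((asb & 0x1) << 3) |        /* AUTO_SELECT_BEST     */
                   ((enh_asb & 0x1) << 4) |    /* ENHANCED_ASB         */
                   ((ret_uncomp & 0x1) << 5) | /* RET_DISABLE_TYPE0    */
                   ((secure_ram & 0x1) << 7);  /* SECURE_RAM intmd buf */
    }

    int main(void)
    {
            /* Stateless session with (enhanced) auto-select-best on. */
            uint32_t flags = comp_flags_build(0, 1, 1, 0, 0);

            printf("comn_req_flags = %#x\n", flags); /* prints 0x18 */
            return 0;
    }
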
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
deleted file mode 100644 (file)
index 56cb827..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
-#define _ICP_QAT_FW_INIT_ADMIN_H_
-
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_init_admin_cmd_id {
-       ICP_QAT_FW_INIT_AE = 0,
-       ICP_QAT_FW_TRNG_ENABLE = 1,
-       ICP_QAT_FW_TRNG_DISABLE = 2,
-       ICP_QAT_FW_CONSTANTS_CFG = 3,
-       ICP_QAT_FW_STATUS_GET = 4,
-       ICP_QAT_FW_COUNTERS_GET = 5,
-       ICP_QAT_FW_LOOPBACK = 6,
-       ICP_QAT_FW_HEARTBEAT_SYNC = 7,
-       ICP_QAT_FW_HEARTBEAT_GET = 8,
-       ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
-       ICP_QAT_FW_PM_STATE_CONFIG = 128,
-};
-
-enum icp_qat_fw_init_admin_resp_status {
-       ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
-       ICP_QAT_FW_INIT_RESP_STATUS_FAIL
-};
-
-struct icp_qat_fw_init_admin_req {
-       __u16 init_cfg_sz;
-       __u8 resrvd1;
-       __u8 cmd_id;
-       __u32 resrvd2;
-       __u64 opaque_data;
-       __u64 init_cfg_ptr;
-
-       union {
-               struct {
-                       __u16 ibuf_size_in_kb;
-                       __u16 resrvd3;
-               };
-               __u32 idle_filter;
-       };
-
-       __u32 resrvd4;
-} __packed;
-
-struct icp_qat_fw_init_admin_resp {
-       __u8 flags;
-       __u8 resrvd1;
-       __u8 status;
-       __u8 cmd_id;
-       union {
-               __u32 resrvd2;
-               struct {
-                       __u16 version_minor_num;
-                       __u16 version_major_num;
-               };
-               __u32 extended_features;
-       };
-       __u64 opaque_data;
-       union {
-               __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
-               struct {
-                       __u32 version_patch_num;
-                       __u8 context_id;
-                       __u8 ae_id;
-                       __u16 resrvd4;
-                       __u64 resrvd5;
-               };
-               struct {
-                       __u64 req_rec_count;
-                       __u64 resp_sent_count;
-               };
-               struct {
-                       __u16 compression_algos;
-                       __u16 checksum_algos;
-                       __u32 deflate_capabilities;
-                       __u32 resrvd6;
-                       __u32 lzs_capabilities;
-               };
-               struct {
-                       __u32 cipher_algos;
-                       __u32 hash_algos;
-                       __u16 keygen_algos;
-                       __u16 other;
-                       __u16 public_key_algos;
-                       __u16 prime_algos;
-               };
-               struct {
-                       __u64 timestamp;
-                       __u64 resrvd7;
-               };
-               struct {
-                       __u32 successful_count;
-                       __u32 unsuccessful_count;
-                       __u64 resrvd8;
-               };
-       };
-} __packed;
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
-       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
-       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, \
-                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
-                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
-#endif
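
The request and response structs above are declared __packed because they are fixed-layout messages handed directly to firmware, so no compiler padding may shift a field. A compile-time layout check against a condensed mirror of the request struct; the type name is local to this sketch and the expected numbers follow from the field list above:

    #include <stddef.h>
    #include <stdint.h>

    struct init_admin_req {        /* mirrors icp_qat_fw_init_admin_req */
            uint16_t init_cfg_sz;
            uint8_t resrvd1;
            uint8_t cmd_id;
            uint32_t resrvd2;
            uint64_t opaque_data;
            uint64_t init_cfg_ptr;
            union {
                    struct {
                            uint16_t ibuf_size_in_kb;
                            uint16_t resrvd3;
                    };
                    uint32_t idle_filter;
            };
            uint32_t resrvd4;
    } __attribute__((packed));

    _Static_assert(sizeof(struct init_admin_req) == 32, "wire size");
    _Static_assert(offsetof(struct init_admin_req, opaque_data) == 8, "offset");

    int main(void) { return 0; }
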
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
deleted file mode 100644 (file)
index 28fa17f..0000000
+++ /dev/null
@@ -1,367 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_LA_H_
-#define _ICP_QAT_FW_LA_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_la_cmd_id {
-       ICP_QAT_FW_LA_CMD_CIPHER = 0,
-       ICP_QAT_FW_LA_CMD_AUTH = 1,
-       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
-       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
-       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
-       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
-       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
-       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
-       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
-       ICP_QAT_FW_LA_CMD_MGF1 = 9,
-       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
-       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
-       ICP_QAT_FW_LA_CMD_DELIMITER = 12
-};
-
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-
-struct icp_qat_fw_la_bulk_req {
-       struct icp_qat_fw_comn_req_hdr comn_hdr;
-       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
-       struct icp_qat_fw_comn_req_mid comn_mid;
-       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
-       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
-#define QAT_LA_SLICE_TYPE_BITPOS 14
-#define QAT_LA_SLICE_TYPE_MASK 0x3
-#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
-#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
-#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
-#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
-#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
-#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
-#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
-#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
-#define ICP_QAT_FW_LA_GCM_PROTO        2
-#define ICP_QAT_FW_LA_CCM_PROTO        1
-#define ICP_QAT_FW_LA_NO_PROTO 0
-#define QAT_LA_PROTO_BITPOS 7
-#define QAT_LA_PROTO_MASK 0x7
-#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
-#define QAT_LA_CMP_AUTH_RES_BITPOS 6
-#define QAT_LA_CMP_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_RET_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
-#define QAT_LA_RET_AUTH_RES_BITPOS 5
-#define QAT_LA_RET_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_UPDATE_STATE 1
-#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
-#define QAT_LA_UPDATE_STATE_BITPOS 4
-#define QAT_LA_UPDATE_STATE_MASK 0x1
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
-#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
-#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
-#define QAT_LA_CIPH_IV_FLD_BITPOS 2
-#define QAT_LA_CIPH_IV_FLD_MASK   0x1
-#define ICP_QAT_FW_LA_PARTIAL_NONE 0
-#define ICP_QAT_FW_LA_PARTIAL_START 1
-#define ICP_QAT_FW_LA_PARTIAL_MID 3
-#define ICP_QAT_FW_LA_PARTIAL_END 2
-#define QAT_LA_PARTIAL_BITPOS 0
-#define QAT_LA_PARTIAL_MASK 0x3
-#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
-       cmp_auth, ret_auth, update_state, \
-       ciph_iv, ciphcfg, partial) \
-       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
-       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
-       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
-       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
-       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
-       ((proto & QAT_LA_PROTO_MASK) << \
-       QAT_LA_PROTO_BITPOS)    | \
-       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
-       QAT_LA_CMP_AUTH_RES_BITPOS) | \
-       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
-       QAT_LA_RET_AUTH_RES_BITPOS) | \
-       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
-       QAT_LA_UPDATE_STATE_BITPOS) | \
-       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
-       QAT_LA_CIPH_IV_FLD_BITPOS) | \
-       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
-       ((partial & QAT_LA_PARTIAL_MASK) << \
-       QAT_LA_PARTIAL_BITPOS))
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
-       QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
-       QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
-       QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
-       QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
-       QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
-       QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
-       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
-       QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
-       QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
-       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
-       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
-       QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
-       QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
-       QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
-       QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
-       QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
-       QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
-       QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
-       QAT_FIELD_SET(flags, val, QAT_LA_SLICE_TYPE_BITPOS, \
-       QAT_LA_SLICE_TYPE_MASK)
-
-struct icp_qat_fw_cipher_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               } s1;
-       } u;
-};
-
-struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
-       union {
-               struct {
-                       __u64 content_desc_addr;
-                       __u16 content_desc_resrvd1;
-                       __u8 content_desc_params_sz;
-                       __u8 content_desc_hdr_resrvd2;
-                       __u32 content_desc_resrvd3;
-               } s;
-               struct {
-                       __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               } sl;
-       } u;
-};
-
-struct icp_qat_fw_cipher_cd_ctrl_hdr {
-       __u8 cipher_state_sz;
-       __u8 cipher_key_sz;
-       __u8 cipher_cfg_offset;
-       __u8 next_curr_id;
-       __u8 cipher_padding_sz;
-       __u8 resrvd1;
-       __u16 resrvd2;
-       __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
-};
-
-struct icp_qat_fw_auth_cd_ctrl_hdr {
-       __u32 resrvd1;
-       __u8 resrvd2;
-       __u8 hash_flags;
-       __u8 hash_cfg_offset;
-       __u8 next_curr_id;
-       __u8 resrvd3;
-       __u8 outer_prefix_sz;
-       __u8 final_sz;
-       __u8 inner_res_sz;
-       __u8 resrvd4;
-       __u8 inner_state1_sz;
-       __u8 inner_state2_offset;
-       __u8 inner_state2_sz;
-       __u8 outer_config_offset;
-       __u8 outer_state1_sz;
-       __u8 outer_res_sz;
-       __u8 outer_prefix_offset;
-};
-
-struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
-       __u8 cipher_state_sz;
-       __u8 cipher_key_sz;
-       __u8 cipher_cfg_offset;
-       __u8 next_curr_id_cipher;
-       __u8 cipher_padding_sz;
-       __u8 hash_flags;
-       __u8 hash_cfg_offset;
-       __u8 next_curr_id_auth;
-       __u8 resrvd1;
-       __u8 outer_prefix_sz;
-       __u8 final_sz;
-       __u8 inner_res_sz;
-       __u8 resrvd2;
-       __u8 inner_state1_sz;
-       __u8 inner_state2_offset;
-       __u8 inner_state2_sz;
-       __u8 outer_config_offset;
-       __u8 outer_state1_sz;
-       __u8 outer_res_sz;
-       __u8 outer_prefix_offset;
-};
-
-#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
-#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
-#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
-#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
-       (sizeof(struct icp_qat_fw_la_cipher_req_params))
-#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
-
-struct icp_qat_fw_la_cipher_req_params {
-       __u32 cipher_offset;
-       __u32 cipher_length;
-       union {
-               __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
-               struct {
-                       __u64 cipher_IV_ptr;
-                       __u64 resrvd1;
-               } s;
-       } u;
-};
-
-struct icp_qat_fw_la_auth_req_params {
-       __u32 auth_off;
-       __u32 auth_len;
-       union {
-               __u64 auth_partial_st_prefix;
-               __u64 aad_adr;
-       } u1;
-       __u64 auth_res_addr;
-       union {
-               __u8 inner_prefix_sz;
-               __u8 aad_sz;
-       } u2;
-       __u8 resrvd1;
-       __u8 hash_state_sz;
-       __u8 auth_res_sz;
-} __packed;
-
-struct icp_qat_fw_la_auth_req_params_resrvd_flds {
-       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
-       union {
-               __u8 inner_prefix_sz;
-               __u8 aad_sz;
-       } u2;
-       __u8 resrvd1;
-       __u16 resrvd2;
-};
-
-struct icp_qat_fw_la_resp {
-       struct icp_qat_fw_comn_resp_hdr comn_resp;
-       __u64 opaque_data;
-       __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
-         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
-       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
-       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
-       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
-       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
-       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
-       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#endif
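
The *_CURR_ID_* and *_NEXT_ID_* macros above chain processing slices: each content-descriptor control header packs the id of the current slice into the low nibble of its next_curr_id_cipher/next_curr_id_auth byte and the id of the next slice into the high nibble. A short sketch of that packing; the mask and bitpos values come from icp_qat_fw.h and the slice ids from enum icp_qat_fw_slice, with names shortened for the example:

    #include <assert.h>
    #include <stdint.h>

    #define NEXT_ID_BITPOS 4     /* ICP_QAT_FW_COMN_NEXT_ID_BITPOS */
    #define NEXT_ID_MASK   0xF0  /* ICP_QAT_FW_COMN_NEXT_ID_MASK   */
    #define CURR_ID_MASK   0x0F  /* ICP_QAT_FW_COMN_CURR_ID_MASK   */

    #define SLICE_CIPHER 1       /* ICP_QAT_FW_SLICE_CIPHER */
    #define SLICE_AUTH   2       /* ICP_QAT_FW_SLICE_AUTH   */

    int main(void)
    {
            uint8_t next_curr_id = 0;

            /* Current slice = cipher, next slice in the chain = auth,
             * as the CURR_ID_SET/NEXT_ID_SET macros would do it. */
            next_curr_id = (next_curr_id & NEXT_ID_MASK) |
                           (SLICE_CIPHER & CURR_ID_MASK);
            next_curr_id = (next_curr_id & CURR_ID_MASK) |
                           ((SLICE_AUTH << NEXT_ID_BITPOS) & NEXT_ID_MASK);
            assert(next_curr_id == 0x21);
            return 0;
    }
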
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
deleted file mode 100644 (file)
index 7eb5dae..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
-#define __ICP_QAT_FW_LOADER_HANDLE_H__
-#include "icp_qat_uclo.h"
-
-struct icp_qat_fw_loader_ae_data {
-       unsigned int state;
-       unsigned int ustore_size;
-       unsigned int free_addr;
-       unsigned int free_size;
-       unsigned int live_ctx_mask;
-};
-
-struct icp_qat_fw_loader_hal_handle {
-       struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
-       unsigned int ae_mask;
-       unsigned int admin_ae_mask;
-       unsigned int slice_mask;
-       unsigned int revision_id;
-       unsigned int ae_max_num;
-       unsigned int upc_mask;
-       unsigned int max_ustore;
-};
-
-struct icp_qat_fw_loader_chip_info {
-       int mmp_sram_size;
-       bool nn;
-       bool lm2lm3;
-       u32 lm_size;
-       u32 icp_rst_csr;
-       u32 icp_rst_mask;
-       u32 glb_clk_enable_csr;
-       u32 misc_ctl_csr;
-       u32 wakeup_event_val;
-       bool fw_auth;
-       bool css_3k;
-       bool tgroup_share_ustore;
-       u32 fcu_ctl_csr;
-       u32 fcu_sts_csr;
-       u32 fcu_dram_addr_hi;
-       u32 fcu_dram_addr_lo;
-       u32 fcu_loaded_ae_csr;
-       u8 fcu_loaded_ae_pos;
-};
-
-struct icp_qat_fw_loader_handle {
-       struct icp_qat_fw_loader_hal_handle *hal_handle;
-       struct icp_qat_fw_loader_chip_info *chip_info;
-       struct pci_dev *pci_dev;
-       void *obj_handle;
-       void *sobj_handle;
-       void *mobj_handle;
-       unsigned int cfg_ae_mask;
-       void __iomem *hal_sram_addr_v;
-       void __iomem *hal_cap_g_ctl_csr_addr_v;
-       void __iomem *hal_cap_ae_xfer_csr_addr_v;
-       void __iomem *hal_cap_ae_local_csr_addr_v;
-       void __iomem *hal_ep_csr_addr_v;
-};
-
-struct icp_firml_dram_desc {
-       void __iomem *dram_base_addr;
-       void *dram_base_addr_v;
-       dma_addr_t dram_bus_addr;
-       u64 dram_size;
-};
-#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
deleted file mode 100644 (file)
index 9dddae0..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_FW_PKE_
-#define _ICP_QAT_FW_PKE_
-
-#include "icp_qat_fw.h"
-
-struct icp_qat_fw_req_hdr_pke_cd_pars {
-       __u64 content_desc_addr;
-       __u32 content_desc_resrvd;
-       __u32 func_id;
-};
-
-struct icp_qat_fw_req_pke_mid {
-       __u64 opaque;
-       __u64 src_data_addr;
-       __u64 dest_data_addr;
-};
-
-struct icp_qat_fw_req_pke_hdr {
-       __u8 resrvd1;
-       __u8 resrvd2;
-       __u8 service_type;
-       __u8 hdr_flags;
-       __u16 comn_req_flags;
-       __u16 resrvd4;
-       struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
-};
-
-struct icp_qat_fw_pke_request {
-       struct icp_qat_fw_req_pke_hdr pke_hdr;
-       struct icp_qat_fw_req_pke_mid pke_mid;
-       __u8 output_param_count;
-       __u8 input_param_count;
-       __u16 resrvd1;
-       __u32 resrvd2;
-       __u64 next_req_adr;
-};
-
-struct icp_qat_fw_resp_pke_hdr {
-       __u8 resrvd1;
-       __u8 resrvd2;
-       __u8 response_type;
-       __u8 hdr_flags;
-       __u16 comn_resp_flags;
-       __u16 resrvd4;
-};
-
-struct icp_qat_fw_pke_resp {
-       struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
-       __u64 opaque;
-       __u64 src_data_addr;
-       __u64 dest_data_addr;
-};
-
-#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
-#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
-#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
-       QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
-               ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
-               QAT_COMN_RESP_PKE_STATUS_BITPOS, \
-               QAT_COMN_RESP_PKE_STATUS_MASK)
-
-#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
-       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
-               ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
-               ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
-#endif
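
ICP_QAT_FW_PKE_RESP_PKE_STAT_GET above pulls the status byte out of bits 8..15 of the response flags word, then extracts the single PKE status bit (bit 6 of that byte; 0 matches ICP_QAT_FW_COMN_STATUS_FLAG_OK). The same extraction as a plain function; the constants mirror the defines referenced above and the test vectors are made up:

    #include <assert.h>
    #include <stdint.h>

    #define ONE_BYTE_SHIFT    8    /* ICP_QAT_FW_COMN_ONE_BYTE_SHIFT   */
    #define SINGLE_BYTE_MASK  0xFF /* ICP_QAT_FW_COMN_SINGLE_BYTE_MASK */
    #define PKE_STATUS_BITPOS 6    /* QAT_COMN_RESP_PKE_STATUS_BITPOS  */
    #define PKE_STATUS_MASK   0x1  /* QAT_COMN_RESP_PKE_STATUS_MASK    */

    static unsigned int pke_stat_get(uint16_t comn_resp_flags)
    {
            unsigned int status_byte =
                    (comn_resp_flags >> ONE_BYTE_SHIFT) & SINGLE_BYTE_MASK;

            return (status_byte >> PKE_STATUS_BITPOS) & PKE_STATUS_MASK;
    }

    int main(void)
    {
            assert(pke_stat_get(0x4000) == 1); /* bit 14 set: PKE error */
            assert(pke_stat_get(0x0000) == 0); /* clean completion      */
            return 0;
    }
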
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
deleted file mode 100644 (file)
index 20b2ee1..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef __ICP_QAT_HAL_H
-#define __ICP_QAT_HAL_H
-#include "icp_qat_fw_loader_handle.h"
-
-enum hal_global_csr {
-       MISC_CONTROL = 0xA04,
-       ICP_RESET = 0xA0c,
-       ICP_GLOBAL_CLK_ENABLE = 0xA50
-};
-
-enum {
-       MISC_CONTROL_C4XXX = 0xAA0,
-       ICP_RESET_CPP0 = 0x938,
-       ICP_RESET_CPP1 = 0x93c,
-       ICP_GLOBAL_CLK_ENABLE_CPP0 = 0x964,
-       ICP_GLOBAL_CLK_ENABLE_CPP1 = 0x968
-};
-
-enum hal_ae_csr {
-       USTORE_ADDRESS = 0x000,
-       USTORE_DATA_LOWER = 0x004,
-       USTORE_DATA_UPPER = 0x008,
-       ALU_OUT = 0x010,
-       CTX_ARB_CNTL = 0x014,
-       CTX_ENABLES = 0x018,
-       CC_ENABLE = 0x01c,
-       CSR_CTX_POINTER = 0x020,
-       CTX_STS_INDIRECT = 0x040,
-       ACTIVE_CTX_STATUS = 0x044,
-       CTX_SIG_EVENTS_INDIRECT = 0x048,
-       CTX_SIG_EVENTS_ACTIVE = 0x04c,
-       CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
-       LM_ADDR_0_INDIRECT = 0x060,
-       LM_ADDR_1_INDIRECT = 0x068,
-       LM_ADDR_2_INDIRECT = 0x0cc,
-       LM_ADDR_3_INDIRECT = 0x0d4,
-       INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
-       INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
-       INDIRECT_LM_ADDR_2_BYTE_INDEX = 0x10c,
-       INDIRECT_LM_ADDR_3_BYTE_INDEX = 0x114,
-       INDIRECT_T_INDEX = 0x0f8,
-       INDIRECT_T_INDEX_BYTE_INDEX = 0x0fc,
-       FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
-       TIMESTAMP_LOW = 0x0c0,
-       TIMESTAMP_HIGH = 0x0c4,
-       PROFILE_COUNT = 0x144,
-       SIGNATURE_ENABLE = 0x150,
-       AE_MISC_CONTROL = 0x160,
-       LOCAL_CSR_STATUS = 0x180,
-};
-
-enum fcu_csr {
-       FCU_CONTROL           = 0x8c0,
-       FCU_STATUS            = 0x8c4,
-       FCU_STATUS1           = 0x8c8,
-       FCU_DRAM_ADDR_LO      = 0x8cc,
-       FCU_DRAM_ADDR_HI      = 0x8d0,
-       FCU_RAMBASE_ADDR_HI   = 0x8d4,
-       FCU_RAMBASE_ADDR_LO   = 0x8d8
-};
-
-enum fcu_csr_4xxx {
-       FCU_CONTROL_4XXX           = 0x1000,
-       FCU_STATUS_4XXX            = 0x1004,
-       FCU_ME_BROADCAST_MASK_TYPE = 0x1008,
-       FCU_AE_LOADED_4XXX         = 0x1010,
-       FCU_DRAM_ADDR_LO_4XXX      = 0x1014,
-       FCU_DRAM_ADDR_HI_4XXX      = 0x1018,
-};
-
-enum fcu_cmd {
-       FCU_CTRL_CMD_NOOP  = 0,
-       FCU_CTRL_CMD_AUTH  = 1,
-       FCU_CTRL_CMD_LOAD  = 2,
-       FCU_CTRL_CMD_START = 3
-};
-
-enum fcu_sts {
-       FCU_STS_NO_STS    = 0,
-       FCU_STS_VERI_DONE = 1,
-       FCU_STS_LOAD_DONE = 2,
-       FCU_STS_VERI_FAIL = 3,
-       FCU_STS_LOAD_FAIL = 4,
-       FCU_STS_BUSY      = 5
-};
-
-#define ALL_AE_MASK                 0xFFFFFFFF
-#define UA_ECS                      (0x1 << 31)
-#define ACS_ABO_BITPOS              31
-#define ACS_ACNO                    0x7
-#define CE_ENABLE_BITPOS            0x8
-#define CE_LMADDR_0_GLOBAL_BITPOS   16
-#define CE_LMADDR_1_GLOBAL_BITPOS   17
-#define CE_LMADDR_2_GLOBAL_BITPOS   22
-#define CE_LMADDR_3_GLOBAL_BITPOS   23
-#define CE_T_INDEX_GLOBAL_BITPOS    21
-#define CE_NN_MODE_BITPOS           20
-#define CE_REG_PAR_ERR_BITPOS       25
-#define CE_BREAKPOINT_BITPOS        27
-#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
-#define CE_INUSE_CONTEXTS_BITPOS    31
-#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
-#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
-#define XCWE_VOLUNTARY              (0x1)
-#define LCS_STATUS          (0x1)
-#define MMC_SHARE_CS_BITPOS         2
-#define WAKEUP_EVENT 0x10000
-#define FCU_CTRL_BROADCAST_POS   0x4
-#define FCU_CTRL_AE_POS     0x8
-#define FCU_AUTH_STS_MASK   0x7
-#define FCU_STS_DONE_POS    0x9
-#define FCU_STS_AUTHFWLD_POS 0x8
-#define FCU_LOADED_AE_POS   0x16
-#define FW_AUTH_WAIT_PERIOD 10
-#define FW_AUTH_MAX_RETRY   300
-#define ICP_QAT_AE_OFFSET 0x20000
-#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
-#define LOCAL_TO_XFER_REG_OFFSET 0x800
-#define ICP_QAT_EP_OFFSET 0x3a000
-#define ICP_QAT_EP_OFFSET_4XXX   0x200000 /* HI MMIO CSRs */
-#define ICP_QAT_AE_OFFSET_4XXX   0x600000
-#define ICP_QAT_CAP_OFFSET_4XXX  0x640000
-#define SET_CAP_CSR(handle, csr, val) \
-       ADF_CSR_WR((handle)->hal_cap_g_ctl_csr_addr_v, csr, val)
-#define GET_CAP_CSR(handle, csr) \
-       ADF_CSR_RD((handle)->hal_cap_g_ctl_csr_addr_v, csr)
-#define AE_CSR(handle, ae) \
-       ((char __iomem *)(handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12))
-#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr)))
-#define SET_AE_CSR(handle, ae, csr, val) \
-       ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
-#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
-#define AE_XFER(handle, ae) \
-       ((char __iomem *)(handle)->hal_cap_ae_xfer_csr_addr_v + ((ae) << 12))
-#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
-       (((reg) & 0xff) << 2))
-#define SET_AE_XFER(handle, ae, reg, val) \
-       ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
-#define SRAM_WRITE(handle, addr, val) \
-       ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val)
-#endif
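
AE_CSR_ADDR() composes a per-engine MMIO address: each accel engine gets its own 4 KB window (the `<< 12`), and the CSR offset is masked to its low ten bits. A hedged usage sketch, assuming a loader handle with its CSR bases already mapped; example_enable_ctx is hypothetical and mirrors the read-modify-write pattern used by the HAL code:

/* Sketch: enable a set of contexts on one AE via the accessors above.
 * The low byte of ctx_mask selects contexts 0-7, shifted into
 * CTX_ENABLES at CE_ENABLE_BITPOS.
 */
static void example_enable_ctx(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int ctx_mask)
{
	unsigned int ctx;

	ctx = GET_AE_CSR(handle, ae, CTX_ENABLES);
	ctx |= (ctx_mask & 0xff) << CE_ENABLE_BITPOS;
	SET_AE_CSR(handle, ae, CTX_ENABLES, ctx);
}
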
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
deleted file mode 100644 (file)
index 4042739..0000000
+++ /dev/null
@@ -1,376 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _ICP_QAT_HW_H_
-#define _ICP_QAT_HW_H_
-
-enum icp_qat_hw_ae_id {
-       ICP_QAT_HW_AE_0 = 0,
-       ICP_QAT_HW_AE_1 = 1,
-       ICP_QAT_HW_AE_2 = 2,
-       ICP_QAT_HW_AE_3 = 3,
-       ICP_QAT_HW_AE_4 = 4,
-       ICP_QAT_HW_AE_5 = 5,
-       ICP_QAT_HW_AE_6 = 6,
-       ICP_QAT_HW_AE_7 = 7,
-       ICP_QAT_HW_AE_8 = 8,
-       ICP_QAT_HW_AE_9 = 9,
-       ICP_QAT_HW_AE_10 = 10,
-       ICP_QAT_HW_AE_11 = 11,
-       ICP_QAT_HW_AE_DELIMITER = 12
-};
-
-enum icp_qat_hw_qat_id {
-       ICP_QAT_HW_QAT_0 = 0,
-       ICP_QAT_HW_QAT_1 = 1,
-       ICP_QAT_HW_QAT_2 = 2,
-       ICP_QAT_HW_QAT_3 = 3,
-       ICP_QAT_HW_QAT_4 = 4,
-       ICP_QAT_HW_QAT_5 = 5,
-       ICP_QAT_HW_QAT_DELIMITER = 6
-};
-
-enum icp_qat_hw_auth_algo {
-       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
-       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
-       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
-       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
-       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
-       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
-       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
-       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
-       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
-       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
-       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
-       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
-       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
-       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
-       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
-       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
-       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
-       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
-       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
-       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
-       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
-};
-
-enum icp_qat_hw_auth_mode {
-       ICP_QAT_HW_AUTH_MODE0 = 0,
-       ICP_QAT_HW_AUTH_MODE1 = 1,
-       ICP_QAT_HW_AUTH_MODE2 = 2,
-       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
-};
-
-struct icp_qat_hw_auth_config {
-       __u32 config;
-       __u32 reserved;
-};
-
-struct icp_qat_hw_ucs_cipher_config {
-       __u32 val;
-       __u32 reserved[3];
-};
-
-enum icp_qat_slice_mask {
-       ICP_ACCEL_MASK_CIPHER_SLICE = BIT(0),
-       ICP_ACCEL_MASK_AUTH_SLICE = BIT(1),
-       ICP_ACCEL_MASK_PKE_SLICE = BIT(2),
-       ICP_ACCEL_MASK_COMPRESS_SLICE = BIT(3),
-       ICP_ACCEL_MASK_LZS_SLICE = BIT(4),
-       ICP_ACCEL_MASK_EIA3_SLICE = BIT(5),
-       ICP_ACCEL_MASK_SHA3_SLICE = BIT(6),
-};
-
-enum icp_qat_capabilities_mask {
-       ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = BIT(0),
-       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = BIT(1),
-       ICP_ACCEL_CAPABILITIES_CIPHER = BIT(2),
-       ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
-       ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
-       ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
-       ICP_ACCEL_CAPABILITIES_LZS_COMPRESSION = BIT(6),
-       ICP_ACCEL_CAPABILITIES_RAND = BIT(7),
-       ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
-       ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
-       /* Bits 10-11 are currently reserved */
-       ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
-       ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
-       /* Bit 14 is currently reserved */
-       ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
-       ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
-       ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
-       /* Bits 18-21 are currently reserved */
-       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
-       ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
-       ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
-       ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
-       ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
-};
-
-#define QAT_AUTH_MODE_BITPOS 4
-#define QAT_AUTH_MODE_MASK 0xF
-#define QAT_AUTH_ALGO_BITPOS 0
-#define QAT_AUTH_ALGO_MASK 0xF
-#define QAT_AUTH_CMP_BITPOS 8
-#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
-#define QAT_AUTH_ALGO_SHA3_BITPOS 22
-#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
-       (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
-       ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
-       (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
-        QAT_AUTH_ALGO_SHA3_BITPOS) | \
-        (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
-       (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
-       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
-       ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
-
-struct icp_qat_hw_auth_counter {
-       __be32 counter;
-       __u32 reserved;
-};
-
-#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
-#define QAT_AUTH_COUNT_BITPOS 0
-#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
-       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
-
-struct icp_qat_hw_auth_setup {
-       struct icp_qat_hw_auth_config auth_config;
-       struct icp_qat_hw_auth_counter auth_counter;
-};
-
-#define QAT_HW_DEFAULT_ALIGNMENT 8
-#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~((n) - 1)))
-#define ICP_QAT_HW_NULL_STATE1_SZ 32
-#define ICP_QAT_HW_MD5_STATE1_SZ 16
-#define ICP_QAT_HW_SHA1_STATE1_SZ 20
-#define ICP_QAT_HW_SHA224_STATE1_SZ 32
-#define ICP_QAT_HW_SHA256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA384_STATE1_SZ 64
-#define ICP_QAT_HW_SHA512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
-#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
-#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
-#define ICP_QAT_HW_NULL_STATE2_SZ 32
-#define ICP_QAT_HW_MD5_STATE2_SZ 16
-#define ICP_QAT_HW_SHA1_STATE2_SZ 20
-#define ICP_QAT_HW_SHA224_STATE2_SZ 32
-#define ICP_QAT_HW_SHA256_STATE2_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
-#define ICP_QAT_HW_SHA384_STATE2_SZ 64
-#define ICP_QAT_HW_SHA512_STATE2_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
-#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
-#define ICP_QAT_HW_F9_IK_SZ 16
-#define ICP_QAT_HW_F9_FK_SZ 16
-#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
-       ICP_QAT_HW_F9_FK_SZ)
-#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
-#define ICP_QAT_HW_GALOIS_H_SZ 16
-#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
-#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
-
-struct icp_qat_hw_auth_sha512 {
-       struct icp_qat_hw_auth_setup inner_setup;
-       __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
-       struct icp_qat_hw_auth_setup outer_setup;
-       __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
-};
-
-struct icp_qat_hw_auth_algo_blk {
-       struct icp_qat_hw_auth_sha512 sha;
-};
-
-#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
-#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
-
-enum icp_qat_hw_cipher_algo {
-       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
-       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
-       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
-       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
-       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
-       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
-       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
-       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
-       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
-       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
-       ICP_QAT_HW_CIPHER_DELIMITER = 10
-};
-
-enum icp_qat_hw_cipher_mode {
-       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
-       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
-       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
-       ICP_QAT_HW_CIPHER_F8_MODE = 3,
-       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
-       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
-};
-
-struct icp_qat_hw_cipher_config {
-       __u32 val;
-       __u32 reserved;
-};
-
-enum icp_qat_hw_cipher_dir {
-       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
-       ICP_QAT_HW_CIPHER_DECRYPT = 1,
-};
-
-enum icp_qat_hw_cipher_convert {
-       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
-       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
-};
-
-#define QAT_CIPHER_MODE_BITPOS 4
-#define QAT_CIPHER_MODE_MASK 0xF
-#define QAT_CIPHER_ALGO_BITPOS 0
-#define QAT_CIPHER_ALGO_MASK 0xF
-#define QAT_CIPHER_CONVERT_BITPOS 9
-#define QAT_CIPHER_CONVERT_MASK 0x1
-#define QAT_CIPHER_DIR_BITPOS 8
-#define QAT_CIPHER_DIR_MASK 0x1
-#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
-#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
-#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
-       (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
-       ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
-       ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
-       ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
-#define ICP_QAT_HW_DES_BLK_SZ 8
-#define ICP_QAT_HW_3DES_BLK_SZ 8
-#define ICP_QAT_HW_NULL_BLK_SZ 8
-#define ICP_QAT_HW_AES_BLK_SZ 16
-#define ICP_QAT_HW_KASUMI_BLK_SZ 8
-#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
-#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
-#define ICP_QAT_HW_NULL_KEY_SZ 256
-#define ICP_QAT_HW_DES_KEY_SZ 8
-#define ICP_QAT_HW_3DES_KEY_SZ 24
-#define ICP_QAT_HW_AES_128_KEY_SZ 16
-#define ICP_QAT_HW_AES_192_KEY_SZ 24
-#define ICP_QAT_HW_AES_256_KEY_SZ 32
-#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
-       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_KASUMI_KEY_SZ 16
-#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
-       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_ARC4_KEY_SZ 256
-#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
-#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
-
-struct icp_qat_hw_cipher_aes256_f8 {
-       struct icp_qat_hw_cipher_config cipher_config;
-       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
-};
-
-struct icp_qat_hw_ucs_cipher_aes256_f8 {
-       struct icp_qat_hw_ucs_cipher_config cipher_config;
-       __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
-};
-
-struct icp_qat_hw_cipher_algo_blk {
-       union {
-               struct icp_qat_hw_cipher_aes256_f8 aes;
-               struct icp_qat_hw_ucs_cipher_aes256_f8 ucs_aes;
-       };
-} __aligned(64);
-
-enum icp_qat_hw_compression_direction {
-       ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
-       ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
-       ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
-};
-
-enum icp_qat_hw_compression_delayed_match {
-       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
-       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
-       ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
-};
-
-enum icp_qat_hw_compression_algo {
-       ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
-       ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
-       ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
-};
-
-enum icp_qat_hw_compression_depth {
-       ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
-       ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
-       ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
-       ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
-       ICP_QAT_HW_COMPRESSION_DEPTH_128 = 4,
-       ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 5
-};
-
-enum icp_qat_hw_compression_file_type {
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
-       ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
-};
-
-struct icp_qat_hw_compression_config {
-       __u32 lower_val;
-       __u32 upper_val;
-};
-
-#define QAT_COMPRESSION_DIR_BITPOS 4
-#define QAT_COMPRESSION_DIR_MASK 0x7
-#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
-#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
-#define QAT_COMPRESSION_ALGO_BITPOS 31
-#define QAT_COMPRESSION_ALGO_MASK 0x1
-#define QAT_COMPRESSION_DEPTH_BITPOS 28
-#define QAT_COMPRESSION_DEPTH_MASK 0x7
-#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
-#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
-
-#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \
-       algo, depth, filetype) \
-       ((((dir) & QAT_COMPRESSION_DIR_MASK) << \
-       QAT_COMPRESSION_DIR_BITPOS) | \
-       (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
-       QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
-       (((algo) & QAT_COMPRESSION_ALGO_MASK) << \
-       QAT_COMPRESSION_ALGO_BITPOS) | \
-       (((depth) & QAT_COMPRESSION_DEPTH_MASK) << \
-       QAT_COMPRESSION_DEPTH_BITPOS) | \
-       (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) << \
-       QAT_COMPRESSION_FILE_TYPE_BITPOS))
-
-#endif
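
ICP_QAT_HW_CIPHER_CONFIG_BUILD() packs mode, algorithm, key-convert flag and direction into the 32-bit config word consumed by the cipher slice; qat_algs.c wraps it as QAT_AES_HW_CONFIG_ENC/_DEC. An illustrative sketch for AES-256-CBC encryption (example_fill_cipher_cfg is hypothetical):

/* Sketch: fill a cipher config block for AES-256 in CBC mode,
 * encrypt direction, no key conversion.
 */
static void example_fill_cipher_cfg(struct icp_qat_hw_cipher_config *cfg)
{
	cfg->val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
						  ICP_QAT_HW_CIPHER_ALGO_AES256,
						  ICP_QAT_HW_CIPHER_NO_CONVERT,
						  ICP_QAT_HW_CIPHER_ENCRYPT);
	cfg->reserved = 0;
}
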
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp.h
deleted file mode 100644 (file)
index 7ea8962..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _ICP_QAT_HW_20_COMP_H_
-#define _ICP_QAT_HW_20_COMP_H_
-
-#include "icp_qat_hw_20_comp_defs.h"
-#include "icp_qat_fw.h"
-
-struct icp_qat_hw_comp_20_config_csr_lower {
-       enum icp_qat_hw_comp_20_extended_delay_match_mode edmm;
-       enum icp_qat_hw_comp_20_hw_comp_format algo;
-       enum icp_qat_hw_comp_20_search_depth sd;
-       enum icp_qat_hw_comp_20_hbs_control hbs;
-       enum icp_qat_hw_comp_20_abd abd;
-       enum icp_qat_hw_comp_20_lllbd_ctrl lllbd;
-       enum icp_qat_hw_comp_20_min_match_control mmctrl;
-       enum icp_qat_hw_comp_20_skip_hash_collision hash_col;
-       enum icp_qat_hw_comp_20_skip_hash_update hash_update;
-       enum icp_qat_hw_comp_20_byte_skip skip_ctrl;
-};
-
-static inline __u32
-ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.algo,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
-       QAT_FIELD_SET(val32, csr.sd,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
-       QAT_FIELD_SET(val32, csr.edmm,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
-       QAT_FIELD_SET(val32, csr.hbs,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lllbd,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
-       QAT_FIELD_SET(val32, csr.mmctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.hash_col,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
-       QAT_FIELD_SET(val32, csr.hash_update,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
-       QAT_FIELD_SET(val32, csr.skip_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
-       QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-struct icp_qat_hw_comp_20_config_csr_upper {
-       enum icp_qat_hw_comp_20_scb_control scb_ctrl;
-       enum icp_qat_hw_comp_20_rmb_control rmb_ctrl;
-       enum icp_qat_hw_comp_20_som_control som_ctrl;
-       enum icp_qat_hw_comp_20_skip_hash_rd_control skip_hash_ctrl;
-       enum icp_qat_hw_comp_20_scb_unload_control scb_unload_ctrl;
-       enum icp_qat_hw_comp_20_disable_token_fusion_control disable_token_fusion_ctrl;
-       enum icp_qat_hw_comp_20_lbms lbms;
-       enum icp_qat_hw_comp_20_scb_mode_reset_mask scb_mode_reset;
-       __u16 lazy;
-       __u16 nice;
-};
-
-static inline __u32
-ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.scb_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.rmb_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.som_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lbms,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
-       QAT_FIELD_SET(val32, csr.scb_mode_reset,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
-       QAT_FIELD_SET(val32, csr.lazy,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
-       QAT_FIELD_SET(val32, csr.nice,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
-                     ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-struct icp_qat_hw_decomp_20_config_csr_lower {
-       enum icp_qat_hw_decomp_20_hbs_control hbs;
-       enum icp_qat_hw_decomp_20_lbms lbms;
-       enum icp_qat_hw_decomp_20_hw_comp_format algo;
-       enum icp_qat_hw_decomp_20_min_match_control mmctrl;
-       enum icp_qat_hw_decomp_20_lz4_block_checksum_present lbc;
-};
-
-static inline __u32
-ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_lower csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.hbs,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lbms,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
-       QAT_FIELD_SET(val32, csr.algo,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
-       QAT_FIELD_SET(val32, csr.mmctrl,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.lbc,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-struct icp_qat_hw_decomp_20_config_csr_upper {
-       enum icp_qat_hw_decomp_20_speculative_decoder_control sdc;
-       enum icp_qat_hw_decomp_20_mini_cam_control mcc;
-};
-
-static inline __u32
-ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_upper csr)
-{
-       u32 val32 = 0;
-
-       QAT_FIELD_SET(val32, csr.sdc,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
-       QAT_FIELD_SET(val32, csr.mcc,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
-                     ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
-
-       return __builtin_bswap32(val32);
-}
-
-#endif
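
ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER() packs the lower-CSR fields and byte-swaps the result into the layout the firmware expects. A sketch that takes the per-field defaults from icp_qat_hw_20_comp_defs.h (the next file below) and selects deflate at the shallowest search depth; example_build_comp_lower_csr is hypothetical:

/* Sketch: assemble the lower compression CSR from the documented
 * defaults, overriding only the format and search depth.
 */
static __u32 example_build_comp_lower_csr(void)
{
	struct icp_qat_hw_comp_20_config_csr_lower lower = {
		.edmm = ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL,
		.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE,
		.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1,
		.hbs = ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL,
		.abd = ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL,
		.lllbd = ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL,
		.mmctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL,
		.hash_col = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL,
		.hash_update = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL,
		.skip_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL,
	};

	return ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(lower);
}
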
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h b/drivers/crypto/qat/qat_common/icp_qat_hw_20_comp_defs.h
deleted file mode 100644 (file)
index 208d455..0000000
+++ /dev/null
@@ -1,300 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
-#define _ICP_QAT_HW_20_COMP_DEFS_H
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_scb_control {
-       ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_rmb_control {
-       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
-       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
-
-enum icp_qat_hw_comp_20_som_control {
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_skip_hash_rd_control {
-       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
-       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_scb_unload_control {
-       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
-       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_disable_token_fusion_control {
-       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
-
-enum icp_qat_hw_comp_20_lbms {
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
-
-enum icp_qat_hw_comp_20_scb_mode_reset_mask {
-       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
-       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
-
-enum icp_qat_hw_comp_20_hbs_control {
-       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
-       ICP_QAT_HW_COMP_23_HBS_CONTROL_HBS_IS_64KB = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
-
-enum icp_qat_hw_comp_20_abd {
-       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
-       ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
-
-enum icp_qat_hw_comp_20_lllbd_ctrl {
-       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
-       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
-
-enum icp_qat_hw_comp_20_search_depth {
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
-
-enum icp_qat_hw_comp_20_hw_comp_format {
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
-       ICP_QAT_HW_COMP_23_HW_COMP_FORMAT_ZSTD = 0x4,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
-
-enum icp_qat_hw_comp_20_min_match_control {
-       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
-       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
-
-enum icp_qat_hw_comp_20_skip_hash_collision {
-       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
-       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
-
-enum icp_qat_hw_comp_20_skip_hash_update {
-       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
-       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
-
-enum icp_qat_hw_comp_20_byte_skip {
-       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
-       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
-
-enum icp_qat_hw_comp_20_extended_delay_match_mode {
-       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
-       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
-};
-
-#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
-       ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
-
-enum icp_qat_hw_decomp_20_speculative_decoder_control {
-       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
-
-enum icp_qat_hw_decomp_20_mini_cam_control {
-       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
-       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
-
-enum icp_qat_hw_decomp_20_hbs_control {
-       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
-
-enum icp_qat_hw_decomp_20_lbms {
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
-
-enum icp_qat_hw_decomp_20_hw_comp_format {
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
-       ICP_QAT_HW_DECOMP_23_HW_DECOMP_FORMAT_ZSTD = 0x4,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
-
-enum icp_qat_hw_decomp_20_min_match_control {
-       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
-       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
-
-enum icp_qat_hw_decomp_20_lz4_block_checksum_present {
-       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
-       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
-};
-
-#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
-       ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
-
-#endif
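
The same pattern applies to decompression with the DECOMP variants above. A sketch configuring an LZ4 input stream whose blocks carry checksums, with the remaining fields kept at their documented defaults (example_build_decomp_lower_csr is hypothetical):

/* Sketch: lower decompression CSR for LZ4 with per-block checksums. */
static __u32 example_build_decomp_lower_csr(void)
{
	struct icp_qat_hw_decomp_20_config_csr_lower lower = {
		.hbs = ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL,
		.lbms = ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL,
		.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4,
		.mmctrl = ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL,
		.lbc = ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT,
	};

	return ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(lower);
}
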
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
deleted file mode 100644 (file)
index 69482ab..0000000
+++ /dev/null
@@ -1,585 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef __ICP_QAT_UCLO_H__
-#define __ICP_QAT_UCLO_H__
-
-#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000
-#define ICP_QAT_AC_C62X_DEV_TYPE   0x01000000
-#define ICP_QAT_AC_C3XXX_DEV_TYPE  0x02000000
-#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
-#define ICP_QAT_UCLO_MAX_AE       12
-#define ICP_QAT_UCLO_MAX_CTX      8
-#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
-#define ICP_QAT_UCLO_MAX_USTORE   0x4000
-#define ICP_QAT_UCLO_MAX_XFER_REG 128
-#define ICP_QAT_UCLO_MAX_GPR_REG  128
-#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
-#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280
-#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
-#define ICP_QAT_UOF_OBJID_LEN     8
-#define ICP_QAT_UOF_FID 0xc6c2
-#define ICP_QAT_UOF_MAJVER 0x4
-#define ICP_QAT_UOF_MINVER 0x11
-#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
-#define ICP_QAT_UOF_STRT        "UOF_STRT"
-#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
-#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
-#define ICP_QAT_UOF_LOCAL_SCOPE     1
-#define ICP_QAT_UOF_INIT_EXPR               0
-#define ICP_QAT_UOF_INIT_REG                1
-#define ICP_QAT_UOF_INIT_REG_CTX            2
-#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
-#define ICP_QAT_SUOF_OBJ_ID_LEN             8
-#define ICP_QAT_SUOF_FID  0x53554f46
-#define ICP_QAT_SUOF_MAJVER 0x0
-#define ICP_QAT_SUOF_MINVER 0x1
-#define ICP_QAT_SUOF_OBJ_NAME_LEN 128
-#define ICP_QAT_MOF_OBJ_ID_LEN 8
-#define ICP_QAT_MOF_OBJ_CHUNKID_LEN 8
-#define ICP_QAT_MOF_FID 0x00666f6d
-#define ICP_QAT_MOF_MAJVER 0x0
-#define ICP_QAT_MOF_MINVER 0x1
-#define ICP_QAT_MOF_SYM_OBJS "SYM_OBJS"
-#define ICP_QAT_SUOF_OBJS "SUF_OBJS"
-#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
-#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN    (50 * sizeof(unsigned long long))
-#define ICP_QAT_SIMG_AE_INSTS_LEN       (0x4000 * sizeof(unsigned long long))
-
-#define DSS_FWSK_MODULUS_LEN    384 /* RSA3K */
-#define DSS_FWSK_EXPONENT_LEN   4
-#define DSS_FWSK_PADDING_LEN    380
-#define DSS_SIGNATURE_LEN       384 /* RSA3K */
-
-#define CSS_FWSK_MODULUS_LEN    256 /* RSA2K */
-#define CSS_FWSK_EXPONENT_LEN   4
-#define CSS_FWSK_PADDING_LEN    252
-#define CSS_SIGNATURE_LEN       256 /* RSA2K */
-
-#define ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)   ((handle)->chip_info->css_3k ? \
-                                               DSS_FWSK_MODULUS_LEN  : \
-                                               CSS_FWSK_MODULUS_LEN)
-
-#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)  ((handle)->chip_info->css_3k ? \
-                                               DSS_FWSK_EXPONENT_LEN : \
-                                               CSS_FWSK_EXPONENT_LEN)
-
-#define ICP_QAT_CSS_FWSK_PAD_LEN(handle)       ((handle)->chip_info->css_3k ? \
-                                               DSS_FWSK_PADDING_LEN : \
-                                               CSS_FWSK_PADDING_LEN)
-
-#define ICP_QAT_CSS_FWSK_PUB_LEN(handle)       (ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
-                                               ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
-                                               ICP_QAT_CSS_FWSK_PAD_LEN(handle))
-
-#define ICP_QAT_CSS_SIGNATURE_LEN(handle)      ((handle)->chip_info->css_3k ? \
-                                               DSS_SIGNATURE_LEN : \
-                                               CSS_SIGNATURE_LEN)
-
-#define ICP_QAT_CSS_AE_IMG_LEN     (sizeof(struct icp_qat_simg_ae_mode) + \
-                                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN +         \
-                                   ICP_QAT_SIMG_AE_INSTS_LEN)
-#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \
-                                       ICP_QAT_CSS_FWSK_PUB_LEN(handle) + \
-                                       ICP_QAT_CSS_SIGNATURE_LEN(handle) + \
-                                       ICP_QAT_CSS_AE_IMG_LEN)
-#define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \
-                                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
-                                       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
-                                       ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN    0x40000
-#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN    0x30000
-
-#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
-#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
-#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
-#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
-
-#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
-#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
-#define ICP_QAT_LOC_MEM2_MODE(ae_mode) (((ae_mode) >> 0x6) & 0x1)
-#define ICP_QAT_LOC_MEM3_MODE(ae_mode) (((ae_mode) >> 0x7) & 0x1)
-#define ICP_QAT_LOC_TINDEX_MODE(ae_mode) (((ae_mode) >> 0xe) & 0x1)
-
-enum icp_qat_uof_mem_region {
-       ICP_QAT_UOF_SRAM_REGION = 0x0,
-       ICP_QAT_UOF_LMEM_REGION = 0x3,
-       ICP_QAT_UOF_UMEM_REGION = 0x5
-};
-
-enum icp_qat_uof_regtype {
-       ICP_NO_DEST     = 0,
-       ICP_GPA_REL     = 1,
-       ICP_GPA_ABS     = 2,
-       ICP_GPB_REL     = 3,
-       ICP_GPB_ABS     = 4,
-       ICP_SR_REL      = 5,
-       ICP_SR_RD_REL   = 6,
-       ICP_SR_WR_REL   = 7,
-       ICP_SR_ABS      = 8,
-       ICP_SR_RD_ABS   = 9,
-       ICP_SR_WR_ABS   = 10,
-       ICP_DR_REL      = 19,
-       ICP_DR_RD_REL   = 20,
-       ICP_DR_WR_REL   = 21,
-       ICP_DR_ABS      = 22,
-       ICP_DR_RD_ABS   = 23,
-       ICP_DR_WR_ABS   = 24,
-       ICP_LMEM        = 26,
-       ICP_LMEM0       = 27,
-       ICP_LMEM1       = 28,
-       ICP_NEIGH_REL   = 31,
-       ICP_LMEM2       = 61,
-       ICP_LMEM3       = 62,
-};
-
-enum icp_qat_css_fwtype {
-       CSS_AE_FIRMWARE = 0,
-       CSS_MMP_FIRMWARE = 1
-};
-
-struct icp_qat_uclo_page {
-       struct icp_qat_uclo_encap_page *encap_page;
-       struct icp_qat_uclo_region *region;
-       unsigned int flags;
-};
-
-struct icp_qat_uclo_region {
-       struct icp_qat_uclo_page *loaded;
-       struct icp_qat_uclo_page *page;
-};
-
-struct icp_qat_uclo_aeslice {
-       struct icp_qat_uclo_region *region;
-       struct icp_qat_uclo_page *page;
-       struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
-       struct icp_qat_uclo_encapme *encap_image;
-       unsigned int ctx_mask_assigned;
-       unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
-};
-
-struct icp_qat_uclo_aedata {
-       unsigned int slice_num;
-       unsigned int eff_ustore_size;
-       struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
-};
-
-struct icp_qat_uof_encap_obj {
-       char *beg_uof;
-       struct icp_qat_uof_objhdr *obj_hdr;
-       struct icp_qat_uof_chunkhdr *chunk_hdr;
-       struct icp_qat_uof_varmem_seg *var_mem_seg;
-};
-
-struct icp_qat_uclo_encap_uwblock {
-       unsigned int start_addr;
-       unsigned int words_num;
-       u64 micro_words;
-};
-
-struct icp_qat_uclo_encap_page {
-       unsigned int def_page;
-       unsigned int page_region;
-       unsigned int beg_addr_v;
-       unsigned int beg_addr_p;
-       unsigned int micro_words_num;
-       unsigned int uwblock_num;
-       struct icp_qat_uclo_encap_uwblock *uwblock;
-};
-
-struct icp_qat_uclo_encapme {
-       struct icp_qat_uof_image *img_ptr;
-       struct icp_qat_uclo_encap_page *page;
-       unsigned int ae_reg_num;
-       struct icp_qat_uof_ae_reg *ae_reg;
-       unsigned int init_regsym_num;
-       struct icp_qat_uof_init_regsym *init_regsym;
-       unsigned int sbreak_num;
-       struct icp_qat_uof_sbreak *sbreak;
-       unsigned int uwords_num;
-};
-
-struct icp_qat_uclo_init_mem_table {
-       unsigned int entry_num;
-       struct icp_qat_uof_initmem *init_mem;
-};
-
-struct icp_qat_uclo_objhdr {
-       char *file_buff;
-       unsigned int checksum;
-       unsigned int size;
-};
-
-struct icp_qat_uof_strtable {
-       unsigned int table_len;
-       unsigned int reserved;
-       u64 strings;
-};
-
-struct icp_qat_uclo_objhandle {
-       unsigned int prod_type;
-       unsigned int prod_rev;
-       struct icp_qat_uclo_objhdr *obj_hdr;
-       struct icp_qat_uof_encap_obj encap_uof_obj;
-       struct icp_qat_uof_strtable str_table;
-       struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
-       struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
-       struct icp_qat_uclo_init_mem_table init_mem_tab;
-       struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
-       struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
-       int uimage_num;
-       int uword_in_bytes;
-       int global_inited;
-       unsigned int ae_num;
-       unsigned int ustore_phy_size;
-       void *obj_buf;
-       u64 *uword_buf;
-};
-
-struct icp_qat_uof_uword_block {
-       unsigned int start_addr;
-       unsigned int words_num;
-       unsigned int uword_offset;
-       unsigned int reserved;
-};
-
-struct icp_qat_uof_filehdr {
-       unsigned short file_id;
-       unsigned short reserved1;
-       char min_ver;
-       char maj_ver;
-       unsigned short reserved2;
-       unsigned short max_chunks;
-       unsigned short num_chunks;
-};
-
-struct icp_qat_uof_filechunkhdr {
-       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
-       unsigned int checksum;
-       unsigned int offset;
-       unsigned int size;
-};
-
-struct icp_qat_uof_objhdr {
-       unsigned int ac_dev_type;
-       unsigned short min_cpu_ver;
-       unsigned short max_cpu_ver;
-       short max_chunks;
-       short num_chunks;
-       unsigned int reserved1;
-       unsigned int reserved2;
-};
-
-struct icp_qat_uof_chunkhdr {
-       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
-       unsigned int offset;
-       unsigned int size;
-};
-
-struct icp_qat_uof_memvar_attr {
-       unsigned int offset_in_byte;
-       unsigned int value;
-};
-
-struct icp_qat_uof_initmem {
-       unsigned int sym_name;
-       char region;
-       char scope;
-       unsigned short reserved1;
-       unsigned int addr;
-       unsigned int num_in_bytes;
-       unsigned int val_attr_num;
-};
-
-struct icp_qat_uof_init_regsym {
-       unsigned int sym_name;
-       char init_type;
-       char value_type;
-       char reg_type;
-       unsigned char ctx;
-       unsigned int reg_addr;
-       unsigned int value;
-};
-
-struct icp_qat_uof_varmem_seg {
-       unsigned int sram_base;
-       unsigned int sram_size;
-       unsigned int sram_alignment;
-       unsigned int sdram_base;
-       unsigned int sdram_size;
-       unsigned int sdram_alignment;
-       unsigned int sdram1_base;
-       unsigned int sdram1_size;
-       unsigned int sdram1_alignment;
-       unsigned int scratch_base;
-       unsigned int scratch_size;
-       unsigned int scratch_alignment;
-};
-
-struct icp_qat_uof_gtid {
-       char tool_id[ICP_QAT_UOF_OBJID_LEN];
-       int tool_ver;
-       unsigned int reserved1;
-       unsigned int reserved2;
-};
-
-struct icp_qat_uof_sbreak {
-       unsigned int page_num;
-       unsigned int virt_uaddr;
-       unsigned char sbreak_type;
-       unsigned char reg_type;
-       unsigned short reserved1;
-       unsigned int addr_offset;
-       unsigned int reg_addr;
-};
-
-struct icp_qat_uof_code_page {
-       unsigned int page_region;
-       unsigned int page_num;
-       unsigned char def_page;
-       unsigned char reserved2;
-       unsigned short reserved1;
-       unsigned int beg_addr_v;
-       unsigned int beg_addr_p;
-       unsigned int neigh_reg_tab_offset;
-       unsigned int uc_var_tab_offset;
-       unsigned int imp_var_tab_offset;
-       unsigned int imp_expr_tab_offset;
-       unsigned int code_area_offset;
-};
-
-struct icp_qat_uof_image {
-       unsigned int img_name;
-       unsigned int ae_assigned;
-       unsigned int ctx_assigned;
-       unsigned int ac_dev_type;
-       unsigned int entry_address;
-       unsigned int fill_pattern[2];
-       unsigned int reloadable_size;
-       unsigned char sensitivity;
-       unsigned char reserved;
-       unsigned short ae_mode;
-       unsigned short max_ver;
-       unsigned short min_ver;
-       unsigned short image_attrib;
-       unsigned short reserved2;
-       unsigned short page_region_num;
-       unsigned short numpages;
-       unsigned int reg_tab_offset;
-       unsigned int init_reg_sym_tab;
-       unsigned int sbreak_tab;
-       unsigned int app_metadata;
-};
-
-struct icp_qat_uof_objtable {
-       unsigned int entry_num;
-};
-
-struct icp_qat_uof_ae_reg {
-       unsigned int name;
-       unsigned int vis_name;
-       unsigned short type;
-       unsigned short addr;
-       unsigned short access_mode;
-       unsigned char visible;
-       unsigned char reserved1;
-       unsigned short ref_count;
-       unsigned short reserved2;
-       unsigned int xo_id;
-};
-
-struct icp_qat_uof_code_area {
-       unsigned int micro_words_num;
-       unsigned int uword_block_tab;
-};
-
-struct icp_qat_uof_batch_init {
-       unsigned int ae;
-       unsigned int addr;
-       unsigned int *value;
-       unsigned int size;
-       struct icp_qat_uof_batch_init *next;
-};
-
-struct icp_qat_suof_img_hdr {
-       char          *simg_buf;
-       unsigned long simg_len;
-       char          *css_header;
-       char          *css_key;
-       char          *css_signature;
-       char          *css_simg;
-       unsigned long simg_size;
-       unsigned int  ae_num;
-       unsigned int  ae_mask;
-       unsigned int  fw_type;
-       unsigned long simg_name;
-       unsigned long appmeta_data;
-};
-
-struct icp_qat_suof_img_tbl {
-       unsigned int num_simgs;
-       struct icp_qat_suof_img_hdr *simg_hdr;
-};
-
-struct icp_qat_suof_handle {
-       unsigned int  file_id;
-       unsigned int  check_sum;
-       char          min_ver;
-       char          maj_ver;
-       char          fw_type;
-       char          *suof_buf;
-       unsigned int  suof_size;
-       char          *sym_str;
-       unsigned int  sym_size;
-       struct icp_qat_suof_img_tbl img_table;
-};
-
-struct icp_qat_fw_auth_desc {
-       unsigned int   img_len;
-       unsigned int   ae_mask;
-       unsigned int   css_hdr_high;
-       unsigned int   css_hdr_low;
-       unsigned int   img_high;
-       unsigned int   img_low;
-       unsigned int   signature_high;
-       unsigned int   signature_low;
-       unsigned int   fwsk_pub_high;
-       unsigned int   fwsk_pub_low;
-       unsigned int   img_ae_mode_data_high;
-       unsigned int   img_ae_mode_data_low;
-       unsigned int   img_ae_init_data_high;
-       unsigned int   img_ae_init_data_low;
-       unsigned int   img_ae_insts_high;
-       unsigned int   img_ae_insts_low;
-};
-
-struct icp_qat_auth_chunk {
-       struct icp_qat_fw_auth_desc fw_auth_desc;
-       u64 chunk_size;
-       u64 chunk_bus_addr;
-};
-
-struct icp_qat_css_hdr {
-       unsigned int module_type;
-       unsigned int header_len;
-       unsigned int header_ver;
-       unsigned int module_id;
-       unsigned int module_vendor;
-       unsigned int date;
-       unsigned int size;
-       unsigned int key_size;
-       unsigned int module_size;
-       unsigned int exponent_size;
-       unsigned int fw_type;
-       unsigned int reserved[21];
-};
-
-struct icp_qat_simg_ae_mode {
-       unsigned int     file_id;
-       unsigned short   maj_ver;
-       unsigned short   min_ver;
-       unsigned int     dev_type;
-       unsigned short   devmax_ver;
-       unsigned short   devmin_ver;
-       unsigned int     ae_mask;
-       unsigned int     ctx_enables;
-       char             fw_type;
-       char             ctx_mode;
-       char             nn_mode;
-       char             lm0_mode;
-       char             lm1_mode;
-       char             scs_mode;
-       char             lm2_mode;
-       char             lm3_mode;
-       char             tindex_mode;
-       unsigned char    reserved[7];
-       char             simg_name[256];
-       char             appmeta_data[256];
-};
-
-struct icp_qat_suof_filehdr {
-       unsigned int     file_id;
-       unsigned int     check_sum;
-       char             min_ver;
-       char             maj_ver;
-       char             fw_type;
-       char             reserved;
-       unsigned short   max_chunks;
-       unsigned short   num_chunks;
-};
-
-struct icp_qat_suof_chunk_hdr {
-       char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN];
-       u64 offset;
-       u64 size;
-};
-
-struct icp_qat_suof_strtable {
-       unsigned int tab_length;
-       unsigned int strings;
-};
-
-struct icp_qat_suof_objhdr {
-       unsigned int img_length;
-       unsigned int reserved;
-};
-
-struct icp_qat_mof_file_hdr {
-       unsigned int file_id;
-       unsigned int checksum;
-       char min_ver;
-       char maj_ver;
-       unsigned short reserved;
-       unsigned short max_chunks;
-       unsigned short num_chunks;
-};
-
-struct icp_qat_mof_chunkhdr {
-       char chunk_id[ICP_QAT_MOF_OBJ_ID_LEN];
-       u64 offset;
-       u64 size;
-};
-
-struct icp_qat_mof_str_table {
-       unsigned int tab_len;
-       unsigned int strings;
-};
-
-struct icp_qat_mof_obj_hdr {
-       unsigned short max_chunks;
-       unsigned short num_chunks;
-       unsigned int reserved;
-};
-
-struct icp_qat_mof_obj_chunkhdr {
-       char chunk_id[ICP_QAT_MOF_OBJ_CHUNKID_LEN];
-       u64 offset;
-       u64 size;
-       unsigned int name;
-       unsigned int reserved;
-};
-
-struct icp_qat_mof_objhdr {
-       char *obj_name;
-       char *obj_buf;
-       unsigned int obj_size;
-};
-
-struct icp_qat_mof_table {
-       unsigned int num_objs;
-       struct icp_qat_mof_objhdr *obj_hdr;
-};
-
-struct icp_qat_mof_handle {
-       unsigned int file_id;
-       unsigned int checksum;
-       char min_ver;
-       char maj_ver;
-       char *mof_buf;
-       u32 mof_size;
-       char *sym_str;
-       unsigned int sym_size;
-       char *uobjs_hdr;
-       char *sobjs_hdr;
-       struct icp_qat_mof_table obj_table;
-};
-#endif
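
The CSS macros above encode the signed-image layout: a CSS header, the firmware signing public key, the signature, then the AE image itself, with chip_info->css_3k on the loader handle selecting the RSA3K (DSS) or RSA2K (CSS) sizes. ICP_QAT_AE_IMG_OFFSET() skips the header, the key's modulus and exponent, and the signature to reach the image body. A sketch of locating it (example_locate_ae_img is hypothetical):

/* Sketch: step past the CSS header, signing-key modulus/exponent and
 * signature to the AE image. ICP_QAT_CSS_AE_SIMG_LEN(handle) gives the
 * expected total size of the signed image for bounds checking.
 */
static char *example_locate_ae_img(struct icp_qat_fw_loader_handle *handle,
				   char *simg_buf)
{
	return simg_buf + ICP_QAT_AE_IMG_OFFSET(handle);
}
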
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
deleted file mode 100644 (file)
index 538dcbf..0000000
+++ /dev/null
@@ -1,1424 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/crypto.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/aes.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
-#include <crypto/algapi.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/xts.h>
-#include <linux/dma-mapping.h>
-#include "adf_accel_devices.h"
-#include "qat_algs_send.h"
-#include "adf_common_drv.h"
-#include "qat_crypto.h"
-#include "icp_qat_hw.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-#include "qat_bl.h"
-
-#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
-                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
-                                      ICP_QAT_HW_CIPHER_ENCRYPT)
-
-#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
-                                      ICP_QAT_HW_CIPHER_KEY_CONVERT, \
-                                      ICP_QAT_HW_CIPHER_DECRYPT)
-
-#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
-       ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
-                                      ICP_QAT_HW_CIPHER_NO_CONVERT, \
-                                      ICP_QAT_HW_CIPHER_DECRYPT)
-
-#define HW_CAP_AES_V2(accel_dev) \
-       (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
-        ICP_ACCEL_CAPABILITIES_AES_V2)
-
-static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
-
-/* Common content descriptor */
-struct qat_alg_cd {
-       union {
-               struct qat_enc { /* Encrypt content desc */
-                       struct icp_qat_hw_cipher_algo_blk cipher;
-                       struct icp_qat_hw_auth_algo_blk hash;
-               } qat_enc_cd;
-               struct qat_dec { /* Decrypt content desc */
-                       struct icp_qat_hw_auth_algo_blk hash;
-                       struct icp_qat_hw_cipher_algo_blk cipher;
-               } qat_dec_cd;
-       };
-} __aligned(64);
-
-struct qat_alg_aead_ctx {
-       struct qat_alg_cd *enc_cd;
-       struct qat_alg_cd *dec_cd;
-       dma_addr_t enc_cd_paddr;
-       dma_addr_t dec_cd_paddr;
-       struct icp_qat_fw_la_bulk_req enc_fw_req;
-       struct icp_qat_fw_la_bulk_req dec_fw_req;
-       struct crypto_shash *hash_tfm;
-       enum icp_qat_hw_auth_algo qat_hash_alg;
-       struct qat_crypto_instance *inst;
-       union {
-               struct sha1_state sha1;
-               struct sha256_state sha256;
-               struct sha512_state sha512;
-       };
-       char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
-       char opad[SHA512_BLOCK_SIZE];
-};
-
-struct qat_alg_skcipher_ctx {
-       struct icp_qat_hw_cipher_algo_blk *enc_cd;
-       struct icp_qat_hw_cipher_algo_blk *dec_cd;
-       dma_addr_t enc_cd_paddr;
-       dma_addr_t dec_cd_paddr;
-       struct icp_qat_fw_la_bulk_req enc_fw_req;
-       struct icp_qat_fw_la_bulk_req dec_fw_req;
-       struct qat_crypto_instance *inst;
-       struct crypto_skcipher *ftfm;
-       struct crypto_cipher *tweak;
-       bool fallback;
-       int mode;
-};
-
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
-       switch (qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               return ICP_QAT_HW_SHA1_STATE1_SZ;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               return ICP_QAT_HW_SHA256_STATE1_SZ;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               return ICP_QAT_HW_SHA512_STATE1_SZ;
-       default:
-               return -EFAULT;
-       }
-}
-
-static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
-                                 struct qat_alg_aead_ctx *ctx,
-                                 const u8 *auth_key,
-                                 unsigned int auth_keylen)
-{
-       SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
-       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
-       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
-       __be32 *hash_state_out;
-       __be64 *hash512_state_out;
-       int i, offset;
-
-       memset(ctx->ipad, 0, block_size);
-       memset(ctx->opad, 0, block_size);
-       shash->tfm = ctx->hash_tfm;
-
-       if (auth_keylen > block_size) {
-               int ret = crypto_shash_digest(shash, auth_key,
-                                             auth_keylen, ctx->ipad);
-               if (ret)
-                       return ret;
-
-               memcpy(ctx->opad, ctx->ipad, digest_size);
-       } else {
-               memcpy(ctx->ipad, auth_key, auth_keylen);
-               memcpy(ctx->opad, auth_key, auth_keylen);
-       }
-
-       for (i = 0; i < block_size; i++) {
-               char *ipad_ptr = ctx->ipad + i;
-               char *opad_ptr = ctx->opad + i;
-               *ipad_ptr ^= HMAC_IPAD_VALUE;
-               *opad_ptr ^= HMAC_OPAD_VALUE;
-       }
-
-       if (crypto_shash_init(shash))
-               return -EFAULT;
-
-       if (crypto_shash_update(shash, ctx->ipad, block_size))
-               return -EFAULT;
-
-       hash_state_out = (__be32 *)hash->sha.state1;
-       hash512_state_out = (__be64 *)hash_state_out;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               if (crypto_shash_export(shash, &ctx->sha1))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               if (crypto_shash_export(shash, &ctx->sha256))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               if (crypto_shash_export(shash, &ctx->sha512))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
-                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
-               break;
-       default:
-               return -EFAULT;
-       }
-
-       if (crypto_shash_init(shash))
-               return -EFAULT;
-
-       if (crypto_shash_update(shash, ctx->opad, block_size))
-               return -EFAULT;
-
-       offset = qat_get_inter_state_size(ctx->qat_hash_alg);
-       if (offset < 0)
-               return -EFAULT;
-       offset = round_up(offset, 8);
-
-       hash_state_out = (__be32 *)(hash->sha.state1 + offset);
-       hash512_state_out = (__be64 *)hash_state_out;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               if (crypto_shash_export(shash, &ctx->sha1))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               if (crypto_shash_export(shash, &ctx->sha256))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
-                       *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               if (crypto_shash_export(shash, &ctx->sha512))
-                       return -EFAULT;
-               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
-                       *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
-               break;
-       default:
-               return -EFAULT;
-       }
-       memzero_explicit(ctx->ipad, block_size);
-       memzero_explicit(ctx->opad, block_size);
-       return 0;
-}
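-
-/*
- * HMAC precompute in brief (RFC 2104): a key longer than the hash
- * block size is first digested, a shorter key is zero-padded; the
- * result is XORed with the ipad (0x36) and opad (0x5c) constants,
- * one block of each is absorbed, and the partial digests are
- * exported so the firmware can resume the inner and outer hashes.
- * Minimal, hardware-independent sketch of the pad derivation step
- * (an assumed helper for illustration, not part of this file):
- */
-static inline void qat_hmac_derive_pads_sketch(u8 *ipad, u8 *opad,
-                                              int block_size)
-{
-       int i;
-
-       /* ipad and opad already hold the zero-padded key bytes */
-       for (i = 0; i < block_size; i++) {
-               ipad[i] ^= HMAC_IPAD_VALUE;
-               opad[i] ^= HMAC_OPAD_VALUE;
-       }
-}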
-
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
-{
-       header->hdr_flags =
-               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
-       header->comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
-                                           QAT_COMN_PTR_TYPE_SGL);
-       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
-                                 ICP_QAT_FW_LA_PARTIAL_NONE);
-       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-                                          ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-                               ICP_QAT_FW_LA_NO_PROTO);
-       ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-                                      ICP_QAT_FW_LA_NO_UPDATE_STATE);
-}
-
-static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
-                                        int alg,
-                                        struct crypto_authenc_keys *keys,
-                                        int mode)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
-       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
-       struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
-       struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
-       struct icp_qat_hw_auth_algo_blk *hash =
-               (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
-               sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
-       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       void *ptr = &req_tmpl->cd_ctrl;
-       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
-       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
-
-       /* CD setup */
-       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
-       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
-       hash->sha.inner_setup.auth_config.config =
-               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
-                                            ctx->qat_hash_alg, digestsize);
-       hash->sha.inner_setup.auth_counter.counter =
-               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
-
-       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
-               return -EFAULT;
-
-       /* Request setup */
-       qat_alg_init_common_hdr(header);
-       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
-       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
-                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_RET_AUTH_RES);
-       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
-       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
-       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
-
-       /* Cipher CD config setup */
-       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
-       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
-       cipher_cd_ctrl->cipher_cfg_offset = 0;
-       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
-       /* Auth CD config setup */
-       hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
-       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
-       hash_cd_ctrl->inner_res_sz = digestsize;
-       hash_cd_ctrl->final_sz = digestsize;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               hash_cd_ctrl->inner_state1_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
-               hash_cd_ctrl->inner_state2_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
-               break;
-       default:
-               break;
-       }
-       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
-                       ((sizeof(struct icp_qat_hw_auth_setup) +
-                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-       return 0;
-}
-
-static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
-                                        int alg,
-                                        struct crypto_authenc_keys *keys,
-                                        int mode)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
-       unsigned int digestsize = crypto_aead_authsize(aead_tfm);
-       struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
-       struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
-       struct icp_qat_hw_cipher_algo_blk *cipher =
-               (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
-               sizeof(struct icp_qat_hw_auth_setup) +
-               roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
-       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
-       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
-       void *ptr = &req_tmpl->cd_ctrl;
-       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
-       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
-       struct icp_qat_fw_la_auth_req_params *auth_param =
-               (struct icp_qat_fw_la_auth_req_params *)
-               ((char *)&req_tmpl->serv_specif_rqpars +
-               sizeof(struct icp_qat_fw_la_cipher_req_params));
-
-       /* CD setup */
-       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
-       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
-       hash->sha.inner_setup.auth_config.config =
-               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
-                                            ctx->qat_hash_alg,
-                                            digestsize);
-       hash->sha.inner_setup.auth_counter.counter =
-               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
-
-       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
-               return -EFAULT;
-
-       /* Request setup */
-       qat_alg_init_common_hdr(header);
-       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
-       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
-                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
-       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_NO_RET_AUTH_RES);
-       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-                                  ICP_QAT_FW_LA_CMP_AUTH_RES);
-       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
-       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
-
-       /* Cipher CD config setup */
-       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
-       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
-       cipher_cd_ctrl->cipher_cfg_offset =
-               (sizeof(struct icp_qat_hw_auth_setup) +
-                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
-       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-
-       /* Auth CD config setup */
-       hash_cd_ctrl->hash_cfg_offset = 0;
-       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
-       hash_cd_ctrl->inner_res_sz = digestsize;
-       hash_cd_ctrl->final_sz = digestsize;
-
-       switch (ctx->qat_hash_alg) {
-       case ICP_QAT_HW_AUTH_ALGO_SHA1:
-               hash_cd_ctrl->inner_state1_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
-               hash_cd_ctrl->inner_state2_sz =
-                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA256:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
-               break;
-       case ICP_QAT_HW_AUTH_ALGO_SHA512:
-               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
-               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
-               break;
-       default:
-               break;
-       }
-
-       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
-                       ((sizeof(struct icp_qat_hw_auth_setup) +
-                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
-       auth_param->auth_res_sz = digestsize;
-       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       return 0;
-}
-
-static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
-                                     struct icp_qat_fw_la_bulk_req *req,
-                                     struct icp_qat_hw_cipher_algo_blk *cd,
-                                     const u8 *key, unsigned int keylen)
-{
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-       struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
-       struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
-       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
-       int mode = ctx->mode;
-
-       qat_alg_init_common_hdr(header);
-       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
-       cd_pars->u.s.content_desc_params_sz =
-                               sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
-
-       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
-               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
-                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
-
-               /*
-                * Store both XTS keys in the CD; only the first key is
-                * sent to the HW, the second is used for tweak
-                * calculation.
-                */
-               memcpy(cd->ucs_aes.key, key, keylen);
-               keylen = keylen / 2;
-       } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
-               ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
-                                            ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
-               memcpy(cd->ucs_aes.key, key, keylen);
-               keylen = round_up(keylen, 16);
-       } else {
-               memcpy(cd->aes.key, key, keylen);
-       }
-
-       /* Cipher CD config setup */
-       cd_ctrl->cipher_key_sz = keylen >> 3;
-       cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
-       cd_ctrl->cipher_cfg_offset = 0;
-       ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
-       ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-}
-
-static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
-                                     int alg, const u8 *key,
-                                     unsigned int keylen, int mode)
-{
-       struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
-       struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-
-       qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
-       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
-       enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
-}
-
-static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
-                                   u8 *key_reverse)
-{
-       struct crypto_aes_ctx aes_expanded;
-       int nrounds;
-       u8 *key;
-
-       aes_expandkey(&aes_expanded, key_forward, keylen);
-       if (keylen == AES_KEYSIZE_128) {
-               nrounds = 10;
-               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
-               memcpy(key_reverse, key, AES_BLOCK_SIZE);
-       } else {
-               /* AES_KEYSIZE_256 */
-               nrounds = 14;
-               key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
-               memcpy(key_reverse, key, AES_BLOCK_SIZE);
-               memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
-                      AES_BLOCK_SIZE);
-       }
-}
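-
-/*
- * Rationale for the reversal above: when the hardware cannot convert
- * keys itself, the driver must supply the decryption key material,
- * which for AES is the tail of the encryption key schedule.
- * aes_expandkey() lays out consecutive 16-byte round keys in
- * key_enc[], so AES-128 needs round key 10 and AES-256 needs round
- * keys 14 and 13, copied last-first.
- */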
-
-static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
-                                     int alg, const u8 *key,
-                                     unsigned int keylen, int mode)
-{
-       struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
-       struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
-       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
-
-       qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
-       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
-
-       if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
-               /* Key reversal in HW is not supported; set the no-convert flag */
-               dec_cd->aes.cipher_config.val =
-                               QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
-
-               /* In-place key reversal */
-               qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
-                                       dec_cd->ucs_aes.key);
-       } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
-               dec_cd->aes.cipher_config.val =
-                                       QAT_AES_HW_CONFIG_DEC(alg, mode);
-       } else {
-               dec_cd->aes.cipher_config.val =
-                                       QAT_AES_HW_CONFIG_ENC(alg, mode);
-       }
-}
-
-static int qat_alg_validate_key(int key_len, int *alg, int mode)
-{
-       if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
-               switch (key_len) {
-               case AES_KEYSIZE_128:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
-                       break;
-               case AES_KEYSIZE_192:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
-                       break;
-               case AES_KEYSIZE_256:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       } else {
-               switch (key_len) {
-               case AES_KEYSIZE_128 << 1:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
-                       break;
-               case AES_KEYSIZE_256 << 1:
-                       *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
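-
-/*
- * In the XTS branch above, key lengths are doubled because an XTS
- * key concatenates two equal-size AES keys (data key and tweak key);
- * the algorithm ID still names the single-key strength.
- * Double-length AES-192 (48 bytes) is not supported by the hardware;
- * the XTS setkey path routes it to the fallback tfm before this
- * check is reached.
- */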
-
-static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
-                                     unsigned int keylen,  int mode)
-{
-       struct crypto_authenc_keys keys;
-       int alg;
-
-       if (crypto_authenc_extractkeys(&keys, key, keylen))
-               goto bad_key;
-
-       if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
-               goto bad_key;
-
-       if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
-               goto error;
-
-       if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
-               goto error;
-
-       memzero_explicit(&keys, sizeof(keys));
-       return 0;
-bad_key:
-       memzero_explicit(&keys, sizeof(keys));
-       return -EINVAL;
-error:
-       memzero_explicit(&keys, sizeof(keys));
-       return -EFAULT;
-}
-
-static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
-                                         const u8 *key,
-                                         unsigned int keylen,
-                                         int mode)
-{
-       int alg;
-
-       if (qat_alg_validate_key(keylen, &alg, mode))
-               return -EINVAL;
-
-       qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
-       qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
-       return 0;
-}
-
-static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
-                             unsigned int keylen)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
-       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
-
-       return qat_alg_aead_init_sessions(tfm, key, keylen,
-                                         ICP_QAT_HW_CIPHER_CBC_MODE);
-}
-
-static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
-                              unsigned int keylen)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-       struct qat_crypto_instance *inst = NULL;
-       int node = numa_node_id();
-       struct device *dev;
-       int ret;
-
-       inst = qat_crypto_get_instance_node(node);
-       if (!inst)
-               return -EINVAL;
-       dev = &GET_DEV(inst->accel_dev);
-       ctx->inst = inst;
-       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                        &ctx->enc_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->enc_cd) {
-               ret = -ENOMEM;
-               goto out_free_inst;
-       }
-       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                        &ctx->dec_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->dec_cd) {
-               ret = -ENOMEM;
-               goto out_free_enc;
-       }
-
-       ret = qat_alg_aead_init_sessions(tfm, key, keylen,
-                                        ICP_QAT_HW_CIPHER_CBC_MODE);
-       if (ret)
-               goto out_free_all;
-
-       return 0;
-
-out_free_all:
-       memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                         ctx->dec_cd, ctx->dec_cd_paddr);
-       ctx->dec_cd = NULL;
-out_free_enc:
-       memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
-       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                         ctx->enc_cd, ctx->enc_cd_paddr);
-       ctx->enc_cd = NULL;
-out_free_inst:
-       ctx->inst = NULL;
-       qat_crypto_put_instance(inst);
-       return ret;
-}
-
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
-                              unsigned int keylen)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-       if (ctx->enc_cd)
-               return qat_alg_aead_rekey(tfm, key, keylen);
-       else
-               return qat_alg_aead_newkey(tfm, key, keylen);
-}
-
-static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
-                                 struct qat_crypto_request *qat_req)
-{
-       struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct aead_request *areq = qat_req->aead_req;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
-
-       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
-       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
-               res = -EBADMSG;
-       aead_request_complete(areq, res);
-}
-
-static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
-{
-       struct skcipher_request *sreq = qat_req->skcipher_req;
-       u64 iv_lo_prev;
-       u64 iv_lo;
-       u64 iv_hi;
-
-       memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
-
-       iv_lo = be64_to_cpu(qat_req->iv_lo);
-       iv_hi = be64_to_cpu(qat_req->iv_hi);
-
-       iv_lo_prev = iv_lo;
-       iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
-       if (iv_lo < iv_lo_prev)
-               iv_hi++;
-
-       qat_req->iv_lo = cpu_to_be64(iv_lo);
-       qat_req->iv_hi = cpu_to_be64(iv_hi);
-}
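-
-/*
- * The CTR IV above is treated as a single 128-bit big-endian counter
- * stored as two 64-bit halves (iv_hi:iv_lo). The number of consumed
- * blocks is added to the low half and the carry is detected by
- * unsigned wrap-around (iv_lo < iv_lo_prev); e.g. iv_lo ==
- * 0xffffffffffffffff advanced by one block gives iv_lo == 0 and
- * increments iv_hi.
- */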
-
-static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
-{
-       struct skcipher_request *sreq = qat_req->skcipher_req;
-       int offset = sreq->cryptlen - AES_BLOCK_SIZE;
-       struct scatterlist *sgl;
-
-       if (qat_req->encryption)
-               sgl = sreq->dst;
-       else
-               sgl = sreq->src;
-
-       scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
-}
-
-static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
-{
-       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-
-       switch (ctx->mode) {
-       case ICP_QAT_HW_CIPHER_CTR_MODE:
-               qat_alg_update_iv_ctr_mode(qat_req);
-               break;
-       case ICP_QAT_HW_CIPHER_CBC_MODE:
-               qat_alg_update_iv_cbc_mode(qat_req);
-               break;
-       case ICP_QAT_HW_CIPHER_XTS_MODE:
-               break;
-       default:
-               dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
-                        ctx->mode);
-       }
-}
-
-static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
-                                     struct qat_crypto_request *qat_req)
-{
-       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct skcipher_request *sreq = qat_req->skcipher_req;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
-
-       qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
-       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
-               res = -EINVAL;
-
-       if (qat_req->encryption)
-               qat_alg_update_iv(qat_req);
-
-       memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
-
-       skcipher_request_complete(sreq, res);
-}
-
-void qat_alg_callback(void *resp)
-{
-       struct icp_qat_fw_la_resp *qat_resp = resp;
-       struct qat_crypto_request *qat_req =
-                               (void *)(__force long)qat_resp->opaque_data;
-       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
-
-       qat_req->cb(qat_resp, qat_req);
-
-       qat_alg_send_backlog(backlog);
-}
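-
-/*
- * The opaque_data round trip above is the usual completion pattern:
- * the request pointer is stashed in the 64-bit opaque field at
- * submission time and recovered from the firmware response here, so
- * no lookup structure is needed to match responses to requests.
- */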
-
-static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
-                                   struct qat_crypto_instance *inst,
-                                   struct crypto_async_request *base)
-{
-       struct qat_alg_req *alg_req = &qat_req->alg_req;
-
-       alg_req->fw_req = (u32 *)&qat_req->req;
-       alg_req->tx_ring = inst->sym_tx;
-       alg_req->base = base;
-       alg_req->backlog = &inst->backlog;
-
-       return qat_alg_send_message(alg_req);
-}
-
-static int qat_alg_aead_dec(struct aead_request *areq)
-{
-       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       struct icp_qat_fw_la_auth_req_params *auth_param;
-       struct icp_qat_fw_la_bulk_req *msg;
-       int digest_size = crypto_aead_authsize(aead_tfm);
-       gfp_t f = qat_algs_alloc_flags(&areq->base);
-       int ret;
-       u32 cipher_len;
-
-       cipher_len = areq->cryptlen - digest_size;
-       if (cipher_len % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->dec_fw_req;
-       qat_req->aead_ctx = ctx;
-       qat_req->aead_req = areq;
-       qat_req->cb = qat_aead_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       cipher_param->cipher_length = cipher_len;
-       cipher_param->cipher_offset = areq->assoclen;
-       memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
-       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
-       auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
-       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
-       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-       struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       struct icp_qat_fw_la_auth_req_params *auth_param;
-       gfp_t f = qat_algs_alloc_flags(&areq->base);
-       struct icp_qat_fw_la_bulk_req *msg;
-       u8 *iv = areq->iv;
-       int ret;
-
-       if (areq->cryptlen % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->enc_fw_req;
-       qat_req->aead_ctx = ctx;
-       qat_req->aead_req = areq;
-       qat_req->cb = qat_aead_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
-
-       memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-       cipher_param->cipher_length = areq->cryptlen;
-       cipher_param->cipher_offset = areq->assoclen;
-
-       auth_param->auth_off = 0;
-       auth_param->auth_len = areq->assoclen + areq->cryptlen;
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
-                                 const u8 *key, unsigned int keylen,
-                                 int mode)
-{
-       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-       memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
-       memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
-
-       return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
-}
-
-static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
-                                  const u8 *key, unsigned int keylen,
-                                  int mode)
-{
-       struct qat_crypto_instance *inst = NULL;
-       struct device *dev;
-       int node = numa_node_id();
-       int ret;
-
-       inst = qat_crypto_get_instance_node(node);
-       if (!inst)
-               return -EINVAL;
-       dev = &GET_DEV(inst->accel_dev);
-       ctx->inst = inst;
-       ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
-                                        &ctx->enc_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->enc_cd) {
-               ret = -ENOMEM;
-               goto out_free_instance;
-       }
-       ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
-                                        &ctx->dec_cd_paddr,
-                                        GFP_ATOMIC);
-       if (!ctx->dec_cd) {
-               ret = -ENOMEM;
-               goto out_free_enc;
-       }
-
-       ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
-       if (ret)
-               goto out_free_all;
-
-       return 0;
-
-out_free_all:
-       memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
-       dma_free_coherent(dev, sizeof(*ctx->dec_cd),
-                         ctx->dec_cd, ctx->dec_cd_paddr);
-       ctx->dec_cd = NULL;
-out_free_enc:
-       memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
-       dma_free_coherent(dev, sizeof(*ctx->enc_cd),
-                         ctx->enc_cd, ctx->enc_cd_paddr);
-       ctx->enc_cd = NULL;
-out_free_instance:
-       ctx->inst = NULL;
-       qat_crypto_put_instance(inst);
-       return ret;
-}
-
-static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
-                                  const u8 *key, unsigned int keylen,
-                                  int mode)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       ctx->mode = mode;
-
-       if (ctx->enc_cd)
-               return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
-       else
-               return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
-}
-
-static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
-                                      const u8 *key, unsigned int keylen)
-{
-       return qat_alg_skcipher_setkey(tfm, key, keylen,
-                                      ICP_QAT_HW_CIPHER_CBC_MODE);
-}
-
-static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
-                                      const u8 *key, unsigned int keylen)
-{
-       return qat_alg_skcipher_setkey(tfm, key, keylen,
-                                      ICP_QAT_HW_CIPHER_CTR_MODE);
-}
-
-static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
-                                      const u8 *key, unsigned int keylen)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int ret;
-
-       ret = xts_verify_key(tfm, key, keylen);
-       if (ret)
-               return ret;
-
-       if (keylen >> 1 == AES_KEYSIZE_192) {
-               ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
-               if (ret)
-                       return ret;
-
-               ctx->fallback = true;
-
-               return 0;
-       }
-
-       ctx->fallback = false;
-
-       ret = qat_alg_skcipher_setkey(tfm, key, keylen,
-                                     ICP_QAT_HW_CIPHER_XTS_MODE);
-       if (ret)
-               return ret;
-
-       if (HW_CAP_AES_V2(ctx->inst->accel_dev))
-               ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
-                                          keylen / 2);
-
-       return ret;
-}
-
-static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
-{
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
-       bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
-       u8 *iv = qat_req->skcipher_req->iv;
-
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-
-       if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
-               crypto_cipher_encrypt_one(ctx->tweak,
-                                         (u8 *)cipher_param->u.cipher_IV_array,
-                                         iv);
-       else
-               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-}
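-
-/*
- * On AES-v2 (UCS slice) parts the driver computes the XTS tweak
- * itself: the IV is encrypted with the second half of the XTS key
- * (ctx->tweak, programmed in the setkey path) before being placed in
- * the request, since only the data key was written into the content
- * descriptor.
- */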
-
-static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
-       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       gfp_t f = qat_algs_alloc_flags(&req->base);
-       struct icp_qat_fw_la_bulk_req *msg;
-       int ret;
-
-       if (req->cryptlen == 0)
-               return 0;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->enc_fw_req;
-       qat_req->skcipher_ctx = ctx;
-       qat_req->skcipher_req = req;
-       qat_req->cb = qat_skcipher_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       qat_req->encryption = true;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       cipher_param->cipher_length = req->cryptlen;
-       cipher_param->cipher_offset = 0;
-
-       qat_alg_set_req_iv(qat_req);
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
-{
-       if (req->cryptlen % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       return qat_alg_skcipher_encrypt(req);
-}
-
-static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
-       struct skcipher_request *nreq = skcipher_request_ctx(req);
-
-       if (req->cryptlen < XTS_BLOCK_SIZE)
-               return -EINVAL;
-
-       if (ctx->fallback) {
-               memcpy(nreq, req, sizeof(*req));
-               skcipher_request_set_tfm(nreq, ctx->ftfm);
-               return crypto_skcipher_encrypt(nreq);
-       }
-
-       return qat_alg_skcipher_encrypt(req);
-}
-
-static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
-       struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
-       struct icp_qat_fw_la_cipher_req_params *cipher_param;
-       gfp_t f = qat_algs_alloc_flags(&req->base);
-       struct icp_qat_fw_la_bulk_req *msg;
-       int ret;
-
-       if (req->cryptlen == 0)
-               return 0;
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
-                                &qat_req->buf, NULL, f);
-       if (unlikely(ret))
-               return ret;
-
-       msg = &qat_req->req;
-       *msg = ctx->dec_fw_req;
-       qat_req->skcipher_ctx = ctx;
-       qat_req->skcipher_req = req;
-       qat_req->cb = qat_skcipher_alg_callback;
-       qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
-       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
-       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
-       qat_req->encryption = false;
-       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
-       cipher_param->cipher_length = req->cryptlen;
-       cipher_param->cipher_offset = 0;
-
-       qat_alg_set_req_iv(qat_req);
-       qat_alg_update_iv(qat_req);
-
-       ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
-{
-       if (req->cryptlen % AES_BLOCK_SIZE != 0)
-               return -EINVAL;
-
-       return qat_alg_skcipher_decrypt(req);
-}
-
-static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
-       struct skcipher_request *nreq = skcipher_request_ctx(req);
-
-       if (req->cryptlen < XTS_BLOCK_SIZE)
-               return -EINVAL;
-
-       if (ctx->fallback) {
-               memcpy(nreq, req, sizeof(*req));
-               skcipher_request_set_tfm(nreq, ctx->ftfm);
-               return crypto_skcipher_decrypt(nreq);
-       }
-
-       return qat_alg_skcipher_decrypt(req);
-}
-
-static int qat_alg_aead_init(struct crypto_aead *tfm,
-                            enum icp_qat_hw_auth_algo hash,
-                            const char *hash_name)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-
-       ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
-       if (IS_ERR(ctx->hash_tfm))
-               return PTR_ERR(ctx->hash_tfm);
-       ctx->qat_hash_alg = hash;
-       crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
-       return 0;
-}
-
-static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
-{
-       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
-}
-
-static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
-{
-       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
-}
-
-static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
-{
-       return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
-}
-
-static void qat_alg_aead_exit(struct crypto_aead *tfm)
-{
-       struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev;
-
-       crypto_free_shash(ctx->hash_tfm);
-
-       if (!inst)
-               return;
-
-       dev = &GET_DEV(inst->accel_dev);
-       if (ctx->enc_cd) {
-               memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
-               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                                 ctx->enc_cd, ctx->enc_cd_paddr);
-       }
-       if (ctx->dec_cd) {
-               memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
-                                 ctx->dec_cd, ctx->dec_cd_paddr);
-       }
-       qat_crypto_put_instance(inst);
-}
-
-static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
-       return 0;
-}
-
-static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int reqsize;
-
-       ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
-                                         CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(ctx->ftfm))
-               return PTR_ERR(ctx->ftfm);
-
-       ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
-       if (IS_ERR(ctx->tweak)) {
-               crypto_free_skcipher(ctx->ftfm);
-               return PTR_ERR(ctx->tweak);
-       }
-
-       reqsize = max(sizeof(struct qat_crypto_request),
-                     sizeof(struct skcipher_request) +
-                     crypto_skcipher_reqsize(ctx->ftfm));
-       crypto_skcipher_set_reqsize(tfm, reqsize);
-
-       return 0;
-}
-
-static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev;
-
-       if (!inst)
-               return;
-
-       dev = &GET_DEV(inst->accel_dev);
-       if (ctx->enc_cd) {
-               memset(ctx->enc_cd, 0,
-                      sizeof(struct icp_qat_hw_cipher_algo_blk));
-               dma_free_coherent(dev,
-                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
-                                 ctx->enc_cd, ctx->enc_cd_paddr);
-       }
-       if (ctx->dec_cd) {
-               memset(ctx->dec_cd, 0,
-                      sizeof(struct icp_qat_hw_cipher_algo_blk));
-               dma_free_coherent(dev,
-                                 sizeof(struct icp_qat_hw_cipher_algo_blk),
-                                 ctx->dec_cd, ctx->dec_cd_paddr);
-       }
-       qat_crypto_put_instance(inst);
-}
-
-static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
-{
-       struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       if (ctx->ftfm)
-               crypto_free_skcipher(ctx->ftfm);
-
-       if (ctx->tweak)
-               crypto_free_cipher(ctx->tweak);
-
-       qat_alg_skcipher_exit_tfm(tfm);
-}
-
-static struct aead_alg qat_aeads[] = { {
-       .base = {
-               .cra_name = "authenc(hmac(sha1),cbc(aes))",
-               .cra_driver_name = "qat_aes_cbc_hmac_sha1",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_blocksize = AES_BLOCK_SIZE,
-               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_alg_aead_sha1_init,
-       .exit = qat_alg_aead_exit,
-       .setkey = qat_alg_aead_setkey,
-       .decrypt = qat_alg_aead_dec,
-       .encrypt = qat_alg_aead_enc,
-       .ivsize = AES_BLOCK_SIZE,
-       .maxauthsize = SHA1_DIGEST_SIZE,
-}, {
-       .base = {
-               .cra_name = "authenc(hmac(sha256),cbc(aes))",
-               .cra_driver_name = "qat_aes_cbc_hmac_sha256",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_blocksize = AES_BLOCK_SIZE,
-               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_alg_aead_sha256_init,
-       .exit = qat_alg_aead_exit,
-       .setkey = qat_alg_aead_setkey,
-       .decrypt = qat_alg_aead_dec,
-       .encrypt = qat_alg_aead_enc,
-       .ivsize = AES_BLOCK_SIZE,
-       .maxauthsize = SHA256_DIGEST_SIZE,
-}, {
-       .base = {
-               .cra_name = "authenc(hmac(sha512),cbc(aes))",
-               .cra_driver_name = "qat_aes_cbc_hmac_sha512",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_blocksize = AES_BLOCK_SIZE,
-               .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_alg_aead_sha512_init,
-       .exit = qat_alg_aead_exit,
-       .setkey = qat_alg_aead_setkey,
-       .decrypt = qat_alg_aead_dec,
-       .encrypt = qat_alg_aead_enc,
-       .ivsize = AES_BLOCK_SIZE,
-       .maxauthsize = SHA512_DIGEST_SIZE,
-} };
-
-static struct skcipher_alg qat_skciphers[] = { {
-       .base.cra_name = "cbc(aes)",
-       .base.cra_driver_name = "qat_aes_cbc",
-       .base.cra_priority = 4001,
-       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-       .base.cra_blocksize = AES_BLOCK_SIZE,
-       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-       .base.cra_alignmask = 0,
-       .base.cra_module = THIS_MODULE,
-
-       .init = qat_alg_skcipher_init_tfm,
-       .exit = qat_alg_skcipher_exit_tfm,
-       .setkey = qat_alg_skcipher_cbc_setkey,
-       .decrypt = qat_alg_skcipher_blk_decrypt,
-       .encrypt = qat_alg_skcipher_blk_encrypt,
-       .min_keysize = AES_MIN_KEY_SIZE,
-       .max_keysize = AES_MAX_KEY_SIZE,
-       .ivsize = AES_BLOCK_SIZE,
-}, {
-       .base.cra_name = "ctr(aes)",
-       .base.cra_driver_name = "qat_aes_ctr",
-       .base.cra_priority = 4001,
-       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-       .base.cra_blocksize = 1,
-       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-       .base.cra_alignmask = 0,
-       .base.cra_module = THIS_MODULE,
-
-       .init = qat_alg_skcipher_init_tfm,
-       .exit = qat_alg_skcipher_exit_tfm,
-       .setkey = qat_alg_skcipher_ctr_setkey,
-       .decrypt = qat_alg_skcipher_decrypt,
-       .encrypt = qat_alg_skcipher_encrypt,
-       .min_keysize = AES_MIN_KEY_SIZE,
-       .max_keysize = AES_MAX_KEY_SIZE,
-       .ivsize = AES_BLOCK_SIZE,
-}, {
-       .base.cra_name = "xts(aes)",
-       .base.cra_driver_name = "qat_aes_xts",
-       .base.cra_priority = 4001,
-       .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
-                         CRYPTO_ALG_ALLOCATES_MEMORY,
-       .base.cra_blocksize = AES_BLOCK_SIZE,
-       .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
-       .base.cra_alignmask = 0,
-       .base.cra_module = THIS_MODULE,
-
-       .init = qat_alg_skcipher_init_xts_tfm,
-       .exit = qat_alg_skcipher_exit_xts_tfm,
-       .setkey = qat_alg_skcipher_xts_setkey,
-       .decrypt = qat_alg_skcipher_xts_decrypt,
-       .encrypt = qat_alg_skcipher_xts_encrypt,
-       .min_keysize = 2 * AES_MIN_KEY_SIZE,
-       .max_keysize = 2 * AES_MAX_KEY_SIZE,
-       .ivsize = AES_BLOCK_SIZE,
-} };
-
-int qat_algs_register(void)
-{
-       int ret = 0;
-
-       mutex_lock(&algs_lock);
-       if (++active_devs != 1)
-               goto unlock;
-
-       ret = crypto_register_skciphers(qat_skciphers,
-                                       ARRAY_SIZE(qat_skciphers));
-       if (ret)
-               goto unlock;
-
-       ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
-       if (ret)
-               goto unreg_algs;
-
-unlock:
-       mutex_unlock(&algs_lock);
-       return ret;
-
-unreg_algs:
-       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
-       goto unlock;
-}
-
-void qat_algs_unregister(void)
-{
-       mutex_lock(&algs_lock);
-       if (--active_devs != 0)
-               goto unlock;
-
-       crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
-       crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
-
-unlock:
-       mutex_unlock(&algs_lock);
-}
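-
-/*
- * Registration is reference counted: only the first active device
- * registers the algorithms with the crypto API and only the last one
- * to go away unregisters them, with algs_lock serializing the
- * transitions.
- */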
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
deleted file mode 100644 (file)
index bb80455..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2022 Intel Corporation */
-#include <crypto/algapi.h>
-#include "adf_transport.h"
-#include "qat_algs_send.h"
-#include "qat_crypto.h"
-
-#define ADF_MAX_RETRIES                20
-
-static int qat_alg_send_message_retry(struct qat_alg_req *req)
-{
-       int ret = 0, ctr = 0;
-
-       do {
-               ret = adf_send_message(req->tx_ring, req->fw_req);
-       } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);
-
-       if (ret == -EAGAIN)
-               return -ENOSPC;
-
-       return -EINPROGRESS;
-}
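-
-/*
- * Return convention: even an immediately accepted request reports
- * -EINPROGRESS, since completion is always delivered through the
- * response ring callback; only exhausting ADF_MAX_RETRIES attempts
- * on a full ring surfaces as -ENOSPC.
- */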
-
-void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
-{
-       struct qat_alg_req *req, *tmp;
-
-       spin_lock_bh(&backlog->lock);
-       list_for_each_entry_safe(req, tmp, &backlog->list, list) {
-               if (adf_send_message(req->tx_ring, req->fw_req)) {
-                       /*
-                        * The HW ring is full. Do nothing;
-                        * qat_alg_send_backlog() will be invoked again by
-                        * another callback.
-                        */
-                       break;
-               }
-               list_del(&req->list);
-               crypto_request_complete(req->base, -EINPROGRESS);
-       }
-       spin_unlock_bh(&backlog->lock);
-}
-
-static void qat_alg_backlog_req(struct qat_alg_req *req,
-                               struct qat_instance_backlog *backlog)
-{
-       INIT_LIST_HEAD(&req->list);
-
-       spin_lock_bh(&backlog->lock);
-       list_add_tail(&req->list, &backlog->list);
-       spin_unlock_bh(&backlog->lock);
-}
-
-static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
-{
-       struct qat_instance_backlog *backlog = req->backlog;
-       struct adf_etr_ring_data *tx_ring = req->tx_ring;
-       u32 *fw_req = req->fw_req;
-
-       /* If any request is already backlogged, then add to backlog list */
-       if (!list_empty(&backlog->list))
-               goto enqueue;
-
-       /* If ring is nearly full, then add to backlog list */
-       if (adf_ring_nearly_full(tx_ring))
-               goto enqueue;
-
-       /* If adding request to HW ring fails, then add to backlog list */
-       if (adf_send_message(tx_ring, fw_req))
-               goto enqueue;
-
-       return -EINPROGRESS;
-
-enqueue:
-       qat_alg_backlog_req(req, backlog);
-
-       return -EBUSY;
-}
-
-int qat_alg_send_message(struct qat_alg_req *req)
-{
-       u32 flags = req->base->flags;
-
-       if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
-               return qat_alg_send_message_maybacklog(req);
-       else
-               return qat_alg_send_message_retry(req);
-}
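
qat_alg_send_message_maybacklog() enqueues to the software backlog in three cases: earlier requests are already backlogged (to preserve ordering), the ring is nearly full, or the actual submit fails. A standalone sketch of that decision chain, with a toy ring in place of the ADF transport (demo_ring, demo_submit and the capacity numbers are illustrative assumptions):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	16
#define NEARLY_FULL	(RING_SIZE - 2)	/* illustrative threshold */

struct demo_ring {
	int in_flight;
	int backlogged;
};

static bool demo_ring_nearly_full(const struct demo_ring *r)
{
	return r->in_flight >= NEARLY_FULL;
}

static int demo_submit(struct demo_ring *r)
{
	if (r->in_flight >= RING_SIZE)
		return -1;		/* HW ring full */
	r->in_flight++;
	return 0;
}

/* Mirrors the three "goto enqueue" conditions above. */
static int demo_send_maybacklog(struct demo_ring *r)
{
	if (r->backlogged)		/* keep ordering behind backlog */
		goto enqueue;
	if (demo_ring_nearly_full(r))	/* leave headroom in the ring */
		goto enqueue;
	if (demo_submit(r))		/* actual submit failed */
		goto enqueue;
	return 0;			/* -EINPROGRESS in the driver */

enqueue:
	r->backlogged++;
	return 1;			/* -EBUSY in the driver */
}

int main(void)
{
	struct demo_ring r = { 0, 0 };

	for (int i = 0; i < RING_SIZE; i++)
		printf("req %2d -> %s\n", i,
		       demo_send_maybacklog(&r) ? "backlogged" : "sent");
	return 0;
}

In the driver, returning -EBUSY with the request queued is what tells the crypto API that the request was accepted into the backlog rather than rejected.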
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h
deleted file mode 100644 (file)
index 0baca16..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef QAT_ALGS_SEND_H
-#define QAT_ALGS_SEND_H
-
-#include <linux/list.h>
-#include "adf_transport_internal.h"
-
-struct qat_instance_backlog {
-       struct list_head list;
-       spinlock_t lock; /* protects backlog list */
-};
-
-struct qat_alg_req {
-       u32 *fw_req;
-       struct adf_etr_ring_data *tx_ring;
-       struct crypto_async_request *base;
-       struct list_head list;
-       struct qat_instance_backlog *backlog;
-};
-
-int qat_alg_send_message(struct qat_alg_req *req);
-void qat_alg_send_backlog(struct qat_instance_backlog *backlog);
-
-#endif
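
The two structures above define the whole backlog contract: each request embeds its own list linkage, and the instance-wide backlog is drained in FIFO order so ordering is preserved across resubmission. A standalone sketch of that shape, with a plain singly linked list standing in for the kernel's list_head and the spinlock omitted (all demo_* names are illustrative):

#include <stdio.h>

struct demo_req {
	int id;
	struct demo_req *next;		/* embedded list linkage */
};

struct demo_backlog {
	struct demo_req *head, *tail;	/* FIFO, oldest first */
};

static void backlog_add(struct demo_backlog *b, struct demo_req *r)
{
	r->next = NULL;
	if (b->tail)
		b->tail->next = r;
	else
		b->head = r;
	b->tail = r;
}

/* Drain in submission order, stopping when the "ring" refuses a request. */
static void backlog_drain(struct demo_backlog *b, int ring_slots)
{
	while (b->head && ring_slots--) {
		struct demo_req *r = b->head;

		b->head = r->next;
		if (!b->head)
			b->tail = NULL;
		printf("resubmitted req %d\n", r->id);
	}
}

int main(void)
{
	struct demo_backlog b = { NULL, NULL };
	struct demo_req reqs[3] = { { 1, NULL }, { 2, NULL }, { 3, NULL } };

	for (int i = 0; i < 3; i++)
		backlog_add(&b, &reqs[i]);
	backlog_drain(&b, 2);	/* two ring slots free: req 3 stays queued */
	backlog_drain(&b, 2);	/* next completion drains the rest */
	return 0;
}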
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
deleted file mode 100644 (file)
index 935a7e0..0000000
+++ /dev/null
@@ -1,1309 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <crypto/internal/rsa.h>
-#include <crypto/internal/akcipher.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-#include <crypto/internal/kpp.h>
-#include <crypto/dh.h>
-#include <linux/dma-mapping.h>
-#include <linux/fips.h>
-#include <crypto/scatterwalk.h>
-#include "icp_qat_fw_pke.h"
-#include "adf_accel_devices.h"
-#include "qat_algs_send.h"
-#include "adf_transport.h"
-#include "adf_common_drv.h"
-#include "qat_crypto.h"
-
-static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
-
-struct qat_rsa_input_params {
-       union {
-               struct {
-                       dma_addr_t m;
-                       dma_addr_t e;
-                       dma_addr_t n;
-               } enc;
-               struct {
-                       dma_addr_t c;
-                       dma_addr_t d;
-                       dma_addr_t n;
-               } dec;
-               struct {
-                       dma_addr_t c;
-                       dma_addr_t p;
-                       dma_addr_t q;
-                       dma_addr_t dp;
-                       dma_addr_t dq;
-                       dma_addr_t qinv;
-               } dec_crt;
-               u64 in_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_rsa_output_params {
-       union {
-               struct {
-                       dma_addr_t c;
-               } enc;
-               struct {
-                       dma_addr_t m;
-               } dec;
-               u64 out_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_rsa_ctx {
-       char *n;
-       char *e;
-       char *d;
-       char *p;
-       char *q;
-       char *dp;
-       char *dq;
-       char *qinv;
-       dma_addr_t dma_n;
-       dma_addr_t dma_e;
-       dma_addr_t dma_d;
-       dma_addr_t dma_p;
-       dma_addr_t dma_q;
-       dma_addr_t dma_dp;
-       dma_addr_t dma_dq;
-       dma_addr_t dma_qinv;
-       unsigned int key_sz;
-       bool crt_mode;
-       struct qat_crypto_instance *inst;
-} __packed __aligned(64);
-
-struct qat_dh_input_params {
-       union {
-               struct {
-                       dma_addr_t b;
-                       dma_addr_t xa;
-                       dma_addr_t p;
-               } in;
-               struct {
-                       dma_addr_t xa;
-                       dma_addr_t p;
-               } in_g2;
-               u64 in_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_dh_output_params {
-       union {
-               dma_addr_t r;
-               u64 out_tab[8];
-       };
-} __packed __aligned(64);
-
-struct qat_dh_ctx {
-       char *g;
-       char *xa;
-       char *p;
-       dma_addr_t dma_g;
-       dma_addr_t dma_xa;
-       dma_addr_t dma_p;
-       unsigned int p_size;
-       bool g2;
-       struct qat_crypto_instance *inst;
-} __packed __aligned(64);
-
-struct qat_asym_request {
-       union {
-               struct qat_rsa_input_params rsa;
-               struct qat_dh_input_params dh;
-       } in;
-       union {
-               struct qat_rsa_output_params rsa;
-               struct qat_dh_output_params dh;
-       } out;
-       dma_addr_t phy_in;
-       dma_addr_t phy_out;
-       char *src_align;
-       char *dst_align;
-       struct icp_qat_fw_pke_request req;
-       union {
-               struct qat_rsa_ctx *rsa;
-               struct qat_dh_ctx *dh;
-       } ctx;
-       union {
-               struct akcipher_request *rsa;
-               struct kpp_request *dh;
-       } areq;
-       int err;
-       void (*cb)(struct icp_qat_fw_pke_resp *resp);
-       struct qat_alg_req alg_req;
-} __aligned(64);
-
-static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
-                                    struct qat_crypto_instance *inst,
-                                    struct crypto_async_request *base)
-{
-       struct qat_alg_req *alg_req = &qat_req->alg_req;
-
-       alg_req->fw_req = (u32 *)&qat_req->req;
-       alg_req->tx_ring = inst->pke_tx;
-       alg_req->base = base;
-       alg_req->backlog = &inst->backlog;
-
-       return qat_alg_send_message(alg_req);
-}
-
-static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
-{
-       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
-       struct kpp_request *areq = req->areq.dh;
-       struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
-       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-                               resp->pke_resp_hdr.comn_resp_flags);
-
-       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
-
-       if (areq->src) {
-               dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
-                                DMA_TO_DEVICE);
-               kfree_sensitive(req->src_align);
-       }
-
-       areq->dst_len = req->ctx.dh->p_size;
-       if (req->dst_align) {
-               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
-                                        areq->dst_len, 1);
-               kfree_sensitive(req->dst_align);
-       }
-
-       dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
-                        DMA_FROM_DEVICE);
-
-       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
-                        DMA_TO_DEVICE);
-       dma_unmap_single(dev, req->phy_out,
-                        sizeof(struct qat_dh_output_params),
-                        DMA_TO_DEVICE);
-
-       kpp_request_complete(areq, err);
-}
-
-#define PKE_DH_1536 0x390c1a49
-#define PKE_DH_G2_1536 0x2e0b1a3e
-#define PKE_DH_2048 0x4d0c1a60
-#define PKE_DH_G2_2048 0x3e0b1a55
-#define PKE_DH_3072 0x510c1a77
-#define PKE_DH_G2_3072 0x3a0b1a6c
-#define PKE_DH_4096 0x690c1a8e
-#define PKE_DH_G2_4096 0x4a0b1a83
-
-static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 1536:
-               return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
-       case 2048:
-               return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
-       case 3072:
-               return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
-       case 4096:
-               return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
-       default:
-               return 0;
-       }
-}
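
qat_dh_fn_id() takes the modulus length in bytes, converts it to bits (len << 3), and maps it to a firmware function ID, returning 0 for unsupported sizes so callers can reject the key. A quick standalone check of that mapping, reusing two of the PKE_DH_* constants above (the reduced switch is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PKE_DH_2048	0x4d0c1a60
#define PKE_DH_G2_2048	0x3e0b1a55

/* Same shape as qat_dh_fn_id(): bytes in, firmware ID (or 0) out. */
static unsigned long dh_fn_id(unsigned int len, bool g2)
{
	switch (len << 3) {
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	default:
		return 0;	/* unsupported size: caller returns -EINVAL */
	}
}

int main(void)
{
	printf("256 bytes, generic g: 0x%lx\n", dh_fn_id(256, false));
	printf("256 bytes, g == 2:    0x%lx\n", dh_fn_id(256, true));
	printf("100 bytes:            0x%lx\n", dh_fn_id(100, false));
	return 0;
}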
-
-static int qat_dh_compute_value(struct kpp_request *req)
-{
-       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       struct qat_asym_request *qat_req =
-                       PTR_ALIGN(kpp_request_ctx(req), 64);
-       struct icp_qat_fw_pke_request *msg = &qat_req->req;
-       gfp_t flags = qat_algs_alloc_flags(&req->base);
-       int n_input_params = 0;
-       u8 *vaddr;
-       int ret;
-
-       if (unlikely(!ctx->xa))
-               return -EINVAL;
-
-       if (req->dst_len < ctx->p_size) {
-               req->dst_len = ctx->p_size;
-               return -EOVERFLOW;
-       }
-
-       if (req->src_len > ctx->p_size)
-               return -EINVAL;
-
-       memset(msg, '\0', sizeof(*msg));
-       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
-                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
-
-       msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
-                                                   !req->src && ctx->g2);
-       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
-               return -EINVAL;
-
-       qat_req->cb = qat_dh_cb;
-       qat_req->ctx.dh = ctx;
-       qat_req->areq.dh = req;
-       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-       msg->pke_hdr.comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
-                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
-
-       /*
-        * If no source is provided, use g as the base
-        */
-       if (req->src) {
-               qat_req->in.dh.in.xa = ctx->dma_xa;
-               qat_req->in.dh.in.p = ctx->dma_p;
-               n_input_params = 3;
-       } else {
-               if (ctx->g2) {
-                       qat_req->in.dh.in_g2.xa = ctx->dma_xa;
-                       qat_req->in.dh.in_g2.p = ctx->dma_p;
-                       n_input_params = 2;
-               } else {
-                       qat_req->in.dh.in.b = ctx->dma_g;
-                       qat_req->in.dh.in.xa = ctx->dma_xa;
-                       qat_req->in.dh.in.p = ctx->dma_p;
-                       n_input_params = 3;
-               }
-       }
-
-       ret = -ENOMEM;
-       if (req->src) {
-               /*
-                * src can be of any size in the valid range, but the HW
-                * expects it to be the same size as the modulus p, so if it
-                * differs we need to allocate a new buffer and copy the src
-                * data into it. Otherwise we just map the user-provided
-                * buffer. Either way the data must end up in a physically
-                * contiguous buffer.
-                */
-               if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
-                       qat_req->src_align = NULL;
-                       vaddr = sg_virt(req->src);
-               } else {
-                       int shift = ctx->p_size - req->src_len;
-
-                       qat_req->src_align = kzalloc(ctx->p_size, flags);
-                       if (unlikely(!qat_req->src_align))
-                               return ret;
-
-                       scatterwalk_map_and_copy(qat_req->src_align + shift,
-                                                req->src, 0, req->src_len, 0);
-
-                       vaddr = qat_req->src_align;
-               }
-
-               qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
-                                                    DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
-                       goto unmap_src;
-       }
-       /*
-        * dst can be of any size in the valid range, but the HW expects it
-        * to be the same size as the modulus p, so if it differs we need to
-        * allocate a new buffer and copy the result back from it when the
-        * request completes. Otherwise we just map the user-provided buffer.
-        * Either way the data must end up in a physically contiguous buffer.
-        */
-       if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
-               qat_req->dst_align = NULL;
-               vaddr = sg_virt(req->dst);
-       } else {
-               qat_req->dst_align = kzalloc(ctx->p_size, flags);
-               if (unlikely(!qat_req->dst_align))
-                       goto unmap_src;
-
-               vaddr = qat_req->dst_align;
-       }
-       qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
-                                          DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
-               goto unmap_dst;
-
-       qat_req->in.dh.in_tab[n_input_params] = 0;
-       qat_req->out.dh.out_tab[1] = 0;
-       /* Mapping in.in.b or in.in_g2.xa is the same */
-       qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
-                                        sizeof(struct qat_dh_input_params),
-                                        DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
-               goto unmap_dst;
-
-       qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
-                                         sizeof(struct qat_dh_output_params),
-                                         DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap_in_params;
-
-       msg->pke_mid.src_data_addr = qat_req->phy_in;
-       msg->pke_mid.dest_data_addr = qat_req->phy_out;
-       msg->pke_mid.opaque = (u64)(__force long)qat_req;
-       msg->input_param_count = n_input_params;
-       msg->output_param_count = 1;
-
-       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
-       if (ret == -ENOSPC)
-               goto unmap_all;
-
-       return ret;
-
-unmap_all:
-       if (!dma_mapping_error(dev, qat_req->phy_out))
-               dma_unmap_single(dev, qat_req->phy_out,
-                                sizeof(struct qat_dh_output_params),
-                                DMA_TO_DEVICE);
-unmap_in_params:
-       if (!dma_mapping_error(dev, qat_req->phy_in))
-               dma_unmap_single(dev, qat_req->phy_in,
-                                sizeof(struct qat_dh_input_params),
-                                DMA_TO_DEVICE);
-unmap_dst:
-       if (!dma_mapping_error(dev, qat_req->out.dh.r))
-               dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
-                                DMA_FROM_DEVICE);
-       kfree_sensitive(qat_req->dst_align);
-unmap_src:
-       if (req->src) {
-               if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
-                       dma_unmap_single(dev, qat_req->in.dh.in.b,
-                                        ctx->p_size,
-                                        DMA_TO_DEVICE);
-               kfree_sensitive(qat_req->src_align);
-       }
-       return ret;
-}
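
When the caller's source buffer is shorter than the modulus, the code above allocates a zeroed buffer of the full size and copies the data at offset shift = p_size - src_len, which left-pads the big-endian operand with zero bytes to the width the firmware expects. A standalone sketch of that padding step (calloc stands in for kzalloc, memcpy for the scatterlist copy):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* 3-byte big-endian value, 8-byte "modulus" width. */
	unsigned char src[3] = { 0x01, 0x02, 0x03 };
	size_t src_len = sizeof(src), p_size = 8;
	size_t shift = p_size - src_len;

	unsigned char *aligned = calloc(1, p_size);	/* zero-filled */
	if (!aligned)
		return 1;

	/* Left-pad: the value lands in the low-order (rightmost) bytes. */
	memcpy(aligned + shift, src, src_len);

	for (size_t i = 0; i < p_size; i++)
		printf("%02x ", aligned[i]);	/* 00 00 00 00 00 01 02 03 */
	putchar('\n');

	free(aligned);
	return 0;
}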
-
-static int qat_dh_check_params_length(unsigned int p_len)
-{
-       switch (p_len) {
-       case 1536:
-       case 2048:
-       case 3072:
-       case 4096:
-               return 0;
-       }
-       return -EINVAL;
-}
-
-static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-
-       if (qat_dh_check_params_length(params->p_size << 3))
-               return -EINVAL;
-
-       ctx->p_size = params->p_size;
-       ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
-       if (!ctx->p)
-               return -ENOMEM;
-       memcpy(ctx->p, params->p, ctx->p_size);
-
-       /* If g equals 2, don't copy it */
-       if (params->g_size == 1 && *(char *)params->g == 0x02) {
-               ctx->g2 = true;
-               return 0;
-       }
-
-       ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
-       if (!ctx->g)
-               return -ENOMEM;
-       memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
-              params->g_size);
-
-       return 0;
-}
-
-static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
-{
-       if (ctx->g) {
-               memset(ctx->g, 0, ctx->p_size);
-               dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
-               ctx->g = NULL;
-       }
-       if (ctx->xa) {
-               memset(ctx->xa, 0, ctx->p_size);
-               dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
-               ctx->xa = NULL;
-       }
-       if (ctx->p) {
-               memset(ctx->p, 0, ctx->p_size);
-               dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
-               ctx->p = NULL;
-       }
-       ctx->p_size = 0;
-       ctx->g2 = false;
-}
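
qat_dh_clear_ctx() zeroes each DMA buffer before freeing it so key material does not linger in memory, the same intent served by kfree_sensitive() elsewhere in this file. In userspace the analogous pitfall is that a plain memset before free() can be optimized away; below is a hedged sketch using a volatile pointer as one common mitigation (an illustration of the idea, not the kernel's implementation):

#include <stdlib.h>
#include <string.h>

/* Best-effort secure zero: write through a volatile pointer so the
 * compiler cannot prove the stores are dead and delete them. */
static void secure_zero(void *p, size_t n)
{
	volatile unsigned char *v = p;

	while (n--)
		*v++ = 0;
}

static void clear_secret(unsigned char **buf, size_t len)
{
	if (*buf) {
		secure_zero(*buf, len);
		free(*buf);
		*buf = NULL;	/* match the ctx->p = NULL style above */
	}
}

int main(void)
{
	unsigned char *key = malloc(32);

	if (!key)
		return 1;
	memset(key, 0xaa, 32);	/* pretend this is secret material */
	clear_secret(&key, 32);
	return 0;
}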
-
-static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
-                            unsigned int len)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-       struct dh params;
-       int ret;
-
-       if (crypto_dh_decode_key(buf, len, &params) < 0)
-               return -EINVAL;
-
-       /* Free old secret if any */
-       qat_dh_clear_ctx(dev, ctx);
-
-       ret = qat_dh_set_params(ctx, &params);
-       if (ret < 0)
-               goto err_clear_ctx;
-
-       ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
-                                    GFP_KERNEL);
-       if (!ctx->xa) {
-               ret = -ENOMEM;
-               goto err_clear_ctx;
-       }
-       memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
-              params.key_size);
-
-       return 0;
-
-err_clear_ctx:
-       qat_dh_clear_ctx(dev, ctx);
-       return ret;
-}
-
-static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-
-       return ctx->p_size;
-}
-
-static int qat_dh_init_tfm(struct crypto_kpp *tfm)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst =
-                       qat_crypto_get_instance_node(numa_node_id());
-
-       if (!inst)
-               return -EINVAL;
-
-       kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
-
-       ctx->p_size = 0;
-       ctx->g2 = false;
-       ctx->inst = inst;
-       return 0;
-}
-
-static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
-{
-       struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-
-       qat_dh_clear_ctx(dev, ctx);
-       qat_crypto_put_instance(ctx->inst);
-}
-
-static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
-{
-       struct qat_asym_request *req = (void *)(__force long)resp->opaque;
-       struct akcipher_request *areq = req->areq.rsa;
-       struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
-       int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
-                               resp->pke_resp_hdr.comn_resp_flags);
-
-       err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
-
-       kfree_sensitive(req->src_align);
-
-       dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
-                        DMA_TO_DEVICE);
-
-       areq->dst_len = req->ctx.rsa->key_sz;
-       if (req->dst_align) {
-               scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
-                                        areq->dst_len, 1);
-
-               kfree_sensitive(req->dst_align);
-       }
-
-       dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-                        DMA_FROM_DEVICE);
-
-       dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
-                        DMA_TO_DEVICE);
-       dma_unmap_single(dev, req->phy_out,
-                        sizeof(struct qat_rsa_output_params),
-                        DMA_TO_DEVICE);
-
-       akcipher_request_complete(areq, err);
-}
-
-void qat_alg_asym_callback(void *_resp)
-{
-       struct icp_qat_fw_pke_resp *resp = _resp;
-       struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
-       struct qat_instance_backlog *backlog = areq->alg_req.backlog;
-
-       areq->cb(resp);
-
-       qat_alg_send_backlog(backlog);
-}
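
The completion path works because the submit side stashes the request pointer in the firmware message's 64-bit opaque field (msg->pke_mid.opaque) and the callback recovers it from resp->opaque, then dispatches through the per-request req->cb. A standalone sketch of that round-trip using uintptr_t, the portable equivalent of the (u64)(__force long) casts in this file (demo_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct demo_req {
	int id;
	void (*cb)(struct demo_req *req, int err);
};

static void demo_cb(struct demo_req *req, int err)
{
	printf("request %d completed, err=%d\n", req->id, err);
}

int main(void)
{
	struct demo_req req = { .id = 42, .cb = demo_cb };

	/* Submit side: hide the pointer in a 64-bit opaque cookie. */
	uint64_t opaque = (uint64_t)(uintptr_t)&req;

	/* Completion side: recover the pointer and dispatch. */
	struct demo_req *back = (struct demo_req *)(uintptr_t)opaque;
	back->cb(back, 0);

	return 0;
}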
-
-#define PKE_RSA_EP_512 0x1c161b21
-#define PKE_RSA_EP_1024 0x35111bf7
-#define PKE_RSA_EP_1536 0x4d111cdc
-#define PKE_RSA_EP_2048 0x6e111dba
-#define PKE_RSA_EP_3072 0x7d111ea3
-#define PKE_RSA_EP_4096 0xa5101f7e
-
-static unsigned long qat_rsa_enc_fn_id(unsigned int len)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 512:
-               return PKE_RSA_EP_512;
-       case 1024:
-               return PKE_RSA_EP_1024;
-       case 1536:
-               return PKE_RSA_EP_1536;
-       case 2048:
-               return PKE_RSA_EP_2048;
-       case 3072:
-               return PKE_RSA_EP_3072;
-       case 4096:
-               return PKE_RSA_EP_4096;
-       default:
-               return 0;
-       }
-}
-
-#define PKE_RSA_DP1_512 0x1c161b3c
-#define PKE_RSA_DP1_1024 0x35111c12
-#define PKE_RSA_DP1_1536 0x4d111cf7
-#define PKE_RSA_DP1_2048 0x6e111dda
-#define PKE_RSA_DP1_3072 0x7d111ebe
-#define PKE_RSA_DP1_4096 0xa5101f98
-
-static unsigned long qat_rsa_dec_fn_id(unsigned int len)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 512:
-               return PKE_RSA_DP1_512;
-       case 1024:
-               return PKE_RSA_DP1_1024;
-       case 1536:
-               return PKE_RSA_DP1_1536;
-       case 2048:
-               return PKE_RSA_DP1_2048;
-       case 3072:
-               return PKE_RSA_DP1_3072;
-       case 4096:
-               return PKE_RSA_DP1_4096;
-       default:
-               return 0;
-       }
-}
-
-#define PKE_RSA_DP2_512 0x1c131b57
-#define PKE_RSA_DP2_1024 0x26131c2d
-#define PKE_RSA_DP2_1536 0x45111d12
-#define PKE_RSA_DP2_2048 0x59121dfa
-#define PKE_RSA_DP2_3072 0x81121ed9
-#define PKE_RSA_DP2_4096 0xb1111fb2
-
-static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
-{
-       unsigned int bitslen = len << 3;
-
-       switch (bitslen) {
-       case 512:
-               return PKE_RSA_DP2_512;
-       case 1024:
-               return PKE_RSA_DP2_1024;
-       case 1536:
-               return PKE_RSA_DP2_1536;
-       case 2048:
-               return PKE_RSA_DP2_2048;
-       case 3072:
-               return PKE_RSA_DP2_3072;
-       case 4096:
-               return PKE_RSA_DP2_4096;
-       default:
-               return 0;
-       }
-}
-
-static int qat_rsa_enc(struct akcipher_request *req)
-{
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       struct qat_asym_request *qat_req =
-                       PTR_ALIGN(akcipher_request_ctx(req), 64);
-       struct icp_qat_fw_pke_request *msg = &qat_req->req;
-       gfp_t flags = qat_algs_alloc_flags(&req->base);
-       u8 *vaddr;
-       int ret;
-
-       if (unlikely(!ctx->n || !ctx->e))
-               return -EINVAL;
-
-       if (req->dst_len < ctx->key_sz) {
-               req->dst_len = ctx->key_sz;
-               return -EOVERFLOW;
-       }
-
-       if (req->src_len > ctx->key_sz)
-               return -EINVAL;
-
-       memset(msg, '\0', sizeof(*msg));
-       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
-                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
-       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
-               return -EINVAL;
-
-       qat_req->cb = qat_rsa_cb;
-       qat_req->ctx.rsa = ctx;
-       qat_req->areq.rsa = req;
-       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-       msg->pke_hdr.comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
-                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
-
-       qat_req->in.rsa.enc.e = ctx->dma_e;
-       qat_req->in.rsa.enc.n = ctx->dma_n;
-       ret = -ENOMEM;
-
-       /*
-        * src can be of any size in the valid range, but the HW expects it
-        * to be the same size as the modulus n, so if it differs we need to
-        * allocate a new buffer and copy the src data into it. Otherwise we
-        * just map the user-provided buffer. Either way the data must end up
-        * in a physically contiguous buffer.
-        */
-       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
-               qat_req->src_align = NULL;
-               vaddr = sg_virt(req->src);
-       } else {
-               int shift = ctx->key_sz - req->src_len;
-
-               qat_req->src_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->src_align))
-                       return ret;
-
-               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
-                                        0, req->src_len, 0);
-               vaddr = qat_req->src_align;
-       }
-
-       qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
-                                              DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
-               goto unmap_src;
-
-       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-               qat_req->dst_align = NULL;
-               vaddr = sg_virt(req->dst);
-       } else {
-               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->dst_align))
-                       goto unmap_src;
-               vaddr = qat_req->dst_align;
-       }
-
-       qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
-                                               DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
-               goto unmap_dst;
-
-       qat_req->in.rsa.in_tab[3] = 0;
-       qat_req->out.rsa.out_tab[1] = 0;
-       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
-                                        sizeof(struct qat_rsa_input_params),
-                                        DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
-               goto unmap_dst;
-
-       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
-                                         sizeof(struct qat_rsa_output_params),
-                                         DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap_in_params;
-
-       msg->pke_mid.src_data_addr = qat_req->phy_in;
-       msg->pke_mid.dest_data_addr = qat_req->phy_out;
-       msg->pke_mid.opaque = (u64)(__force long)qat_req;
-       msg->input_param_count = 3;
-       msg->output_param_count = 1;
-
-       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
-       if (ret == -ENOSPC)
-               goto unmap_all;
-
-       return ret;
-
-unmap_all:
-       if (!dma_mapping_error(dev, qat_req->phy_out))
-               dma_unmap_single(dev, qat_req->phy_out,
-                                sizeof(struct qat_rsa_output_params),
-                                DMA_TO_DEVICE);
-unmap_in_params:
-       if (!dma_mapping_error(dev, qat_req->phy_in))
-               dma_unmap_single(dev, qat_req->phy_in,
-                                sizeof(struct qat_rsa_input_params),
-                                DMA_TO_DEVICE);
-unmap_dst:
-       if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
-               dma_unmap_single(dev, qat_req->out.rsa.enc.c,
-                                ctx->key_sz, DMA_FROM_DEVICE);
-       kfree_sensitive(qat_req->dst_align);
-unmap_src:
-       if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
-               dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
-                                DMA_TO_DEVICE);
-       kfree_sensitive(qat_req->src_align);
-       return ret;
-}
-
-static int qat_rsa_dec(struct akcipher_request *req)
-{
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       struct qat_asym_request *qat_req =
-                       PTR_ALIGN(akcipher_request_ctx(req), 64);
-       struct icp_qat_fw_pke_request *msg = &qat_req->req;
-       gfp_t flags = qat_algs_alloc_flags(&req->base);
-       u8 *vaddr;
-       int ret;
-
-       if (unlikely(!ctx->n || !ctx->d))
-               return -EINVAL;
-
-       if (req->dst_len < ctx->key_sz) {
-               req->dst_len = ctx->key_sz;
-               return -EOVERFLOW;
-       }
-
-       if (req->src_len > ctx->key_sz)
-               return -EINVAL;
-
-       memset(msg, '\0', sizeof(*msg));
-       ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
-                                         ICP_QAT_FW_COMN_REQ_FLAG_SET);
-       msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
-               qat_rsa_dec_fn_id_crt(ctx->key_sz) :
-               qat_rsa_dec_fn_id(ctx->key_sz);
-       if (unlikely(!msg->pke_hdr.cd_pars.func_id))
-               return -EINVAL;
-
-       qat_req->cb = qat_rsa_cb;
-       qat_req->ctx.rsa = ctx;
-       qat_req->areq.rsa = req;
-       msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
-       msg->pke_hdr.comn_req_flags =
-               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
-                                           QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
-
-       if (ctx->crt_mode) {
-               qat_req->in.rsa.dec_crt.p = ctx->dma_p;
-               qat_req->in.rsa.dec_crt.q = ctx->dma_q;
-               qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
-               qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
-               qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
-       } else {
-               qat_req->in.rsa.dec.d = ctx->dma_d;
-               qat_req->in.rsa.dec.n = ctx->dma_n;
-       }
-       ret = -ENOMEM;
-
-       /*
-        * src can be of any size in the valid range, but the HW expects it
-        * to be the same size as the modulus n, so if it differs we need to
-        * allocate a new buffer and copy the src data into it. Otherwise we
-        * just map the user-provided buffer. Either way the data must end up
-        * in a physically contiguous buffer.
-        */
-       if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
-               qat_req->src_align = NULL;
-               vaddr = sg_virt(req->src);
-       } else {
-               int shift = ctx->key_sz - req->src_len;
-
-               qat_req->src_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->src_align))
-                       return ret;
-
-               scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
-                                        0, req->src_len, 0);
-               vaddr = qat_req->src_align;
-       }
-
-       qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
-                                              DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
-               goto unmap_src;
-
-       if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-               qat_req->dst_align = NULL;
-               vaddr = sg_virt(req->dst);
-       } else {
-               qat_req->dst_align = kzalloc(ctx->key_sz, flags);
-               if (unlikely(!qat_req->dst_align))
-                       goto unmap_src;
-               vaddr = qat_req->dst_align;
-       }
-       qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
-                                               DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
-               goto unmap_dst;
-
-       if (ctx->crt_mode)
-               qat_req->in.rsa.in_tab[6] = 0;
-       else
-               qat_req->in.rsa.in_tab[3] = 0;
-       qat_req->out.rsa.out_tab[1] = 0;
-       qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
-                                        sizeof(struct qat_rsa_input_params),
-                                        DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
-               goto unmap_dst;
-
-       qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
-                                         sizeof(struct qat_rsa_output_params),
-                                         DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
-               goto unmap_in_params;
-
-       msg->pke_mid.src_data_addr = qat_req->phy_in;
-       msg->pke_mid.dest_data_addr = qat_req->phy_out;
-       msg->pke_mid.opaque = (u64)(__force long)qat_req;
-       if (ctx->crt_mode)
-               msg->input_param_count = 6;
-       else
-               msg->input_param_count = 3;
-
-       msg->output_param_count = 1;
-
-       ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
-       if (ret == -ENOSPC)
-               goto unmap_all;
-
-       return ret;
-
-unmap_all:
-       if (!dma_mapping_error(dev, qat_req->phy_out))
-               dma_unmap_single(dev, qat_req->phy_out,
-                                sizeof(struct qat_rsa_output_params),
-                                DMA_TO_DEVICE);
-unmap_in_params:
-       if (!dma_mapping_error(dev, qat_req->phy_in))
-               dma_unmap_single(dev, qat_req->phy_in,
-                                sizeof(struct qat_rsa_input_params),
-                                DMA_TO_DEVICE);
-unmap_dst:
-       if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
-               dma_unmap_single(dev, qat_req->out.rsa.dec.m,
-                                ctx->key_sz, DMA_FROM_DEVICE);
-       kfree_sensitive(qat_req->dst_align);
-unmap_src:
-       if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
-               dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
-                                DMA_TO_DEVICE);
-       kfree_sensitive(qat_req->src_align);
-       return ret;
-}
-
-static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
-                        size_t vlen)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr = value;
-       int ret;
-
-       while (!*ptr && vlen) {
-               ptr++;
-               vlen--;
-       }
-
-       ctx->key_sz = vlen;
-       ret = -EINVAL;
-       /* invalid key size provided */
-       if (!qat_rsa_enc_fn_id(ctx->key_sz))
-               goto err;
-
-       ret = -ENOMEM;
-       ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
-       if (!ctx->n)
-               goto err;
-
-       memcpy(ctx->n, ptr, ctx->key_sz);
-       return 0;
-err:
-       ctx->key_sz = 0;
-       ctx->n = NULL;
-       return ret;
-}
-
-static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
-                        size_t vlen)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr = value;
-
-       while (!*ptr && vlen) {
-               ptr++;
-               vlen--;
-       }
-
-       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
-               ctx->e = NULL;
-               return -EINVAL;
-       }
-
-       ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
-       if (!ctx->e)
-               return -ENOMEM;
-
-       memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
-       return 0;
-}
-
-static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
-                        size_t vlen)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr = value;
-       int ret;
-
-       while (!*ptr && vlen) {
-               ptr++;
-               vlen--;
-       }
-
-       ret = -EINVAL;
-       if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
-               goto err;
-
-       ret = -ENOMEM;
-       ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
-       if (!ctx->d)
-               goto err;
-
-       memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
-       return 0;
-err:
-       ctx->d = NULL;
-       return ret;
-}
-
-static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
-{
-       while (!**ptr && *len) {
-               (*ptr)++;
-               (*len)--;
-       }
-}
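
rsa_parse_priv_key()/rsa_parse_pub_key() hand back raw big-endian integers that may carry leading zero bytes, so the setters strip them before sizing buffers; qat_rsa_drop_leading_zeros() is the pointer-and-length form of the inline loops in qat_rsa_set_n/e/d. A standalone copy with a small test:

#include <stdio.h>

static void drop_leading_zeros(const unsigned char **ptr, unsigned int *len)
{
	while (*len && !**ptr) {
		(*ptr)++;
		(*len)--;
	}
}

int main(void)
{
	unsigned char e[] = { 0x00, 0x00, 0x01, 0x00, 0x01 };	/* e = 65537 */
	const unsigned char *p = e;
	unsigned int len = sizeof(e);

	drop_leading_zeros(&p, &len);
	printf("%u significant bytes:", len);	/* 3 */
	for (unsigned int i = 0; i < len; i++)
		printf(" %02x", p[i]);
	putchar('\n');
	return 0;
}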
-
-static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
-{
-       struct qat_crypto_instance *inst = ctx->inst;
-       struct device *dev = &GET_DEV(inst->accel_dev);
-       const char *ptr;
-       unsigned int len;
-       unsigned int half_key_sz = ctx->key_sz / 2;
-
-       /* p */
-       ptr = rsa_key->p;
-       len = rsa_key->p_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto err;
-       ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
-       if (!ctx->p)
-               goto err;
-       memcpy(ctx->p + (half_key_sz - len), ptr, len);
-
-       /* q */
-       ptr = rsa_key->q;
-       len = rsa_key->q_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_p;
-       ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
-       if (!ctx->q)
-               goto free_p;
-       memcpy(ctx->q + (half_key_sz - len), ptr, len);
-
-       /* dp */
-       ptr = rsa_key->dp;
-       len = rsa_key->dp_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_q;
-       ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
-                                    GFP_KERNEL);
-       if (!ctx->dp)
-               goto free_q;
-       memcpy(ctx->dp + (half_key_sz - len), ptr, len);
-
-       /* dq */
-       ptr = rsa_key->dq;
-       len = rsa_key->dq_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_dp;
-       ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
-                                    GFP_KERNEL);
-       if (!ctx->dq)
-               goto free_dp;
-       memcpy(ctx->dq + (half_key_sz - len), ptr, len);
-
-       /* qinv */
-       ptr = rsa_key->qinv;
-       len = rsa_key->qinv_sz;
-       qat_rsa_drop_leading_zeros(&ptr, &len);
-       if (!len)
-               goto free_dq;
-       ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
-                                      GFP_KERNEL);
-       if (!ctx->qinv)
-               goto free_dq;
-       memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
-
-       ctx->crt_mode = true;
-       return;
-
-free_dq:
-       memset(ctx->dq, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
-       ctx->dq = NULL;
-free_dp:
-       memset(ctx->dp, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
-       ctx->dp = NULL;
-free_q:
-       memset(ctx->q, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
-       ctx->q = NULL;
-free_p:
-       memset(ctx->p, '\0', half_key_sz);
-       dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
-       ctx->p = NULL;
-err:
-       ctx->crt_mode = false;
-}
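
qat_rsa_setkey_crt() allocates the five CRT parameters (p, q, dp, dq, qinv), each half the key size, and unwinds in reverse order through cascading labels if any step fails, landing on crt_mode = false so decryption falls back to the plain d/n path. A standalone sketch of that cascading-goto unwind, reduced to three stages with malloc in place of dma_alloc_coherent (demo_* names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	unsigned char *p, *q, *dp;
	int crt_mode;
};

static void demo_setkey_crt(struct demo_ctx *ctx, size_t half_key_sz)
{
	ctx->p = malloc(half_key_sz);
	if (!ctx->p)
		goto err;
	ctx->q = malloc(half_key_sz);
	if (!ctx->q)
		goto free_p;
	ctx->dp = malloc(half_key_sz);
	if (!ctx->dp)
		goto free_q;

	ctx->crt_mode = 1;	/* every parameter present: use CRT */
	return;

	/* Unwind newest-first, mirroring the free_dq/.../free_p chain. */
free_q:
	free(ctx->q);
	ctx->q = NULL;
free_p:
	free(ctx->p);
	ctx->p = NULL;
err:
	ctx->crt_mode = 0;	/* fall back to the plain d/n path */
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	demo_setkey_crt(&ctx, 128);
	printf("crt_mode = %d\n", ctx.crt_mode);
	free(ctx.p);
	free(ctx.q);
	free(ctx.dp);
	return 0;
}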
-
-static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
-{
-       unsigned int half_key_sz = ctx->key_sz / 2;
-
-       /* Free the old key if any */
-       if (ctx->n)
-               dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
-       if (ctx->e)
-               dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
-       if (ctx->d) {
-               memset(ctx->d, '\0', ctx->key_sz);
-               dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
-       }
-       if (ctx->p) {
-               memset(ctx->p, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
-       }
-       if (ctx->q) {
-               memset(ctx->q, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
-       }
-       if (ctx->dp) {
-               memset(ctx->dp, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
-       }
-       if (ctx->dq) {
-               memset(ctx->dq, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
-       }
-       if (ctx->qinv) {
-               memset(ctx->qinv, '\0', half_key_sz);
-               dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
-       }
-
-       ctx->n = NULL;
-       ctx->e = NULL;
-       ctx->d = NULL;
-       ctx->p = NULL;
-       ctx->q = NULL;
-       ctx->dp = NULL;
-       ctx->dq = NULL;
-       ctx->qinv = NULL;
-       ctx->crt_mode = false;
-       ctx->key_sz = 0;
-}
-
-static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
-                         unsigned int keylen, bool private)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-       struct rsa_key rsa_key;
-       int ret;
-
-       qat_rsa_clear_ctx(dev, ctx);
-
-       if (private)
-               ret = rsa_parse_priv_key(&rsa_key, key, keylen);
-       else
-               ret = rsa_parse_pub_key(&rsa_key, key, keylen);
-       if (ret < 0)
-               goto free;
-
-       ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
-       if (ret < 0)
-               goto free;
-       ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
-       if (ret < 0)
-               goto free;
-       if (private) {
-               ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
-               if (ret < 0)
-                       goto free;
-               qat_rsa_setkey_crt(ctx, &rsa_key);
-       }
-
-       if (!ctx->n || !ctx->e) {
-               /* invalid key provided */
-               ret = -EINVAL;
-               goto free;
-       }
-       if (private && !ctx->d) {
-               /* invalid private key provided */
-               ret = -EINVAL;
-               goto free;
-       }
-
-       return 0;
-free:
-       qat_rsa_clear_ctx(dev, ctx);
-       return ret;
-}
-
-static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
-                            unsigned int keylen)
-{
-       return qat_rsa_setkey(tfm, key, keylen, false);
-}
-
-static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
-                             unsigned int keylen)
-{
-       return qat_rsa_setkey(tfm, key, keylen, true);
-}
-
-static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-
-       return ctx->key_sz;
-}
-
-static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct qat_crypto_instance *inst =
-                       qat_crypto_get_instance_node(numa_node_id());
-
-       if (!inst)
-               return -EINVAL;
-
-       akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
-
-       ctx->key_sz = 0;
-       ctx->inst = inst;
-       return 0;
-}
-
-static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
-{
-       struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-       struct device *dev = &GET_DEV(ctx->inst->accel_dev);
-
-       qat_rsa_clear_ctx(dev, ctx);
-       qat_crypto_put_instance(ctx->inst);
-}
-
-static struct akcipher_alg rsa = {
-       .encrypt = qat_rsa_enc,
-       .decrypt = qat_rsa_dec,
-       .set_pub_key = qat_rsa_setpubkey,
-       .set_priv_key = qat_rsa_setprivkey,
-       .max_size = qat_rsa_max_size,
-       .init = qat_rsa_init_tfm,
-       .exit = qat_rsa_exit_tfm,
-       .base = {
-               .cra_name = "rsa",
-               .cra_driver_name = "qat-rsa",
-               .cra_priority = 1000,
-               .cra_module = THIS_MODULE,
-               .cra_ctxsize = sizeof(struct qat_rsa_ctx),
-       },
-};
-
-static struct kpp_alg dh = {
-       .set_secret = qat_dh_set_secret,
-       .generate_public_key = qat_dh_compute_value,
-       .compute_shared_secret = qat_dh_compute_value,
-       .max_size = qat_dh_max_size,
-       .init = qat_dh_init_tfm,
-       .exit = qat_dh_exit_tfm,
-       .base = {
-               .cra_name = "dh",
-               .cra_driver_name = "qat-dh",
-               .cra_priority = 1000,
-               .cra_module = THIS_MODULE,
-               .cra_ctxsize = sizeof(struct qat_dh_ctx),
-       },
-};
-
-int qat_asym_algs_register(void)
-{
-       int ret = 0;
-
-       mutex_lock(&algs_lock);
-       if (++active_devs == 1) {
-               rsa.base.cra_flags = 0;
-               ret = crypto_register_akcipher(&rsa);
-               if (ret)
-                       goto unlock;
-               ret = crypto_register_kpp(&dh);
-       }
-unlock:
-       mutex_unlock(&algs_lock);
-       return ret;
-}
-
-void qat_asym_algs_unregister(void)
-{
-       mutex_lock(&algs_lock);
-       if (--active_devs == 0) {
-               crypto_unregister_akcipher(&rsa);
-               crypto_unregister_kpp(&dh);
-       }
-       mutex_unlock(&algs_lock);
-}
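
Both algorithms register with cra_priority = 1000 so the crypto API prefers the QAT implementations over software fallbacks whenever a device is present. A toy sketch of priority-based selection over a table of implementations (the table contents, including the software priorities, are illustrative assumptions):

#include <stdio.h>
#include <string.h>

struct demo_alg {
	const char *cra_name;
	const char *cra_driver_name;
	int cra_priority;
};

static const struct demo_alg algs[] = {
	{ "rsa", "rsa-generic", 100 },
	{ "rsa", "qat-rsa",     1000 },
	{ "dh",  "dh-generic",  100 },
	{ "dh",  "qat-dh",      1000 },
};

/* Pick the highest-priority implementation of a named algorithm. */
static const struct demo_alg *demo_lookup(const char *name)
{
	const struct demo_alg *best = NULL;

	for (size_t i = 0; i < sizeof(algs) / sizeof(algs[0]); i++) {
		if (strcmp(algs[i].cra_name, name))
			continue;
		if (!best || algs[i].cra_priority > best->cra_priority)
			best = &algs[i];
	}
	return best;
}

int main(void)
{
	printf("rsa -> %s\n", demo_lookup("rsa")->cra_driver_name);
	printf("dh  -> %s\n", demo_lookup("dh")->cra_driver_name);
	return 0;
}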
diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c
deleted file mode 100644 (file)
index 76baed0..0000000
+++ /dev/null
@@ -1,410 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2014 - 2022 Intel Corporation */
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "qat_bl.h"
-#include "qat_crypto.h"
-
-void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
-                     struct qat_request_buffs *buf)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       struct qat_alg_buf_list *bl = buf->bl;
-       struct qat_alg_buf_list *blout = buf->blout;
-       dma_addr_t blp = buf->blp;
-       dma_addr_t blpout = buf->bloutp;
-       size_t sz = buf->sz;
-       size_t sz_out = buf->sz_out;
-       int bl_dma_dir;
-       int i;
-
-       bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
-       for (i = 0; i < bl->num_bufs; i++)
-               dma_unmap_single(dev, bl->buffers[i].addr,
-                                bl->buffers[i].len, bl_dma_dir);
-
-       dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-
-       if (!buf->sgl_src_valid)
-               kfree(bl);
-
-       if (blp != blpout) {
-               for (i = 0; i < blout->num_mapped_bufs; i++) {
-                       dma_unmap_single(dev, blout->buffers[i].addr,
-                                        blout->buffers[i].len,
-                                        DMA_FROM_DEVICE);
-               }
-               dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-
-               if (!buf->sgl_dst_valid)
-                       kfree(blout);
-       }
-}
-
-static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-                               struct scatterlist *sgl,
-                               struct scatterlist *sglout,
-                               struct qat_request_buffs *buf,
-                               dma_addr_t extra_dst_buff,
-                               size_t sz_extra_dst_buff,
-                               unsigned int sskip,
-                               unsigned int dskip,
-                               gfp_t flags)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       int i, sg_nctr = 0;
-       int n = sg_nents(sgl);
-       struct qat_alg_buf_list *bufl;
-       struct qat_alg_buf_list *buflout = NULL;
-       dma_addr_t blp = DMA_MAPPING_ERROR;
-       dma_addr_t bloutp = DMA_MAPPING_ERROR;
-       struct scatterlist *sg;
-       size_t sz_out, sz = struct_size(bufl, buffers, n);
-       int node = dev_to_node(&GET_DEV(accel_dev));
-       unsigned int left;
-       int bufl_dma_dir;
-
-       if (unlikely(!n))
-               return -EINVAL;
-
-       buf->sgl_src_valid = false;
-       buf->sgl_dst_valid = false;
-
-       if (n > QAT_MAX_BUFF_DESC) {
-               bufl = kzalloc_node(sz, flags, node);
-               if (unlikely(!bufl))
-                       return -ENOMEM;
-       } else {
-               bufl = &buf->sgl_src.sgl_hdr;
-               memset(bufl, 0, sizeof(struct qat_alg_buf_list));
-               buf->sgl_src_valid = true;
-       }
-
-       bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
-       for (i = 0; i < n; i++)
-               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
-
-       left = sskip;
-
-       for_each_sg(sgl, sg, n, i) {
-               int y = sg_nctr;
-
-               if (!sg->length)
-                       continue;
-
-               if (left >= sg->length) {
-                       left -= sg->length;
-                       continue;
-               }
-               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
-                                                      sg->length - left,
-                                                      bufl_dma_dir);
-               bufl->buffers[y].len = sg->length;
-               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
-                       goto err_in;
-               sg_nctr++;
-               if (left) {
-                       bufl->buffers[y].len -= left;
-                       left = 0;
-               }
-       }
-       bufl->num_bufs = sg_nctr;
-       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, blp)))
-               goto err_in;
-       buf->bl = bufl;
-       buf->blp = blp;
-       buf->sz = sz;
-       /* Handle out of place operation */
-       if (sgl != sglout) {
-               struct qat_alg_buf *buffers;
-               int extra_buff = extra_dst_buff ? 1 : 0;
-               int n_sglout = sg_nents(sglout);
-
-               n = n_sglout + extra_buff;
-               sz_out = struct_size(buflout, buffers, n);
-               left = dskip;
-
-               sg_nctr = 0;
-
-               if (n > QAT_MAX_BUFF_DESC) {
-                       buflout = kzalloc_node(sz_out, flags, node);
-                       if (unlikely(!buflout))
-                               goto err_in;
-               } else {
-                       buflout = &buf->sgl_dst.sgl_hdr;
-                       memset(buflout, 0, sizeof(struct qat_alg_buf_list));
-                       buf->sgl_dst_valid = true;
-               }
-
-               buffers = buflout->buffers;
-               for (i = 0; i < n; i++)
-                       buffers[i].addr = DMA_MAPPING_ERROR;
-
-               for_each_sg(sglout, sg, n_sglout, i) {
-                       int y = sg_nctr;
-
-                       if (!sg->length)
-                               continue;
-
-                       if (left >= sg->length) {
-                               left -= sg->length;
-                               continue;
-                       }
-                       buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
-                                                        sg->length - left,
-                                                        DMA_FROM_DEVICE);
-                       if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
-                               goto err_out;
-                       buffers[y].len = sg->length;
-                       sg_nctr++;
-                       if (left) {
-                               buffers[y].len -= left;
-                               left = 0;
-                       }
-               }
-               if (extra_buff) {
-                       buffers[sg_nctr].addr = extra_dst_buff;
-                       buffers[sg_nctr].len = sz_extra_dst_buff;
-               }
-
-               buflout->num_bufs = sg_nctr;
-               buflout->num_bufs += extra_buff;
-               buflout->num_mapped_bufs = sg_nctr;
-               bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(dev, bloutp)))
-                       goto err_out;
-               buf->blout = buflout;
-               buf->bloutp = bloutp;
-               buf->sz_out = sz_out;
-       } else {
-               /* Otherwise set the src and dst to the same address */
-               buf->bloutp = buf->blp;
-               buf->sz_out = 0;
-       }
-       return 0;
-
-err_out:
-       if (!dma_mapping_error(dev, bloutp))
-               dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
-
-       n = sg_nents(sglout);
-       for (i = 0; i < n; i++) {
-               if (buflout->buffers[i].addr == extra_dst_buff)
-                       break;
-               if (!dma_mapping_error(dev, buflout->buffers[i].addr))
-                       dma_unmap_single(dev, buflout->buffers[i].addr,
-                                        buflout->buffers[i].len,
-                                        DMA_FROM_DEVICE);
-       }
-
-       if (!buf->sgl_dst_valid)
-               kfree(buflout);
-
-err_in:
-       if (!dma_mapping_error(dev, blp))
-               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-
-       n = sg_nents(sgl);
-       for (i = 0; i < n; i++)
-               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
-                       dma_unmap_single(dev, bufl->buffers[i].addr,
-                                        bufl->buffers[i].len,
-                                        bufl_dma_dir);
-
-       if (!buf->sgl_src_valid)
-               kfree(bufl);
-
-       dev_err(dev, "Failed to map buf for dma\n");
-       return -ENOMEM;
-}
-
-int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-                      struct scatterlist *sgl,
-                      struct scatterlist *sglout,
-                      struct qat_request_buffs *buf,
-                      struct qat_sgl_to_bufl_params *params,
-                      gfp_t flags)
-{
-       dma_addr_t extra_dst_buff = 0;
-       size_t sz_extra_dst_buff = 0;
-       unsigned int sskip = 0;
-       unsigned int dskip = 0;
-
-       if (params) {
-               extra_dst_buff = params->extra_dst_buff;
-               sz_extra_dst_buff = params->sz_extra_dst_buff;
-               sskip = params->sskip;
-               dskip = params->dskip;
-       }
-
-       return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
-                                   extra_dst_buff, sz_extra_dst_buff,
-                                   sskip, dskip, flags);
-}
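
qat_bl_sgl_to_bufl() keeps its signature stable by taking an optional qat_sgl_to_bufl_params pointer: passing NULL means no extra destination buffer and no skip offsets, with all four values defaulting to zero. A minimal sketch of this optional-options-struct idiom (demo_* names are illustrative):

#include <stdio.h>

struct demo_params {
	unsigned int sskip;	/* bytes to skip at the start of src */
	unsigned int dskip;	/* bytes to skip at the start of dst */
};

static void demo_map(const char *what, const struct demo_params *params)
{
	unsigned int sskip = 0, dskip = 0;

	if (params) {		/* NULL means "all defaults" */
		sskip = params->sskip;
		dskip = params->dskip;
	}
	printf("%s: sskip=%u dskip=%u\n", what, sskip, dskip);
}

int main(void)
{
	struct demo_params p = { .sskip = 16, .dskip = 0 };

	demo_map("plain copy", NULL);	/* defaults */
	demo_map("skip header", &p);	/* e.g. skip 16 leading bytes */
	return 0;
}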
-
-static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
-                            struct qat_alg_buf_list *bl)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       int n = bl->num_bufs;
-       int i;
-
-       for (i = 0; i < n; i++)
-               if (!dma_mapping_error(dev, bl->buffers[i].addr))
-                       dma_unmap_single(dev, bl->buffers[i].addr,
-                                        bl->buffers[i].len, DMA_FROM_DEVICE);
-}
-
-static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
-                         struct scatterlist *sgl,
-                         struct qat_alg_buf_list **bl)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       struct qat_alg_buf_list *bufl;
-       int node = dev_to_node(dev);
-       struct scatterlist *sg;
-       int n, i, sg_nctr;
-       size_t sz;
-
-       n = sg_nents(sgl);
-       sz = struct_size(bufl, buffers, n);
-       bufl = kzalloc_node(sz, GFP_KERNEL, node);
-       if (unlikely(!bufl))
-               return -ENOMEM;
-
-       for (i = 0; i < n; i++)
-               bufl->buffers[i].addr = DMA_MAPPING_ERROR;
-
-       sg_nctr = 0;
-       for_each_sg(sgl, sg, n, i) {
-               int y = sg_nctr;
-
-               if (!sg->length)
-                       continue;
-
-               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
-                                                      sg->length,
-                                                      DMA_FROM_DEVICE);
-               bufl->buffers[y].len = sg->length;
-               if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
-                       goto err_map;
-               sg_nctr++;
-       }
-       bufl->num_bufs = sg_nctr;
-       bufl->num_mapped_bufs = sg_nctr;
-
-       *bl = bufl;
-
-       return 0;
-
-err_map:
-       for (i = 0; i < n; i++)
-               if (!dma_mapping_error(dev, bufl->buffers[i].addr))
-                       dma_unmap_single(dev, bufl->buffers[i].addr,
-                                        bufl->buffers[i].len,
-                                        DMA_FROM_DEVICE);
-       kfree(bufl);
-       *bl = NULL;
-
-       return -ENOMEM;
-}
-
-static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
-                                 struct scatterlist *sgl,
-                                 struct qat_alg_buf_list *bl,
-                                 bool free_bl)
-{
-       if (bl) {
-               qat_bl_sgl_unmap(accel_dev, bl);
-
-               if (free_bl)
-                       kfree(bl);
-       }
-       if (sgl)
-               sgl_free(sgl);
-}
-
-static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
-                               struct scatterlist **sgl,
-                               struct qat_alg_buf_list **bl,
-                               unsigned int dlen,
-                               gfp_t gfp)
-{
-       struct scatterlist *dst;
-       int ret;
-
-       dst = sgl_alloc(dlen, gfp, NULL);
-       if (!dst) {
-               dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
-               return -ENOMEM;
-       }
-
-       ret = qat_bl_sgl_map(accel_dev, dst, bl);
-       if (ret)
-               goto err;
-
-       *sgl = dst;
-
-       return 0;
-
-err:
-       sgl_free(dst);
-       *sgl = NULL;
-       return ret;
-}
-
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
-                              struct scatterlist **sg,
-                              unsigned int dlen,
-                              struct qat_request_buffs *qat_bufs,
-                              gfp_t gfp)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       dma_addr_t new_blp = DMA_MAPPING_ERROR;
-       struct qat_alg_buf_list *new_bl;
-       struct scatterlist *new_sg;
-       size_t new_bl_size;
-       int ret;
-
-       ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
-       if (ret)
-               return ret;
-
-       new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
-
-       /* Map new firmware SGL descriptor */
-       new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(dev, new_blp)))
-               goto err;
-
-       /* Unmap old firmware SGL descriptor */
-       dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);
-
-       /* Free and unmap old scatterlist */
-       qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
-                             !qat_bufs->sgl_dst_valid);
-
-       qat_bufs->sgl_dst_valid = false;
-       qat_bufs->blout = new_bl;
-       qat_bufs->bloutp = new_blp;
-       qat_bufs->sz_out = new_bl_size;
-
-       *sg = new_sg;
-
-       return 0;
-err:
-       qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);
-
-       if (!dma_mapping_error(dev, new_blp))
-               dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);
-
-       return -ENOMEM;
-}
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
deleted file mode 100644 (file)
index d87e4f3..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2014 - 2022 Intel Corporation */
-#ifndef QAT_BL_H
-#define QAT_BL_H
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-#define QAT_MAX_BUFF_DESC      4
-
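-/* Layout of a flat buffer descriptor in the format the QAT firmware
- * consumes. The buffer list below is DMA-mapped as one contiguous table
- * and its bus address is handed to the device, hence the __packed layout.
- */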
-struct qat_alg_buf {
-       u32 len;
-       u32 resrvd;
-       u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-       u64 resrvd;
-       u32 num_bufs;
-       u32 num_mapped_bufs;
-       struct qat_alg_buf buffers[];
-} __packed;
-
-struct qat_alg_fixed_buf_list {
-       struct qat_alg_buf_list sgl_hdr;
-       struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
-} __packed __aligned(64);
-
-struct qat_request_buffs {
-       struct qat_alg_buf_list *bl;
-       dma_addr_t blp;
-       struct qat_alg_buf_list *blout;
-       dma_addr_t bloutp;
-       size_t sz;
-       size_t sz_out;
-       bool sgl_src_valid;
-       bool sgl_dst_valid;
-       struct qat_alg_fixed_buf_list sgl_src;
-       struct qat_alg_fixed_buf_list sgl_dst;
-};
-
-struct qat_sgl_to_bufl_params {
-       dma_addr_t extra_dst_buff;
-       size_t sz_extra_dst_buff;
-       unsigned int sskip;
-       unsigned int dskip;
-};
-
-void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
-                     struct qat_request_buffs *buf);
-int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
-                      struct scatterlist *sgl,
-                      struct scatterlist *sglout,
-                      struct qat_request_buffs *buf,
-                      struct qat_sgl_to_bufl_params *params,
-                      gfp_t flags);
-
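-/* Use GFP_KERNEL only when the crypto API caller permits sleeping;
- * requests issued from atomic context must allocate with GFP_ATOMIC.
- */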
-static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
-{
-       return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
-}
-
-int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
-                              struct scatterlist **newd,
-                              unsigned int dlen,
-                              struct qat_request_buffs *qat_bufs,
-                              gfp_t gfp);
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/qat/qat_common/qat_comp_algs.c
deleted file mode 100644 (file)
index b533984..0000000
+++ /dev/null
@@ -1,489 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/crypto.h>
-#include <crypto/acompress.h>
-#include <crypto/internal/acompress.h>
-#include <crypto/scatterwalk.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "qat_bl.h"
-#include "qat_comp_req.h"
-#include "qat_compression.h"
-#include "qat_algs_send.h"
-
-#define QAT_RFC_1950_HDR_SIZE 2
-#define QAT_RFC_1950_FOOTER_SIZE 4
-#define QAT_RFC_1950_CM_DEFLATE 8
-#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
-#define QAT_RFC_1950_CM_MASK 0x0f
-#define QAT_RFC_1950_CM_OFFSET 4
-#define QAT_RFC_1950_DICT_MASK 0x20
-#define QAT_RFC_1950_COMP_HDR 0x785e
-
-static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
-
-enum direction {
-       DECOMPRESSION = 0,
-       COMPRESSION = 1,
-};
-
-struct qat_compression_req;
-
-struct qat_compression_ctx {
-       u8 comp_ctx[QAT_COMP_CTX_SIZE];
-       struct qat_compression_instance *inst;
-       int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
-};
-
-struct qat_dst {
-       bool is_null;
-       int resubmitted;
-};
-
-struct qat_compression_req {
-       u8 req[QAT_COMP_REQ_SIZE];
-       struct qat_compression_ctx *qat_compression_ctx;
-       struct acomp_req *acompress_req;
-       struct qat_request_buffs buf;
-       enum direction dir;
-       int actual_dlen;
-       struct qat_alg_req alg_req;
-       struct work_struct resubmit;
-       struct qat_dst dst;
-};
-
-static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
-                                  struct qat_compression_instance *inst,
-                                  struct crypto_async_request *base)
-{
-       struct qat_alg_req *alg_req = &qat_req->alg_req;
-
-       alg_req->fw_req = (u32 *)&qat_req->req;
-       alg_req->tx_ring = inst->dc_tx;
-       alg_req->base = base;
-       alg_req->backlog = &inst->backlog;
-
-       return qat_alg_send_message(alg_req);
-}
-
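-/* Worker that retries a decompression request originally submitted with
- * a NULL destination: the destination scatterlist is reallocated at the
- * maximum acomp size and the same firmware request is sent again with
- * the new buffer patched in.
- */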
-static void qat_comp_resubmit(struct work_struct *work)
-{
-       struct qat_compression_req *qat_req =
-               container_of(work, struct qat_compression_req, resubmit);
-       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
-       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-       struct qat_request_buffs *qat_bufs = &qat_req->buf;
-       struct qat_compression_instance *inst = ctx->inst;
-       struct acomp_req *areq = qat_req->acompress_req;
-       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
-       unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
-       u8 *req = qat_req->req;
-       dma_addr_t dfbuf;
-       int ret;
-
-       areq->dlen = dlen;
-
-       dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
-               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
-               qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);
-
-       ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
-                                        qat_algs_alloc_flags(&areq->base));
-       if (ret)
-               goto err;
-
-       qat_req->dst.resubmitted = true;
-
-       dfbuf = qat_req->buf.bloutp;
-       qat_comp_override_dst(req, dfbuf, dlen);
-
-       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
-       if (ret != -ENOSPC)
-               return;
-
-err:
-       qat_bl_free_bufl(accel_dev, qat_bufs);
-       acomp_request_complete(areq, ret);
-}
-
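-/* Accept only RFC 1950 streams the accelerator can handle: CM must be
- * 8 (deflate), CINFO at most 7 (32K window) and no preset dictionary.
- * The 0x785e header written on compression is one such value:
- * CMF = 0x78 (CINFO = 7, CM = 8), FLG = 0x5e (FDICT clear).
- */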
-static int parse_zlib_header(u16 zlib_h)
-{
-       int ret = -EINVAL;
-       __be16 header;
-       u8 *header_p;
-       u8 cmf, flg;
-
-       header = cpu_to_be16(zlib_h);
-       header_p = (u8 *)&header;
-
-       flg = header_p[0];
-       cmf = header_p[1];
-
-       if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
-               return ret;
-
-       if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
-               return ret;
-
-       if (flg & QAT_RFC_1950_DICT_MASK)
-               return ret;
-
-       return 0;
-}
-
-static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
-                                    void *resp)
-{
-       struct acomp_req *areq = qat_req->acompress_req;
-       enum direction dir = qat_req->dir;
-       __be32 qat_produced_adler;
-
-       qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
-
-       if (dir == COMPRESSION) {
-               __be16 zlib_header;
-
-               zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
-               scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
-               areq->dlen += QAT_RFC_1950_HDR_SIZE;
-
-               scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
-                                        QAT_RFC_1950_FOOTER_SIZE, 1);
-               areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
-       } else {
-               __be32 decomp_adler;
-               int footer_offset;
-               int consumed;
-
-               consumed = qat_comp_get_consumed_ctr(resp);
-               footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
-               if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
-                       return -EBADMSG;
-
-               scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
-                                        QAT_RFC_1950_FOOTER_SIZE, 0);
-
-               if (qat_produced_adler != decomp_adler)
-                       return -EBADMSG;
-       }
-       return 0;
-}
-
-static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
-                                     void *resp)
-{
-       struct acomp_req *areq = qat_req->acompress_req;
-       struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
-       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-       struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
-       struct qat_compression_instance *inst = ctx->inst;
-       int consumed, produced;
-       s8 cmp_err, xlt_err;
-       int res = -EBADMSG;
-       int status;
-       u8 cnv;
-
-       status = qat_comp_get_cmp_status(resp);
-       status |= qat_comp_get_xlt_status(resp);
-       cmp_err = qat_comp_get_cmp_err(resp);
-       xlt_err = qat_comp_get_xlt_err(resp);
-
-       consumed = qat_comp_get_consumed_ctr(resp);
-       produced = qat_comp_get_produced_ctr(resp);
-
-       dev_dbg(&GET_DEV(accel_dev),
-               "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
-               crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
-               qat_req->dir == COMPRESSION ? "comp  " : "decomp",
-               status ? "ERR" : "OK ",
-               areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
-
-       areq->dlen = 0;
-
-       if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
-               if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
-                       if (qat_req->dst.resubmitted) {
-                               dev_dbg(&GET_DEV(accel_dev),
-                                       "Output does not fit destination buffer\n");
-                               res = -EOVERFLOW;
-                               goto end;
-                       }
-
-                       INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
-                       adf_misc_wq_queue_work(&qat_req->resubmit);
-                       return;
-               }
-       }
-
-       if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
-               goto end;
-
-       if (qat_req->dir == COMPRESSION) {
-               cnv = qat_comp_get_cmp_cnv_flag(resp);
-               if (unlikely(!cnv)) {
-                       dev_err(&GET_DEV(accel_dev),
-                               "Verified compression not supported\n");
-                       goto end;
-               }
-
-               if (unlikely(produced > qat_req->actual_dlen)) {
-                       memset(inst->dc_data->ovf_buff, 0,
-                              inst->dc_data->ovf_buff_sz);
-                       dev_dbg(&GET_DEV(accel_dev),
-                               "Actual buffer overflow: produced=%d, dlen=%d\n",
-                               produced, qat_req->actual_dlen);
-                       goto end;
-               }
-       }
-
-       res = 0;
-       areq->dlen = produced;
-
-       if (ctx->qat_comp_callback)
-               res = ctx->qat_comp_callback(qat_req, resp);
-
-end:
-       qat_bl_free_bufl(accel_dev, &qat_req->buf);
-       acomp_request_complete(areq, res);
-}
-
-void qat_comp_alg_callback(void *resp)
-{
-       struct qat_compression_req *qat_req =
-                       (void *)(__force long)qat_comp_get_opaque(resp);
-       struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
-
-       qat_comp_generic_callback(qat_req, resp);
-
-       qat_alg_send_backlog(backlog);
-}
-
-static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
-{
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_compression_instance *inst;
-       int node;
-
-       if (tfm->node == NUMA_NO_NODE)
-               node = numa_node_id();
-       else
-               node = tfm->node;
-
-       memset(ctx, 0, sizeof(*ctx));
-       inst = qat_compression_get_instance_node(node);
-       if (!inst)
-               return -EINVAL;
-       ctx->inst = inst;
-
-       ctx->inst->build_deflate_ctx(ctx->comp_ctx);
-
-       return 0;
-}
-
-static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
-{
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       qat_compression_put_instance(ctx->inst);
-       memset(ctx, 0, sizeof(*ctx));
-}
-
-static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
-{
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       int ret;
-
-       ret = qat_comp_alg_init_tfm(acomp_tfm);
-       ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
-
-       return ret;
-}
-
-static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
-                                           unsigned int shdr, unsigned int sftr,
-                                           unsigned int dhdr, unsigned int dftr)
-{
-       struct qat_compression_req *qat_req = acomp_request_ctx(areq);
-       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct qat_compression_instance *inst = ctx->inst;
-       gfp_t f = qat_algs_alloc_flags(&areq->base);
-       struct qat_sgl_to_bufl_params params = {0};
-       int slen = areq->slen - shdr - sftr;
-       int dlen = areq->dlen - dhdr - dftr;
-       dma_addr_t sfbuf, dfbuf;
-       u8 *req = qat_req->req;
-       size_t ovf_buff_sz;
-       int ret;
-
-       params.sskip = shdr;
-       params.dskip = dhdr;
-
-       if (!areq->src || !slen)
-               return -EINVAL;
-
-       if (areq->dst && !dlen)
-               return -EINVAL;
-
-       qat_req->dst.is_null = false;
-
-       /* Handle acomp requests that require the allocation of a destination
-        * buffer. The size of the destination buffer is double the source
-        * buffer (rounded up to the size of a page) to fit the decompressed
-        * output or an expansion on the data for compression.
-        */
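-       /* Illustrative sizing: a 3 KiB source yields
-        * dlen = round_up(2 * 3072, PAGE_SIZE) = 8192
-        * on a system with 4 KiB pages, before dhdr/dftr are subtracted.
-        */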
-       if (!areq->dst) {
-               qat_req->dst.is_null = true;
-
-               dlen = round_up(2 * slen, PAGE_SIZE);
-               areq->dst = sgl_alloc(dlen, f, NULL);
-               if (!areq->dst)
-                       return -ENOMEM;
-
-               dlen -= dhdr + dftr;
-               areq->dlen = dlen;
-               qat_req->dst.resubmitted = false;
-       }
-
-       if (dir == COMPRESSION) {
-               params.extra_dst_buff = inst->dc_data->ovf_buff_p;
-               ovf_buff_sz = inst->dc_data->ovf_buff_sz;
-               params.sz_extra_dst_buff = ovf_buff_sz;
-       }
-
-       ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
-                                &qat_req->buf, &params, f);
-       if (unlikely(ret))
-               return ret;
-
-       sfbuf = qat_req->buf.blp;
-       dfbuf = qat_req->buf.bloutp;
-       qat_req->qat_compression_ctx = ctx;
-       qat_req->acompress_req = areq;
-       qat_req->dir = dir;
-
-       if (dir == COMPRESSION) {
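-               /* Advertise the destination plus the overflow buffer to
-                * the firmware; actual_dlen keeps the real destination
-                * size so the callback can detect a buffer overflow.
-                */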
-               qat_req->actual_dlen = dlen;
-               dlen += ovf_buff_sz;
-               qat_comp_create_compression_req(ctx->comp_ctx, req,
-                                               (u64)(__force long)sfbuf, slen,
-                                               (u64)(__force long)dfbuf, dlen,
-                                               (u64)(__force long)qat_req);
-       } else {
-               qat_comp_create_decompression_req(ctx->comp_ctx, req,
-                                                 (u64)(__force long)sfbuf, slen,
-                                                 (u64)(__force long)dfbuf, dlen,
-                                                 (u64)(__force long)qat_req);
-       }
-
-       ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
-       if (ret == -ENOSPC)
-               qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
-
-       return ret;
-}
-
-static int qat_comp_alg_compress(struct acomp_req *req)
-{
-       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
-}
-
-static int qat_comp_alg_decompress(struct acomp_req *req)
-{
-       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
-}
-
-static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
-{
-       if (!req->dst && req->dlen != 0)
-               return -EINVAL;
-
-       if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
-               return -EINVAL;
-
-       return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
-                                               QAT_RFC_1950_HDR_SIZE,
-                                               QAT_RFC_1950_FOOTER_SIZE);
-}
-
-static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
-{
-       struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
-       struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
-       struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
-       u16 zlib_header;
-       int ret;
-
-       if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
-               return -EBADMSG;
-
-       scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
-
-       ret = parse_zlib_header(zlib_header);
-       if (ret) {
-               dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
-               return ret;
-       }
-
-       return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
-                                               QAT_RFC_1950_FOOTER_SIZE, 0, 0);
-}
-
-static struct acomp_alg qat_acomp[] = { {
-       .base = {
-               .cra_name = "deflate",
-               .cra_driver_name = "qat_deflate",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-               .cra_ctxsize = sizeof(struct qat_compression_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_comp_alg_init_tfm,
-       .exit = qat_comp_alg_exit_tfm,
-       .compress = qat_comp_alg_compress,
-       .decompress = qat_comp_alg_decompress,
-       .dst_free = sgl_free,
-       .reqsize = sizeof(struct qat_compression_req),
-}, {
-       .base = {
-               .cra_name = "zlib-deflate",
-               .cra_driver_name = "qat_zlib_deflate",
-               .cra_priority = 4001,
-               .cra_flags = CRYPTO_ALG_ASYNC,
-               .cra_ctxsize = sizeof(struct qat_compression_ctx),
-               .cra_module = THIS_MODULE,
-       },
-       .init = qat_comp_alg_rfc1950_init_tfm,
-       .exit = qat_comp_alg_exit_tfm,
-       .compress = qat_comp_alg_rfc1950_compress,
-       .decompress = qat_comp_alg_rfc1950_decompress,
-       .dst_free = sgl_free,
-       .reqsize = sizeof(struct qat_compression_req),
-} };
-
-int qat_comp_algs_register(void)
-{
-       int ret = 0;
-
-       mutex_lock(&algs_lock);
-       if (++active_devs == 1)
-               ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
-       mutex_unlock(&algs_lock);
-       return ret;
-}
-
-void qat_comp_algs_unregister(void)
-{
-       mutex_lock(&algs_lock);
-       if (--active_devs == 0)
-               crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
-       mutex_unlock(&algs_lock);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_comp_req.h b/drivers/crypto/qat/qat_common/qat_comp_req.h
deleted file mode 100644 (file)
index 404e32c..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _QAT_COMP_REQ_H_
-#define _QAT_COMP_REQ_H_
-
-#include "icp_qat_fw_comp.h"
-
-#define QAT_COMP_REQ_SIZE (sizeof(struct icp_qat_fw_comp_req))
-#define QAT_COMP_CTX_SIZE (QAT_COMP_REQ_SIZE * 2)
-
-static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
-                                      u64 dst, u32 dlen, u64 opaque)
-{
-       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
-       struct icp_qat_fw_comp_req *fw_req = req;
-       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
-
-       memcpy(fw_req, fw_tmpl, sizeof(*fw_req));
-       fw_req->comn_mid.src_data_addr = src;
-       fw_req->comn_mid.src_length = slen;
-       fw_req->comn_mid.dest_data_addr = dst;
-       fw_req->comn_mid.dst_length = dlen;
-       fw_req->comn_mid.opaque_data = opaque;
-       req_pars->comp_len = slen;
-       req_pars->out_buffer_sz = dlen;
-}
-
-static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen)
-{
-       struct icp_qat_fw_comp_req *fw_req = req;
-       struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars;
-
-       fw_req->comn_mid.dest_data_addr = dst;
-       fw_req->comn_mid.dst_length = dlen;
-       req_pars->out_buffer_sz = dlen;
-}
-
-static inline void qat_comp_create_compression_req(void *ctx, void *req,
-                                                  u64 src, u32 slen,
-                                                  u64 dst, u32 dlen,
-                                                  u64 opaque)
-{
-       qat_comp_create_req(ctx, req, src, slen, dst, dlen, opaque);
-}
-
-static inline void qat_comp_create_decompression_req(void *ctx, void *req,
-                                                    u64 src, u32 slen,
-                                                    u64 dst, u32 dlen,
-                                                    u64 opaque)
-{
-       struct icp_qat_fw_comp_req *fw_tmpl = ctx;
-
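-       /* The context holds two request templates (QAT_COMP_CTX_SIZE is
-        * twice QAT_COMP_REQ_SIZE); skip the compression template to get
-        * the decompression one.
-        */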
-       fw_tmpl++;
-       qat_comp_create_req(fw_tmpl, req, src, slen, dst, dlen, opaque);
-}
-
-static inline u32 qat_comp_get_consumed_ctr(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comp_resp_pars.input_byte_counter;
-}
-
-static inline u32 qat_comp_get_produced_ctr(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comp_resp_pars.output_byte_counter;
-}
-
-static inline u32 qat_comp_get_produced_adler32(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comp_resp_pars.crc.legacy.curr_adler_32;
-}
-
-static inline u64 qat_comp_get_opaque(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->opaque_data;
-}
-
-static inline s8 qat_comp_get_cmp_err(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comn_resp.comn_error.cmp_err_code;
-}
-
-static inline s8 qat_comp_get_xlt_err(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-
-       return qat_resp->comn_resp.comn_error.xlat_err_code;
-}
-
-static inline s8 qat_comp_get_cmp_status(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-
-       return ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(stat_field);
-}
-
-static inline s8 qat_comp_get_xlt_status(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-       u8 stat_field = qat_resp->comn_resp.comn_status;
-
-       return ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(stat_field);
-}
-
-static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
-{
-       struct icp_qat_fw_comp_resp *qat_resp = resp;
-       u8 flags = qat_resp->comn_resp.hdr_flags;
-
-       return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
-}
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/qat/qat_common/qat_compression.c
deleted file mode 100644 (file)
index 3f1f352..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_transport.h"
-#include "adf_transport_access_macros.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "qat_compression.h"
-#include "icp_qat_fw.h"
-
-#define SEC ADF_KERNEL_SEC
-
-static struct service_hndl qat_compression;
-
-void qat_compression_put_instance(struct qat_compression_instance *inst)
-{
-       atomic_dec(&inst->refctr);
-       adf_dev_put(inst->accel_dev);
-}
-
-static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
-{
-       struct qat_compression_instance *inst;
-       struct list_head *list_ptr, *tmp;
-       int i;
-
-       list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
-               inst = list_entry(list_ptr,
-                                 struct qat_compression_instance, list);
-
-               for (i = 0; i < atomic_read(&inst->refctr); i++)
-                       qat_compression_put_instance(inst);
-
-               if (inst->dc_tx)
-                       adf_remove_ring(inst->dc_tx);
-
-               if (inst->dc_rx)
-                       adf_remove_ring(inst->dc_rx);
-
-               list_del(list_ptr);
-               kfree(inst);
-       }
-       return 0;
-}
-
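-/* Return the least-referenced compression instance, preferring started
- * devices on the requested NUMA node and falling back to any started
- * device that exposes compression instances.
- */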
-struct qat_compression_instance *qat_compression_get_instance_node(int node)
-{
-       struct qat_compression_instance *inst = NULL;
-       struct adf_accel_dev *accel_dev = NULL;
-       unsigned long best = ~0;
-       struct list_head *itr;
-
-       list_for_each(itr, adf_devmgr_get_head()) {
-               struct adf_accel_dev *tmp_dev;
-               unsigned long ctr;
-               int tmp_dev_node;
-
-               tmp_dev = list_entry(itr, struct adf_accel_dev, list);
-               tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
-
-               if ((node == tmp_dev_node || tmp_dev_node < 0) &&
-                   adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
-                       ctr = atomic_read(&tmp_dev->ref_count);
-                       if (best > ctr) {
-                               accel_dev = tmp_dev;
-                               best = ctr;
-                       }
-               }
-       }
-
-       if (!accel_dev) {
-               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
-               /* Get any started device */
-               list_for_each(itr, adf_devmgr_get_head()) {
-                       struct adf_accel_dev *tmp_dev;
-
-                       tmp_dev = list_entry(itr, struct adf_accel_dev, list);
-                       if (adf_dev_started(tmp_dev) &&
-                           !list_empty(&tmp_dev->compression_list)) {
-                               accel_dev = tmp_dev;
-                               break;
-                       }
-               }
-       }
-
-       if (!accel_dev)
-               return NULL;
-
-       best = ~0;
-       list_for_each(itr, &accel_dev->compression_list) {
-               struct qat_compression_instance *tmp_inst;
-               unsigned long ctr;
-
-               tmp_inst = list_entry(itr, struct qat_compression_instance, list);
-               ctr = atomic_read(&tmp_inst->refctr);
-               if (best > ctr) {
-                       inst = tmp_inst;
-                       best = ctr;
-               }
-       }
-       if (inst) {
-               if (adf_dev_get(accel_dev)) {
-                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
-                       return NULL;
-               }
-               atomic_inc(&inst->refctr);
-       }
-       return inst;
-}
-
-static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
-{
-       struct qat_compression_instance *inst;
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       unsigned long num_inst, num_msg_dc;
-       unsigned long bank;
-       int msg_size;
-       int ret;
-       int i;
-
-       INIT_LIST_HEAD(&accel_dev->compression_list);
-       strscpy(key, ADF_NUM_DC, sizeof(key));
-       ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-       if (ret)
-               return ret;
-
-       ret = kstrtoul(val, 10, &num_inst);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < num_inst; i++) {
-               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   dev_to_node(&GET_DEV(accel_dev)));
-               if (!inst) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
-               list_add_tail(&inst->list, &accel_dev->compression_list);
-               inst->id = i;
-               atomic_set(&inst->refctr, 0);
-               inst->accel_dev = accel_dev;
-               inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
-
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &bank);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &num_msg_dc);
-               if (ret)
-                       goto err;
-
-               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
-               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
-                                     msg_size, key, NULL, 0, &inst->dc_tx);
-               if (ret)
-                       goto err;
-
-               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
-               ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
-                                     msg_size, key, qat_comp_alg_callback, 0,
-                                     &inst->dc_rx);
-               if (ret)
-                       goto err;
-
-               inst->dc_data = accel_dev->dc_data;
-               INIT_LIST_HEAD(&inst->backlog.list);
-               spin_lock_init(&inst->backlog.lock);
-       }
-       return 0;
-err:
-       qat_compression_free_instances(accel_dev);
-       return ret;
-}
-
-static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
-{
-       struct device *dev = &GET_DEV(accel_dev);
-       dma_addr_t obuff_p = DMA_MAPPING_ERROR;
-       size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
-       struct adf_dc_data *dc_data = NULL;
-       u8 *obuff = NULL;
-
-       dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
-       if (!dc_data)
-               goto err;
-
-       obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
-       if (!obuff)
-               goto err;
-
-       obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(dev, obuff_p)))
-               goto err;
-
-       dc_data->ovf_buff = obuff;
-       dc_data->ovf_buff_p = obuff_p;
-       dc_data->ovf_buff_sz = ovf_buff_sz;
-
-       accel_dev->dc_data = dc_data;
-
-       return 0;
-
-err:
-       accel_dev->dc_data = NULL;
-       kfree(obuff);
-       devm_kfree(dev, dc_data);
-       return -ENOMEM;
-}
-
-static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
-{
-       struct adf_dc_data *dc_data = accel_dev->dc_data;
-       struct device *dev = &GET_DEV(accel_dev);
-
-       if (!dc_data)
-               return;
-
-       dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
-                        DMA_FROM_DEVICE);
-       memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
-       kfree(dc_data->ovf_buff);
-       devm_kfree(dev, dc_data);
-       accel_dev->dc_data = NULL;
-}
-
-static int qat_compression_init(struct adf_accel_dev *accel_dev)
-{
-       int ret;
-
-       ret = qat_compression_alloc_dc_data(accel_dev);
-       if (ret)
-               return ret;
-
-       ret = qat_compression_create_instances(accel_dev);
-       if (ret)
-               qat_free_dc_data(accel_dev);
-
-       return ret;
-}
-
-static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
-{
-       qat_free_dc_data(accel_dev);
-       return qat_compression_free_instances(accel_dev);
-}
-
-static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
-                                        enum adf_event event)
-{
-       int ret;
-
-       switch (event) {
-       case ADF_EVENT_INIT:
-               ret = qat_compression_init(accel_dev);
-               break;
-       case ADF_EVENT_SHUTDOWN:
-               ret = qat_compression_shutdown(accel_dev);
-               break;
-       case ADF_EVENT_RESTARTING:
-       case ADF_EVENT_RESTARTED:
-       case ADF_EVENT_START:
-       case ADF_EVENT_STOP:
-       default:
-               ret = 0;
-       }
-       return ret;
-}
-
-int qat_compression_register(void)
-{
-       memset(&qat_compression, 0, sizeof(qat_compression));
-       qat_compression.event_hld = qat_compression_event_handler;
-       qat_compression.name = "qat_compression";
-       return adf_service_register(&qat_compression);
-}
-
-int qat_compression_unregister(void)
-{
-       return adf_service_unregister(&qat_compression);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_compression.h b/drivers/crypto/qat/qat_common/qat_compression.h
deleted file mode 100644 (file)
index aebac23..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef _QAT_COMPRESSION_H_
-#define _QAT_COMPRESSION_H_
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include "adf_accel_devices.h"
-#include "qat_algs_send.h"
-
-#define QAT_COMP_MAX_SKID 4096
-
-struct qat_compression_instance {
-       struct adf_etr_ring_data *dc_tx;
-       struct adf_etr_ring_data *dc_rx;
-       struct adf_accel_dev *accel_dev;
-       struct list_head list;
-       unsigned long state;
-       int id;
-       atomic_t refctr;
-       struct qat_instance_backlog backlog;
-       struct adf_dc_data *dc_data;
-       void (*build_deflate_ctx)(void *ctx);
-};
-
-static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 mask = ~hw_device->accel_capabilities_mask;
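-       /* A bit set in the inverted mask marks a missing capability */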
-
-       if (mask & ADF_ACCEL_CAPABILITIES_COMPRESSION)
-               return false;
-
-       return true;
-}
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
deleted file mode 100644 (file)
index 40c8e74..0000000
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "adf_transport.h"
-#include "adf_cfg.h"
-#include "adf_cfg_strings.h"
-#include "adf_gen2_hw_data.h"
-#include "qat_crypto.h"
-#include "icp_qat_fw.h"
-
-#define SEC ADF_KERNEL_SEC
-
-static struct service_hndl qat_crypto;
-
-void qat_crypto_put_instance(struct qat_crypto_instance *inst)
-{
-       atomic_dec(&inst->refctr);
-       adf_dev_put(inst->accel_dev);
-}
-
-static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
-{
-       struct qat_crypto_instance *inst, *tmp;
-       int i;
-
-       list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
-               for (i = 0; i < atomic_read(&inst->refctr); i++)
-                       qat_crypto_put_instance(inst);
-
-               if (inst->sym_tx)
-                       adf_remove_ring(inst->sym_tx);
-
-               if (inst->sym_rx)
-                       adf_remove_ring(inst->sym_rx);
-
-               if (inst->pke_tx)
-                       adf_remove_ring(inst->pke_tx);
-
-               if (inst->pke_rx)
-                       adf_remove_ring(inst->pke_rx);
-
-               list_del(&inst->list);
-               kfree(inst);
-       }
-       return 0;
-}
-
-struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
-{
-       struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
-       struct qat_crypto_instance *inst = NULL, *tmp_inst;
-       unsigned long best = ~0;
-
-       list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
-               unsigned long ctr;
-
-               if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
-                    dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
-                   adf_dev_started(tmp_dev) &&
-                   !list_empty(&tmp_dev->crypto_list)) {
-                       ctr = atomic_read(&tmp_dev->ref_count);
-                       if (best > ctr) {
-                               accel_dev = tmp_dev;
-                               best = ctr;
-                       }
-               }
-       }
-
-       if (!accel_dev) {
-               pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
-               /* Get any started device */
-               list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
-                       if (adf_dev_started(tmp_dev) &&
-                           !list_empty(&tmp_dev->crypto_list)) {
-                               accel_dev = tmp_dev;
-                               break;
-                       }
-               }
-       }
-
-       if (!accel_dev)
-               return NULL;
-
-       best = ~0;
-       list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
-               unsigned long ctr;
-
-               ctr = atomic_read(&tmp_inst->refctr);
-               if (best > ctr) {
-                       inst = tmp_inst;
-                       best = ctr;
-               }
-       }
-       if (inst) {
-               if (adf_dev_get(accel_dev)) {
-                       dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
-                       return NULL;
-               }
-               atomic_inc(&inst->refctr);
-       }
-       return inst;
-}
-
-/**
- * qat_crypto_vf_dev_config() - create dev config required to create crypto inst.
- *
- * @accel_dev: Pointer to acceleration device.
- *
- * Function creates the device configuration required to create
- * asym, sym or crypto instances.
- *
- * Return: 0 on success, error code otherwise.
- */
-int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
-{
-       u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
-
-       if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
-               dev_err(&GET_DEV(accel_dev),
-                       "Unsupported ring/service mapping present on PF");
-               return -EFAULT;
-       }
-
-       return GET_HW_DATA(accel_dev)->dev_config(accel_dev);
-}
-
-static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
-{
-       unsigned long num_inst, num_msg_sym, num_msg_asym;
-       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
-       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
-       unsigned long sym_bank, asym_bank;
-       struct qat_crypto_instance *inst;
-       int msg_size;
-       int ret;
-       int i;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
-       if (ret)
-               return ret;
-
-       ret = kstrtoul(val, 0, &num_inst);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < num_inst; i++) {
-               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-                                   dev_to_node(&GET_DEV(accel_dev)));
-               if (!inst) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-
-               list_add_tail(&inst->list, &accel_dev->crypto_list);
-               inst->id = i;
-               atomic_set(&inst->refctr, 0);
-               inst->accel_dev = accel_dev;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &sym_bank);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &asym_bank);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &num_msg_sym);
-               if (ret)
-                       goto err;
-
-               num_msg_sym = num_msg_sym >> 1;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
-               ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
-               if (ret)
-                       goto err;
-
-               ret = kstrtoul(val, 10, &num_msg_asym);
-               if (ret)
-                       goto err;
-               num_msg_asym = num_msg_asym >> 1;
-
-               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
-               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
-                                     msg_size, key, NULL, 0, &inst->sym_tx);
-               if (ret)
-                       goto err;
-
-               msg_size = msg_size >> 1;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
-               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
-                                     msg_size, key, NULL, 0, &inst->pke_tx);
-               if (ret)
-                       goto err;
-
-               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
-               ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
-                                     msg_size, key, qat_alg_callback, 0,
-                                     &inst->sym_rx);
-               if (ret)
-                       goto err;
-
-               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
-               ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
-                                     msg_size, key, qat_alg_asym_callback, 0,
-                                     &inst->pke_rx);
-               if (ret)
-                       goto err;
-
-               INIT_LIST_HEAD(&inst->backlog.list);
-               spin_lock_init(&inst->backlog.lock);
-       }
-       return 0;
-err:
-       qat_crypto_free_instances(accel_dev);
-       return ret;
-}
-
-static int qat_crypto_init(struct adf_accel_dev *accel_dev)
-{
-       if (qat_crypto_create_instances(accel_dev))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
-{
-       return qat_crypto_free_instances(accel_dev);
-}
-
-static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
-                                   enum adf_event event)
-{
-       int ret;
-
-       switch (event) {
-       case ADF_EVENT_INIT:
-               ret = qat_crypto_init(accel_dev);
-               break;
-       case ADF_EVENT_SHUTDOWN:
-               ret = qat_crypto_shutdown(accel_dev);
-               break;
-       case ADF_EVENT_RESTARTING:
-       case ADF_EVENT_RESTARTED:
-       case ADF_EVENT_START:
-       case ADF_EVENT_STOP:
-       default:
-               ret = 0;
-       }
-       return ret;
-}
-
-int qat_crypto_register(void)
-{
-       memset(&qat_crypto, 0, sizeof(qat_crypto));
-       qat_crypto.event_hld = qat_crypto_event_handler;
-       qat_crypto.name = "qat_crypto";
-       return adf_service_register(&qat_crypto);
-}
-
-int qat_crypto_unregister(void)
-{
-       return adf_service_unregister(&qat_crypto);
-}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
deleted file mode 100644 (file)
index 6a0e961..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef _QAT_CRYPTO_INSTANCE_H_
-#define _QAT_CRYPTO_INSTANCE_H_
-
-#include <crypto/aes.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_la.h"
-#include "qat_algs_send.h"
-#include "qat_bl.h"
-
-struct qat_crypto_instance {
-       struct adf_etr_ring_data *sym_tx;
-       struct adf_etr_ring_data *sym_rx;
-       struct adf_etr_ring_data *pke_tx;
-       struct adf_etr_ring_data *pke_rx;
-       struct adf_accel_dev *accel_dev;
-       struct list_head list;
-       unsigned long state;
-       int id;
-       atomic_t refctr;
-       struct qat_instance_backlog backlog;
-};
-
-struct qat_crypto_request;
-
-struct qat_crypto_request {
-       struct icp_qat_fw_la_bulk_req req;
-       union {
-               struct qat_alg_aead_ctx *aead_ctx;
-               struct qat_alg_skcipher_ctx *skcipher_ctx;
-       };
-       union {
-               struct aead_request *aead_req;
-               struct skcipher_request *skcipher_req;
-       };
-       struct qat_request_buffs buf;
-       void (*cb)(struct icp_qat_fw_la_resp *resp,
-                  struct qat_crypto_request *req);
-       union {
-               struct {
-                       __be64 iv_hi;
-                       __be64 iv_lo;
-               };
-               u8 iv[AES_BLOCK_SIZE];
-       };
-       bool encryption;
-       struct qat_alg_req alg_req;
-};
-
-static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
-{
-       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-       u32 mask = ~hw_device->accel_capabilities_mask;
-
-       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
-               return false;
-       if (mask & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC)
-               return false;
-       if (mask & ADF_ACCEL_CAPABILITIES_AUTHENTICATION)
-               return false;
-
-       return true;
-}
-
-#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
deleted file mode 100644 (file)
index cbb946a..0000000
+++ /dev/null
@@ -1,1594 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci_ids.h>
-
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_hal.h"
-#include "icp_qat_uclo.h"
-
-#define BAD_REGADDR           0xffff
-#define MAX_RETRY_TIMES           10000
-#define INIT_CTX_ARB_VALUE     0x0
-#define INIT_CTX_ENABLE_VALUE     0x0
-#define INIT_PC_VALUE       0x0
-#define INIT_WAKEUP_EVENTS_VALUE  0x1
-#define INIT_SIG_EVENTS_VALUE     0x1
-#define INIT_CCENABLE_VALUE       0x2000
-#define RST_CSR_QAT_LSB           20
-#define RST_CSR_AE_LSB           0
-#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
-
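-/* Mask write-1-to-clear status bits to zero so a read-modify-write of
- * CTX_ENABLES does not inadvertently clear them.
- */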
-#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
-       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
-       (~(1 << CE_REG_PAR_ERR_BITPOS)))
-#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
-       (inst = ((inst & 0xFFFF00C03FFull) | \
-               ((((const_val) << 12) & 0x0FF00000ull) | \
-               (((const_val) << 10) & 0x0003FC00ull))))
-#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
-       (inst = ((inst & 0xFFFF00FFF00ull) | \
-               ((((const_val) << 12) & 0x0FF00000ull) | \
-               (((const_val) <<  0) & 0x000000FFull))))
-
-#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
-
-static const u64 inst_4b[] = {
-       0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
-       0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
-       0x0A021000000ull
-};
-
-static const u64 inst[] = {
-       0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
-       0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
-       0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
-       0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
-       0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
-       0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
-       0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
-       0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
-       0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
-       0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
-       0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
-       0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
-       0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
-       0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
-       0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
-       0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
-       0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
-       0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
-       0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
-       0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
-       0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
-       0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
-};
-
-void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
-                         unsigned char ae, unsigned int ctx_mask)
-{
-       AE(handle, ae).live_ctx_mask = ctx_mask;
-}
-
-#define CSR_RETRY_TIMES 500
-static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
-                            unsigned char ae, unsigned int csr)
-{
-       unsigned int iterations = CSR_RETRY_TIMES;
-       int value;
-
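-       /* Re-issue the read until LOCAL_CSR_STATUS reports success */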
-       do {
-               value = GET_AE_CSR(handle, ae, csr);
-               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
-                       return value;
-       } while (iterations--);
-
-       pr_err("QAT: Read CSR timeout\n");
-       return 0;
-}
-
-static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
-                            unsigned char ae, unsigned int csr,
-                            unsigned int value)
-{
-       unsigned int iterations = CSR_RETRY_TIMES;
-
-       do {
-               SET_AE_CSR(handle, ae, csr, value);
-               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
-                       return 0;
-       } while (iterations--);
-
-       pr_err("QAT: Write CSR Timeout\n");
-       return -EFAULT;
-}
-
-static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
-                                    unsigned char ae, unsigned char ctx,
-                                    unsigned int *events)
-{
-       unsigned int cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-       *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
-                              unsigned char ae, unsigned int cycles,
-                              int chk_inactive)
-{
-       unsigned int base_cnt = 0, cur_cnt = 0;
-       unsigned int csr = (1 << ACS_ABO_BITPOS);
-       int times = MAX_RETRY_TIMES;
-       int elapsed_cycles = 0;
-
-       base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-       base_cnt &= 0xffff;
-       while ((int)cycles > elapsed_cycles && times--) {
-               if (chk_inactive)
-                       csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-
-               cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-               cur_cnt &= 0xffff;
-               elapsed_cycles = cur_cnt - base_cnt;
-
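-               /* PROFILE_COUNT is 16 bits wide; adjust for wraparound */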
-               if (elapsed_cycles < 0)
-                       elapsed_cycles += 0x10000;
-
-               /* ensure at least 8 cycles have elapsed in wait_cycles */
-               if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
-                       return 0;
-       }
-       if (times < 0) {
-               pr_err("QAT: wait_num_cycles time out\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
-#define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
-
-int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
-                           unsigned char ae, unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       if (mode != 4 && mode != 8) {
-               pr_err("QAT: bad ctx mode=%d\n", mode);
-               return -EINVAL;
-       }
-
-       /* Set the acceleration engine context mode to either four or eight contexts */
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr = IGNORE_W1C_MASK & csr;
-       new_csr = (mode == 4) ?
-               SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
-               CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-       return 0;
-}
-
-int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr &= IGNORE_W1C_MASK;
-
-       new_csr = (mode) ?
-               SET_BIT(csr, CE_NN_MODE_BITPOS) :
-               CLR_BIT(csr, CE_NN_MODE_BITPOS);
-
-       if (new_csr != csr)
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-
-       return 0;
-}
-
-int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
-                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
-                          unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr &= IGNORE_W1C_MASK;
-       switch (lm_type) {
-       case ICP_LMEM0:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
-               break;
-       case ICP_LMEM1:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
-               break;
-       case ICP_LMEM2:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
-               break;
-       case ICP_LMEM3:
-               new_csr = (mode) ?
-                       SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
-                       CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
-               break;
-       default:
-               pr_err("QAT: lmType = 0x%x\n", lm_type);
-               return -EINVAL;
-       }
-
-       if (new_csr != csr)
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-       return 0;
-}
-
-void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned char mode)
-{
-       unsigned int csr, new_csr;
-
-       csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       csr &= IGNORE_W1C_MASK;
-       new_csr = (mode) ?
-                 SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
-                 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
-       if (new_csr != csr)
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
-}
-
-static unsigned short qat_hal_get_reg_addr(unsigned int type,
-                                          unsigned short reg_num)
-{
-       unsigned short reg_addr;
-
-       switch (type) {
-       case ICP_GPA_ABS:
-       case ICP_GPB_ABS:
-               reg_addr = 0x80 | (reg_num & 0x7f);
-               break;
-       case ICP_GPA_REL:
-       case ICP_GPB_REL:
-               reg_addr = reg_num & 0x1f;
-               break;
-       case ICP_SR_RD_REL:
-       case ICP_SR_WR_REL:
-       case ICP_SR_REL:
-               reg_addr = 0x180 | (reg_num & 0x1f);
-               break;
-       case ICP_SR_ABS:
-               reg_addr = 0x140 | ((reg_num & 0x3) << 1);
-               break;
-       case ICP_DR_RD_REL:
-       case ICP_DR_WR_REL:
-       case ICP_DR_REL:
-               reg_addr = 0x1c0 | (reg_num & 0x1f);
-               break;
-       case ICP_DR_ABS:
-               reg_addr = 0x100 | ((reg_num & 0x3) << 1);
-               break;
-       case ICP_NEIGH_REL:
-               reg_addr = 0x280 | (reg_num & 0x1f);
-               break;
-       case ICP_LMEM0:
-               reg_addr = 0x200;
-               break;
-       case ICP_LMEM1:
-               reg_addr = 0x220;
-               break;
-       case ICP_LMEM2:
-               reg_addr = 0x2c0;
-               break;
-       case ICP_LMEM3:
-               reg_addr = 0x2e0;
-               break;
-       case ICP_NO_DEST:
-               reg_addr = 0x300 | (reg_num & 0xff);
-               break;
-       default:
-               reg_addr = BAD_REGADDR;
-               break;
-       }
-       return reg_addr;
-}
-
-void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
-       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
-       unsigned int csr_val;
-
-       csr_val = GET_CAP_CSR(handle, reset_csr);
-       csr_val |= reset_mask;
-       SET_CAP_CSR(handle, reset_csr, csr_val);
-}
-
-static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned int ctx_mask,
-                               unsigned int ae_csr, unsigned int csr_val)
-{
-       unsigned int ctx, cur_ctx;
-
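-       /* indirect CSRs are banked per context; point CSR_CTX_POINTER at
-        * each target context before writing the value
-        */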
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!(ctx_mask & (1 << ctx)))
-                       continue;
-               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-               qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
-       }
-
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned char ctx,
-                               unsigned int ae_csr)
-{
-       unsigned int cur_ctx, csr_val;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-       csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-
-       return csr_val;
-}
-
-static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
-                                 unsigned char ae, unsigned int ctx_mask,
-                                 unsigned int events)
-{
-       unsigned int ctx, cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!(ctx_mask & (1 << ctx)))
-                       continue;
-               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
-       }
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
-                                    unsigned char ae, unsigned int ctx_mask,
-                                    unsigned int events)
-{
-       unsigned int ctx, cur_ctx;
-
-       cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!(ctx_mask & (1 << ctx)))
-                       continue;
-               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
-               qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
-                                 events);
-       }
-       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
-}
-
-static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned int base_cnt, cur_cnt;
-       unsigned char ae;
-       int times = MAX_RETRY_TIMES;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-               base_cnt &= 0xffff;
-
-               do {
-                       cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
-                       cur_cnt &= 0xffff;
-               } while (times-- && (cur_cnt == base_cnt));
-
-               if (times < 0) {
-                       pr_err("QAT: AE%d is inactive!!\n", ae);
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-
-int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
-                           unsigned int ae)
-{
-       unsigned int enable = 0, active = 0;
-
-       enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-       if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
-           (active & (1 << ACS_ABO_BITPOS)))
-               return 1;
-       else
-               return 0;
-}
-
-static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned int misc_ctl_csr, misc_ctl;
-       unsigned char ae;
-
-       misc_ctl_csr = handle->chip_info->misc_ctl_csr;
-       /* stop the timestamp timers */
-       misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
-       if (misc_ctl & MC_TIMESTAMP_ENABLE)
-               SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
-                           (~MC_TIMESTAMP_ENABLE));
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
-               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
-       }
-       /* start timestamp timers */
-       SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
-}
-
-#define ESRAM_AUTO_TINIT       BIT(2)
-#define ESRAM_AUTO_TINIT_DONE  BIT(3)
-#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
-#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
-static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
-{
-       void __iomem *csr_addr =
-                       (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
-                       ESRAM_AUTO_INIT_CSR_OFFSET);
-       unsigned int csr_val;
-       int times = 30;
-
-       if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
-               return 0;
-
-       csr_val = ADF_CSR_RD(csr_addr, 0);
-       if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
-               return 0;
-
-       csr_val = ADF_CSR_RD(csr_addr, 0);
-       csr_val |= ESRAM_AUTO_TINIT;
-       ADF_CSR_WR(csr_addr, 0, csr_val);
-
-       do {
-               qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
-               csr_val = ADF_CSR_RD(csr_addr, 0);
-       } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
-       if (times < 0) {
-               pr_err("QAT: Fail to init eSram!\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-#define SHRAM_INIT_CYCLES 2060
-int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
-       unsigned int reset_mask = handle->chip_info->icp_rst_mask;
-       unsigned int reset_csr = handle->chip_info->icp_rst_csr;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned char ae = 0;
-       unsigned int times = 100;
-       unsigned int csr_val;
-
-       /* write to the reset csr */
-       csr_val = GET_CAP_CSR(handle, reset_csr);
-       csr_val &= ~reset_mask;
-       do {
-               SET_CAP_CSR(handle, reset_csr, csr_val);
-               if (!(times--))
-                       goto out_err;
-               csr_val = GET_CAP_CSR(handle, reset_csr);
-               csr_val &= reset_mask;
-       } while (csr_val);
-       /* enable clock */
-       csr_val = GET_CAP_CSR(handle, clk_csr);
-       csr_val |= reset_mask;
-       SET_CAP_CSR(handle, clk_csr, csr_val);
-       if (qat_hal_check_ae_alive(handle))
-               goto out_err;
-
-       /* Set undefined power-up/reset states to reasonable default values */
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
-                                 INIT_CTX_ENABLE_VALUE);
-               qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
-                                   CTX_STS_INDIRECT,
-                                   handle->hal_handle->upc_mask &
-                                   INIT_PC_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
-               qat_hal_put_wakeup_event(handle, ae,
-                                        ICP_QAT_UCLO_AE_ALL_CTX,
-                                        INIT_WAKEUP_EVENTS_VALUE);
-               qat_hal_put_sig_event(handle, ae,
-                                     ICP_QAT_UCLO_AE_ALL_CTX,
-                                     INIT_SIG_EVENTS_VALUE);
-       }
-       if (qat_hal_init_esram(handle))
-               goto out_err;
-       if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
-               goto out_err;
-       qat_hal_reset_timestamp(handle);
-
-       return 0;
-out_err:
-       pr_err("QAT: failed to get device out of reset\n");
-       return -EFAULT;
-}
-
-static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
-                               unsigned char ae, unsigned int ctx_mask)
-{
-       unsigned int ctx;
-
-       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx &= IGNORE_W1C_MASK &
-               (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
-}
-
-static u64 qat_hal_parity_64bit(u64 word)
-{
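-       /* fold the word onto itself so bit 0 ends up as the parity of all bits */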
-       word ^= word >> 1;
-       word ^= word >> 2;
-       word ^= word >> 4;
-       word ^= word >> 8;
-       word ^= word >> 16;
-       word ^= word >> 32;
-       return word & 1;
-}
-
-static u64 qat_hal_set_uword_ecc(u64 uword)
-{
-       u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
-               bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
-               bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
-               bit6_mask = 0xdaf69a46910ULL;
-
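-       /* each mask selects the data bits covered by one of the seven ECC
-        * bits stored in uword bits [44:50]
-        */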
-       /* clear the ecc bits */
-       uword &= ~(0x7fULL << 0x2C);
-       uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
-       uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
-       uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
-       uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
-       uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
-       uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
-       uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
-       return uword;
-}
-
-void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
-                      unsigned char ae, unsigned int uaddr,
-                      unsigned int words_num, u64 *uword)
-{
-       unsigned int ustore_addr;
-       unsigned int i;
-
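-       /* UA_ECS opens the control store for writes; the write address
-        * auto-increments, so it only needs to be programmed once
-        */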
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr |= UA_ECS;
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       for (i = 0; i < words_num; i++) {
-               unsigned int uwrd_lo, uwrd_hi;
-               u64 tmp;
-
-               tmp = qat_hal_set_uword_ecc(uword[i]);
-               uwrd_lo = (unsigned int)(tmp & 0xffffffff);
-               uwrd_hi = (unsigned int)(tmp >> 0x20);
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
-       }
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-}
-
-static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
-                              unsigned char ae, unsigned int ctx_mask)
-{
-       unsigned int ctx;
-
-       ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx &= IGNORE_W1C_MASK;
-       ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
-       ctx |= (ctx_mask << CE_ENABLE_BITPOS);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
-}
-
-static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned char ae;
-       unsigned short reg;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
-                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
-                                            reg, 0);
-                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
-                                            reg, 0);
-               }
-       }
-}
-
-static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned char ae;
-       unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
-       int times = MAX_RETRY_TIMES;
-       unsigned int csr_val = 0;
-       unsigned int savctx = 0;
-       int ret = 0;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
-               csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
-               qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
-               csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-               csr_val &= IGNORE_W1C_MASK;
-               if (handle->chip_info->nn)
-                       csr_val |= CE_NN_MODE;
-
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
-               qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
-                                 (u64 *)inst);
-               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
-                                   handle->hal_handle->upc_mask &
-                                   INIT_PC_VALUE);
-               savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
-               qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
-               qat_hal_wr_indr_csr(handle, ae, ctx_mask,
-                                   CTX_SIG_EVENTS_INDIRECT, 0);
-               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
-               qat_hal_enable_ctx(handle, ae, ctx_mask);
-       }
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               /* wait for AE to finish */
-               do {
-                       ret = qat_hal_wait_cycles(handle, ae, 20, 1);
-               } while (ret && times--);
-
-               if (times < 0) {
-                       pr_err("QAT: clear GPR of AE %d failed", ae);
-                       return -EINVAL;
-               }
-               qat_hal_disable_ctx(handle, ae, ctx_mask);
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
-                                 savctx & ACS_ACNO);
-               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
-                                 INIT_CTX_ENABLE_VALUE);
-               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
-                                   handle->hal_handle->upc_mask &
-                                   INIT_PC_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
-               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
-               qat_hal_put_wakeup_event(handle, ae, ctx_mask,
-                                        INIT_WAKEUP_EVENTS_VALUE);
-               qat_hal_put_sig_event(handle, ae, ctx_mask,
-                                     INIT_SIG_EVENTS_VALUE);
-       }
-       return 0;
-}
-
-static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
-                            struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
-       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-       void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
-       unsigned int max_en_ae_id = 0;
-       struct adf_bar *sram_bar;
-       unsigned int csr_val = 0;
-       unsigned long ae_mask;
-       unsigned char ae = 0;
-       int ret = 0;
-
-       handle->pci_dev = pci_info->pci_dev;
-       switch (handle->pci_dev->device) {
-       case ADF_4XXX_PCI_DEVICE_ID:
-       case ADF_401XX_PCI_DEVICE_ID:
-       case ADF_402XX_PCI_DEVICE_ID:
-               handle->chip_info->mmp_sram_size = 0;
-               handle->chip_info->nn = false;
-               handle->chip_info->lm2lm3 = true;
-               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
-               handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
-               handle->chip_info->icp_rst_mask = 0x100015;
-               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
-               handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
-               handle->chip_info->wakeup_event_val = 0x80000000;
-               handle->chip_info->fw_auth = true;
-               handle->chip_info->css_3k = true;
-               handle->chip_info->tgroup_share_ustore = true;
-               handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
-               handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
-               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
-               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
-               handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
-               handle->chip_info->fcu_loaded_ae_pos = 0;
-
-               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
-               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
-               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
-               handle->hal_cap_ae_local_csr_addr_v =
-                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
-                       + LOCAL_TO_XFER_REG_OFFSET);
-               break;
-       case PCI_DEVICE_ID_INTEL_QAT_C62X:
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-               handle->chip_info->mmp_sram_size = 0;
-               handle->chip_info->nn = true;
-               handle->chip_info->lm2lm3 = false;
-               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
-               handle->chip_info->icp_rst_csr = ICP_RESET;
-               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
-                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
-               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
-               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
-               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
-               handle->chip_info->fw_auth = true;
-               handle->chip_info->css_3k = false;
-               handle->chip_info->tgroup_share_ustore = false;
-               handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
-               handle->chip_info->fcu_sts_csr = FCU_STATUS;
-               handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
-               handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
-               handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
-               handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
-               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
-               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
-               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
-               handle->hal_cap_ae_local_csr_addr_v =
-                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
-                       + LOCAL_TO_XFER_REG_OFFSET);
-               break;
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-               handle->chip_info->mmp_sram_size = 0x40000;
-               handle->chip_info->nn = true;
-               handle->chip_info->lm2lm3 = false;
-               handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
-               handle->chip_info->icp_rst_csr = ICP_RESET;
-               handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
-                                                 (hw_data->accel_mask << RST_CSR_QAT_LSB);
-               handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
-               handle->chip_info->misc_ctl_csr = MISC_CONTROL;
-               handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
-               handle->chip_info->fw_auth = false;
-               handle->chip_info->css_3k = false;
-               handle->chip_info->tgroup_share_ustore = false;
-               handle->chip_info->fcu_ctl_csr = 0;
-               handle->chip_info->fcu_sts_csr = 0;
-               handle->chip_info->fcu_dram_addr_hi = 0;
-               handle->chip_info->fcu_dram_addr_lo = 0;
-               handle->chip_info->fcu_loaded_ae_csr = 0;
-               handle->chip_info->fcu_loaded_ae_pos = 0;
-               handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
-               handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
-               handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
-               handle->hal_cap_ae_local_csr_addr_v =
-                       (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
-                       + LOCAL_TO_XFER_REG_OFFSET);
-               break;
-       default:
-               ret = -EINVAL;
-               goto out_err;
-       }
-
-       if (handle->chip_info->mmp_sram_size > 0) {
-               sram_bar =
-                       &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
-               handle->hal_sram_addr_v = sram_bar->virt_addr;
-       }
-       handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
-       handle->hal_handle->ae_mask = hw_data->ae_mask;
-       handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
-       handle->hal_handle->slice_mask = hw_data->accel_mask;
-       handle->cfg_ae_mask = ALL_AE_MASK;
-       /* create AE objects */
-       handle->hal_handle->upc_mask = 0x1ffff;
-       handle->hal_handle->max_ustore = 0x4000;
-
-       ae_mask = handle->hal_handle->ae_mask;
-       for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
-               handle->hal_handle->aes[ae].free_addr = 0;
-               handle->hal_handle->aes[ae].free_size =
-                   handle->hal_handle->max_ustore;
-               handle->hal_handle->aes[ae].ustore_size =
-                   handle->hal_handle->max_ustore;
-               handle->hal_handle->aes[ae].live_ctx_mask =
-                                               ICP_QAT_UCLO_AE_ALL_CTX;
-               max_en_ae_id = ae;
-       }
-       handle->hal_handle->ae_max_num = max_en_ae_id + 1;
-
-       /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
-               csr_val |= 0x1;
-               qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
-       }
-out_err:
-       return ret;
-}
-
-int qat_hal_init(struct adf_accel_dev *accel_dev)
-{
-       struct icp_qat_fw_loader_handle *handle;
-       int ret = 0;
-
-       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-       if (!handle)
-               return -ENOMEM;
-
-       handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
-       if (!handle->hal_handle) {
-               ret = -ENOMEM;
-               goto out_hal_handle;
-       }
-
-       handle->chip_info = kzalloc(sizeof(*handle->chip_info), GFP_KERNEL);
-       if (!handle->chip_info) {
-               ret = -ENOMEM;
-               goto out_chip_info;
-       }
-
-       ret = qat_hal_chip_init(handle, accel_dev);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
-               goto out_err;
-       }
-
-       /* take all AEs out of reset */
-       ret = qat_hal_clr_reset(handle);
-       if (ret) {
-               dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
-               goto out_err;
-       }
-
-       qat_hal_clear_xfer(handle);
-       if (!handle->chip_info->fw_auth) {
-               ret = qat_hal_clear_gpr(handle);
-               if (ret)
-                       goto out_err;
-       }
-
-       accel_dev->fw_loader->fw_loader = handle;
-       return 0;
-
-out_err:
-       kfree(handle->chip_info);
-out_chip_info:
-       kfree(handle->hal_handle);
-out_hal_handle:
-       kfree(handle);
-       return ret;
-}
-
-void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
-{
-       if (!handle)
-               return;
-       kfree(handle->chip_info);
-       kfree(handle->hal_handle);
-       kfree(handle);
-}
-
-int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       u32 wakeup_val = handle->chip_info->wakeup_event_val;
-       u32 fcu_ctl_csr, fcu_sts_csr;
-       unsigned int fcu_sts;
-       unsigned char ae;
-       u32 ae_ctr = 0;
-       int retry = 0;
-
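-       /* with FW authentication the AEs are started collectively through
-        * the FCU; otherwise each context is woken up and enabled by hand
-        */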
-       if (handle->chip_info->fw_auth) {
-               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-               ae_ctr = hweight32(ae_mask);
-               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
-               do {
-                       msleep(FW_AUTH_WAIT_PERIOD);
-                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-                       if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
-                               return ae_ctr;
-               } while (retry++ < FW_AUTH_MAX_RETRY);
-               pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
-               return 0;
-       } else {
-               for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-                       qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
-                       qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
-                       ae_ctr++;
-               }
-               return ae_ctr;
-       }
-}
-
-void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
-                 unsigned int ctx_mask)
-{
-       if (!handle->chip_info->fw_auth)
-               qat_hal_disable_ctx(handle, ae, ctx_mask);
-}
-
-void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned int ctx_mask, unsigned int upc)
-{
-       qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
-                           handle->hal_handle->upc_mask & upc);
-}
-
-static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
-                              unsigned char ae, unsigned int uaddr,
-                              unsigned int words_num, u64 *uword)
-{
-       unsigned int i, uwrd_lo, uwrd_hi;
-       unsigned int ustore_addr, misc_control;
-
-       misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
-       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
-                         misc_control & 0xfffffffb);
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr |= UA_ECS;
-       for (i = 0; i < words_num; i++) {
-               qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-               uaddr++;
-               uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
-               uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
-               uword[i] = uwrd_hi;
-               uword[i] = (uword[i] << 0x20) | uwrd_lo;
-       }
-       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-}
-
-void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
-                    unsigned char ae, unsigned int uaddr,
-                    unsigned int words_num, unsigned int *data)
-{
-       unsigned int i, ustore_addr;
-
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr |= UA_ECS;
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       for (i = 0; i < words_num; i++) {
-               unsigned int uwrd_lo, uwrd_hi, tmp;
-
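-               /* pack the value into the uword layout; bits 8 and 9 of the
-                * upper word carry the parity of each 16-bit half
-                */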
-               uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
-                         ((data[i] & 0xff00) << 2) |
-                         (0x3 << 8) | (data[i] & 0xff);
-               uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
-               uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
-               tmp = ((data[i] >> 0x10) & 0xffff);
-               uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
-               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
-       }
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-}
-
-#define MAX_EXEC_INST 100
-static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae, unsigned char ctx,
-                                  u64 *micro_inst, unsigned int inst_num,
-                                  int code_off, unsigned int max_cycle,
-                                  unsigned int *endpc)
-{
-       unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
-       unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
-       unsigned int ind_t_index = 0, ind_t_index_byte = 0;
-       unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
-       unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
-       u64 savuwords[MAX_EXEC_INST];
-       unsigned int ind_cnt_sig;
-       unsigned int ind_sig, act_sig;
-       unsigned int csr_val = 0, newcsr_val;
-       unsigned int savctx;
-       unsigned int savcc, wakeup_events, savpc;
-       unsigned int ctxarb_ctl, ctx_enables;
-
-       if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
-               pr_err("QAT: invalid instruction num %d\n", inst_num);
-               return -EINVAL;
-       }
-       /* save current context */
-       ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
-       ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
-       ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                               INDIRECT_LM_ADDR_0_BYTE_INDEX);
-       ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                               INDIRECT_LM_ADDR_1_BYTE_INDEX);
-       if (handle->chip_info->lm2lm3) {
-               ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                  LM_ADDR_2_INDIRECT);
-               ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                  LM_ADDR_3_INDIRECT);
-               ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                       INDIRECT_LM_ADDR_2_BYTE_INDEX);
-               ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                       INDIRECT_LM_ADDR_3_BYTE_INDEX);
-               ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                 INDIRECT_T_INDEX);
-               ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                      INDIRECT_T_INDEX_BYTE_INDEX);
-       }
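-       /* the original ustore contents can only be saved, and later
-        * restored, for programs that fit in the save buffer
-        */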
-       if (inst_num <= MAX_EXEC_INST)
-               qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
-       qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
-       savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
-       savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx_enables &= IGNORE_W1C_MASK;
-       savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
-       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-       ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
-       ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                         FUTURE_COUNT_SIGNAL_INDIRECT);
-       ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                     CTX_SIG_EVENTS_INDIRECT);
-       act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
-       /* execute micro codes */
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-       qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
-       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
-       if (code_off)
-               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
-       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
-       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
-       qat_hal_enable_ctx(handle, ae, (1 << ctx));
-       /* wait for micro codes to finish */
-       if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
-               return -EFAULT;
-       if (endpc) {
-               unsigned int ctx_status;
-
-               ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
-                                                CTX_STS_INDIRECT);
-               *endpc = ctx_status & handle->hal_handle->upc_mask;
-       }
-       /* restore the saved context */
-       qat_hal_disable_ctx(handle, ae, (1 << ctx));
-       if (inst_num <= MAX_EXEC_INST)
-               qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
-       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
-                           handle->hal_handle->upc_mask & savpc);
-       csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
-       newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
-       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
-       qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
-       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           LM_ADDR_0_INDIRECT, ind_lm_addr0);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           LM_ADDR_1_INDIRECT, ind_lm_addr1);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
-       if (handle->chip_info->lm2lm3) {
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
-                                   ind_lm_addr2);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
-                                   ind_lm_addr3);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_LM_ADDR_2_BYTE_INDEX,
-                                   ind_lm_addr_byte2);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_LM_ADDR_3_BYTE_INDEX,
-                                   ind_lm_addr_byte3);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_T_INDEX, ind_t_index);
-               qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
-                                   INDIRECT_T_INDEX_BYTE_INDEX,
-                                   ind_t_index_byte);
-       }
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
-       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
-                           CTX_SIG_EVENTS_INDIRECT, ind_sig);
-       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-
-       return 0;
-}
-
-static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
-                             unsigned char ae, unsigned char ctx,
-                             enum icp_qat_uof_regtype reg_type,
-                             unsigned short reg_num, unsigned int *data)
-{
-       unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
-       unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
-       unsigned short reg_addr;
-       int status = 0;
-       u64 insts, savuword;
-
-       reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
-       if (reg_addr == BAD_REGADDR) {
-               pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
-               return -EINVAL;
-       }
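-       /* build one ALU instruction that routes the target register through
-        * the ALU so its value can be read back from the ALU_OUT CSR
-        */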
-       switch (reg_type) {
-       case ICP_GPA_REL:
-               insts = 0xA070000000ull | (reg_addr & 0x3ff);
-               break;
-       default:
-               insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
-               break;
-       }
-       savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
-       ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx_enables &= IGNORE_W1C_MASK;
-       if (ctx != (savctx & ACS_ACNO))
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
-                                 ctx & ACS_ACNO);
-       qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-       ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
-       uaddr = UA_ECS;
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       insts = qat_hal_set_uword_ecc(insts);
-       uwrd_lo = (unsigned int)(insts & 0xffffffff);
-       uwrd_hi = (unsigned int)(insts >> 0x20);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
-       /* delay for at least 8 cycles */
-       qat_hal_wait_cycles(handle, ae, 0x8, 0);
-       /*
-        * read the ALU output
-        * the instruction should have executed before the ECS bit is
-        * cleared when the saved ustore state is restored below
-        */
-       *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
-       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
-       qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
-       if (ctx != (savctx & ACS_ACNO))
-               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
-                                 savctx & ACS_ACNO);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-
-       return status;
-}
-
-static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
-                             unsigned char ae, unsigned char ctx,
-                             enum icp_qat_uof_regtype reg_type,
-                             unsigned short reg_num, unsigned int data)
-{
-       unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
-       u64 insts[] = {
-               0x0F440000000ull,
-               0x0F040000000ull,
-               0x0F0000C0300ull,
-               0x0E000010000ull
-       };
-       const int num_inst = ARRAY_SIZE(insts), code_off = 1;
-       const int imm_w1 = 0, imm_w0 = 1;
-
-       dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
-       if (dest_addr == BAD_REGADDR) {
-               pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
-               return -EINVAL;
-       }
-
-       data16lo = 0xffff & data;
-       data16hi = 0xffff & (data >> 0x10);
-       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
-                                         (0xff & data16hi));
-       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
-                                          (0xff & data16lo));
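-       /* patch the two immediate-load templates so each writes 16 bits of
-        * the value into the destination register
-        */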
-       switch (reg_type) {
-       case ICP_GPA_REL:
-               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
-                   ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
-               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
-                   ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
-               break;
-       default:
-               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
-                   ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
-
-               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
-                   ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
-               break;
-       }
-
-       return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
-                                      code_off, num_inst * 0x5, NULL);
-}
-
-int qat_hal_get_ins_num(void)
-{
-       return ARRAY_SIZE(inst_4b);
-}
-
-static int qat_hal_concat_micro_code(u64 *micro_inst,
-                                    unsigned int inst_num, unsigned int size,
-                                    unsigned int addr, unsigned int *value)
-{
-       int i;
-       unsigned int cur_value;
-       const u64 *inst_arr;
-       int fixup_offset;
-       int usize = 0;
-       int orig_num;
-
-       orig_num = inst_num;
-       cur_value = value[0];
-       inst_arr = inst_4b;
-       usize = ARRAY_SIZE(inst_4b);
-       fixup_offset = inst_num;
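-       /* append the canned init sequence, then patch the LM address and
-        * both 16-bit halves of the value into its immediate slots
-        */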
-       for (i = 0; i < usize; i++)
-               micro_inst[inst_num++] = inst_arr[i];
-       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
-       fixup_offset++;
-       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
-       fixup_offset++;
-       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
-       fixup_offset++;
-       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
-
-       return inst_num - orig_num;
-}
-
-static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
-                                     unsigned char ae, unsigned char ctx,
-                                     int *pfirst_exec, u64 *micro_inst,
-                                     unsigned int inst_num)
-{
-       int stat = 0;
-       unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
-       unsigned int gprb0 = 0, gprb1 = 0;
-
-       if (*pfirst_exec) {
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
-               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
-               *pfirst_exec = 0;
-       }
-       stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
-                                      inst_num * 0x5, NULL);
-       if (stat != 0)
-               return -EFAULT;
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
-
-       return 0;
-}
-
-int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                       unsigned char ae,
-                       struct icp_qat_uof_batch_init *lm_init_header)
-{
-       struct icp_qat_uof_batch_init *plm_init;
-       u64 *micro_inst_arry;
-       int micro_inst_num;
-       int alloc_inst_size;
-       int first_exec = 1;
-       int stat = 0;
-
-       plm_init = lm_init_header->next;
-       alloc_inst_size = lm_init_header->size;
-       if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
-               alloc_inst_size = handle->hal_handle->max_ustore;
-       micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
-                                       GFP_KERNEL);
-       if (!micro_inst_arry)
-               return -ENOMEM;
-       micro_inst_num = 0;
-       while (plm_init) {
-               unsigned int addr, *value, size;
-
-               ae = plm_init->ae;
-               addr = plm_init->addr;
-               value = plm_init->value;
-               size = plm_init->size;
-               micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
-                                                           micro_inst_num,
-                                                           size, addr, value);
-               plm_init = plm_init->next;
-       }
-       /* exec micro codes */
-       if (micro_inst_arry && micro_inst_num > 0) {
-               micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
-               stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
-                                                 micro_inst_arry,
-                                                 micro_inst_num);
-       }
-       kfree(micro_inst_arry);
-       return stat;
-}
-
-static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae, unsigned char ctx,
-                                  enum icp_qat_uof_regtype reg_type,
-                                  unsigned short reg_num, unsigned int val)
-{
-       int status = 0;
-       unsigned int reg_addr;
-       unsigned int ctx_enables;
-       unsigned short mask;
-       unsigned short dr_offset = 0x10;
-
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       if (CE_INUSE_CONTEXTS & ctx_enables) {
-               if (ctx & 0x1) {
-                       pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
-                       return -EINVAL;
-               }
-               mask = 0x1f;
-               dr_offset = 0x20;
-       } else {
-               mask = 0x0f;
-       }
-       if (reg_num & ~mask)
-               return -EINVAL;
-       reg_addr = reg_num + (ctx << 0x5);
-       switch (reg_type) {
-       case ICP_SR_RD_REL:
-       case ICP_SR_REL:
-               SET_AE_XFER(handle, ae, reg_addr, val);
-               break;
-       case ICP_DR_RD_REL:
-       case ICP_DR_REL:
-               SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
-               break;
-       default:
-               status = -EINVAL;
-               break;
-       }
-       return status;
-}
-
-static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae, unsigned char ctx,
-                                  enum icp_qat_uof_regtype reg_type,
-                                  unsigned short reg_num, unsigned int data)
-{
-       unsigned int gprval, ctx_enables;
-       unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
-           data16low;
-       unsigned short reg_mask;
-       int status = 0;
-       u64 micro_inst[] = {
-               0x0F440000000ull,
-               0x0F040000000ull,
-               0x0A000000000ull,
-               0x0F0000C0300ull,
-               0x0E000010000ull
-       };
-       const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
-       const unsigned short gprnum = 0, dly = num_inst * 0x5;
-
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       if (CE_INUSE_CONTEXTS & ctx_enables) {
-               if (ctx & 0x1) {
-                       pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
-                       return -EINVAL;
-               }
-               reg_mask = (unsigned short)~0x1f;
-       } else {
-               reg_mask = (unsigned short)~0xf;
-       }
-       if (reg_num & reg_mask)
-               return -EINVAL;
-       xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
-       if (xfr_addr == BAD_REGADDR) {
-               pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
-               return -EINVAL;
-       }
-       status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
-       if (status) {
-               pr_err("QAT: failed to read register");
-               return status;
-       }
-       gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
-       data16low = 0xffff & data;
-       data16hi = 0xffff & (data >> 0x10);
-       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
-                                         (unsigned short)(0xff & data16hi));
-       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
-                                          (unsigned short)(0xff & data16low));
-       micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
-           ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
-       micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
-           ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
-       micro_inst[0x2] = micro_inst[0x2] |
-           ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
-       status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
-                                        code_off, dly, NULL);
-       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
-       return status;
-}
-
-static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
-                             unsigned char ae, unsigned char ctx,
-                             unsigned short nn, unsigned int val)
-{
-       unsigned int ctx_enables;
-       int stat = 0;
-
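-       /* neighbour registers are only writable while CE_NN_MODE is set */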
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       ctx_enables &= IGNORE_W1C_MASK;
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
-
-       stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
-       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
-       return stat;
-}
-
-static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
-                                     *handle, unsigned char ae,
-                                     unsigned short absreg_num,
-                                     unsigned short *relreg,
-                                     unsigned char *ctx)
-{
-       unsigned int ctx_enables;
-
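-       /* an absolute register number encodes both the owning context and
-        * the context-relative register index
-        */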
-       ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
-       if (ctx_enables & CE_INUSE_CONTEXTS) {
-               /* 4-ctx mode */
-               *relreg = absreg_num & 0x1F;
-               *ctx = (absreg_num >> 0x4) & 0x6;
-       } else {
-               /* 8-ctx mode */
-               *relreg = absreg_num & 0x0F;
-               *ctx = (absreg_num >> 0x4) & 0x7;
-       }
-       return 0;
-}
-
-int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
-                    unsigned char ae, unsigned long ctx_mask,
-                    enum icp_qat_uof_regtype reg_type,
-                    unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned short reg;
-       unsigned char ctx = 0;
-       enum icp_qat_uof_regtype type;
-
-       if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
-               return -EINVAL;
-
-       do {
-               if (ctx_mask == 0) {
-                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
-                                                  &ctx);
-                       type = reg_type - 1;
-               } else {
-                       reg = reg_num;
-                       type = reg_type;
-                       if (!test_bit(ctx, &ctx_mask))
-                               continue;
-               }
-               stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
-               if (stat) {
-                       pr_err("QAT: write gpr fail\n");
-                       return -EINVAL;
-               }
-       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
-
-       return 0;
-}
-
-int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned short reg;
-       unsigned char ctx = 0;
-       enum icp_qat_uof_regtype type;
-
-       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
-               return -EINVAL;
-
-       do {
-               if (ctx_mask == 0) {
-                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
-                                                  &ctx);
-                       type = reg_type - 3;
-               } else {
-                       reg = reg_num;
-                       type = reg_type;
-                       if (!test_bit(ctx, &ctx_mask))
-                               continue;
-               }
-               stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
-                                              regdata);
-               if (stat) {
-                       pr_err("QAT: failed to write wr xfer register\n");
-                       return -EINVAL;
-               }
-       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
-
-       return 0;
-}
-
-int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
-                        unsigned char ae, unsigned long ctx_mask,
-                        enum icp_qat_uof_regtype reg_type,
-                        unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned short reg;
-       unsigned char ctx = 0;
-       enum icp_qat_uof_regtype type;
-
-       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
-               return -EINVAL;
-
-       do {
-               if (ctx_mask == 0) {
-                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
-                                                  &ctx);
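-                       /* convert the ABS xfer type to its REL counterpart */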
-                       type = reg_type - 3;
-               } else {
-                       reg = reg_num;
-                       type = reg_type;
-                       if (!test_bit(ctx, &ctx_mask))
-                               continue;
-               }
-               stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
-                                              regdata);
-               if (stat) {
-                       pr_err("QAT: failed to write rd xfer register\n");
-                       return -EINVAL;
-               }
-       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
-
-       return 0;
-}
-
-int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
-                   unsigned char ae, unsigned long ctx_mask,
-                   unsigned short reg_num, unsigned int regdata)
-{
-       int stat = 0;
-       unsigned char ctx;
-
-       if (!handle->chip_info->nn) {
-               dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
-                       handle->pci_dev->device);
-               return -EINVAL;
-       }
-
-       if (ctx_mask == 0)
-               return -EINVAL;
-
-       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
-               if (!test_bit(ctx, &ctx_mask))
-                       continue;
-               stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
-               if (stat) {
-                       pr_err("QAT: failed to write neighbour register\n");
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
deleted file mode 100644 (file)
index 3ba8ca2..0000000
+++ /dev/null
@@ -1,2133 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci_ids.h>
-#include "adf_accel_devices.h"
-#include "adf_common_drv.h"
-#include "icp_qat_uclo.h"
-#include "icp_qat_hal.h"
-#include "icp_qat_fw_loader_handle.h"
-
-#define UWORD_CPYBUF_SIZE 1024
-#define INVLD_UWORD 0xffffffffffull
-#define PID_MINOR_REV 0xf
-#define PID_MAJOR_REV (0xf << 4)
-
-static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
-                                unsigned int ae, unsigned int image_num)
-{
-       struct icp_qat_uclo_aedata *ae_data;
-       struct icp_qat_uclo_encapme *encap_image;
-       struct icp_qat_uclo_page *page = NULL;
-       struct icp_qat_uclo_aeslice *ae_slice = NULL;
-
-       ae_data = &obj_handle->ae_data[ae];
-       encap_image = &obj_handle->ae_uimage[image_num];
-       ae_slice = &ae_data->ae_slices[ae_data->slice_num];
-       ae_slice->encap_image = encap_image;
-
-       if (encap_image->img_ptr) {
-               ae_slice->ctx_mask_assigned =
-                                       encap_image->img_ptr->ctx_assigned;
-               ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
-       } else {
-               ae_slice->ctx_mask_assigned = 0;
-       }
-       ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
-       if (!ae_slice->region)
-               return -ENOMEM;
-       ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
-       if (!ae_slice->page)
-               goto out_err;
-       page = ae_slice->page;
-       page->encap_page = encap_image->page;
-       ae_slice->page->region = ae_slice->region;
-       ae_data->slice_num++;
-       return 0;
-out_err:
-       kfree(ae_slice->region);
-       ae_slice->region = NULL;
-       return -ENOMEM;
-}
-
-static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
-{
-       unsigned int i;
-
-       if (!ae_data) {
-               pr_err("QAT: bad argument, ae_data is NULL\n");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < ae_data->slice_num; i++) {
-               kfree(ae_data->ae_slices[i].region);
-               ae_data->ae_slices[i].region = NULL;
-               kfree(ae_data->ae_slices[i].page);
-               ae_data->ae_slices[i].page = NULL;
-       }
-       return 0;
-}
-
-static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
-                                unsigned int str_offset)
-{
-       if (!str_table->table_len || str_offset > str_table->table_len)
-               return NULL;
-       return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
-}
-
-static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
-{
-       int maj = hdr->maj_ver & 0xff;
-       int min = hdr->min_ver & 0xff;
-
-       if (hdr->file_id != ICP_QAT_UOF_FID) {
-               pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
-               return -EINVAL;
-       }
-       if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
-               pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
-                      maj, min);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
-{
-       int maj = suof_hdr->maj_ver & 0xff;
-       int min = suof_hdr->min_ver & 0xff;
-
-       if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
-               pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
-               return -EINVAL;
-       }
-       if (suof_hdr->fw_type != 0) {
-               pr_err("QAT: unsupported firmware type\n");
-               return -EINVAL;
-       }
-       if (suof_hdr->num_chunks <= 0x1) {
-               pr_err("QAT: invalid SUOF chunk count\n");
-               return -EINVAL;
-       }
-       if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
-               pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
-                      maj, min);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
-                                     unsigned int addr, unsigned int *val,
-                                     unsigned int num_in_bytes)
-{
-       unsigned int outval;
-       unsigned char *ptr = (unsigned char *)val;
-
-       while (num_in_bytes) {
-               memcpy(&outval, ptr, 4);
-               SRAM_WRITE(handle, addr, outval);
-               num_in_bytes -= 4;
-               ptr += 4;
-               addr += 4;
-       }
-}
-
-static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
-                                     unsigned char ae, unsigned int addr,
-                                     unsigned int *val,
-                                     unsigned int num_in_bytes)
-{
-       unsigned int outval;
-       unsigned char *ptr = (unsigned char *)val;
-
-       addr >>= 0x2; /* convert to uword address */
-
-       while (num_in_bytes) {
-               memcpy(&outval, ptr, 4);
-               qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
-               num_in_bytes -= 4;
-               ptr += 4;
-       }
-}
-
-static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
-                                  unsigned char ae,
-                                  struct icp_qat_uof_batch_init
-                                  *umem_init_header)
-{
-       struct icp_qat_uof_batch_init *umem_init;
-
-       if (!umem_init_header)
-               return;
-       umem_init = umem_init_header->next;
-       while (umem_init) {
-               unsigned int addr, *value, size;
-
-               ae = umem_init->ae;
-               addr = umem_init->addr;
-               value = umem_init->value;
-               size = umem_init->size;
-               qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
-               umem_init = umem_init->next;
-       }
-}
-
-static void
-qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
-                                struct icp_qat_uof_batch_init **base)
-{
-       struct icp_qat_uof_batch_init *umem_init;
-
-       umem_init = *base;
-       while (umem_init) {
-               struct icp_qat_uof_batch_init *pre;
-
-               pre = umem_init;
-               umem_init = umem_init->next;
-               kfree(pre);
-       }
-       *base = NULL;
-}
-
-static int qat_uclo_parse_num(char *str, unsigned int *num)
-{
-       char buf[16] = {0};
-       unsigned long ae = 0;
-       int i;
-
-       strncpy(buf, str, 15);
-       for (i = 0; i < 16; i++) {
-               if (!isdigit(buf[i])) {
-                       buf[i] = '\0';
-                       break;
-               }
-       }
-       if (kstrtoul(buf, 10, &ae))
-               return -EFAULT;
-
-       *num = (unsigned int)ae;
-       return 0;
-}
-
-static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
-                                    struct icp_qat_uof_initmem *init_mem,
-                                    unsigned int size_range, unsigned int *ae)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       char *str;
-
-       if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
-               pr_err("QAT: initmem is out of range\n");
-               return -EINVAL;
-       }
-       if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
-               pr_err("QAT: Memory scope for init_mem error\n");
-               return -EINVAL;
-       }
-       str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
-       if (!str) {
-               pr_err("QAT: AE name assigned in UOF init table is NULL\n");
-               return -EINVAL;
-       }
-       if (qat_uclo_parse_num(str, ae)) {
-               pr_err("QAT: Parse num for AE number failed\n");
-               return -EINVAL;
-       }
-       if (*ae >= ICP_QAT_UCLO_MAX_AE) {
-               pr_err("QAT: ae %u out of range\n", *ae);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
-                                          *handle, struct icp_qat_uof_initmem
-                                          *init_mem, unsigned int ae,
-                                          struct icp_qat_uof_batch_init
-                                          **init_tab_base)
-{
-       struct icp_qat_uof_batch_init *init_header, *tail;
-       struct icp_qat_uof_batch_init *mem_init, *tail_old;
-       struct icp_qat_uof_memvar_attr *mem_val_attr;
-       unsigned int i, flag = 0;
-
-       mem_val_attr =
-               (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
-               sizeof(struct icp_qat_uof_initmem));
-
-       init_header = *init_tab_base;
-       if (!init_header) {
-               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
-               if (!init_header)
-                       return -ENOMEM;
-               init_header->size = 1;
-               *init_tab_base = init_header;
-               flag = 1;
-       }
-       tail_old = init_header;
-       while (tail_old->next)
-               tail_old = tail_old->next;
-       tail = tail_old;
-       for (i = 0; i < init_mem->val_attr_num; i++) {
-               mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
-               if (!mem_init)
-                       goto out_err;
-               mem_init->ae = ae;
-               mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
-               mem_init->value = &mem_val_attr->value;
-               mem_init->size = 4;
-               mem_init->next = NULL;
-               tail->next = mem_init;
-               tail = mem_init;
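-               /* each value adds qat_hal_get_ins_num() micro-instructions to the batch */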
-               init_header->size += qat_hal_get_ins_num();
-               mem_val_attr++;
-       }
-       return 0;
-out_err:
-       /* Do not free the list head unless we allocated it. */
-       tail_old = tail_old->next;
-       if (flag) {
-               kfree(*init_tab_base);
-               *init_tab_base = NULL;
-       }
-
-       while (tail_old) {
-               mem_init = tail_old->next;
-               kfree(tail_old);
-               tail_old = mem_init;
-       }
-       return -ENOMEM;
-}
-
-static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
-                                 struct icp_qat_uof_initmem *init_mem)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int ae;
-
-       if (qat_uclo_fetch_initmem_ae(handle, init_mem,
-                                     handle->chip_info->lm_size, &ae))
-               return -EINVAL;
-       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
-                                           &obj_handle->lm_init_tab[ae]))
-               return -EINVAL;
-       return 0;
-}
-
-static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
-                                 struct icp_qat_uof_initmem *init_mem)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int ae, ustore_size, uaddr, i;
-       struct icp_qat_uclo_aedata *aed;
-
-       ustore_size = obj_handle->ustore_phy_size;
-       if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
-               return -EINVAL;
-       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
-                                           &obj_handle->umem_init_tab[ae]))
-               return -EINVAL;
-       /* set the highest ustore address referenced */
-       uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
-       aed = &obj_handle->ae_data[ae];
-       for (i = 0; i < aed->slice_num; i++) {
-               if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
-                       aed->ae_slices[i].encap_image->uwords_num = uaddr;
-       }
-       return 0;
-}
-
-static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
-                                  struct icp_qat_uof_initmem *init_mem)
-{
-       switch (init_mem->region) {
-       case ICP_QAT_UOF_LMEM_REGION:
-               if (qat_uclo_init_lmem_seg(handle, init_mem))
-                       return -EINVAL;
-               break;
-       case ICP_QAT_UOF_UMEM_REGION:
-               if (qat_uclo_init_umem_seg(handle, init_mem))
-                       return -EINVAL;
-               break;
-       default:
-               pr_err("QAT: invalid initmem region type 0x%x\n",
-                      init_mem->region);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
-                               struct icp_qat_uclo_encapme *image)
-{
-       unsigned int i;
-       struct icp_qat_uclo_encap_page *page;
-       struct icp_qat_uof_image *uof_image;
-       unsigned char ae;
-       unsigned int ustore_size;
-       unsigned int patt_pos;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-       u64 *fill_data;
-
-       uof_image = image->img_ptr;
-       fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
-                           GFP_KERNEL);
-       if (!fill_data)
-               return -ENOMEM;
-       for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
-               memcpy(&fill_data[i], &uof_image->fill_pattern,
-                      sizeof(u64));
-       page = image->page;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               unsigned long ae_assigned = uof_image->ae_assigned;
-
-               if (!test_bit(ae, &ae_assigned))
-                       continue;
-
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
-               patt_pos = page->beg_addr_p + page->micro_words_num;
-
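-               /*
-                * Pre-fill the ustore around the code page with the image's
-                * fill pattern: the words below the page start and those from
-                * the end of the page (patt_pos) up to the effective ustore
-                * size.
-                */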
-               qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
-                                 page->beg_addr_p, &fill_data[0]);
-               qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
-                                 ustore_size - patt_pos + 1,
-                                 &fill_data[page->beg_addr_p]);
-       }
-       kfree(fill_data);
-       return 0;
-}
-
-static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
-{
-       int i, ae;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-
-       for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
-               if (initmem->num_in_bytes) {
-                       if (qat_uclo_init_ae_memory(handle, initmem))
-                               return -EINVAL;
-               }
-               initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
-                       (uintptr_t)initmem +
-                       sizeof(struct icp_qat_uof_initmem)) +
-                       (sizeof(struct icp_qat_uof_memvar_attr) *
-                       initmem->val_attr_num));
-       }
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (qat_hal_batch_wr_lm(handle, ae,
-                                       obj_handle->lm_init_tab[ae])) {
-                       pr_err("QAT: failed to batch-init lmem for AE %d\n", ae);
-                       return -EINVAL;
-               }
-               qat_uclo_cleanup_batch_init_list(handle,
-                                                &obj_handle->lm_init_tab[ae]);
-               qat_uclo_batch_wr_umem(handle, ae,
-                                      obj_handle->umem_init_tab[ae]);
-               qat_uclo_cleanup_batch_init_list(handle,
-                                                &obj_handle->umem_init_tab[ae]);
-       }
-       return 0;
-}
-
-static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
-                                char *chunk_id, void *cur)
-{
-       int i;
-       struct icp_qat_uof_chunkhdr *chunk_hdr =
-           (struct icp_qat_uof_chunkhdr *)
-           ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
-
-       for (i = 0; i < obj_hdr->num_chunks; i++) {
-               if ((cur < (void *)&chunk_hdr[i]) &&
-                   !strncmp(chunk_hdr[i].chunk_id, chunk_id,
-                            ICP_QAT_UOF_OBJID_LEN)) {
-                       return &chunk_hdr[i];
-               }
-       }
-       return NULL;
-}
-
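-/* one-byte step of the CRC-16/CCITT (polynomial 0x1021) chunk checksum */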
-static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
-{
-       int i;
-       unsigned int topbit = 1 << 0xF;
-       unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
-
-       reg ^= inbyte << 0x8;
-       for (i = 0; i < 0x8; i++) {
-               if (reg & topbit)
-                       reg = (reg << 1) ^ 0x1021;
-               else
-                       reg <<= 1;
-       }
-       return reg & 0xFFFF;
-}
-
-static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
-{
-       unsigned int chksum = 0;
-
-       if (ptr)
-               while (num--)
-                       chksum = qat_uclo_calc_checksum(chksum, *ptr++);
-       return chksum;
-}
-
-static struct icp_qat_uclo_objhdr *
-qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
-                  char *chunk_id)
-{
-       struct icp_qat_uof_filechunkhdr *file_chunk;
-       struct icp_qat_uclo_objhdr *obj_hdr;
-       char *chunk;
-       int i;
-
-       file_chunk = (struct icp_qat_uof_filechunkhdr *)
-               (buf + sizeof(struct icp_qat_uof_filehdr));
-       for (i = 0; i < file_hdr->num_chunks; i++) {
-               if (!strncmp(file_chunk->chunk_id, chunk_id,
-                            ICP_QAT_UOF_OBJID_LEN)) {
-                       chunk = buf + file_chunk->offset;
-                       if (file_chunk->checksum != qat_uclo_calc_str_checksum(
-                               chunk, file_chunk->size))
-                               break;
-                       obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
-                       if (!obj_hdr)
-                               break;
-                       obj_hdr->file_buff = chunk;
-                       obj_hdr->checksum = file_chunk->checksum;
-                       obj_hdr->size = file_chunk->size;
-                       return obj_hdr;
-               }
-               file_chunk++;
-       }
-       return NULL;
-}
-
-static int
-qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
-                           struct icp_qat_uof_image *image)
-{
-       struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
-       struct icp_qat_uof_objtable *neigh_reg_tab;
-       struct icp_qat_uof_code_page *code_page;
-
-       code_page = (struct icp_qat_uof_code_page *)
-                       ((char *)image + sizeof(struct icp_qat_uof_image));
-       uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
-                    code_page->uc_var_tab_offset);
-       imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
-                     code_page->imp_var_tab_offset);
-       imp_expr_tab = (struct icp_qat_uof_objtable *)
-                      (encap_uof_obj->beg_uof +
-                      code_page->imp_expr_tab_offset);
-       if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
-           imp_expr_tab->entry_num) {
-               pr_err("QAT: UOF must not contain imported variables\n");
-               return -EINVAL;
-       }
-       neigh_reg_tab = (struct icp_qat_uof_objtable *)
-                       (encap_uof_obj->beg_uof +
-                       code_page->neigh_reg_tab_offset);
-       if (neigh_reg_tab->entry_num) {
-               pr_err("QAT: UOF can't contain neighbor register table\n");
-               return -EINVAL;
-       }
-       if (image->numpages > 1) {
-               pr_err("QAT: UOF can't contain multiple pages\n");
-               return -EINVAL;
-       }
-       if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
-               pr_err("QAT: UOF can't use shared control store feature\n");
-               return -EFAULT;
-       }
-       if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
-               pr_err("QAT: UOF can't use reloadable feature\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
-                                    *encap_uof_obj,
-                                    struct icp_qat_uof_image *img,
-                                    struct icp_qat_uclo_encap_page *page)
-{
-       struct icp_qat_uof_code_page *code_page;
-       struct icp_qat_uof_code_area *code_area;
-       struct icp_qat_uof_objtable *uword_block_tab;
-       struct icp_qat_uof_uword_block *uwblock;
-       int i;
-
-       code_page = (struct icp_qat_uof_code_page *)
-                       ((char *)img + sizeof(struct icp_qat_uof_image));
-       page->def_page = code_page->def_page;
-       page->page_region = code_page->page_region;
-       page->beg_addr_v = code_page->beg_addr_v;
-       page->beg_addr_p = code_page->beg_addr_p;
-       code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
-                                               code_page->code_area_offset);
-       page->micro_words_num = code_area->micro_words_num;
-       uword_block_tab = (struct icp_qat_uof_objtable *)
-                         (encap_uof_obj->beg_uof +
-                         code_area->uword_block_tab);
-       page->uwblock_num = uword_block_tab->entry_num;
-       uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
-                       sizeof(struct icp_qat_uof_objtable));
-       page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
-       for (i = 0; i < uword_block_tab->entry_num; i++)
-               page->uwblock[i].micro_words =
-               (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
-}
-
-static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
-                              struct icp_qat_uclo_encapme *ae_uimage,
-                              int max_image)
-{
-       int i, j;
-       struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
-       struct icp_qat_uof_image *image;
-       struct icp_qat_uof_objtable *ae_regtab;
-       struct icp_qat_uof_objtable *init_reg_sym_tab;
-       struct icp_qat_uof_objtable *sbreak_tab;
-       struct icp_qat_uof_encap_obj *encap_uof_obj =
-                                       &obj_handle->encap_uof_obj;
-
-       for (j = 0; j < max_image; j++) {
-               chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
-                                               ICP_QAT_UOF_IMAG, chunk_hdr);
-               if (!chunk_hdr)
-                       break;
-               image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
-                                                    chunk_hdr->offset);
-               ae_regtab = (struct icp_qat_uof_objtable *)
-                          (image->reg_tab_offset +
-                          obj_handle->obj_hdr->file_buff);
-               ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
-               ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
-                       (((char *)ae_regtab) +
-                       sizeof(struct icp_qat_uof_objtable));
-               init_reg_sym_tab = (struct icp_qat_uof_objtable *)
-                                  (image->init_reg_sym_tab +
-                                  obj_handle->obj_hdr->file_buff);
-               ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
-               ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
-                       (((char *)init_reg_sym_tab) +
-                       sizeof(struct icp_qat_uof_objtable));
-               sbreak_tab = (struct icp_qat_uof_objtable *)
-                       (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
-               ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
-               ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
-                                     (((char *)sbreak_tab) +
-                                     sizeof(struct icp_qat_uof_objtable));
-               ae_uimage[j].img_ptr = image;
-               if (qat_uclo_check_image_compat(encap_uof_obj, image))
-                       goto out_err;
-               ae_uimage[j].page =
-                       kzalloc(sizeof(struct icp_qat_uclo_encap_page),
-                               GFP_KERNEL);
-               if (!ae_uimage[j].page)
-                       goto out_err;
-               qat_uclo_map_image_page(encap_uof_obj, image,
-                                       ae_uimage[j].page);
-       }
-       return j;
-out_err:
-       for (i = 0; i < j; i++)
-               kfree(ae_uimage[i].page);
-       return 0;
-}
-
-static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
-{
-       int i, ae;
-       int mflag = 0;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-
-       for_each_set_bit(ae, &ae_mask, max_ae) {
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               for (i = 0; i < obj_handle->uimage_num; i++) {
-                       unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;
-
-                       if (!test_bit(ae, &ae_assigned))
-                               continue;
-                       mflag = 1;
-                       if (qat_uclo_init_ae_data(obj_handle, ae, i))
-                               return -EINVAL;
-               }
-       }
-       if (!mflag) {
-               pr_err("QAT: no UOF image mapped to an enabled AE\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static struct icp_qat_uof_strtable *
-qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
-                      char *tab_name, struct icp_qat_uof_strtable *str_table)
-{
-       struct icp_qat_uof_chunkhdr *chunk_hdr;
-
-       chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
-                                       obj_hdr->file_buff, tab_name, NULL);
-       if (chunk_hdr) {
-               int hdr_size;
-
-               memcpy(&str_table->table_len, obj_hdr->file_buff +
-                      chunk_hdr->offset, sizeof(str_table->table_len));
-               hdr_size = (char *)&str_table->strings - (char *)str_table;
-               str_table->strings = (uintptr_t)obj_hdr->file_buff +
-                                       chunk_hdr->offset + hdr_size;
-               return str_table;
-       }
-       return NULL;
-}
-
-static void
-qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
-                          struct icp_qat_uclo_init_mem_table *init_mem_tab)
-{
-       struct icp_qat_uof_chunkhdr *chunk_hdr;
-
-       chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
-                                       ICP_QAT_UOF_IMEM, NULL);
-       if (chunk_hdr) {
-               memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
-                       chunk_hdr->offset, sizeof(unsigned int));
-               init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
-               (encap_uof_obj->beg_uof + chunk_hdr->offset +
-               sizeof(unsigned int));
-       }
-}
-
-static unsigned int
-qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
-{
-       switch (handle->pci_dev->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-               return ICP_QAT_AC_895XCC_DEV_TYPE;
-       case PCI_DEVICE_ID_INTEL_QAT_C62X:
-               return ICP_QAT_AC_C62X_DEV_TYPE;
-       case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
-               return ICP_QAT_AC_C3XXX_DEV_TYPE;
-       case ADF_4XXX_PCI_DEVICE_ID:
-       case ADF_401XX_PCI_DEVICE_ID:
-       case ADF_402XX_PCI_DEVICE_ID:
-               return ICP_QAT_AC_4XXX_A_DEV_TYPE;
-       default:
-               pr_err("QAT: unsupported device 0x%x\n",
-                      handle->pci_dev->device);
-               return 0;
-       }
-}
-
-static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
-{
-       unsigned int maj_ver, prod_type = obj_handle->prod_type;
-
-       if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
-               pr_err("QAT: UOF type 0x%x doesn't match platform 0x%x\n",
-                      obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
-                      prod_type);
-               return -EINVAL;
-       }
-       maj_ver = obj_handle->prod_rev & 0xff;
-       if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
-           obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
-               pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
-                            unsigned char ae, unsigned char ctx_mask,
-                            enum icp_qat_uof_regtype reg_type,
-                            unsigned short reg_addr, unsigned int value)
-{
-       switch (reg_type) {
-       case ICP_GPA_ABS:
-       case ICP_GPB_ABS:
-               ctx_mask = 0;
-               fallthrough;
-       case ICP_GPA_REL:
-       case ICP_GPB_REL:
-               return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
-                                       reg_addr, value);
-       case ICP_SR_ABS:
-       case ICP_DR_ABS:
-       case ICP_SR_RD_ABS:
-       case ICP_DR_RD_ABS:
-               ctx_mask = 0;
-               fallthrough;
-       case ICP_SR_REL:
-       case ICP_DR_REL:
-       case ICP_SR_RD_REL:
-       case ICP_DR_RD_REL:
-               return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
-                                           reg_addr, value);
-       case ICP_SR_WR_ABS:
-       case ICP_DR_WR_ABS:
-               ctx_mask = 0;
-               fallthrough;
-       case ICP_SR_WR_REL:
-       case ICP_DR_WR_REL:
-               return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
-                                           reg_addr, value);
-       case ICP_NEIGH_REL:
-               return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
-       default:
-               pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
-                                unsigned int ae,
-                                struct icp_qat_uclo_encapme *encap_ae)
-{
-       unsigned int i;
-       unsigned char ctx_mask;
-       struct icp_qat_uof_init_regsym *init_regsym;
-
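-       /* eight-context images touch all ctxs, four-context ones only even ctxs */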
-       if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
-           ICP_QAT_UCLO_MAX_CTX)
-               ctx_mask = 0xff;
-       else
-               ctx_mask = 0x55;
-
-       for (i = 0; i < encap_ae->init_regsym_num; i++) {
-               unsigned int exp_res;
-
-               init_regsym = &encap_ae->init_regsym[i];
-               exp_res = init_regsym->value;
-               switch (init_regsym->init_type) {
-               case ICP_QAT_UOF_INIT_REG:
-                       qat_uclo_init_reg(handle, ae, ctx_mask,
-                                         (enum icp_qat_uof_regtype)
-                                         init_regsym->reg_type,
-                                         (unsigned short)init_regsym->reg_addr,
-                                         exp_res);
-                       break;
-               case ICP_QAT_UOF_INIT_REG_CTX:
-                       /* check if ctx is appropriate for the ctxMode */
-                       if (!((1 << init_regsym->ctx) & ctx_mask)) {
-                               pr_err("QAT: invalid ctx num = 0x%x\n",
-                                      init_regsym->ctx);
-                               return -EINVAL;
-                       }
-                       qat_uclo_init_reg(handle, ae,
-                                         (unsigned char)
-                                         (1 << init_regsym->ctx),
-                                         (enum icp_qat_uof_regtype)
-                                         init_regsym->reg_type,
-                                         (unsigned short)init_regsym->reg_addr,
-                                         exp_res);
-                       break;
-               case ICP_QAT_UOF_INIT_EXPR:
-                       pr_err("QAT: INIT_EXPR feature not supported\n");
-                       return -EINVAL;
-               case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
-                       pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
-                       return -EINVAL;
-               default:
-                       break;
-               }
-       }
-       return 0;
-}
-
-static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       struct icp_qat_uclo_aedata *aed;
-       unsigned int s, ae;
-
-       if (obj_handle->global_inited)
-               return 0;
-       if (obj_handle->init_mem_tab.entry_num) {
-               if (qat_uclo_init_memory(handle)) {
-                       pr_err("QAT: initialize memory failed\n");
-                       return -EINVAL;
-               }
-       }
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               aed = &obj_handle->ae_data[ae];
-               for (s = 0; s < aed->slice_num; s++) {
-                       if (!aed->ae_slices[s].encap_image)
-                               continue;
-                       if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
-                               return -EINVAL;
-               }
-       }
-       obj_handle->global_inited = 1;
-       return 0;
-}
-
-static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
-                            struct icp_qat_uclo_objhandle *obj_handle,
-                            unsigned char ae,
-                            struct icp_qat_uof_image *uof_image)
-{
-       unsigned char mode;
-       int ret;
-
-       mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
-       ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
-       if (ret) {
-               pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
-               return ret;
-       }
-       if (handle->chip_info->nn) {
-               mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
-               ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
-               if (ret) {
-                       pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
-                       return ret;
-               }
-       }
-       mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
-       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
-       if (ret) {
-               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
-               return ret;
-       }
-       mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
-       ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
-       if (ret) {
-               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
-               return ret;
-       }
-       if (handle->chip_info->lm2lm3) {
-               mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
-               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
-               if (ret) {
-                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
-                       return ret;
-               }
-               mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
-               ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
-               if (ret) {
-                       pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
-                       return ret;
-               }
-               mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
-               qat_hal_set_ae_tindex_mode(handle, ae, mode);
-       }
-       return 0;
-}
-
-static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uof_image *uof_image;
-       struct icp_qat_uclo_aedata *ae_data;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-       unsigned char ae, s;
-       int error;
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               ae_data = &obj_handle->ae_data[ae];
-               for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
-                                     ICP_QAT_UCLO_MAX_CTX); s++) {
-                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
-                               continue;
-                       uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
-                       error = qat_hal_set_modes(handle, obj_handle, ae,
-                                                 uof_image);
-                       if (error)
-                               return error;
-               }
-       }
-       return 0;
-}
-
-static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       struct icp_qat_uclo_encapme *image;
-       int a;
-
-       for (a = 0; a < obj_handle->uimage_num; a++) {
-               image = &obj_handle->ae_uimage[a];
-               image->uwords_num = image->page->beg_addr_p +
-                                       image->page->micro_words_num;
-       }
-}
-
-static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int ae;
-
-       obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
-       obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
-                                            obj_handle->obj_hdr->file_buff;
-       obj_handle->uword_in_bytes = 6;
-       obj_handle->prod_type = qat_uclo_get_dev_type(handle);
-       obj_handle->prod_rev = PID_MAJOR_REV |
-                       (PID_MINOR_REV & handle->hal_handle->revision_id);
-       if (qat_uclo_check_uof_compat(obj_handle)) {
-               pr_err("QAT: UOF incompatible\n");
-               return -EINVAL;
-       }
-       obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
-                                       GFP_KERNEL);
-       if (!obj_handle->uword_buf)
-               return -ENOMEM;
-       obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
-       if (!obj_handle->obj_hdr->file_buff ||
-           !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
-                                   &obj_handle->str_table)) {
-               pr_err("QAT: UOF doesn't have effective images\n");
-               goto out_err;
-       }
-       obj_handle->uimage_num =
-               qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
-                                   ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
-       if (!obj_handle->uimage_num)
-               goto out_err;
-       if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
-               pr_err("QAT: Bad object\n");
-               goto out_check_uof_aemask_err;
-       }
-       qat_uclo_init_uword_num(handle);
-       qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
-                                  &obj_handle->init_mem_tab);
-       if (qat_uclo_set_ae_mode(handle))
-               goto out_check_uof_aemask_err;
-       return 0;
-out_check_uof_aemask_err:
-       for (ae = 0; ae < obj_handle->uimage_num; ae++)
-               kfree(obj_handle->ae_uimage[ae].page);
-out_err:
-       kfree(obj_handle->uword_buf);
-       return -EFAULT;
-}
-
-static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_suof_filehdr *suof_ptr,
-                                     int suof_size)
-{
-       unsigned int check_sum = 0;
-       unsigned int min_ver_offset = 0;
-       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
-
-       suof_handle->file_id = ICP_QAT_SUOF_FID;
-       suof_handle->suof_buf = (char *)suof_ptr;
-       suof_handle->suof_size = suof_size;
-       min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
-                                             min_ver);
-       check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
-                                              min_ver_offset);
-       if (check_sum != suof_ptr->check_sum) {
-               pr_err("QAT: incorrect SUOF checksum\n");
-               return -EINVAL;
-       }
-       suof_handle->check_sum = suof_ptr->check_sum;
-       suof_handle->min_ver = suof_ptr->min_ver;
-       suof_handle->maj_ver = suof_ptr->maj_ver;
-       suof_handle->fw_type = suof_ptr->fw_type;
-       return 0;
-}
-
-static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
-                             struct icp_qat_suof_img_hdr *suof_img_hdr,
-                             struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
-{
-       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
-       struct icp_qat_simg_ae_mode *ae_mode;
-       struct icp_qat_suof_objhdr *suof_objhdr;
-
-       suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
-                                  suof_chunk_hdr->offset +
-                                  sizeof(*suof_objhdr));
-       suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
-                                 (suof_handle->suof_buf +
-                                  suof_chunk_hdr->offset))->img_length;
-
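-       /*
-        * A signed image is laid out as: CSS header, FWSK public key
-        * (modulus + exponent), signature, then the image payload, which
-        * begins with the AE mode block.
-        */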
-       suof_img_hdr->css_header = suof_img_hdr->simg_buf;
-       suof_img_hdr->css_key = (suof_img_hdr->css_header +
-                                sizeof(struct icp_qat_css_hdr));
-       suof_img_hdr->css_signature = suof_img_hdr->css_key +
-                                     ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
-                                     ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
-       suof_img_hdr->css_simg = suof_img_hdr->css_signature +
-                                ICP_QAT_CSS_SIGNATURE_LEN(handle);
-
-       ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
-       suof_img_hdr->ae_mask = ae_mode->ae_mask;
-       suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
-       suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
-       suof_img_hdr->fw_type = ae_mode->fw_type;
-}
-
-static void
-qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
-                         struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
-{
-       char **sym_str = (char **)&suof_handle->sym_str;
-       unsigned int *sym_size = &suof_handle->sym_size;
-       struct icp_qat_suof_strtable *str_table_obj;
-
-       *sym_size = *(unsigned int *)(uintptr_t)
-                  (suof_chunk_hdr->offset + suof_handle->suof_buf);
-       *sym_str = (char *)(uintptr_t)
-                  (suof_handle->suof_buf + suof_chunk_hdr->offset +
-                  sizeof(str_table_obj->tab_length));
-}
-
-static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_suof_img_hdr *img_hdr)
-{
-       struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
-       unsigned int prod_rev, maj_ver, prod_type;
-
-       prod_type = qat_uclo_get_dev_type(handle);
-       img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
-       prod_rev = PID_MAJOR_REV |
-                        (PID_MINOR_REV & handle->hal_handle->revision_id);
-       if (img_ae_mode->dev_type != prod_type) {
-               pr_err("QAT: incompatible product type %x\n",
-                      img_ae_mode->dev_type);
-               return -EINVAL;
-       }
-       maj_ver = prod_rev & 0xff;
-       if (maj_ver > img_ae_mode->devmax_ver ||
-           maj_ver < img_ae_mode->devmin_ver) {
-               pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
-
-       kfree(sobj_handle->img_table.simg_hdr);
-       sobj_handle->img_table.simg_hdr = NULL;
-       kfree(handle->sobj_handle);
-       handle->sobj_handle = NULL;
-}
-
-static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
-                             unsigned int img_id, unsigned int num_simgs)
-{
-       struct icp_qat_suof_img_hdr img_header;
-
-       if (img_id != num_simgs - 1) {
-               memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
-                      sizeof(*suof_img_hdr));
-               memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
-                      sizeof(*suof_img_hdr));
-               memcpy(&suof_img_hdr[img_id], &img_header,
-                      sizeof(*suof_img_hdr));
-       }
-}
-
-static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
-                            struct icp_qat_suof_filehdr *suof_ptr,
-                            int suof_size)
-{
-       struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
-       struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
-       struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
-       int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
-       unsigned int i = 0;
-       struct icp_qat_suof_img_hdr img_header;
-
-       if (!suof_ptr || suof_size == 0) {
-               pr_err("QAT: invalid SUOF pointer or size\n");
-               return -EINVAL;
-       }
-       if (qat_uclo_check_suof_format(suof_ptr))
-               return -EINVAL;
-       ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
-       if (ret)
-               return ret;
-       suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
-                        ((uintptr_t)suof_ptr + sizeof(*suof_ptr));
-
-       qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
-       suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
-
-       if (suof_handle->img_table.num_simgs != 0) {
-               suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
-                                      sizeof(img_header),
-                                      GFP_KERNEL);
-               if (!suof_img_hdr)
-                       return -ENOMEM;
-               suof_handle->img_table.simg_hdr = suof_img_hdr;
-
-               for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
-                       qat_uclo_map_simg(handle, &suof_img_hdr[i],
-                                         &suof_chunk_hdr[1 + i]);
-                       ret = qat_uclo_check_simg_compat(handle,
-                                                        &suof_img_hdr[i]);
-                       if (ret)
-                               return ret;
-                       suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
-                       if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
-                               ae0_img = i;
-               }
-
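-               /* move the image that targets AE0 into the last table slot */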
-               if (!handle->chip_info->tgroup_share_ustore) {
-                       qat_uclo_tail_img(suof_img_hdr, ae0_img,
-                                         suof_handle->img_table.num_simgs);
-               }
-       }
-       return 0;
-}
-
-#define ADD_ADDR(high, low)  ((((u64)high) << 32) + low)
-#define BITS_IN_DWORD 32
-
-static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
-                           struct icp_qat_fw_auth_desc *desc)
-{
-       u32 fcu_sts, retry = 0;
-       u32 fcu_ctl_csr, fcu_sts_csr;
-       u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
-       u64 bus_addr;
-
-       bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
-                          - sizeof(struct icp_qat_auth_chunk);
-
-       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-       fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
-       fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
-
-       SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
-       SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
-       SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
-
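-       /* poll the FCU status CSR until authentication completes or times out */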
-       do {
-               msleep(FW_AUTH_WAIT_PERIOD);
-               fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-               if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
-                       goto auth_fail;
-               if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
-                       if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
-                               return 0;
-       } while (retry++ < FW_AUTH_MAX_RETRY);
-auth_fail:
-       pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
-              fcu_sts & FCU_AUTH_STS_MASK, retry);
-       return -EINVAL;
-}
-
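-/*
- * Broadcast load is only used on parts whose thread group shares the
- * ustore, and never for images that overlap the admin AE mask.
- */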
-static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
-                                 int imgid)
-{
-       struct icp_qat_suof_handle *sobj_handle;
-
-       if (!handle->chip_info->tgroup_share_ustore)
-               return false;
-
-       sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
-       if (handle->hal_handle->admin_ae_mask &
-           sobj_handle->img_table.simg_hdr[imgid].ae_mask)
-               return false;
-
-       return true;
-}
-
-static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
-                                     struct icp_qat_fw_auth_desc *desc)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long desc_ae_mask = desc->ae_mask;
-       u32 fcu_sts, ae_broadcast_mask = 0;
-       u32 fcu_loaded_csr, ae_loaded;
-       u32 fcu_sts_csr, fcu_ctl_csr;
-       unsigned int ae, retry = 0;
-
-       if (handle->chip_info->tgroup_share_ustore) {
-               fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-               fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-               fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
-       } else {
-               pr_err("QAT: chip 0x%x doesn't support broadcast load\n",
-                      handle->pci_dev->device);
-               return -EINVAL;
-       }
-
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
-                       pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
-                       return -EINVAL;
-               }
-
-               if (test_bit(ae, &desc_ae_mask))
-                       ae_broadcast_mask |= 1 << ae;
-       }
-
-       if (ae_broadcast_mask) {
-               SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
-                           ae_broadcast_mask);
-
-               SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);
-
-               do {
-                       msleep(FW_AUTH_WAIT_PERIOD);
-                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-                       fcu_sts &= FCU_AUTH_STS_MASK;
-
-                       if (fcu_sts == FCU_STS_LOAD_FAIL) {
-                               pr_err("QAT: broadcast load failed: 0x%x\n", fcu_sts);
-                               return -EINVAL;
-                       } else if (fcu_sts == FCU_STS_LOAD_DONE) {
-                               ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
-                               ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;
-
-                               if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
-                                       break;
-                       }
-               } while (retry++ < FW_AUTH_MAX_RETRY);
-
-               if (retry > FW_AUTH_MAX_RETRY) {
-                       pr_err("QAT: broadcast load timed out after %d retries\n", retry);
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
-static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
-                              struct icp_firml_dram_desc *dram_desc,
-                              unsigned int size)
-{
-       void *vptr;
-       dma_addr_t ptr;
-
-       vptr = dma_alloc_coherent(&handle->pci_dev->dev,
-                                 size, &ptr, GFP_KERNEL);
-       if (!vptr)
-               return -ENOMEM;
-       dram_desc->dram_base_addr_v = vptr;
-       dram_desc->dram_bus_addr = ptr;
-       dram_desc->dram_size = size;
-       return 0;
-}
-
-static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
-                              struct icp_firml_dram_desc *dram_desc)
-{
-       if (handle && dram_desc && dram_desc->dram_base_addr_v) {
-               dma_free_coherent(&handle->pci_dev->dev,
-                                 (size_t)(dram_desc->dram_size),
-                                 dram_desc->dram_base_addr_v,
-                                 dram_desc->dram_bus_addr);
-       }
-
-       if (dram_desc)
-               memset(dram_desc, 0, sizeof(*dram_desc));
-}
-
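qat_uclo_simg_alloc() and qat_uclo_simg_free() keep the virtual address, bus address and size together in one descriptor, so the free path never has to recompute what was mapped. Below is a hedged kernel-style sketch of the same ownership pattern; the struct and function names are illustrative, while dma_alloc_coherent()/dma_free_coherent() are the real kernel API used above.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>

struct dram_desc {
	void *vaddr;		/* CPU view of the buffer */
	dma_addr_t bus;		/* device view of the same buffer */
	size_t size;
};

static int desc_alloc(struct device *dev, struct dram_desc *d, size_t size)
{
	d->vaddr = dma_alloc_coherent(dev, size, &d->bus, GFP_KERNEL);
	if (!d->vaddr)
		return -ENOMEM;
	d->size = size;
	return 0;
}

static void desc_free(struct device *dev, struct dram_desc *d)
{
	if (d->vaddr)
		dma_free_coherent(dev, d->size, d->vaddr, d->bus);
	memset(d, 0, sizeof(*d));	/* zeroing makes a double free harmless */
}
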
-static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
-                                  struct icp_qat_fw_auth_desc **desc)
-{
-       struct icp_firml_dram_desc dram_desc;
-
-       if (*desc) {
-               dram_desc.dram_base_addr_v = *desc;
-               dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
-                                          (*desc))->chunk_bus_addr;
-               dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
-                                      (*desc))->chunk_size;
-               qat_uclo_simg_free(handle, &dram_desc);
-       }
-}
-
-static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
-                               char *image, unsigned int size,
-                               unsigned int fw_type)
-{
-       char *fw_type_name = fw_type ? "MMP" : "AE";
-       unsigned int css_dword_size = sizeof(u32);
-
-       if (handle->chip_info->fw_auth) {
-               struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
-               unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
-
-               if ((css_hdr->header_len * css_dword_size) != header_len)
-                       goto err;
-               if ((css_hdr->size * css_dword_size) != size)
-                       goto err;
-               if (fw_type != css_hdr->fw_type)
-                       goto err;
-               if (size <= header_len)
-                       goto err;
-               size -= header_len;
-       }
-
-       if (fw_type == CSS_AE_FIRMWARE) {
-               if (size < sizeof(struct icp_qat_simg_ae_mode) +
-                   ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
-                       goto err;
-               if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
-                       goto err;
-       } else if (fw_type == CSS_MMP_FIRMWARE) {
-               if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
-                       goto err;
-       } else {
-               pr_err("QAT: Unsupported firmware type\n");
-               return -EINVAL;
-       }
-       return 0;
-
-err:
-       pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
-       return -EINVAL;
-}
-
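Every branch in qat_uclo_check_image() enforces one invariant: the lengths the CSS header declares must agree with the buffer that was actually passed in, and the payload must fit inside the per-SKU maximum. A standalone sketch of the core consistency check follows; the struct layout and dword units are assumptions for illustration, not the real icp_qat_css_hdr.

#include <stdint.h>
#include <stddef.h>

/* Illustrative header: lengths are declared in 32-bit dwords. */
struct css_hdr {
	uint32_t header_len;	/* header length, in dwords */
	uint32_t size;		/* total image length, in dwords */
};

static int image_ok(const struct css_hdr *hdr, size_t buf_len,
		    size_t expected_hdr_len)
{
	size_t hdr_bytes = (size_t)hdr->header_len * sizeof(uint32_t);
	size_t img_bytes = (size_t)hdr->size * sizeof(uint32_t);

	if (hdr_bytes != expected_hdr_len)
		return 0;	/* header disagrees with the chip layout */
	if (img_bytes != buf_len)
		return 0;	/* header disagrees with the buffer length */
	if (buf_len <= hdr_bytes)
		return 0;	/* no payload left after the header */
	return 1;
}

int main(void)
{
	struct css_hdr hdr = { .header_len = 4, .size = 8 };

	return image_ok(&hdr, 32, 16) ? 0 : 1;
}
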
-static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
-                               char *image, unsigned int size,
-                               struct icp_qat_fw_auth_desc **desc)
-{
-       struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
-       struct icp_qat_fw_auth_desc *auth_desc;
-       struct icp_qat_auth_chunk *auth_chunk;
-       u64 virt_addr, bus_addr, virt_base;
-       unsigned int length, simg_offset = sizeof(*auth_chunk);
-       struct icp_qat_simg_ae_mode *simg_ae_mode;
-       struct icp_firml_dram_desc img_desc;
-
-       if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
-               pr_err("QAT: error, input image size %u exceeds limit\n", size);
-               return -EINVAL;
-       }
-       length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
-                ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset :
-                size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset;
-       if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
-               pr_err("QAT: error, failed to allocate contiguous DRAM\n");
-               return -ENOMEM;
-       }
-
-       auth_chunk = img_desc.dram_base_addr_v;
-       auth_chunk->chunk_size = img_desc.dram_size;
-       auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
-       virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
-       bus_addr  = img_desc.dram_bus_addr + simg_offset;
-       auth_desc = img_desc.dram_base_addr_v;
-       auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->css_hdr_low = (unsigned int)bus_addr;
-       virt_addr = virt_base;
-
-       memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
-       /* pub key */
-       bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
-                          sizeof(*css_hdr);
-       virt_addr = virt_addr + sizeof(*css_hdr);
-
-       auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
-
-       memcpy((void *)(uintptr_t)virt_addr,
-              (void *)(image + sizeof(*css_hdr)),
-              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
-       /* padding */
-       memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
-              0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
-
-       /* exponent */
-       memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
-              ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
-              (void *)(image + sizeof(*css_hdr) +
-                       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
-              sizeof(unsigned int));
-
-       /* signature */
-       bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
-                           auth_desc->fwsk_pub_low) +
-                  ICP_QAT_CSS_FWSK_PUB_LEN(handle);
-       virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
-       auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->signature_low = (unsigned int)bus_addr;
-
-       memcpy((void *)(uintptr_t)virt_addr,
-              (void *)(image + sizeof(*css_hdr) +
-              ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
-              ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
-              ICP_QAT_CSS_SIGNATURE_LEN(handle));
-
-       bus_addr = ADD_ADDR(auth_desc->signature_high,
-                           auth_desc->signature_low) +
-                  ICP_QAT_CSS_SIGNATURE_LEN(handle);
-       virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
-
-       auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
-       auth_desc->img_low = (unsigned int)bus_addr;
-       auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
-       memcpy((void *)(uintptr_t)virt_addr,
-              (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
-              auth_desc->img_len);
-       virt_addr = virt_base;
-       /* AE firmware */
-       if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
-           CSS_AE_FIRMWARE) {
-               auth_desc->img_ae_mode_data_high = auth_desc->img_high;
-               auth_desc->img_ae_mode_data_low = auth_desc->img_low;
-               bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
-                                   auth_desc->img_ae_mode_data_low) +
-                          sizeof(struct icp_qat_simg_ae_mode);
-
-               auth_desc->img_ae_init_data_high = (unsigned int)
-                                                (bus_addr >> BITS_IN_DWORD);
-               auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
-               bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
-               auth_desc->img_ae_insts_high = (unsigned int)
-                                            (bus_addr >> BITS_IN_DWORD);
-               auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
-               virt_addr += sizeof(struct icp_qat_css_hdr);
-               virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
-               virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
-               simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
-               auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
-       } else {
-               auth_desc->img_ae_insts_high = auth_desc->img_high;
-               auth_desc->img_ae_insts_low = auth_desc->img_low;
-       }
-       *desc = auth_desc;
-       return 0;
-}
-
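The descriptor fields above (css_hdr_high/low, fwsk_pub_high/low, and so on) carry one 64-bit bus address as a pair of 32-bit CSR words, split with bus_addr >> BITS_IN_DWORD and rebuilt with ADD_ADDR(). A quick standalone round-trip check of that split; the COMBINE macro below mirrors what ADD_ADDR() is assumed to do.

#include <assert.h>
#include <stdint.h>

#define BITS_IN_DWORD	32
#define COMBINE(hi, lo)	(((uint64_t)(hi) << BITS_IN_DWORD) | (uint32_t)(lo))

int main(void)
{
	uint64_t bus = 0x0000123456789abcULL;
	uint32_t hi = (uint32_t)(bus >> BITS_IN_DWORD);
	uint32_t lo = (uint32_t)bus;

	assert(COMBINE(hi, lo) == bus);	/* split and rebuild are inverses */
	return 0;
}
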
-static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
-                           struct icp_qat_fw_auth_desc *desc)
-{
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       u32 fcu_sts_csr, fcu_ctl_csr;
-       u32 loaded_aes, loaded_csr;
-       unsigned int i;
-       u32 fcu_sts;
-
-       fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
-       fcu_sts_csr = handle->chip_info->fcu_sts_csr;
-       loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
-
-       for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
-               int retry = 0;
-
-               if (!((desc->ae_mask >> i) & 0x1))
-                       continue;
-               if (qat_hal_check_ae_active(handle, i)) {
-                       pr_err("QAT: AE %d is active\n", i);
-                       return -EINVAL;
-               }
-               SET_CAP_CSR(handle, fcu_ctl_csr,
-                           (FCU_CTRL_CMD_LOAD |
-                           (1 << FCU_CTRL_BROADCAST_POS) |
-                           (i << FCU_CTRL_AE_POS)));
-
-               do {
-                       msleep(FW_AUTH_WAIT_PERIOD);
-                       fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
-                       if ((fcu_sts & FCU_AUTH_STS_MASK) ==
-                           FCU_STS_LOAD_DONE) {
-                               loaded_aes = GET_CAP_CSR(handle, loaded_csr);
-                               loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
-                               if (loaded_aes & (1 << i))
-                                       break;
-                       }
-               } while (retry++ < FW_AUTH_MAX_RETRY);
-               if (retry > FW_AUTH_MAX_RETRY) {
-                       pr_err("QAT: firmware load failed timeout %x\n", retry);
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
-static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
-                                void *addr_ptr, int mem_size)
-{
-       struct icp_qat_suof_handle *suof_handle;
-
-       suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
-       if (!suof_handle)
-               return -ENOMEM;
-       handle->sobj_handle = suof_handle;
-       if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
-               qat_uclo_del_suof(handle);
-               pr_err("QAT: map SUOF failed\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
-                      void *addr_ptr, int mem_size)
-{
-       struct icp_qat_fw_auth_desc *desc = NULL;
-       int status = 0;
-       int ret;
-
-       ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
-       if (ret)
-               return ret;
-
-       if (handle->chip_info->fw_auth) {
-               status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
-               if (!status)
-                       status = qat_uclo_auth_fw(handle, desc);
-               qat_uclo_ummap_auth_fw(handle, &desc);
-       } else {
-               if (handle->chip_info->mmp_sram_size < mem_size) {
-                       pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
-                       return -EFBIG;
-               }
-               qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
-       }
-       return status;
-}
-
-static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
-                               void *addr_ptr, int mem_size)
-{
-       struct icp_qat_uof_filehdr *filehdr;
-       struct icp_qat_uclo_objhandle *objhdl;
-
-       objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
-       if (!objhdl)
-               return -ENOMEM;
-       objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
-       if (!objhdl->obj_buf)
-               goto out_objbuf_err;
-       filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
-       if (qat_uclo_check_uof_format(filehdr))
-               goto out_objhdr_err;
-       objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
-                                            ICP_QAT_UOF_OBJS);
-       if (!objhdl->obj_hdr) {
-               pr_err("QAT: object file chunk is null\n");
-               goto out_objhdr_err;
-       }
-       handle->obj_handle = objhdl;
-       if (qat_uclo_parse_uof_obj(handle))
-               goto out_overlay_obj_err;
-       return 0;
-
-out_overlay_obj_err:
-       handle->obj_handle = NULL;
-       kfree(objhdl->obj_hdr);
-out_objhdr_err:
-       kfree(objhdl->obj_buf);
-out_objbuf_err:
-       kfree(objhdl);
-       return -ENOMEM;
-}
-
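The error path of qat_uclo_map_uof_obj() unwinds its allocations in reverse order through stacked goto labels, the standard kernel cleanup idiom. A condensed standalone sketch of the same shape, using generic allocations rather than the driver's structures:

#include <stdlib.h>

struct obj {
	void *buf;
	void *hdr;
};

static int obj_create(struct obj **out, size_t len)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return -1;
	o->buf = malloc(len);
	if (!o->buf)
		goto out_obj;		/* nothing else to undo yet */
	o->hdr = malloc(64);
	if (!o->hdr)
		goto out_buf;		/* undo buf first, then obj */

	*out = o;
	return 0;

out_buf:
	free(o->buf);
out_obj:
	free(o);
	return -1;
}

int main(void)
{
	struct obj *o;

	if (!obj_create(&o, 128)) {
		free(o->hdr);
		free(o->buf);
		free(o);
	}
	return 0;
}
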
-static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
-                                    struct icp_qat_mof_file_hdr *mof_ptr,
-                                    u32 mof_size)
-{
-       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
-       unsigned int min_ver_offset;
-       unsigned int checksum;
-
-       mobj_handle->file_id = ICP_QAT_MOF_FID;
-       mobj_handle->mof_buf = (char *)mof_ptr;
-       mobj_handle->mof_size = mof_size;
-
-       min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
-                                            min_ver);
-       checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
-                                             min_ver_offset);
-       if (checksum != mof_ptr->checksum) {
-               pr_err("QAT: incorrect MOF checksum\n");
-               return -EINVAL;
-       }
-
-       mobj_handle->checksum = mof_ptr->checksum;
-       mobj_handle->min_ver = mof_ptr->min_ver;
-       mobj_handle->maj_ver = mof_ptr->maj_ver;
-       return 0;
-}
-
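Note that min_ver_offset above is really a byte count: everything from the min_ver field to the end of the file is checksummed and compared against the stored value, so corruption anywhere past the checksum field itself is caught. The real algorithm is qat_uclo_calc_str_checksum(), defined elsewhere in this file; purely for illustration, here is the shape of such a check assuming a simple byte-sum.

#include <stddef.h>
#include <stdint.h>

/* Assumed-for-illustration checksum: a plain byte sum. */
static uint32_t byte_sum(const char *p, size_t n)
{
	uint32_t sum = 0;

	while (n--)
		sum += (unsigned char)*p++;
	return sum;
}

static int tail_checksum_ok(const char *tail, size_t tail_len, uint32_t stored)
{
	return byte_sum(tail, tail_len) == stored;
}

int main(void)
{
	const char tail[] = "abc";

	return tail_checksum_ok(tail, 3, 'a' + 'b' + 'c') ? 0 : 1;
}
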
-static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
-
-       kfree(mobj_handle->obj_table.obj_hdr);
-       mobj_handle->obj_table.obj_hdr = NULL;
-       kfree(handle->mobj_handle);
-       handle->mobj_handle = NULL;
-}
-
-static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
-                                       char *obj_name, char **obj_ptr,
-                                       unsigned int *obj_size)
-{
-       struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
-       unsigned int i;
-
-       for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
-               if (!strncmp(obj_hdr[i].obj_name, obj_name,
-                            ICP_QAT_SUOF_OBJ_NAME_LEN)) {
-                       *obj_ptr  = obj_hdr[i].obj_buf;
-                       *obj_size = obj_hdr[i].obj_size;
-                       return 0;
-               }
-       }
-
-       pr_err("QAT: object %s not found inside MOF\n", obj_name);
-       return -EINVAL;
-}
-
-static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
-                                    struct icp_qat_mof_objhdr *mobj_hdr,
-                                    struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
-{
-       u8 *obj;
-
-       if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
-                    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
-               obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
-       } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
-                           ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
-               obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
-       } else {
-               pr_err("QAT: unsupported chunk id\n");
-               return -EINVAL;
-       }
-       mobj_hdr->obj_buf = obj;
-       mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
-       mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
-       return 0;
-}
-
-static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
-{
-       struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
-       struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
-       struct icp_qat_mof_obj_hdr *uobj_hdr;
-       struct icp_qat_mof_obj_hdr *sobj_hdr;
-       struct icp_qat_mof_objhdr *mobj_hdr;
-       unsigned int uobj_chunk_num = 0;
-       unsigned int sobj_chunk_num = 0;
-       unsigned int *valid_chunk;
-       int ret, i;
-
-       uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
-       sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
-       if (uobj_hdr)
-               uobj_chunk_num = uobj_hdr->num_chunks;
-       if (sobj_hdr)
-               sobj_chunk_num = sobj_hdr->num_chunks;
-
-       mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
-                          sizeof(*mobj_hdr), GFP_KERNEL);
-       if (!mobj_hdr)
-               return -ENOMEM;
-
-       mobj_handle->obj_table.obj_hdr = mobj_hdr;
-       valid_chunk = &mobj_handle->obj_table.num_objs;
-       uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
-                        ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
-       sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
-                       ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));
-
-       /* map uof objects */
-       for (i = 0; i < uobj_chunk_num; i++) {
-               ret = qat_uclo_map_obj_from_mof(mobj_handle,
-                                               &mobj_hdr[*valid_chunk],
-                                               &uobj_chunkhdr[i]);
-               if (ret)
-                       return ret;
-               (*valid_chunk)++;
-       }
-
-       /* map suof objects */
-       for (i = 0; i < sobj_chunk_num; i++) {
-               ret = qat_uclo_map_obj_from_mof(mobj_handle,
-                                               &mobj_hdr[*valid_chunk],
-                                               &sobj_chunkhdr[i]);
-               if (ret)
-                       return ret;
-               (*valid_chunk)++;
-       }
-
-       if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
-               pr_err("QAT: inconsistent UOF/SUOF chunk count\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
-                                    struct icp_qat_mof_chunkhdr *mof_chunkhdr)
-{
-       char **sym_str = (char **)&mobj_handle->sym_str;
-       unsigned int *sym_size = &mobj_handle->sym_size;
-       struct icp_qat_mof_str_table *str_table_obj;
-
-       *sym_size = *(unsigned int *)(uintptr_t)
-                   (mof_chunkhdr->offset + mobj_handle->mof_buf);
-       *sym_str = (char *)(uintptr_t)
-                  (mobj_handle->mof_buf + mof_chunkhdr->offset +
-                   sizeof(str_table_obj->tab_len));
-}
-
-static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
-                                  struct icp_qat_mof_chunkhdr *mof_chunkhdr)
-{
-       char *chunk_id = mof_chunkhdr->chunk_id;
-
-       if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
-               qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
-       else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
-               mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
-                                        mof_chunkhdr->offset;
-       else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
-               mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
-                                        mof_chunkhdr->offset;
-}
-
-static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
-{
-       int maj = mof_hdr->maj_ver & 0xff;
-       int min = mof_hdr->min_ver & 0xff;
-
-       if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
-               pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
-               return -EINVAL;
-       }
-
-       if (mof_hdr->num_chunks <= 0x1) {
-               pr_err("QAT: MOF chunk count is incorrect\n");
-               return -EINVAL;
-       }
-       if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
-               pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
-                      maj, min);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
-                               struct icp_qat_mof_file_hdr *mof_ptr,
-                               u32 mof_size, char *obj_name, char **obj_ptr,
-                               unsigned int *obj_size)
-{
-       struct icp_qat_mof_chunkhdr *mof_chunkhdr;
-       unsigned int file_id = mof_ptr->file_id;
-       struct icp_qat_mof_handle *mobj_handle;
-       unsigned short chunks_num;
-       unsigned int i;
-       int ret;
-
-       if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
-               if (obj_ptr)
-                       *obj_ptr = (char *)mof_ptr;
-               if (obj_size)
-                       *obj_size = mof_size;
-               return 0;
-       }
-       if (qat_uclo_check_mof_format(mof_ptr))
-               return -EINVAL;
-
-       mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
-       if (!mobj_handle)
-               return -ENOMEM;
-
-       handle->mobj_handle = mobj_handle;
-       ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
-       if (ret)
-               return ret;
-
-       mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
-       chunks_num = mof_ptr->num_chunks;
-
-       /* Parse MOF file chunks */
-       for (i = 0; i < chunks_num; i++)
-               qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
-
-       /* sym_str plus at least one of uobjs_hdr/sobjs_hdr must be present */
-       if (!mobj_handle->sym_str ||
-           (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
-               return -EINVAL;
-
-       ret = qat_uclo_map_objs_from_mof(mobj_handle);
-       if (ret)
-               return ret;
-
-       /* Seek specified uof object in MOF */
-       return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
-                                           obj_ptr, obj_size);
-}
-
-int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
-                    void *addr_ptr, u32 mem_size, char *obj_name)
-{
-       char *obj_addr;
-       u32 obj_size;
-       int ret;
-
-       BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
-                    (sizeof(handle->hal_handle->ae_mask) * 8));
-
-       if (!handle || !addr_ptr || mem_size < 24)
-               return -EINVAL;
-
-       if (obj_name) {
-               ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
-                                          &obj_addr, &obj_size);
-               if (ret)
-                       return ret;
-       } else {
-               obj_addr = addr_ptr;
-               obj_size = mem_size;
-       }
-
-       return (handle->chip_info->fw_auth) ?
-                       qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
-                       qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
-}
-
-void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int a;
-
-       if (handle->mobj_handle)
-               qat_uclo_del_mof(handle);
-       if (handle->sobj_handle)
-               qat_uclo_del_suof(handle);
-       if (!obj_handle)
-               return;
-
-       kfree(obj_handle->uword_buf);
-       for (a = 0; a < obj_handle->uimage_num; a++)
-               kfree(obj_handle->ae_uimage[a].page);
-
-       for (a = 0; a < handle->hal_handle->ae_max_num; a++)
-               qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
-
-       kfree(obj_handle->obj_hdr);
-       kfree(obj_handle->obj_buf);
-       kfree(obj_handle);
-       handle->obj_handle = NULL;
-}
-
-static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
-                                struct icp_qat_uclo_encap_page *encap_page,
-                                u64 *uword, unsigned int addr_p,
-                                unsigned int raddr, u64 fill)
-{
-       unsigned int i, addr;
-       u64 uwrd = 0;
-
-       if (!encap_page) {
-               *uword = fill;
-               return;
-       }
-       addr = (encap_page->page_region) ? raddr : addr_p;
-       for (i = 0; i < encap_page->uwblock_num; i++) {
-               if (addr >= encap_page->uwblock[i].start_addr &&
-                   addr <= encap_page->uwblock[i].start_addr +
-                   encap_page->uwblock[i].words_num - 1) {
-                       addr -= encap_page->uwblock[i].start_addr;
-                       addr *= obj_handle->uword_in_bytes;
-                       memcpy(&uwrd, (void *)(((uintptr_t)
-                              encap_page->uwblock[i].micro_words) + addr),
-                              obj_handle->uword_in_bytes);
-                       uwrd = uwrd & GENMASK_ULL(43, 0);
-               }
-       }
-       *uword = uwrd;
-       if (*uword == INVLD_UWORD)
-               *uword = fill;
-}
-
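The GENMASK_ULL(43, 0) mask above keeps only the low 44 bits of each staged microword before the INVLD_UWORD comparison, since ustore words are narrower than the u64 they travel in. A quick standalone check of what that mask evaluates to; GENMASK_ULL is open-coded here so the snippet compiles outside the kernel.

#include <assert.h>
#include <stdint.h>

/* Open-coded equivalent of the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Bits 43..0 set: 44 one-bits. */
	assert(GENMASK_ULL(43, 0) == 0x00000FFFFFFFFFFFULL);
	return 0;
}
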
-static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
-                                       struct icp_qat_uclo_encap_page *encap_page,
-                                       unsigned int ae)
-{
-       unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       u64 fill_pat;
-
-       /* load the page starting at appropriate ustore address */
-       /* get fill-pattern from an image -- they are all the same */
-       memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
-              sizeof(u64));
-       uw_physical_addr = encap_page->beg_addr_p;
-       uw_relative_addr = 0;
-       words_num = encap_page->micro_words_num;
-       while (words_num) {
-               if (words_num < UWORD_CPYBUF_SIZE)
-                       cpylen = words_num;
-               else
-                       cpylen = UWORD_CPYBUF_SIZE;
-
-               /* load the buffer */
-               for (i = 0; i < cpylen; i++)
-                       qat_uclo_fill_uwords(obj_handle, encap_page,
-                                            &obj_handle->uword_buf[i],
-                                            uw_physical_addr + i,
-                                            uw_relative_addr + i, fill_pat);
-
-               /* copy the buffer to ustore */
-               qat_hal_wr_uwords(handle, (unsigned char)ae,
-                                 uw_physical_addr, cpylen,
-                                 obj_handle->uword_buf);
-
-               uw_physical_addr += cpylen;
-               uw_relative_addr += cpylen;
-               words_num -= cpylen;
-       }
-}
-
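The write loop above is a bounded-staging-buffer pattern: fill at most UWORD_CPYBUF_SIZE words into uword_buf, flush them to ustore, advance both address counters, and repeat until nothing is left. A generic standalone sketch, where the flush callback stands in for qat_hal_wr_uwords():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CHUNK	64	/* stand-in for UWORD_CPYBUF_SIZE */

static void copy_chunked(void (*flush)(size_t off, const char *buf, size_t n),
			 const char *src, size_t total)
{
	char staging[CHUNK];
	size_t off = 0;

	while (total) {
		size_t n = total < CHUNK ? total : CHUNK;

		memcpy(staging, src + off, n);	/* stage one chunk */
		flush(off, staging, n);		/* push it to the device */
		off += n;
		total -= n;
	}
}

static void print_flush(size_t off, const char *buf, size_t n)
{
	(void)buf;
	printf("flushed %zu bytes at offset %zu\n", n, off);
}

int main(void)
{
	char src[150] = { 0 };

	copy_chunked(print_flush, src, sizeof(src));
	return 0;
}
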
-static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
-                                   struct icp_qat_uof_image *image)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned long ae_mask = handle->hal_handle->ae_mask;
-       unsigned long cfg_ae_mask = handle->cfg_ae_mask;
-       unsigned long ae_assigned = image->ae_assigned;
-       struct icp_qat_uclo_aedata *aed;
-       unsigned int ctx_mask, s;
-       struct icp_qat_uclo_page *page;
-       unsigned char ae;
-       int ctx;
-
-       if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
-               ctx_mask = 0xff;
-       else
-               ctx_mask = 0x55;
-       /* load the default page and set assigned CTX PC
-        * to the entrypoint address */
-       for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
-               if (!test_bit(ae, &cfg_ae_mask))
-                       continue;
-
-               if (!test_bit(ae, &ae_assigned))
-                       continue;
-
-               aed = &obj_handle->ae_data[ae];
-               /* find the slice to which this image is assigned */
-               for (s = 0; s < aed->slice_num; s++) {
-                       if (image->ctx_assigned &
-                           aed->ae_slices[s].ctx_mask_assigned)
-                               break;
-               }
-               if (s >= aed->slice_num)
-                       continue;
-               page = aed->ae_slices[s].page;
-               if (!page->encap_page->def_page)
-                       continue;
-               qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
-
-               page = aed->ae_slices[s].page;
-               for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
-                       aed->ae_slices[s].cur_page[ctx] =
-                                       (ctx_mask & (1 << ctx)) ? page : NULL;
-               qat_hal_set_live_ctx(handle, (unsigned char)ae,
-                                    image->ctx_assigned);
-               qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
-                              image->entry_address);
-       }
-}
-
-static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
-{
-       unsigned int i;
-       struct icp_qat_fw_auth_desc *desc = NULL;
-       struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
-       struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
-       int ret;
-
-       for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
-               ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
-                                          simg_hdr[i].simg_len,
-                                          CSS_AE_FIRMWARE);
-               if (ret)
-                       return ret;
-
-               if (qat_uclo_map_auth_fw(handle,
-                                        (char *)simg_hdr[i].simg_buf,
-                                        (unsigned int)
-                                        simg_hdr[i].simg_len,
-                                        &desc))
-                       goto wr_err;
-               if (qat_uclo_auth_fw(handle, desc))
-                       goto wr_err;
-               if (qat_uclo_is_broadcast(handle, i)) {
-                       if (qat_uclo_broadcast_load_fw(handle, desc))
-                               goto wr_err;
-               } else {
-                       if (qat_uclo_load_fw(handle, desc))
-                               goto wr_err;
-               }
-               qat_uclo_ummap_auth_fw(handle, &desc);
-       }
-       return 0;
-wr_err:
-       qat_uclo_ummap_auth_fw(handle, &desc);
-       return -EINVAL;
-}
-
-static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
-{
-       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-       unsigned int i;
-
-       if (qat_uclo_init_globals(handle))
-               return -EINVAL;
-       for (i = 0; i < obj_handle->uimage_num; i++) {
-               if (!obj_handle->ae_uimage[i].img_ptr)
-                       return -EINVAL;
-               if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
-                       return -EINVAL;
-               qat_uclo_wr_uimage_page(handle,
-                                       obj_handle->ae_uimage[i].img_ptr);
-       }
-       return 0;
-}
-
-int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
-{
-       return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
-                                  qat_uclo_wr_uof_img(handle);
-}
-
-int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
-                            unsigned int cfg_ae_mask)
-{
-       if (!cfg_ae_mask)
-               return -EINVAL;
-
-       handle->cfg_ae_mask = cfg_ae_mask;
-       return 0;
-}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
deleted file mode 100644 (file)
index 38d6f8e..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
-qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
deleted file mode 100644 (file)
index 1ebe0b3..0000000
+++ /dev/null
@@ -1,252 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include "adf_dh895xcc_hw_data.h"
-#include "icp_qat_hw.h"
-
-#define ADF_DH895XCC_VF_MSK    0xFFFFFFFF
-
-/* Worker thread to service arbiter mappings */
-static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
-       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
-       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
-       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
-};
-
-static struct adf_hw_device_class dh895xcc_class = {
-       .name = ADF_DH895XCC_DEVICE_NAME,
-       .type = DEV_DH895XCC,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       u32 fuses = self->fuses;
-
-       return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
-                        ADF_DH895XCC_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       u32 fuses = self->fuses;
-
-       return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCC_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCC_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCC_SRAM_BAR;
-}
-
-static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
-{
-       struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
-       u32 capabilities;
-       u32 legfuses;
-
-       capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
-                      ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
-                      ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
-                      ICP_ACCEL_CAPABILITIES_CIPHER |
-                      ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       /* Read accelerator capabilities mask */
-       pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
-
-       /* A set bit in legfuses means the feature is OFF in this SKU */
-       if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
-       if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
-       }
-       if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
-               capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
-
-       return capabilities;
-}
-
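get_accel_cap() starts from the full capability set and strips bits for every slice the legfuses mark as fused off; note that one fuse can clear two capabilities at once (the cipher slice gates both symmetric crypto and CIPHER). A tiny standalone model of that derivation, with made-up bit values rather than the real ICP_ACCEL constants:

#include <assert.h>
#include <stdint.h>

#define CAP_SYM		0x1
#define CAP_ASYM	0x2
#define CAP_CIPHER	0x4

#define FUSE_CIPHER_OFF	0x1	/* a set fuse bit means the slice is OFF */
#define FUSE_PKE_OFF	0x2

static uint32_t caps_from_fuses(uint32_t legfuses)
{
	uint32_t caps = CAP_SYM | CAP_ASYM | CAP_CIPHER;

	if (legfuses & FUSE_CIPHER_OFF)
		caps &= ~(CAP_SYM | CAP_CIPHER);	/* one fuse, two caps */
	if (legfuses & FUSE_PKE_OFF)
		caps &= ~CAP_ASYM;
	return caps;
}

int main(void)
{
	assert(caps_from_fuses(FUSE_CIPHER_OFF) == CAP_ASYM);
	return 0;
}
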
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
-           >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
-
-       switch (sku) {
-       case ADF_DH895XCC_FUSECTL_SKU_1:
-               return DEV_SKU_1;
-       case ADF_DH895XCC_FUSECTL_SKU_2:
-               return DEV_SKU_2;
-       case ADF_DH895XCC_FUSECTL_SKU_3:
-               return DEV_SKU_3;
-       case ADF_DH895XCC_FUSECTL_SKU_4:
-               return DEV_SKU_4;
-       default:
-               return DEV_SKU_UNKNOWN;
-       }
-       return DEV_SKU_UNKNOWN;
-}
-
-static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
-{
-       return thrd_to_arb_map;
-}
-
-static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
-{
-       /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
-       if (vf_mask & 0xFFFF) {
-               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-       }
-
-       /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
-       if (vf_mask >> 16) {
-               u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
-                         & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
-               ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
-       }
-}
-
-static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 val;
-
-       /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
-       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
-             | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
-
-       /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
-       val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
-             | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
-}
-
-static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
-{
-       u32 sources, pending, disabled;
-       u32 errsou3, errmsk3;
-       u32 errsou5, errmsk5;
-
-       /* Get the interrupt sources triggered by VFs */
-       errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
-       errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
-       sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
-                 | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
-
-       if (!sources)
-               return 0;
-
-       /* Get the already disabled interrupts */
-       errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
-       errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
-       disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
-                  | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
-
-       pending = sources & ~disabled;
-       if (!pending)
-               return 0;
-
-       /* Due to HW limitations, when disabling the interrupts, we can't
-        * just disable the requested sources, as this would lead to missed
-        * interrupts if sources changes just before writing to ERRMSK3 and
-        * ERRMSK5.
-        * To work around it, disable all and re-enable only the sources that
-        * are not in vf_mask and were not already disabled. Re-enabling will
-        * trigger a new interrupt for the sources that have changed in the
-        * meantime, if any.
-        */
-       errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
-       errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
-
-       errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
-       errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
-       ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
-
-       /* Return the sources of the (new) interrupt(s) */
-       return pending;
-}
-
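The comment in disable_pending_vf2pf_interrupts() describes a two-step ERRMSK update: mask every VF2PF source first, then re-enable only the sources that were neither pending nor already disabled, so an edge arriving mid-update re-triggers instead of being lost. A reduced standalone model of those two writes; csr_rd()/csr_wr() are stand-ins for the ADF_CSR helpers, and set bits mean "masked".

#include <assert.h>
#include <stdint.h>

static uint32_t errmsk;				/* stand-in for the ERRMSK CSR */

static uint32_t csr_rd(void) { return errmsk; }
static void csr_wr(uint32_t v) { errmsk = v; }

static void disable_pending(uint32_t all, uint32_t sources, uint32_t disabled)
{
	uint32_t msk = csr_rd();

	csr_wr(msk | all);			/* step 1: mask everything */
	csr_wr((msk | all) & (sources | disabled));
						/* step 2: keep masked only the
						 * pending and already-disabled
						 * sources; re-enable the rest */
}

int main(void)
{
	errmsk = 0x0;				/* nothing masked initially */
	disable_pending(0xff, 0x0c, 0x00);
	assert(csr_rd() == 0x0c);		/* only pending bits stay masked */
	return 0;
}
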
-static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
-{
-       adf_gen2_cfg_iov_thds(accel_dev, enable,
-                             ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
-                             ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
-}
-
-void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &dh895xcc_class;
-       hw_data->instance_id = dh895xcc_class.instances++;
-       hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_isr_resource_alloc;
-       hw_data->free_irq = adf_isr_resource_free;
-       hw_data->enable_error_correction = adf_gen2_enable_error_correction;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_accel_cap = get_accel_cap;
-       hw_data->get_num_accels = adf_gen2_get_num_accels;
-       hw_data->get_num_aes = adf_gen2_get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_admin_info = adf_gen2_get_admin_info;
-       hw_data->get_arb_info = adf_gen2_get_arb_info;
-       hw_data->get_sram_bar_id = get_sram_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->fw_name = ADF_DH895XCC_FW;
-       hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
-       hw_data->init_admin_comms = adf_init_admin_comms;
-       hw_data->exit_admin_comms = adf_exit_admin_comms;
-       hw_data->configure_iov_threads = configure_iov_threads;
-       hw_data->send_admin_init = adf_send_admin_init;
-       hw_data->init_arb = adf_init_arb;
-       hw_data->exit_arb = adf_exit_arb;
-       hw_data->get_arb_mapping = adf_get_arbiter_mapping;
-       hw_data->enable_ints = adf_gen2_enable_ints;
-       hw_data->reset_device = adf_reset_sbr;
-       hw_data->disable_iov = adf_disable_sriov;
-       hw_data->dev_config = adf_gen2_dev_config;
-
-       adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
-       hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
-       hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
-       hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
deleted file mode 100644 (file)
index 7b674bb..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#ifndef ADF_DH895x_HW_DATA_H_
-#define ADF_DH895x_HW_DATA_H_
-
-/* PCIe configuration space */
-#define ADF_DH895XCC_SRAM_BAR 0
-#define ADF_DH895XCC_PMISC_BAR 1
-#define ADF_DH895XCC_ETR_BAR 2
-#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
-#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
-#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
-#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
-#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
-#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
-#define ADF_DH895XCC_MAX_ACCELERATORS 6
-#define ADF_DH895XCC_MAX_ACCELENGINES 12
-#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
-#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
-#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
-#define ADF_DH895XCC_ETR_MAX_BANKS 32
-
-/* Masks for VF2PF interrupts */
-#define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src)   (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask)  (((vf_mask) & 0xFFFF) << 9)
-#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src)   (((vf_src) & 0x0000FFFF) << 16)
-#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask)  ((vf_mask) >> 16)
-
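The L/U macro pairs above relocate the flat 32-bit vf_mask into the hardware layouts: VFs 0-15 occupy bits 9..24 of ERRSOU3/ERRMSK3 and VFs 16-31 occupy bits 16..31 of ERRSOU5/ERRMSK5. A standalone round-trip check of that mapping, with the four macros copied as-is apart from the shortened prefix:

#include <assert.h>
#include <stdint.h>

#define ERR_REG_VF2PF_L(vf_src)		(((vf_src) & 0x01FFFE00) >> 9)
#define ERR_MSK_VF2PF_L(vf_mask)	(((vf_mask) & 0xFFFF) << 9)
#define ERR_REG_VF2PF_U(vf_src)		(((vf_src) & 0x0000FFFF) << 16)
#define ERR_MSK_VF2PF_U(vf_mask)	((vf_mask) >> 16)

int main(void)
{
	uint32_t vf_mask = 0xA5A55A5AU;

	/* Low half: MSK packs VFs 0-15 into register bits 9..24,
	 * REG unpacks them again. */
	assert(ERR_REG_VF2PF_L(ERR_MSK_VF2PF_L(vf_mask)) == (vf_mask & 0xFFFF));
	/* Upper half: REG lifts register bits 0..15 to VF bits 16..31,
	 * MSK drops them back down. */
	assert(ERR_MSK_VF2PF_U(ERR_REG_VF2PF_U(vf_mask)) == (vf_mask & 0xFFFF));
	return 0;
}
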
-/* AE to function mapping */
-#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
-#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
-
-/* FW names */
-#define ADF_DH895XCC_FW "qat_895xcc.bin"
-#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
-
-void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
-#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
deleted file mode 100644 (file)
index e18860a..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_dh895xcc_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_DH895XCC_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-       .sriov_configure = adf_sriov_configure,
-       .err_handler = &adf_err_handler,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-                       adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       adf_devmgr_rm_dev(accel_dev, NULL);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
-               /* If the accelerator is connected to a node with no memory
-                * there is no point in using the accelerator since the remote
-                * memory transaction will be very slow. */
-               dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
-               return -EINVAL;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table.
-        * This should be called before adf_cleanup_accel is called */
-       if (adf_devmgr_add_dev(accel_dev, NULL)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-
-                       pr_err("QAT: firmware load failed, timeout after %d retries\n", retry);
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_dh895xcc(accel_dev->hw_device);
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
-       pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
-                             &hw_data->fuses);
-
-       /* Get Accelerators and Accelerators Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-       /* If the device has no acceleration engines then ignore it. */
-       if (!hw_data->accel_mask || !hw_data->ae_mask ||
-           ((~hw_data->ae_mask) & 0x01)) {
-               dev_err(&pdev->dev, "No acceleration units found\n");
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       pcie_set_readrq(pdev, 1024);
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Get accelerator capabilities mask */
-       hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
-
-       /* Find and map all the device's BARS */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-
-       if (pci_save_state(pdev)) {
-               dev_err(&pdev->dev, "Failed to save pci state\n");
-               ret = -ENOMEM;
-               goto out_err_free_reg;
-       }
-
-       ret = adf_dev_up(accel_dev, true);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_FIRMWARE(ADF_DH895XCC_FW);
-MODULE_FIRMWARE(ADF_DH895XCC_MMP);
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
deleted file mode 100644 (file)
index 0153c85..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../qat_common
-obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
-qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
deleted file mode 100644 (file)
index 70e56cc..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2021 Intel Corporation */
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
-#include <adf_gen2_hw_data.h>
-#include <adf_gen2_pfvf.h>
-#include <adf_pfvf_vf_msg.h>
-#include "adf_dh895xccvf_hw_data.h"
-
-static struct adf_hw_device_class dh895xcciov_class = {
-       .name = ADF_DH895XCCVF_DEVICE_NAME,
-       .type = DEV_DH895XCCVF,
-       .instances = 0
-};
-
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_ACCELERATORS_MASK;
-}
-
-static u32 get_ae_mask(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_ACCELENGINES_MASK;
-}
-
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_MAX_ACCELENGINES;
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
-       return ADF_DH895XCCIOV_ETR_BAR;
-}
-
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
-       return DEV_SKU_VF;
-}
-
-static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
-{
-       return 0;
-}
-
-static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
-{
-}
-
-void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class = &dh895xcciov_class;
-       hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
-       hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
-       hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
-       hw_data->num_logical_accel = 1;
-       hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
-       hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
-       hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
-       hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
-       hw_data->alloc_irq = adf_vf_isr_resource_alloc;
-       hw_data->free_irq = adf_vf_isr_resource_free;
-       hw_data->enable_error_correction = adf_vf_void_noop;
-       hw_data->init_admin_comms = adf_vf_int_noop;
-       hw_data->exit_admin_comms = adf_vf_void_noop;
-       hw_data->send_admin_init = adf_vf2pf_notify_init;
-       hw_data->init_arb = adf_vf_int_noop;
-       hw_data->exit_arb = adf_vf_void_noop;
-       hw_data->disable_iov = adf_vf2pf_notify_shutdown;
-       hw_data->get_accel_mask = get_accel_mask;
-       hw_data->get_ae_mask = get_ae_mask;
-       hw_data->get_num_accels = get_num_accels;
-       hw_data->get_num_aes = get_num_aes;
-       hw_data->get_etr_bar_id = get_etr_bar_id;
-       hw_data->get_misc_bar_id = get_misc_bar_id;
-       hw_data->get_sku = get_sku;
-       hw_data->enable_ints = adf_vf_void_noop;
-       hw_data->dev_class->instances++;
-       hw_data->dev_config = adf_gen2_dev_config;
-       adf_devmgr_update_class_index(hw_data);
-       adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
-       adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
-       adf_gen2_init_dc_ops(&hw_data->dc_ops);
-}
-
-void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
-{
-       hw_data->dev_class->instances--;
-       adf_devmgr_update_class_index(hw_data);
-}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
deleted file mode 100644 (file)
index 6973fa9..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
-/* Copyright(c) 2015 - 2020 Intel Corporation */
-#ifndef ADF_DH895XVF_HW_DATA_H_
-#define ADF_DH895XVF_HW_DATA_H_
-
-#define ADF_DH895XCCIOV_PMISC_BAR 1
-#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
-#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
-#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
-#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
-#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
-#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
-#define ADF_DH895XCCIOV_ETR_BAR 0
-#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
-
-void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
-void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
-#endif
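
The geometry macros in the header removed above fit together: with ADF_DH895XCCIOV_TX_RINGS_MASK of 0xFF, rings 0-7 of the single bank carry requests, and ADF_DH895XCCIOV_RX_RINGS_OFFSET of 8 (wired into hw_data->tx_rx_gap earlier in this patch) places each response ring 8 slots above its request ring. A small illustrative sketch, with macro values copied from the header but helper names invented for the example:

/* Illustrative only: how a tx_rings_mask of 0xFF and a tx/rx gap
 * of 8 pair request and response rings within one bank. */
#include <stdio.h>

#define TX_RINGS_MASK 0xFFu  /* rings 0-7 carry requests */
#define TX_RX_GAP     8u     /* response ring = request ring + 8 */

static int is_tx_ring(unsigned int ring)
{
	return (TX_RINGS_MASK >> ring) & 1;
}

static unsigned int rx_ring_for(unsigned int tx_ring)
{
	return tx_ring + TX_RX_GAP;
}

int main(void)
{
	unsigned int ring;

	for (ring = 0; ring < 8; ring++)
		if (is_tx_ring(ring))
			printf("tx ring %u -> rx ring %u\n",
			       ring, rx_ring_for(ring));
	return 0;
}
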
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
deleted file mode 100644 (file)
index 96854a1..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2014 - 2020 Intel Corporation */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include <adf_cfg.h>
-#include "adf_dh895xccvf_hw_data.h"
-
-static const struct pci_device_id adf_pci_tbl[] = {
-       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
-       .id_table = adf_pci_tbl,
-       .name = ADF_DH895XCCVF_DEVICE_NAME,
-       .probe = adf_probe,
-       .remove = adf_remove,
-};
-
-static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
-{
-       pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
-       pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
-}
-
-static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
-{
-       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
-       struct adf_accel_dev *pf;
-       int i;
-
-       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
-
-               if (bar->virt_addr)
-                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
-       }
-
-       if (accel_dev->hw_device) {
-               switch (accel_pci_dev->pci_dev->device) {
-               case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
-                       adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
-                       break;
-               default:
-                       break;
-               }
-               kfree(accel_dev->hw_device);
-               accel_dev->hw_device = NULL;
-       }
-       adf_cfg_dev_remove(accel_dev);
-       debugfs_remove(accel_dev->debugfs_dir);
-       pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
-       adf_devmgr_rm_dev(accel_dev, pf);
-}
-
-static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct adf_accel_dev *accel_dev;
-       struct adf_accel_dev *pf;
-       struct adf_accel_pci *accel_pci_dev;
-       struct adf_hw_device_data *hw_data;
-       char name[ADF_DEVICE_NAME_LENGTH];
-       unsigned int i, bar_nr;
-       unsigned long bar_mask;
-       int ret;
-
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
-               break;
-       default:
-               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
-               return -ENODEV;
-       }
-
-       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
-                                dev_to_node(&pdev->dev));
-       if (!accel_dev)
-               return -ENOMEM;
-
-       accel_dev->is_vf = true;
-       pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
-       accel_pci_dev = &accel_dev->accel_pci_dev;
-       accel_pci_dev->pci_dev = pdev;
-
-       /* Add accel device to accel table */
-       if (adf_devmgr_add_dev(accel_dev, pf)) {
-               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
-               kfree(accel_dev);
-               return -EFAULT;
-       }
-       INIT_LIST_HEAD(&accel_dev->crypto_list);
-
-       accel_dev->owner = THIS_MODULE;
-       /* Allocate and configure device configuration structure */
-       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
-                              dev_to_node(&pdev->dev));
-       if (!hw_data) {
-               ret = -ENOMEM;
-               goto out_err;
-       }
-       accel_dev->hw_device = hw_data;
-       adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
-
-       /* Get Accelerators and Acceleration Engines masks */
-       hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
-       hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
-       accel_pci_dev->sku = hw_data->get_sku(hw_data);
-
-       /* Create dev top level debugfs entry */
-       snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
-                hw_data->dev_class->name, pci_name(pdev));
-
-       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-
-       /* Create device configuration table */
-       ret = adf_cfg_dev_add(accel_dev);
-       if (ret)
-               goto out_err;
-
-       /* enable PCI device */
-       if (pci_enable_device(pdev)) {
-               ret = -EFAULT;
-               goto out_err;
-       }
-
-       /* set dma identifier */
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
-       if (ret) {
-               dev_err(&pdev->dev, "No usable DMA configuration\n");
-               goto out_err_disable;
-       }
-
-       if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) {
-               ret = -EFAULT;
-               goto out_err_disable;
-       }
-
-       /* Find and map all the device's BARs */
-       i = 0;
-       bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
-               struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
-
-               bar->base_addr = pci_resource_start(pdev, bar_nr);
-               if (!bar->base_addr)
-                       break;
-               bar->size = pci_resource_len(pdev, bar_nr);
-               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
-               if (!bar->virt_addr) {
-                       dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
-                       ret = -EFAULT;
-                       goto out_err_free_reg;
-               }
-       }
-       pci_set_master(pdev);
-       /* Completion for VF2PF request/response message exchange */
-       init_completion(&accel_dev->vf.msg_received);
-
-       ret = adf_dev_up(accel_dev, false);
-       if (ret)
-               goto out_err_dev_stop;
-
-       return ret;
-
-out_err_dev_stop:
-       adf_dev_down(accel_dev, false);
-out_err_free_reg:
-       pci_release_regions(accel_pci_dev->pci_dev);
-out_err_disable:
-       pci_disable_device(accel_pci_dev->pci_dev);
-out_err:
-       adf_cleanup_accel(accel_dev);
-       kfree(accel_dev);
-       return ret;
-}
-
-static void adf_remove(struct pci_dev *pdev)
-{
-       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
-
-       if (!accel_dev) {
-               pr_err("QAT: Driver removal failed\n");
-               return;
-       }
-       adf_flush_vf_wq(accel_dev);
-       adf_dev_down(accel_dev, false);
-       adf_cleanup_accel(accel_dev);
-       adf_cleanup_pci_dev(accel_dev);
-       kfree(accel_dev);
-}
-
-static int __init adfdrv_init(void)
-{
-       request_module("intel_qat");
-
-       if (pci_register_driver(&adf_driver)) {
-               pr_err("QAT: Driver initialization failed\n");
-               return -EFAULT;
-       }
-       return 0;
-}
-
-static void __exit adfdrv_release(void)
-{
-       pci_unregister_driver(&adf_driver);
-       adf_clean_vf_map(true);
-}
-
-module_init(adfdrv_init);
-module_exit(adfdrv_release);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel");
-MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_VERSION(ADF_DRV_VERSION);
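
The adf_probe() routine removed above acquires its resources in a fixed order (device-table entry, config table, PCI enable, regions, BAR mappings, device bring-up) and unwinds them in reverse through a chain of goto labels, so each failure path releases exactly what was taken. A generic sketch of that idiom follows; the acquire/release helpers are placeholders for this example, not driver APIs.

/* Generic sketch of the goto-unwind error handling used by
 * adf_probe(); all helpers here are hypothetical. */
#include <stdio.h>

static int acquire_a(void) { return 0; }   /* e.g. enable the device */
static int acquire_b(void) { return 0; }   /* e.g. request regions   */
static int acquire_c(void) { return -1; }  /* e.g. map BARs (fails)  */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto out_release_a;

	ret = acquire_c();
	if (ret)
		goto out_release_b;

	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
	return ret;
}

int main(void)
{
	/* acquire_c() fails, so b and a are released in reverse order */
	printf("probe: %d\n", probe());
	return 0;
}
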