Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 9 Jul 2019 03:57:08 +0000 (20:57 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 9 Jul 2019 03:57:08 +0000 (20:57 -0700)
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 5.3:

  API:
   - Test shash interface directly in testmgr
   - cra_driver_name is now mandatory

  Algorithms:
   - Replace arc4 crypto_cipher with library helper
   - Implement 5 way interleave for ECB, CBC and CTR on arm64
   - Add xxhash
   - Add continuous self-test on noise source to drbg
   - Update jitter RNG

  Drivers:
   - Add support for SHA204A random number generator
   - Add support for 7211 in iproc-rng200
   - Fix fuzz test failures in inside-secure
   - Fix fuzz test failures in talitos
   - Fix fuzz test failures in qat"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
  crypto: stm32/hash - remove interruptible condition for dma
  crypto: stm32/hash - Fix hmac issue more than 256 bytes
  crypto: stm32/crc32 - rename driver file
  crypto: amcc - remove memset after dma_alloc_coherent
  crypto: ccp - Switch to SPDX license identifiers
  crypto: ccp - Validate the error value used to index error messages
  crypto: doc - Fix formatting of new crypto engine content
  crypto: doc - Add parameter documentation
  crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
  crypto: arm64/aes-ce - add 5 way interleave routines
  crypto: talitos - drop icv_ool
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: asymmetric_keys - select CRYPTO_HASH where needed
  crypto: serpent - mark __serpent_setkey_sbox noinline
  crypto: testmgr - dynamically allocate crypto_shash
  crypto: testmgr - dynamically allocate testvec_config
  crypto: talitos - eliminate unneeded 'done' functions at build time
  ...

87 files changed:
1  2 
MAINTAINERS
arch/arm/crypto/sha512-glue.c
arch/arm64/crypto/aes-ce.S
arch/arm64/crypto/aes-modes.S
arch/arm64/crypto/aes-neon.S
arch/arm64/crypto/sha1-ce-glue.c
arch/arm64/crypto/sha2-ce-glue.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/chacha_glue.c
crypto/aead.c
crypto/algapi.c
crypto/arc4.c
crypto/chacha20poly1305.c
crypto/chacha_generic.c
crypto/cryptd.c
crypto/crypto_null.c
crypto/deflate.c
crypto/ghash-generic.c
crypto/lrw.c
crypto/lz4.c
crypto/lz4hc.c
crypto/lzo-rle.c
crypto/lzo.c
crypto/michael_mic.c
crypto/rmd128.c
crypto/rmd160.c
crypto/rmd256.c
crypto/rmd320.c
crypto/serpent_generic.c
crypto/skcipher.c
crypto/tea.c
crypto/testmgr.c
crypto/testmgr.h
crypto/tgr192.c
crypto/zstd.c
drivers/crypto/Kconfig
drivers/crypto/amcc/crypto4xx_alg.c
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/amcc/crypto4xx_core.h
drivers/crypto/bcm/cipher.c
drivers/crypto/bcm/spu2.c
drivers/crypto/cavium/cpt/cptvf_algs.c
drivers/crypto/ccp/ccp-crypto-aes.c
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-ops.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/mxs-dcp.c
drivers/crypto/nx/nx-842-powernv.c
drivers/crypto/nx/nx.c
drivers/crypto/nx/nx_debugfs.c
drivers/crypto/sahara.c
drivers/crypto/stm32/Makefile
drivers/crypto/stm32/stm32-crc32.c
drivers/crypto/stm32/stm32-hash.c
drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
drivers/crypto/talitos.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/aes_xts.c
drivers/crypto/vmx/vmx.c
drivers/i2c/i2c-core-acpi.c
drivers/net/ppp/Kconfig
drivers/net/ppp/ppp_mppe.c
fs/cifs/Kconfig
fs/cifs/cifsfs.c
include/crypto/aead.h
include/crypto/algapi.h
include/crypto/internal/hash.h
include/crypto/internal/skcipher.h
include/crypto/skcipher.h
include/linux/crypto.h
lib/scatterlist.c
net/mac80211/Kconfig
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/tkip.c
net/mac80211/tkip.h
net/mac80211/wep.c
net/mac80211/wep.h
net/mac80211/wpa.c
net/wireless/Kconfig
net/wireless/lib80211_crypt_tkip.c
net/wireless/lib80211_crypt_wep.c

diff --combined MAINTAINERS
@@@ -364,7 -364,7 +364,7 @@@ F: drivers/acpi/fan.
  
  ACPI FOR ARM64 (ACPI/arm64)
  M:    Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 -M:    Hanjun Guo <hanjun.guo@linaro.org>
 +M:    Hanjun Guo <guohanjun@huawei.com>
  M:    Sudeep Holla <sudeep.holla@arm.com>
  L:    linux-acpi@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -696,7 -696,6 +696,7 @@@ F: drivers/input/mouse/alps.
  ALTERA I2C CONTROLLER DRIVER
  M:    Thor Thayer <thor.thayer@linux.intel.com>
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-altera.txt
  F:    drivers/i2c/busses/i2c-altera.c
  
  ALTERA MAILBOX DRIVER
@@@ -1175,7 -1174,6 +1175,7 @@@ S:      Maintaine
  F:    Documentation/devicetree/bindings/arm/arm-boards
  F:    Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
  F:    Documentation/devicetree/bindings/clock/arm-integrator.txt
 +F:    Documentation/devicetree/bindings/i2c/i2c-versatile.txt
  F:    Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
  F:    Documentation/devicetree/bindings/mtd/arm-versatile.txt
  F:    arch/arm/mach-integrator/
@@@ -1235,7 -1233,7 +1235,7 @@@ F:      arch/arm/lib/floppydma.
  F:    arch/arm/include/asm/floppy.h
  
  ARM PMU PROFILING AND DEBUGGING
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  M:    Mark Rutland <mark.rutland@arm.com>
  S:    Maintained
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1306,14 -1304,8 +1306,14 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt
  F:    drivers/irqchip/irq-vic.c
  
 +AMAZON ANNAPURNA LABS FIC DRIVER
 +M:    Talel Shenhar <talel@amazon.com>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
 +F:    drivers/irqchip/irq-al-fic.c
 +
  ARM SMMU DRIVERS
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  R:    Robin Murphy <robin.murphy@arm.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -1789,7 -1781,6 +1789,7 @@@ ARM/LPC18XX ARCHITECTUR
  M:    Vladimir Zapolskiy <vz@mleia.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
  F:    arch/arm/boot/dts/lpc43*
  F:    drivers/i2c/busses/i2c-lpc2k.c
  F:    drivers/memory/pl172.c
@@@ -1803,7 -1794,6 +1803,7 @@@ M:      Sylvain Lemieux <slemieux.tyco@gmail
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://github.com/vzapolskiy/linux-lpc32xx.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-pnx.txt
  F:    arch/arm/boot/dts/lpc32*
  F:    arch/arm/mach-lpc32xx/
  F:    drivers/i2c/busses/i2c-pnx.c
@@@ -1928,8 -1918,6 +1928,8 @@@ ARM/NOMADIK/U300/Ux500 ARCHITECTURE
  M:    Linus Walleij <linus.walleij@linaro.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-nomadik.txt
 +F:    Documentation/devicetree/bindings/i2c/i2c-stu300.txt
  F:    arch/arm/mach-nomadik/
  F:    arch/arm/mach-u300/
  F:    arch/arm/mach-ux500/
@@@ -2091,7 -2079,7 +2091,7 @@@ F:      drivers/tty/serial/msm_serial.
  F:    drivers/usb/dwc3/dwc3-qcom.c
  F:    include/dt-bindings/*/qcom*
  F:    include/linux/*/qcom*
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
  
  ARM/RADISYS ENP2611 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -2152,7 -2140,6 +2152,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  L:    linux-rockchip@lists.infradead.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
  F:    arch/arm/boot/dts/rk3*
  F:    arch/arm/boot/dts/rv1108*
  F:    arch/arm/mach-rockchip/
@@@ -2288,7 -2275,6 +2288,7 @@@ M:      Patrice Chotard <patrice.chotard@st.
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  W:    http://www.stlinux.com
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-st.txt
  F:    arch/arm/mach-sti/
  F:    arch/arm/boot/dts/sti*
  F:    drivers/char/hw_random/st-rng.c
@@@ -2480,7 -2466,6 +2480,7 @@@ ARM/VT8500 ARM ARCHITECTUR
  M:    Tony Prisk <linux@prisktech.co.nz>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-wmt.txt
  F:    arch/arm/mach-vt8500/
  F:    drivers/clocksource/timer-vt8500.c
  F:    drivers/i2c/busses/i2c-wmt.c
@@@ -2546,8 -2531,6 +2546,8 @@@ F:      drivers/cpuidle/cpuidle-zynq.
  F:    drivers/block/xsysace.c
  N:    zynq
  N:    xilinx
 +F:    Documentation/devicetree/bindings/i2c/i2c-cadence.txt
 +F:    Documentation/devicetree/bindings/i2c/i2c-xiic.txt
  F:    drivers/clocksource/timer-cadence-ttc.c
  F:    drivers/i2c/busses/i2c-cadence.c
  F:    drivers/mmc/host/sdhci-of-arasan.c
@@@ -2556,7 -2539,7 +2556,7 @@@ F:      drivers/i2c/busses/i2c-xiic.
  
  ARM64 PORT (AARCH64 ARCHITECTURE)
  M:    Catalin Marinas <catalin.marinas@arm.com>
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
  S:    Maintained
@@@ -2644,7 -2627,7 +2644,7 @@@ F:      Documentation/devicetree/bindings/ee
  F:    drivers/misc/eeprom/at24.c
  
  ATA OVER ETHERNET (AOE) DRIVER
 -M:    "Ed L. Cashin" <ed.cashin@acm.org>
 +M:    "Justin Sanders" <justin@coraid.com>
  W:    http://www.openaoe.org/
  S:    Supported
  F:    Documentation/aoe/
@@@ -2740,7 -2723,7 +2740,7 @@@ S:      Maintaine
  F:    drivers/net/wireless/atmel/atmel*
  
  ATOMIC INFRASTRUCTURE
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  M:    Peter Zijlstra <peterz@infradead.org>
  R:    Boqun Feng <boqun.feng@gmail.com>
  L:    linux-kernel@vger.kernel.org
@@@ -2785,7 -2768,7 +2785,7 @@@ AVIA HX711 ANALOG DIGITAL CONVERTER II
  M:    Andreas Klinger <ak@it-klinger.de>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
 +F:    Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
  F:    drivers/iio/adc/hx711.c
  
  AX.25 NETWORK LAYER
@@@ -3066,9 -3049,8 +3066,9 @@@ S:      Maintaine
  F:    arch/riscv/net/
  
  BPF JIT for S390
 -M:    Martin Schwidefsky <schwidefsky@de.ibm.com>
  M:    Heiko Carstens <heiko.carstens@de.ibm.com>
 +M:    Vasily Gorbik <gor@linux.ibm.com>
 +M:    Christian Borntraeger <borntraeger@de.ibm.com>
  L:    netdev@vger.kernel.org
  L:    bpf@vger.kernel.org
  S:    Maintained
@@@ -3127,8 -3109,7 +3127,8 @@@ F:      arch/arm/mach-bcm
  
  BROADCOM BCM2835 ARM ARCHITECTURE
  M:    Eric Anholt <eric@anholt.net>
 -M:    Stefan Wahren <stefan.wahren@i2se.com>
 +M:    Stefan Wahren <wahrenst@gmx.net>
 +L:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://github.com/anholt/linux
@@@ -3158,7 -3139,6 +3158,7 @@@ F:      arch/arm/boot/dts/bcm953012
  
  BROADCOM BCM53573 ARM ARCHITECTURE
  M:    RafaÅ‚ MiÅ‚ecki <rafal@milecki.pl>
 +L:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
  F:    arch/arm/boot/dts/bcm53573*
@@@ -3948,14 -3928,6 +3948,14 @@@ M:    Miguel Ojeda <miguel.ojeda.sandonis@
  S:    Maintained
  F:    .clang-format
  
 +CLANG/LLVM BUILD SUPPORT
 +L:    clang-built-linux@googlegroups.com
 +W:    https://clangbuiltlinux.github.io/
 +B:    https://github.com/ClangBuiltLinux/linux/issues
 +C:    irc://chat.freenode.net/clangbuiltlinux
 +S:    Supported
 +K:    \b(?i:clang|llvm)\b
 +
  CLEANCACHE API
  M:    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  L:    linux-kernel@vger.kernel.org
@@@ -4257,6 -4229,7 +4257,7 @@@ F:      crypto
  F:    drivers/crypto/
  F:    include/crypto/
  F:    include/linux/crypto*
+ F:    lib/crypto/
  
  CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
  M:    Neil Horman <nhorman@tuxdriver.com>
@@@ -6250,6 -6223,7 +6251,6 @@@ F:      include/linux/ipmi-fru.
  K:    fmc_d.*register
  
  FPGA MANAGER FRAMEWORK
 -M:    Alan Tull <atull@kernel.org>
  M:    Moritz Fischer <mdf@kernel.org>
  L:    linux-fpga@vger.kernel.org
  S:    Maintained
@@@ -6343,13 -6317,6 +6344,13 @@@ L:    linux-i2c@vger.kernel.or
  S:    Maintained
  F:    drivers/i2c/busses/i2c-cpm.c
  
 +FREESCALE IMX DDR PMU DRIVER
 +M:    Frank Li <Frank.li@nxp.com>
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Maintained
 +F:    drivers/perf/fsl_imx8_ddr_perf.c
 +F:    Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
 +
  FREESCALE IMX LPI2C DRIVER
  M:    Dong Aisheng <aisheng.dong@nxp.com>
  L:    linux-i2c@vger.kernel.org
@@@ -6687,18 -6654,6 +6688,18 @@@ L:    kvm@vger.kernel.or
  S:    Supported
  F:    drivers/uio/uio_pci_generic.c
  
 +GENERIC VDSO LIBRARY:
 +M:    Andy Lutomirski <luto@kernel.org>
 +M:    Thomas Gleixner <tglx@linutronix.de>
 +M:    Vincenzo Frascino <vincenzo.frascino@arm.com>
 +L:    linux-kernel@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/vdso
 +S:    Maintained
 +F:    lib/vdso/
 +F:    kernel/time/vsyscall.c
 +F:    include/vdso/
 +F:    include/asm-generic/vdso/vsyscall.h
 +
  GENWQE (IBM Generic Workqueue Card)
  M:    Frank Haverkamp <haver@linux.ibm.com>
  S:    Supported
@@@ -7336,7 -7291,6 +7337,7 @@@ F:      arch/x86/include/asm/trace/hyperv.
  F:    arch/x86/include/asm/hyperv-tlfs.h
  F:    arch/x86/kernel/cpu/mshyperv.c
  F:    arch/x86/hyperv
 +F:    drivers/clocksource/hyperv_timer.c
  F:    drivers/hid/hid-hyperv.c
  F:    drivers/hv/
  F:    drivers/input/serio/hyperv-keyboard.c
@@@ -7347,7 -7301,6 +7348,7 @@@ F:      drivers/uio/uio_hv_generic.
  F:    drivers/video/fbdev/hyperv_fb.c
  F:    drivers/iommu/hyperv_iommu.c
  F:    net/vmw_vsock/hyperv_transport.c
 +F:    include/clocksource/hyperv_timer.h
  F:    include/linux/hyperv.h
  F:    include/uapi/linux/hyperv.h
  F:    tools/hv/
@@@ -7389,7 -7342,6 +7390,7 @@@ I2C MV64XXX MARVELL AND ALLWINNER DRIVE
  M:    Gregory CLEMENT <gregory.clement@bootlin.com>
  L:    linux-i2c@vger.kernel.org
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
  F:    drivers/i2c/busses/i2c-mv64xxx.c
  
  I2C OVER PARALLEL PORT
@@@ -7837,7 -7789,7 +7838,7 @@@ INGENIC JZ4780 NAND DRIVE
  M:    Harvey Hunt <harveyhuntnexus@gmail.com>
  L:    linux-mtd@lists.infradead.org
  S:    Maintained
 -F:    drivers/mtd/nand/raw/jz4780_*
 +F:    drivers/mtd/nand/raw/ingenic/
  
  INOTIFY
  M:    Jan Kara <jack@suse.cz>
@@@ -8602,7 -8554,7 +8603,7 @@@ S:      Odd Fixe
  
  KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
  M:    "J. Bruce Fields" <bfields@fieldses.org>
 -M:    Jeff Layton <jlayton@kernel.org>
 +M:    Chuck Lever <chuck.lever@oracle.com>
  L:    linux-nfs@vger.kernel.org
  W:    http://nfs.sourceforge.net/
  T:    git git://linux-nfs.org/~bfields/linux.git
@@@ -8660,12 -8612,14 +8661,12 @@@ F:   arch/x86/include/asm/svm.
  F:    arch/x86/kvm/svm.c
  
  KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 -M:    Christoffer Dall <christoffer.dall@arm.com>
  M:    Marc Zyngier <marc.zyngier@arm.com>
  R:    James Morse <james.morse@arm.com>
  R:    Julien Thierry <julien.thierry@arm.com>
  R:    Suzuki K Pouloze <suzuki.poulose@arm.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    kvmarm@lists.cs.columbia.edu
 -W:    http://systems.cs.columbia.edu/projects/kvm-arm
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
  S:    Maintained
  F:    arch/arm/include/uapi/asm/kvm*
@@@ -9157,7 -9111,7 +9158,7 @@@ F:      drivers/misc/lkdtm/
  LINUX KERNEL MEMORY CONSISTENCY MODEL (LKMM)
  M:    Alan Stern <stern@rowland.harvard.edu>
  M:    Andrea Parri <andrea.parri@amarulasolutions.com>
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  M:    Peter Zijlstra <peterz@infradead.org>
  M:    Boqun Feng <boqun.feng@gmail.com>
  M:    Nicholas Piggin <npiggin@gmail.com>
@@@ -9265,7 -9219,7 +9266,7 @@@ F:      Documentation/admin-guide/LSM/LoadPi
  LOCKING PRIMITIVES
  M:    Peter Zijlstra <peterz@infradead.org>
  M:    Ingo Molnar <mingo@redhat.com>
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  L:    linux-kernel@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
  S:    Maintained
@@@ -10586,7 -10540,7 +10587,7 @@@ F:   arch/arm/boot/dts/mmp
  F:    arch/arm/mach-mmp/
  
  MMU GATHER AND TLB INVALIDATION
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  M:    "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
  M:    Andrew Morton <akpm@linux-foundation.org>
  M:    Nick Piggin <npiggin@gmail.com>
@@@ -11115,8 -11069,10 +11116,8 @@@ S:  Supporte
  F:    drivers/net/ethernet/qlogic/netxen/
  
  NFC SUBSYSTEM
 -M:    Samuel Ortiz <sameo@linux.intel.com>
 -L:    linux-wireless@vger.kernel.org
 -L:    linux-nfc@lists.01.org (subscribers-only)
 -S:    Supported
 +L:    netdev@vger.kernel.org
 +S:    Orphan
  F:    net/nfc/
  F:    include/net/nfc/
  F:    include/uapi/linux/nfc.h
@@@ -11273,7 -11229,7 +11274,7 @@@ F:   drivers/video/fbdev/riva
  F:    drivers/video/fbdev/nvidia/
  
  NVM EXPRESS DRIVER
 -M:    Keith Busch <keith.busch@intel.com>
 +M:    Keith Busch <kbusch@kernel.org>
  M:    Jens Axboe <axboe@fb.com>
  M:    Christoph Hellwig <hch@lst.de>
  M:    Sagi Grimberg <sagi@grimberg.me>
@@@ -11773,7 -11729,6 +11774,7 @@@ M:   Peter Korsgaard <peter@korsgaard.com
  M:    Andrew Lunn <andrew@lunn.ch>
  L:    linux-i2c@vger.kernel.org
  S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-ocores.txt
  F:    Documentation/i2c/busses/i2c-ocores
  F:    drivers/i2c/busses/i2c-ocores.c
  F:    include/linux/platform_data/i2c-ocores.h
@@@ -12075,7 -12030,7 +12076,7 @@@ S:   Maintaine
  F:    drivers/pci/controller/dwc/*layerscape*
  
  PCI DRIVER FOR GENERIC OF HOSTS
 -M:    Will Deacon <will.deacon@arm.com>
 +M:    Will Deacon <will@kernel.org>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -13093,6 -13048,7 +13094,6 @@@ F:   Documentation/devicetree/bindings/ne
  
  QUALCOMM GENERIC INTERFACE I2C DRIVER
  M:    Alok Chauhan <alokc@codeaurora.org>
 -M:    Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
  L:    linux-i2c@vger.kernel.org
  L:    linux-arm-msm@vger.kernel.org
  S:    Supported
@@@ -13412,7 -13368,6 +13413,7 @@@ F:   drivers/clk/renesas
  RENESAS EMEV2 I2C DRIVER
  M:    Wolfram Sang <wsa+renesas@sang-engineering.com>
  S:    Supported
 +F:    Documentation/devicetree/bindings/i2c/i2c-emev2.txt
  F:    drivers/i2c/busses/i2c-emev2.c
  
  RENESAS ETHERNET DRIVERS
@@@ -13434,8 -13389,6 +13435,8 @@@ F:   drivers/iio/adc/rcar-gyroadc.
  RENESAS R-CAR I2C DRIVERS
  M:    Wolfram Sang <wsa+renesas@sang-engineering.com>
  S:    Supported
 +F:    Documentation/devicetree/bindings/i2c/i2c-rcar.txt
 +F:    Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
  F:    drivers/i2c/busses/i2c-rcar.c
  F:    drivers/i2c/busses/i2c-sh_mobile.c
  
@@@ -13666,9 -13619,8 +13667,9 @@@ S:   Maintaine
  F:    drivers/video/fbdev/savage/
  
  S390
 -M:    Martin Schwidefsky <schwidefsky@de.ibm.com>
  M:    Heiko Carstens <heiko.carstens@de.ibm.com>
 +M:    Vasily Gorbik <gor@linux.ibm.com>
 +M:    Christian Borntraeger <borntraeger@de.ibm.com>
  L:    linux-s390@vger.kernel.org
  W:    http://www.ibm.com/developerworks/linux/linux390/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
@@@ -13738,7 -13690,7 +13739,7 @@@ L:   linux-s390@vger.kernel.or
  L:    kvm@vger.kernel.org
  S:    Supported
  F:    drivers/s390/cio/vfio_ccw*
 -F:    Documentation/s390/vfio-ccw.txt
 +F:    Documentation/s390/vfio-ccw.rst
  F:    include/uapi/linux/vfio_ccw.h
  
  S390 ZCRYPT DRIVER
@@@ -13758,7 -13710,7 +13759,7 @@@ S:   Supporte
  F:    drivers/s390/crypto/vfio_ap_drv.c
  F:    drivers/s390/crypto/vfio_ap_private.h
  F:    drivers/s390/crypto/vfio_ap_ops.c
 -F:    Documentation/s390/vfio-ap.txt
 +F:    Documentation/s390/vfio-ap.rst
  
  S390 ZFCP DRIVER
  M:    Steffen Maier <maier@linux.ibm.com>
@@@ -14371,15 -14323,6 +14372,15 @@@ S: Supporte
  K:    sifive
  N:    sifive
  
 +SIFIVE FU540 SYSTEM-ON-CHIP
 +M:    Paul Walmsley <paul.walmsley@sifive.com>
 +M:    Palmer Dabbelt <palmer@sifive.com>
 +L:    linux-riscv@lists.infradead.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
 +S:    Supported
 +K:    fu540
 +N:    fu540
 +
  SILEAD TOUCHSCREEN DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
  L:    linux-input@vger.kernel.org
@@@ -14411,7 -14354,7 +14412,7 @@@ SIMPLEFB FB DRIVE
  M:    Hans de Goede <hdegoede@redhat.com>
  L:    linux-fbdev@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/simple-framebuffer.txt
 +F:    Documentation/devicetree/bindings/display/simple-framebuffer.yaml
  F:    drivers/video/fbdev/simplefb.c
  F:    include/linux/platform_data/simplefb.h
  
@@@ -14440,7 -14383,7 +14441,7 @@@ F:   lib/test_siphash.
  F:    include/linux/siphash.h
  
  SIOX
 -M:    Gavin Schenk <g.schenk@eckelmann.de>
 +M:    Thorsten Scherer <t.scherer@eckelmann.de>
  M:    Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
  R:    Pengutronix Kernel Team <kernel@pengutronix.de>
  S:    Supported
@@@ -15039,7 -14982,7 +15040,7 @@@ S:   Odd Fixe
  F:    drivers/net/ethernet/adaptec/starfire*
  
  STEC S1220 SKD DRIVER
 -M:    Bart Van Assche <bart.vanassche@wdc.com>
 +M:    Damien Le Moal <Damien.LeMoal@wdc.com>
  L:    linux-block@vger.kernel.org
  S:    Maintained
  F:    drivers/block/skd*[ch]
@@@ -15530,7 -15473,6 +15531,7 @@@ F:   drivers/dma/tegra
  
  TEGRA I2C DRIVER
  M:    Laxman Dewangan <ldewangan@nvidia.com>
 +R:    Dmitry Osipenko <digetx@gmail.com>
  S:    Supported
  F:    drivers/i2c/busses/i2c-tegra.c
  
@@@ -15735,7 -15677,6 +15736,7 @@@ R:   Bartosz Golaszewski <bgolaszewski@ba
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
  S:    Supported
 +F:    Documentation/devicetree/bindings/i2c/i2c-davinci.txt
  F:    arch/arm/mach-davinci/
  F:    drivers/i2c/busses/i2c-davinci.c
  F:    arch/arm/boot/dts/da850*
@@@ -17357,7 -17298,7 +17358,7 @@@ F:   Documentation/ABI/stable/sysfs-hyper
  F:    Documentation/ABI/testing/sysfs-hypervisor-xen
  
  XEN NETWORK BACKEND DRIVER
 -M:    Wei Liu <wei.liu2@citrix.com>
 +M:    Wei Liu <wei.liu@kernel.org>
  M:    Paul Durrant <paul.durrant@citrix.com>
  L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
  L:    netdev@vger.kernel.org
@@@ -17439,7 -17380,6 +17440,7 @@@ M:   Jan Glauber <jglauber@cavium.com
  L:    linux-i2c@vger.kernel.org
  W:    http://www.cavium.com
  S:    Supported
 +F:    Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt
  F:    drivers/i2c/busses/i2c-xlp9xx.c
  
  XRA1403 GPIO EXPANDER
@@@ -17523,12 -17463,6 +17524,12 @@@ Q: https://patchwork.linuxtv.org/projec
  S:    Maintained
  F:    drivers/media/dvb-frontends/zd1301_demod*
  
 +ZHAOXIN PROCESSOR SUPPORT
 +M:    Tony W Wang-oc <TonyWWang-oc@zhaoxin.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    arch/x86/kernel/cpu/zhaoxin.c
 +
  ZPOOL COMPRESSED PAGE STORAGE API
  M:    Dan Streetman <ddstreet@ieee.org>
  L:    linux-mm@kvack.org
@@@ -1,8 -1,11 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * sha512-glue.c - accelerated SHA-384/512 for ARM
   *
   * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <crypto/internal/hash.h>
@@@ -34,7 -37,7 +34,7 @@@ int sha512_arm_update(struct shash_des
                (sha512_block_fn *)sha512_block_data_order);
  }
  
- int sha512_arm_final(struct shash_desc *desc, u8 *out)
static int sha512_arm_final(struct shash_desc *desc, u8 *out)
  {
        sha512_base_do_finalize(desc,
                (sha512_block_fn *)sha512_block_data_order);
@@@ -1,9 -1,12 +1,9 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
   *                                    Crypto Extensions
   *
   * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <linux/linkage.h>
@@@ -15,6 -18,8 +15,8 @@@
        .arch           armv8-a+crypto
  
        xtsmask         .req    v16
+       cbciv           .req    v16
+       vctr            .req    v16
  
        .macro          xts_reload_mask, tmp
        .endm
@@@ -49,7 -54,7 +51,7 @@@
        load_round_keys \rounds, \temp
        .endm
  
-       .macro          do_enc_Nx, de, mc, k, i0, i1, i2, i3
+       .macro          do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4
        aes\de          \i0\().16b, \k\().16b
        aes\mc          \i0\().16b, \i0\().16b
        .ifnb           \i1
        aes\mc          \i2\().16b, \i2\().16b
        aes\de          \i3\().16b, \k\().16b
        aes\mc          \i3\().16b, \i3\().16b
+       .ifnb           \i4
+       aes\de          \i4\().16b, \k\().16b
+       aes\mc          \i4\().16b, \i4\().16b
+       .endif
        .endif
        .endif
        .endm
  
-       /* up to 4 interleaved encryption rounds with the same round key */
-       .macro          round_Nx, enc, k, i0, i1, i2, i3
+       /* up to 5 interleaved encryption rounds with the same round key */
+       .macro          round_Nx, enc, k, i0, i1, i2, i3, i4
        .ifc            \enc, e
-       do_enc_Nx       e, mc, \k, \i0, \i1, \i2, \i3
+       do_enc_Nx       e, mc, \k, \i0, \i1, \i2, \i3, \i4
        .else
-       do_enc_Nx       d, imc, \k, \i0, \i1, \i2, \i3
+       do_enc_Nx       d, imc, \k, \i0, \i1, \i2, \i3, \i4
        .endif
        .endm
  
-       /* up to 4 interleaved final rounds */
-       .macro          fin_round_Nx, de, k, k2, i0, i1, i2, i3
+       /* up to 5 interleaved final rounds */
+       .macro          fin_round_Nx, de, k, k2, i0, i1, i2, i3, i4
        aes\de          \i0\().16b, \k\().16b
        .ifnb           \i1
        aes\de          \i1\().16b, \k\().16b
        .ifnb           \i3
        aes\de          \i2\().16b, \k\().16b
        aes\de          \i3\().16b, \k\().16b
+       .ifnb           \i4
+       aes\de          \i4\().16b, \k\().16b
+       .endif
        .endif
        .endif
        eor             \i0\().16b, \i0\().16b, \k2\().16b
        .ifnb           \i3
        eor             \i2\().16b, \i2\().16b, \k2\().16b
        eor             \i3\().16b, \i3\().16b, \k2\().16b
+       .ifnb           \i4
+       eor             \i4\().16b, \i4\().16b, \k2\().16b
+       .endif
        .endif
        .endif
        .endm
  
-       /* up to 4 interleaved blocks */
-       .macro          do_block_Nx, enc, rounds, i0, i1, i2, i3
+       /* up to 5 interleaved blocks */
+       .macro          do_block_Nx, enc, rounds, i0, i1, i2, i3, i4
        cmp             \rounds, #12
        blo             2222f           /* 128 bits */
        beq             1111f           /* 192 bits */
-       round_Nx        \enc, v17, \i0, \i1, \i2, \i3
-       round_Nx        \enc, v18, \i0, \i1, \i2, \i3
- 1111: round_Nx        \enc, v19, \i0, \i1, \i2, \i3
-       round_Nx        \enc, v20, \i0, \i1, \i2, \i3
+       round_Nx        \enc, v17, \i0, \i1, \i2, \i3, \i4
+       round_Nx        \enc, v18, \i0, \i1, \i2, \i3, \i4
+ 1111: round_Nx        \enc, v19, \i0, \i1, \i2, \i3, \i4
+       round_Nx        \enc, v20, \i0, \i1, \i2, \i3, \i4
  2222: .irp            key, v21, v22, v23, v24, v25, v26, v27, v28, v29
-       round_Nx        \enc, \key, \i0, \i1, \i2, \i3
+       round_Nx        \enc, \key, \i0, \i1, \i2, \i3, \i4
        .endr
-       fin_round_Nx    \enc, v30, v31, \i0, \i1, \i2, \i3
+       fin_round_Nx    \enc, v30, v31, \i0, \i1, \i2, \i3, \i4
        .endm
  
        .macro          encrypt_block, in, rounds, t0, t1, t2
        do_block_Nx     e, \rounds, \in
        .endm
  
-       .macro          encrypt_block2x, i0, i1, rounds, t0, t1, t2
-       do_block_Nx     e, \rounds, \i0, \i1
-       .endm
        .macro          encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
        do_block_Nx     e, \rounds, \i0, \i1, \i2, \i3
        .endm
  
-       .macro          decrypt_block, in, rounds, t0, t1, t2
-       do_block_Nx     d, \rounds, \in
+       .macro          encrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
+       do_block_Nx     e, \rounds, \i0, \i1, \i2, \i3, \i4
        .endm
  
-       .macro          decrypt_block2x, i0, i1, rounds, t0, t1, t2
-       do_block_Nx     d, \rounds, \i0, \i1
+       .macro          decrypt_block, in, rounds, t0, t1, t2
+       do_block_Nx     d, \rounds, \in
        .endm
  
        .macro          decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2
        do_block_Nx     d, \rounds, \i0, \i1, \i2, \i3
        .endm
  
+       .macro          decrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2
+       do_block_Nx     d, \rounds, \i0, \i1, \i2, \i3, \i4
+       .endm
+ #define MAX_STRIDE    5
  #include "aes-modes.S"
@@@ -1,8 -1,11 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
   *
   * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  /* included by aes-ce.S and aes-neon.S */
        .text
        .align          4
  
+ #ifndef MAX_STRIDE
+ #define MAX_STRIDE    4
+ #endif
+ #if MAX_STRIDE == 4
+ #define ST4(x...) x
+ #define ST5(x...)
+ #else
+ #define ST4(x...)
+ #define ST5(x...) x
+ #endif
  aes_encrypt_block4x:
        encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
        ret
@@@ -20,6 -35,18 +32,18 @@@ aes_decrypt_block4x
        ret
  ENDPROC(aes_decrypt_block4x)
  
+ #if MAX_STRIDE == 5
+ aes_encrypt_block5x:
+       encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
+       ret
+ ENDPROC(aes_encrypt_block5x)
+ aes_decrypt_block5x:
+       decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
+       ret
+ ENDPROC(aes_decrypt_block5x)
+ #endif
        /*
         * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
         *                 int blocks)
@@@ -34,14 -61,17 +58,17 @@@ AES_ENTRY(aes_ecb_encrypt
        enc_prepare     w3, x2, x5
  
  .LecbencloopNx:
-       subs            w4, w4, #4
+       subs            w4, w4, #MAX_STRIDE
        bmi             .Lecbenc1x
        ld1             {v0.16b-v3.16b}, [x1], #64      /* get 4 pt blocks */
-       bl              aes_encrypt_block4x
+ ST4(  bl              aes_encrypt_block4x             )
+ ST5(  ld1             {v4.16b}, [x1], #16             )
+ ST5(  bl              aes_encrypt_block5x             )
        st1             {v0.16b-v3.16b}, [x0], #64
+ ST5(  st1             {v4.16b}, [x0], #16             )
        b               .LecbencloopNx
  .Lecbenc1x:
-       adds            w4, w4, #4
+       adds            w4, w4, #MAX_STRIDE
        beq             .Lecbencout
  .Lecbencloop:
        ld1             {v0.16b}, [x1], #16             /* get next pt block */
@@@ -62,14 -92,17 +89,17 @@@ AES_ENTRY(aes_ecb_decrypt
        dec_prepare     w3, x2, x5
  
  .LecbdecloopNx:
-       subs            w4, w4, #4
+       subs            w4, w4, #MAX_STRIDE
        bmi             .Lecbdec1x
        ld1             {v0.16b-v3.16b}, [x1], #64      /* get 4 ct blocks */
-       bl              aes_decrypt_block4x
+ ST4(  bl              aes_decrypt_block4x             )
+ ST5(  ld1             {v4.16b}, [x1], #16             )
+ ST5(  bl              aes_decrypt_block5x             )
        st1             {v0.16b-v3.16b}, [x0], #64
+ ST5(  st1             {v4.16b}, [x0], #16             )
        b               .LecbdecloopNx
  .Lecbdec1x:
-       adds            w4, w4, #4
+       adds            w4, w4, #MAX_STRIDE
        beq             .Lecbdecout
  .Lecbdecloop:
        ld1             {v0.16b}, [x1], #16             /* get next ct block */
@@@ -129,39 -162,56 +159,56 @@@ AES_ENTRY(aes_cbc_decrypt
        stp             x29, x30, [sp, #-16]!
        mov             x29, sp
  
-       ld1             {v7.16b}, [x5]                  /* get iv */
+       ld1             {cbciv.16b}, [x5]               /* get iv */
        dec_prepare     w3, x2, x6
  
  .LcbcdecloopNx:
-       subs            w4, w4, #4
+       subs            w4, w4, #MAX_STRIDE
        bmi             .Lcbcdec1x
        ld1             {v0.16b-v3.16b}, [x1], #64      /* get 4 ct blocks */
+ #if MAX_STRIDE == 5
+       ld1             {v4.16b}, [x1], #16             /* get 1 ct block */
+       mov             v5.16b, v0.16b
+       mov             v6.16b, v1.16b
+       mov             v7.16b, v2.16b
+       bl              aes_decrypt_block5x
+       sub             x1, x1, #32
+       eor             v0.16b, v0.16b, cbciv.16b
+       eor             v1.16b, v1.16b, v5.16b
+       ld1             {v5.16b}, [x1], #16             /* reload 1 ct block */
+       ld1             {cbciv.16b}, [x1], #16          /* reload 1 ct block */
+       eor             v2.16b, v2.16b, v6.16b
+       eor             v3.16b, v3.16b, v7.16b
+       eor             v4.16b, v4.16b, v5.16b
+ #else
        mov             v4.16b, v0.16b
        mov             v5.16b, v1.16b
        mov             v6.16b, v2.16b
        bl              aes_decrypt_block4x
        sub             x1, x1, #16
-       eor             v0.16b, v0.16b, v7.16b
+       eor             v0.16b, v0.16b, cbciv.16b
        eor             v1.16b, v1.16b, v4.16b
-       ld1             {v7.16b}, [x1], #16             /* reload 1 ct block */
+       ld1             {cbciv.16b}, [x1], #16          /* reload 1 ct block */
        eor             v2.16b, v2.16b, v5.16b
        eor             v3.16b, v3.16b, v6.16b
+ #endif
        st1             {v0.16b-v3.16b}, [x0], #64
+ ST5(  st1             {v4.16b}, [x0], #16             )
        b               .LcbcdecloopNx
  .Lcbcdec1x:
-       adds            w4, w4, #4
+       adds            w4, w4, #MAX_STRIDE
        beq             .Lcbcdecout
  .Lcbcdecloop:
        ld1             {v1.16b}, [x1], #16             /* get next ct block */
        mov             v0.16b, v1.16b                  /* ...and copy to v0 */
        decrypt_block   v0, w3, x2, x6, w7
-       eor             v0.16b, v0.16b, v7.16b          /* xor with iv => pt */
-       mov             v7.16b, v1.16b                  /* ct is next iv */
+       eor             v0.16b, v0.16b, cbciv.16b       /* xor with iv => pt */
+       mov             cbciv.16b, v1.16b               /* ct is next iv */
        st1             {v0.16b}, [x0], #16
        subs            w4, w4, #1
        bne             .Lcbcdecloop
  .Lcbcdecout:
-       st1             {v7.16b}, [x5]                  /* return iv */
+       st1             {cbciv.16b}, [x5]               /* return iv */
        ldp             x29, x30, [sp], #16
        ret
  AES_ENDPROC(aes_cbc_decrypt)
@@@ -255,51 -305,60 +302,60 @@@ AES_ENTRY(aes_ctr_encrypt
        mov             x29, sp
  
        enc_prepare     w3, x2, x6
-       ld1             {v4.16b}, [x5]
+       ld1             {vctr.16b}, [x5]
  
-       umov            x6, v4.d[1]             /* keep swabbed ctr in reg */
+       umov            x6, vctr.d[1]           /* keep swabbed ctr in reg */
        rev             x6, x6
        cmn             w6, w4                  /* 32 bit overflow? */
        bcs             .Lctrloop
  .LctrloopNx:
-       subs            w4, w4, #4
+       subs            w4, w4, #MAX_STRIDE
        bmi             .Lctr1x
        add             w7, w6, #1
-       mov             v0.16b, v4.16b
+       mov             v0.16b, vctr.16b
        add             w8, w6, #2
-       mov             v1.16b, v4.16b
+       mov             v1.16b, vctr.16b
+       add             w9, w6, #3
+       mov             v2.16b, vctr.16b
        add             w9, w6, #3
-       mov             v2.16b, v4.16b
        rev             w7, w7
-       mov             v3.16b, v4.16b
+       mov             v3.16b, vctr.16b
        rev             w8, w8
+ ST5(  mov             v4.16b, vctr.16b                )
        mov             v1.s[3], w7
        rev             w9, w9
+ ST5(  add             w10, w6, #4                     )
        mov             v2.s[3], w8
+ ST5(  rev             w10, w10                        )
        mov             v3.s[3], w9
+ ST5(  mov             v4.s[3], w10                    )
        ld1             {v5.16b-v7.16b}, [x1], #48      /* get 3 input blocks */
-       bl              aes_encrypt_block4x
+ ST4(  bl              aes_encrypt_block4x             )
+ ST5(  bl              aes_encrypt_block5x             )
        eor             v0.16b, v5.16b, v0.16b
-       ld1             {v5.16b}, [x1], #16             /* get 1 input block  */
+ ST4(  ld1             {v5.16b}, [x1], #16             )
        eor             v1.16b, v6.16b, v1.16b
+ ST5(  ld1             {v5.16b-v6.16b}, [x1], #32      )
        eor             v2.16b, v7.16b, v2.16b
        eor             v3.16b, v5.16b, v3.16b
+ ST5(  eor             v4.16b, v6.16b, v4.16b          )
        st1             {v0.16b-v3.16b}, [x0], #64
-       add             x6, x6, #4
+ ST5(  st1             {v4.16b}, [x0], #16             )
+       add             x6, x6, #MAX_STRIDE
        rev             x7, x6
-       ins             v4.d[1], x7
+       ins             vctr.d[1], x7
        cbz             w4, .Lctrout
        b               .LctrloopNx
  .Lctr1x:
-       adds            w4, w4, #4
+       adds            w4, w4, #MAX_STRIDE
        beq             .Lctrout
  .Lctrloop:
-       mov             v0.16b, v4.16b
+       mov             v0.16b, vctr.16b
        encrypt_block   v0, w3, x2, x8, w7
  
        adds            x6, x6, #1              /* increment BE ctr */
        rev             x7, x6
-       ins             v4.d[1], x7
+       ins             vctr.d[1], x7
        bcs             .Lctrcarry              /* overflow? */
  
  .Lctrcarrydone:
        bne             .Lctrloop
  
  .Lctrout:
-       st1             {v4.16b}, [x5]          /* return next CTR value */
+       st1             {vctr.16b}, [x5]        /* return next CTR value */
        ldp             x29, x30, [sp], #16
        ret
  
        b               .Lctrout
  
  .Lctrcarry:
-       umov            x7, v4.d[0]             /* load upper word of ctr  */
+       umov            x7, vctr.d[0]           /* load upper word of ctr  */
        rev             x7, x7                  /* ... to handle the carry */
        add             x7, x7, #1
        rev             x7, x7
-       ins             v4.d[0], x7
+       ins             vctr.d[0], x7
        b               .Lctrcarrydone
  AES_ENDPROC(aes_ctr_encrypt)
  
@@@ -1,8 -1,11 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * linux/arch/arm64/crypto/aes-neon.S - AES cipher for ARMv8 NEON
   *
   * Copyright (C) 2013 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <linux/linkage.h>
@@@ -12,6 -15,8 +12,8 @@@
  #define AES_ENDPROC(func)     ENDPROC(neon_ ## func)
  
        xtsmask         .req    v7
+       cbciv           .req    v7
+       vctr            .req    v4
  
        .macro          xts_reload_mask, tmp
        xts_load_mask   \tmp
  
        /*
         * Interleaved versions: functionally equivalent to the
-        * ones above, but applied to 2 or 4 AES states in parallel.
+        * ones above, but applied to AES states in parallel.
         */
  
-       .macro          sub_bytes_2x, in0, in1
-       sub             v8.16b, \in0\().16b, v15.16b
-       tbl             \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
-       sub             v9.16b, \in1\().16b, v15.16b
-       tbl             \in1\().16b, {v16.16b-v19.16b}, \in1\().16b
-       sub             v10.16b, v8.16b, v15.16b
-       tbx             \in0\().16b, {v20.16b-v23.16b}, v8.16b
-       sub             v11.16b, v9.16b, v15.16b
-       tbx             \in1\().16b, {v20.16b-v23.16b}, v9.16b
-       sub             v8.16b, v10.16b, v15.16b
-       tbx             \in0\().16b, {v24.16b-v27.16b}, v10.16b
-       sub             v9.16b, v11.16b, v15.16b
-       tbx             \in1\().16b, {v24.16b-v27.16b}, v11.16b
-       tbx             \in0\().16b, {v28.16b-v31.16b}, v8.16b
-       tbx             \in1\().16b, {v28.16b-v31.16b}, v9.16b
-       .endm
        .macro          sub_bytes_4x, in0, in1, in2, in3
        sub             v8.16b, \in0\().16b, v15.16b
        tbl             \in0\().16b, {v16.16b-v19.16b}, \in0\().16b
        eor             \in1\().16b, \in1\().16b, v11.16b
        .endm
  
-       .macro          do_block_2x, enc, in0, in1, rounds, rk, rkp, i
-       ld1             {v15.4s}, [\rk]
-       add             \rkp, \rk, #16
-       mov             \i, \rounds
- 1111: eor             \in0\().16b, \in0\().16b, v15.16b       /* ^round key */
-       eor             \in1\().16b, \in1\().16b, v15.16b       /* ^round key */
-       movi            v15.16b, #0x40
-       tbl             \in0\().16b, {\in0\().16b}, v13.16b     /* ShiftRows */
-       tbl             \in1\().16b, {\in1\().16b}, v13.16b     /* ShiftRows */
-       sub_bytes_2x    \in0, \in1
-       subs            \i, \i, #1
-       ld1             {v15.4s}, [\rkp], #16
-       beq             2222f
-       mix_columns_2x  \in0, \in1, \enc
-       b               1111b
- 2222: eor             \in0\().16b, \in0\().16b, v15.16b       /* ^round key */
-       eor             \in1\().16b, \in1\().16b, v15.16b       /* ^round key */
-       .endm
        .macro          do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
        ld1             {v15.4s}, [\rk]
        add             \rkp, \rk, #16
        eor             \in3\().16b, \in3\().16b, v15.16b       /* ^round key */
        .endm
  
-       .macro          encrypt_block2x, in0, in1, rounds, rk, rkp, i
-       do_block_2x     1, \in0, \in1, \rounds, \rk, \rkp, \i
-       .endm
-       .macro          decrypt_block2x, in0, in1, rounds, rk, rkp, i
-       do_block_2x     0, \in0, \in1, \rounds, \rk, \rkp, \i
-       .endm
        .macro          encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i
        do_block_4x     1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i
        .endm
@@@ -1,8 -1,11 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
   *
   * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <asm/neon.h>
@@@ -52,7 -55,7 +52,7 @@@ static int sha1_ce_finup(struct shash_d
                         unsigned int len, u8 *out)
  {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
-       bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+       bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
  
        if (!crypto_simd_usable())
                return crypto_sha1_finup(desc, data, len, out);
@@@ -1,8 -1,11 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
   *
   * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <asm/neon.h>
@@@ -57,7 -60,7 +57,7 @@@ static int sha256_ce_finup(struct shash
                           unsigned int len, u8 *out)
  {
        struct sha256_ce_state *sctx = shash_desc_ctx(desc);
-       bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+       bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
  
        if (!crypto_simd_usable()) {
                if (len)
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Support for Intel AES-NI instructions. This file contains glue
   * code, the real AES implementation is in intel-aes_asm.S.
   *             Tadeusz Struk (tadeusz.struk@intel.com)
   *             Aidan O'Mahony (aidan.o.mahony@intel.com)
   *    Copyright (c) 2010, Intel Corporation.
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
   */
  
  #include <linux/hardirq.h>
@@@ -371,20 -375,6 +371,6 @@@ static void aes_decrypt(struct crypto_t
        }
  }
  
- static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
- {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-       aesni_enc(ctx, dst, src);
- }
- static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
- {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-       aesni_dec(ctx, dst, src);
- }
  static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
  {
@@@ -920,7 -910,7 +906,7 @@@ static int helper_rfc4106_decrypt(struc
  }
  #endif
  
- static struct crypto_alg aesni_algs[] = { {
+ static struct crypto_alg aesni_cipher_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
                        .cia_decrypt            = aes_decrypt
                }
        }
- }, {
-       .cra_name               = "__aes",
-       .cra_driver_name        = "__aes-aesni",
-       .cra_priority           = 300,
-       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
-       .cra_module             = THIS_MODULE,
-       .cra_u  = {
-               .cipher = {
-                       .cia_min_keysize        = AES_MIN_KEY_SIZE,
-                       .cia_max_keysize        = AES_MAX_KEY_SIZE,
-                       .cia_setkey             = aes_set_key,
-                       .cia_encrypt            = __aes_encrypt,
-                       .cia_decrypt            = __aes_decrypt
-               }
-       }
- } };
+ };
  
  static struct skcipher_alg aesni_skciphers[] = {
        {
@@@ -1150,7 -1123,7 +1119,7 @@@ static int __init aesni_init(void
  #endif
  #endif
  
-       err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+       err = crypto_register_alg(&aesni_cipher_alg);
        if (err)
                return err;
  
                                             ARRAY_SIZE(aesni_skciphers),
                                             aesni_simd_skciphers);
        if (err)
-               goto unregister_algs;
+               goto unregister_cipher;
  
        err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
                                         aesni_simd_aeads);
  unregister_skciphers:
        simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
                                  aesni_simd_skciphers);
- unregister_algs:
-       crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+ unregister_cipher:
+       crypto_unregister_alg(&aesni_cipher_alg);
        return err;
  }
  
@@@ -1181,7 -1154,7 +1150,7 @@@ static void __exit aesni_exit(void
                              aesni_simd_aeads);
        simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
                                  aesni_simd_skciphers);
-       crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+       crypto_unregister_alg(&aesni_cipher_alg);
  }
  
  late_initcall(aesni_init);
@@@ -1,9 -1,13 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
   * including ChaCha20 (RFC7539)
   *
   * Copyright (C) 2015 Martin Willi
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
   */
  
  #include <crypto/algapi.h>
@@@ -124,7 -128,7 +124,7 @@@ static void chacha_dosimd(u32 *state, u
  }
  
  static int chacha_simd_stream_xor(struct skcipher_walk *walk,
-                                 struct chacha_ctx *ctx, u8 *iv)
+                                 const struct chacha_ctx *ctx, const u8 *iv)
  {
        u32 *state, state_buf[16 + 2] __aligned(8);
        int next_yield = 4096; /* bytes until next FPU yield */
diff --combined crypto/aead.c
@@@ -1,10 -1,15 +1,10 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * AEAD: Authenticated Encryption with Associated Data
   *
   * This file provides API support for AEAD algorithms.
   *
   * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  
  #include <crypto/internal/geniv.h>
@@@ -84,6 -89,42 +84,42 @@@ int crypto_aead_setauthsize(struct cryp
  }
  EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
  
+ int crypto_aead_encrypt(struct aead_request *req)
+ {
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct crypto_alg *alg = aead->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
+       int ret;
+       crypto_stats_get(alg);
+       if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+               ret = -ENOKEY;
+       else
+               ret = crypto_aead_alg(aead)->encrypt(req);
+       crypto_stats_aead_encrypt(cryptlen, alg, ret);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
+ int crypto_aead_decrypt(struct aead_request *req)
+ {
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct crypto_alg *alg = aead->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
+       int ret;
+       crypto_stats_get(alg);
+       if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+               ret = -ENOKEY;
+       else if (req->cryptlen < crypto_aead_authsize(aead))
+               ret = -EINVAL;
+       else
+               ret = crypto_aead_alg(aead)->decrypt(req);
+       crypto_stats_aead_decrypt(cryptlen, alg, ret);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
  static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
  {
        struct crypto_aead *aead = __crypto_aead_cast(tfm);
diff --combined crypto/algapi.c
@@@ -1,8 -1,13 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API for algorithms (i.e., low-level API).
   *
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  
  #include <crypto/algapi.h>
  
  static LIST_HEAD(crypto_template_list);
  
- static inline int crypto_set_driver_name(struct crypto_alg *alg)
- {
-       static const char suffix[] = "-generic";
-       char *driver_name = alg->cra_driver_name;
-       int len;
-       if (*driver_name)
-               return 0;
-       len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-       if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
-               return -ENAMETOOLONG;
-       memcpy(driver_name + len, suffix, sizeof(suffix));
-       return 0;
- }
  static inline void crypto_check_module_sig(struct module *mod)
  {
        if (fips_enabled && mod && !module_sig_ok(mod))
@@@ -49,6 -37,9 +32,9 @@@ static int crypto_check_alg(struct cryp
  {
        crypto_check_module_sig(alg->cra_module);
  
+       if (!alg->cra_name[0] || !alg->cra_driver_name[0])
+               return -EINVAL;
        if (alg->cra_alignmask & (alg->cra_alignmask + 1))
                return -EINVAL;
  
@@@ -74,7 -65,7 +60,7 @@@
  
        refcount_set(&alg->cra_refcnt, 1);
  
-       return crypto_set_driver_name(alg);
+       return 0;
  }
  
  static void crypto_free_instance(struct crypto_instance *inst)
@@@ -947,19 -938,6 +933,6 @@@ struct crypto_async_request *crypto_deq
  }
  EXPORT_SYMBOL_GPL(crypto_dequeue_request);
  
- int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
- {
-       struct crypto_async_request *req;
-       list_for_each_entry(req, &queue->list, list) {
-               if (req->tfm == tfm)
-                       return 1;
-       }
-       return 0;
- }
- EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
  static inline void crypto_inc_byte(u8 *a, unsigned int size)
  {
        u8 *b = (a + size);
diff --combined crypto/arc4.c
@@@ -1,10 -1,15 +1,10 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API
   *
   * ARC4 Cipher Algorithm
   *
   * Jon Oberheide <jon@oberheide.org>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
   */
  
  #include <crypto/algapi.h>
  #include <linux/init.h>
  #include <linux/module.h>
  
- struct arc4_ctx {
-       u32 S[256];
-       u32 x, y;
- };
- static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-                       unsigned int key_len)
- {
-       struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
-       int i, j = 0, k = 0;
-       ctx->x = 1;
-       ctx->y = 0;
-       for (i = 0; i < 256; i++)
-               ctx->S[i] = i;
-       for (i = 0; i < 256; i++) {
-               u32 a = ctx->S[i];
-               j = (j + in_key[k] + a) & 0xff;
-               ctx->S[i] = ctx->S[j];
-               ctx->S[j] = a;
-               if (++k >= key_len)
-                       k = 0;
-       }
-       return 0;
- }
- static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
-                                unsigned int key_len)
+ static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+                             unsigned int key_len)
  {
-       return arc4_set_key(&tfm->base, in_key, key_len);
- }
- static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in,
-                      unsigned int len)
- {
-       u32 *const S = ctx->S;
-       u32 x, y, a, b;
-       u32 ty, ta, tb;
-       if (len == 0)
-               return;
-       x = ctx->x;
-       y = ctx->y;
-       a = S[x];
-       y = (y + a) & 0xff;
-       b = S[y];
-       do {
-               S[y] = a;
-               a = (a + b) & 0xff;
-               S[x] = b;
-               x = (x + 1) & 0xff;
-               ta = S[x];
-               ty = (y + ta) & 0xff;
-               tb = S[ty];
-               *out++ = *in++ ^ S[a];
-               if (--len == 0)
-                       break;
-               y = ty;
-               a = ta;
-               b = tb;
-       } while (true);
-       ctx->x = x;
-       ctx->y = y;
- }
+       struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
  
- static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in)
- {
-       arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1);
+       return arc4_setkey(ctx, in_key, key_len);
  }
  
- static int ecb_arc4_crypt(struct skcipher_request *req)
+ static int crypto_arc4_crypt(struct skcipher_request *req)
  {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
        return err;
  }
  
- static struct crypto_alg arc4_cipher = {
-       .cra_name               =       "arc4",
-       .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
-       .cra_blocksize          =       ARC4_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct arc4_ctx),
-       .cra_module             =       THIS_MODULE,
-       .cra_u                  =       {
-               .cipher = {
-                       .cia_min_keysize        =       ARC4_MIN_KEY_SIZE,
-                       .cia_max_keysize        =       ARC4_MAX_KEY_SIZE,
-                       .cia_setkey             =       arc4_set_key,
-                       .cia_encrypt            =       arc4_crypt_one,
-                       .cia_decrypt            =       arc4_crypt_one,
-               },
-       },
- };
- static struct skcipher_alg arc4_skcipher = {
+ static struct skcipher_alg arc4_alg = {
+       /*
+        * For legacy reasons, this is named "ecb(arc4)", not "arc4".
+        * Nevertheless it's actually a stream cipher, not a block cipher.
+        */
        .base.cra_name          =       "ecb(arc4)",
+       .base.cra_driver_name   =       "ecb(arc4)-generic",
        .base.cra_priority      =       100,
        .base.cra_blocksize     =       ARC4_BLOCK_SIZE,
        .base.cra_ctxsize       =       sizeof(struct arc4_ctx),
        .base.cra_module        =       THIS_MODULE,
        .min_keysize            =       ARC4_MIN_KEY_SIZE,
        .max_keysize            =       ARC4_MAX_KEY_SIZE,
-       .setkey                 =       arc4_set_key_skcipher,
-       .encrypt                =       ecb_arc4_crypt,
-       .decrypt                =       ecb_arc4_crypt,
+       .setkey                 =       crypto_arc4_setkey,
+       .encrypt                =       crypto_arc4_crypt,
+       .decrypt                =       crypto_arc4_crypt,
  };
  
  static int __init arc4_init(void)
  {
-       int err;
-       err = crypto_register_alg(&arc4_cipher);
-       if (err)
-               return err;
-       err = crypto_register_skcipher(&arc4_skcipher);
-       if (err)
-               crypto_unregister_alg(&arc4_cipher);
-       return err;
+       return crypto_register_skcipher(&arc4_alg);
  }
  
  static void __exit arc4_exit(void)
  {
-       crypto_unregister_alg(&arc4_cipher);
-       crypto_unregister_skcipher(&arc4_skcipher);
+       crypto_unregister_skcipher(&arc4_alg);
  }
  
  subsys_initcall(arc4_init);
@@@ -164,4 -78,4 +73,4 @@@ module_exit(arc4_exit)
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
  MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
- MODULE_ALIAS_CRYPTO("arc4");
+ MODULE_ALIAS_CRYPTO("ecb(arc4)");
@@@ -1,8 -1,12 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * ChaCha20-Poly1305 AEAD, RFC7539
   *
   * Copyright (C) 2015 Martin Willi
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
   */
  
  #include <crypto/internal/aead.h>
@@@ -61,6 -65,8 +61,8 @@@ struct chachapoly_req_ctx 
        unsigned int cryptlen;
        /* Actual AD, excluding IV */
        unsigned int assoclen;
+       /* request flags, with MAY_SLEEP cleared if needed */
+       u32 flags;
        union {
                struct poly_req poly;
                struct chacha_req chacha;
  static inline void async_done_continue(struct aead_request *req, int err,
                                       int (*cont)(struct aead_request *))
  {
-       if (!err)
+       if (!err) {
+               struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+               rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = cont(req);
+       }
  
        if (err != -EINPROGRESS && err != -EBUSY)
                aead_request_complete(req, err);
@@@ -129,16 -139,12 +135,12 @@@ static int chacha_decrypt(struct aead_r
  
        chacha_iv(creq->iv, req, 1);
  
-       sg_init_table(rctx->src, 2);
        src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
        dst = src;
-       if (req->src != req->dst) {
-               sg_init_table(rctx->dst, 2);
+       if (req->src != req->dst)
                dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
-       }
  
-       skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+       skcipher_request_set_callback(&creq->req, rctx->flags,
                                      chacha_decrypt_done, req);
        skcipher_request_set_tfm(&creq->req, ctx->chacha);
        skcipher_request_set_crypt(&creq->req, src, dst,
@@@ -172,17 -178,13 +174,13 @@@ static int poly_tail(struct aead_reques
        struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct poly_req *preq = &rctx->u.poly;
-       __le64 len;
        int err;
  
-       sg_init_table(preq->src, 1);
-       len = cpu_to_le64(rctx->assoclen);
-       memcpy(&preq->tail.assoclen, &len, sizeof(len));
-       len = cpu_to_le64(rctx->cryptlen);
-       memcpy(&preq->tail.cryptlen, &len, sizeof(len));
-       sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
+       preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
+       preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
+       sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_tail_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src,
@@@ -205,15 -207,14 +203,14 @@@ static int poly_cipherpad(struct aead_r
        struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct poly_req *preq = &rctx->u.poly;
-       unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
+       unsigned int padlen;
        int err;
  
-       padlen = (bs - (rctx->cryptlen % bs)) % bs;
+       padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
        memset(preq->pad, 0, sizeof(preq->pad));
-       sg_init_table(preq->src, 1);
-       sg_set_buf(preq->src, &preq->pad, padlen);
+       sg_init_one(preq->src, preq->pad, padlen);
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_cipherpad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@@ -241,10 -242,9 +238,9 @@@ static int poly_cipher(struct aead_requ
        if (rctx->cryptlen == req->cryptlen) /* encrypting */
                crypt = req->dst;
  
-       sg_init_table(rctx->src, 2);
        crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_cipher_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
@@@ -266,15 -266,14 +262,14 @@@ static int poly_adpad(struct aead_reque
        struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
        struct poly_req *preq = &rctx->u.poly;
-       unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
+       unsigned int padlen;
        int err;
  
-       padlen = (bs - (rctx->assoclen % bs)) % bs;
+       padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
        memset(preq->pad, 0, sizeof(preq->pad));
-       sg_init_table(preq->src, 1);
-       sg_set_buf(preq->src, preq->pad, padlen);
+       sg_init_one(preq->src, preq->pad, padlen);
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_adpad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@@ -298,7 -297,7 +293,7 @@@ static int poly_ad(struct aead_request 
        struct poly_req *preq = &rctx->u.poly;
        int err;
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_ad_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
@@@ -322,10 -321,9 +317,9 @@@ static int poly_setkey(struct aead_requ
        struct poly_req *preq = &rctx->u.poly;
        int err;
  
-       sg_init_table(preq->src, 1);
-       sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
+       sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_setkey_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
        ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
@@@ -349,7 -347,7 +343,7 @@@ static int poly_init(struct aead_reques
        struct poly_req *preq = &rctx->u.poly;
        int err;
  
-       ahash_request_set_callback(&preq->req, aead_request_flags(req),
+       ahash_request_set_callback(&preq->req, rctx->flags,
                                   poly_init_done, req);
        ahash_request_set_tfm(&preq->req, ctx->poly);
  
@@@ -381,13 -379,12 +375,12 @@@ static int poly_genkey(struct aead_requ
                rctx->assoclen -= 8;
        }
  
-       sg_init_table(creq->src, 1);
        memset(rctx->key, 0, sizeof(rctx->key));
-       sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
+       sg_init_one(creq->src, rctx->key, sizeof(rctx->key));
  
        chacha_iv(creq->iv, req, 0);
  
-       skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+       skcipher_request_set_callback(&creq->req, rctx->flags,
                                      poly_genkey_done, req);
        skcipher_request_set_tfm(&creq->req, ctx->chacha);
        skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
@@@ -418,16 -415,12 +411,12 @@@ static int chacha_encrypt(struct aead_r
  
        chacha_iv(creq->iv, req, 1);
  
-       sg_init_table(rctx->src, 2);
        src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
        dst = src;
-       if (req->src != req->dst) {
-               sg_init_table(rctx->dst, 2);
+       if (req->src != req->dst)
                dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
-       }
  
-       skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+       skcipher_request_set_callback(&creq->req, rctx->flags,
                                      chacha_encrypt_done, req);
        skcipher_request_set_tfm(&creq->req, ctx->chacha);
        skcipher_request_set_crypt(&creq->req, src, dst,
@@@ -445,6 -438,7 +434,7 @@@ static int chachapoly_encrypt(struct ae
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
  
        rctx->cryptlen = req->cryptlen;
+       rctx->flags = aead_request_flags(req);
  
        /* encrypt call chain:
         * - chacha_encrypt/done()
@@@ -466,6 -460,7 +456,7 @@@ static int chachapoly_decrypt(struct ae
        struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
  
        rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
+       rctx->flags = aead_request_flags(req);
  
        /* decrypt call chain:
         * - poly_genkey/done()
diff --combined crypto/chacha_generic.c
@@@ -1,9 -1,13 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
   *
   * Copyright (C) 2015 Martin Willi
   * Copyright (C) 2018 Google LLC
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
   */
  
  #include <asm/unaligned.h>
@@@ -32,7 -36,7 +32,7 @@@ static void chacha_docrypt(u32 *state, 
  }
  
  static int chacha_stream_xor(struct skcipher_request *req,
-                            struct chacha_ctx *ctx, u8 *iv)
+                            const struct chacha_ctx *ctx, const u8 *iv)
  {
        struct skcipher_walk walk;
        u32 state[16];
@@@ -56,7 -60,7 +56,7 @@@
        return err;
  }
  
- void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv)
+ void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv)
  {
        state[0]  = 0x61707865; /* "expa" */
        state[1]  = 0x3320646e; /* "nd 3" */
diff --combined crypto/cryptd.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Software async crypto daemon.
   *
   *             Gabriele Paoloni <gabriele.paoloni@intel.com>
   *             Aidan O'Mahony (aidan.o.mahony@intel.com)
   *    Copyright (c) 2010, Intel Corporation.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  
  #include <crypto/internal/hash.h>
  #include <crypto/internal/aead.h>
  #include <crypto/internal/skcipher.h>
  #include <crypto/cryptd.h>
- #include <crypto/crypto_wq.h>
  #include <linux/atomic.h>
  #include <linux/err.h>
  #include <linux/init.h>
  #include <linux/scatterlist.h>
  #include <linux/sched.h>
  #include <linux/slab.h>
+ #include <linux/workqueue.h>
  
  static unsigned int cryptd_max_cpu_qlen = 1000;
  module_param(cryptd_max_cpu_qlen, uint, 0);
  MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
  
+ static struct workqueue_struct *cryptd_wq;
  struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
@@@ -136,7 -143,7 +138,7 @@@ static int cryptd_enqueue_request(struc
        if (err == -ENOSPC)
                goto out_put_cpu;
  
-       queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+       queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
  
        if (!atomic_read(refcnt))
                goto out_put_cpu;
@@@ -179,7 -186,7 +181,7 @@@ static void cryptd_queue_worker(struct 
        req->complete(req, 0);
  
        if (cpu_queue->queue.qlen)
-               queue_work(kcrypto_wq, &cpu_queue->work);
+               queue_work(cryptd_wq, &cpu_queue->work);
  }
  
  static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
@@@ -388,7 -395,6 +390,7 @@@ static void cryptd_skcipher_free(struc
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
  
        crypto_drop_skcipher(&ctx->spawn);
 +      kfree(inst);
  }
  
  static int cryptd_create_skcipher(struct crypto_template *tmpl,
@@@ -919,7 -925,7 +921,7 @@@ static int cryptd_create(struct crypto_
        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, &queue);
-       case CRYPTO_ALG_TYPE_DIGEST:
+       case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
@@@ -1119,19 -1125,31 +1121,31 @@@ static int __init cryptd_init(void
  {
        int err;
  
+       cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
+                                   1);
+       if (!cryptd_wq)
+               return -ENOMEM;
        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
-               return err;
+               goto err_destroy_wq;
  
        err = crypto_register_template(&cryptd_tmpl);
        if (err)
-               cryptd_fini_queue(&queue);
+               goto err_fini_queue;
  
+       return 0;
+ err_fini_queue:
+       cryptd_fini_queue(&queue);
+ err_destroy_wq:
+       destroy_workqueue(cryptd_wq);
        return err;
  }
  
  static void __exit cryptd_exit(void)
  {
+       destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
  }
diff --combined crypto/crypto_null.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -10,6 -9,12 +10,6 @@@
   * The null cipher is compliant with RFC2410.
   *
   * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
   */
  
  #include <crypto/null.h>
@@@ -100,6 -105,7 +100,7 @@@ static struct shash_alg digest_null = 
        .final                  =       null_final,
        .base                   =       {
                .cra_name               =       "digest_null",
+               .cra_driver_name        =       "digest_null-generic",
                .cra_blocksize          =       NULL_BLOCK_SIZE,
                .cra_module             =       THIS_MODULE,
        }
@@@ -122,6 -128,7 +123,7 @@@ static struct skcipher_alg skcipher_nul
  
  static struct crypto_alg null_algs[] = { {
        .cra_name               =       "cipher_null",
+       .cra_driver_name        =       "cipher_null-generic",
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       NULL_BLOCK_SIZE,
        .cra_ctxsize            =       0,
        .cia_decrypt            =       null_crypt } }
  }, {
        .cra_name               =       "compress_null",
+       .cra_driver_name        =       "compress_null-generic",
        .cra_flags              =       CRYPTO_ALG_TYPE_COMPRESS,
        .cra_blocksize          =       NULL_BLOCK_SIZE,
        .cra_ctxsize            =       0,
diff --combined crypto/deflate.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -7,6 -6,11 +7,6 @@@
   *
   * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
   *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   * FIXME: deflate transforms will require up to a total of about 436k of kernel
   * memory on i386 (390k for compression, the rest for decompression), as the
   * current zlib kernel code uses a worst case pre-allocation system by default.
@@@ -275,6 -279,7 +275,7 @@@ static int deflate_sdecompress(struct c
  
  static struct crypto_alg alg = {
        .cra_name               = "deflate",
+       .cra_driver_name        = "deflate-generic",
        .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize            = sizeof(struct deflate_ctx),
        .cra_module             = THIS_MODULE,
diff --combined crypto/ghash-generic.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * GHASH: digest algorithm for GCM (Galois/Counter Mode).
   *
@@@ -7,6 -6,10 +7,6 @@@
   *   Author: Huang Ying <ying.huang@intel.com>
   *
   * The algorithm implementation is copied from gcm.c.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published
 - * by the Free Software Foundation.
   */
  
  #include <crypto/algapi.h>
@@@ -31,6 -34,7 +31,7 @@@ static int ghash_setkey(struct crypto_s
                        const u8 *key, unsigned int keylen)
  {
        struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+       be128 k;
  
        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  
        if (ctx->gf128)
                gf128mul_free_4k(ctx->gf128);
-       ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+       BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+       memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+       ctx->gf128 = gf128mul_init_4k_lle(&k);
+       memzero_explicit(&k, GHASH_BLOCK_SIZE);
        if (!ctx->gf128)
                return -ENOMEM;
  
diff --combined crypto/lrw.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /* LRW: as defined by Cyril Guyot in
   *    http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
   *
@@@ -6,6 -5,11 +6,6 @@@
   *
   * Based on ecb.c
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
   */
  /* This implementation is checked against the test vectors in the above
   * document and by a test vector provided by Ken Buchanan at
@@@ -384,7 -388,7 +384,7 @@@ static int create(struct crypto_templat
        inst->alg.base.cra_priority = alg->base.cra_priority;
        inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
        inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
-                                      (__alignof__(__be32) - 1);
+                                      (__alignof__(be128) - 1);
  
        inst->alg.ivsize = LRW_BLOCK_SIZE;
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
diff --combined crypto/lz4.c
@@@ -1,8 -1,21 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API.
   *
   * Copyright (c) 2013 Chanho Min <chanho.min@lge.com>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published by
 - * the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but WITHOUT
 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 - * more details.
 - *
 - * You should have received a copy of the GNU General Public License along with
 - * this program; if not, write to the Free Software Foundation, Inc., 51
 - * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 - *
   */
  
  #include <linux/init.h>
@@@ -106,6 -119,7 +106,7 @@@ static int lz4_decompress_crypto(struc
  
  static struct crypto_alg alg_lz4 = {
        .cra_name               = "lz4",
+       .cra_driver_name        = "lz4-generic",
        .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize            = sizeof(struct lz4_ctx),
        .cra_module             = THIS_MODULE,
diff --combined crypto/lz4hc.c
@@@ -1,8 -1,21 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API.
   *
   * Copyright (c) 2013 Chanho Min <chanho.min@lge.com>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published by
 - * the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but WITHOUT
 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 - * more details.
 - *
 - * You should have received a copy of the GNU General Public License along with
 - * this program; if not, write to the Free Software Foundation, Inc., 51
 - * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 - *
   */
  #include <linux/init.h>
  #include <linux/module.h>
@@@ -107,6 -120,7 +107,7 @@@ static int lz4hc_decompress_crypto(stru
  
  static struct crypto_alg alg_lz4hc = {
        .cra_name               = "lz4hc",
+       .cra_driver_name        = "lz4hc-generic",
        .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize            = sizeof(struct lz4hc_ctx),
        .cra_module             = THIS_MODULE,
diff --combined crypto/lzo-rle.c
@@@ -1,6 -1,19 +1,6 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published by
 - * the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but WITHOUT
 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 - * more details.
 - *
 - * You should have received a copy of the GNU General Public License along with
 - * this program; if not, write to the Free Software Foundation, Inc., 51
 - * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 - *
   */
  
  #include <linux/init.h>
@@@ -109,6 -122,7 +109,7 @@@ static int lzorle_sdecompress(struct cr
  
  static struct crypto_alg alg = {
        .cra_name               = "lzo-rle",
+       .cra_driver_name        = "lzo-rle-generic",
        .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize            = sizeof(struct lzorle_ctx),
        .cra_module             = THIS_MODULE,
diff --combined crypto/lzo.c
@@@ -1,6 -1,19 +1,6 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published by
 - * the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but WITHOUT
 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 - * more details.
 - *
 - * You should have received a copy of the GNU General Public License along with
 - * this program; if not, write to the Free Software Foundation, Inc., 51
 - * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 - *
   */
  
  #include <linux/init.h>
@@@ -109,6 -122,7 +109,7 @@@ static int lzo_sdecompress(struct crypt
  
  static struct crypto_alg alg = {
        .cra_name               = "lzo",
+       .cra_driver_name        = "lzo-generic",
        .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize            = sizeof(struct lzo_ctx),
        .cra_module             = THIS_MODULE,
diff --combined crypto/michael_mic.c
@@@ -1,10 -1,13 +1,10 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API
   *
   * Michael MIC (IEEE 802.11i/TKIP) keyed digest
   *
   * Copyright (c) 2004 Jouni Malinen <j@w1.fi>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  #include <crypto/internal/hash.h>
  #include <asm/byteorder.h>
@@@ -156,6 -159,7 +156,7 @@@ static struct shash_alg alg = 
        .descsize               =       sizeof(struct michael_mic_desc_ctx),
        .base                   =       {
                .cra_name               =       "michael_mic",
+               .cra_driver_name        =       "michael_mic-generic",
                .cra_blocksize          =       8,
                .cra_alignmask          =       3,
                .cra_ctxsize            =       sizeof(struct michael_mic_ctx),
diff --combined crypto/rmd128.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -7,6 -6,12 +7,6 @@@
   * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
   *
   * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  #include <crypto/internal/hash.h>
  #include <linux/init.h>
@@@ -298,6 -303,7 +298,7 @@@ static struct shash_alg alg = 
        .descsize       =       sizeof(struct rmd128_ctx),
        .base           =       {
                .cra_name        =      "rmd128",
+               .cra_driver_name =      "rmd128-generic",
                .cra_blocksize   =      RMD128_BLOCK_SIZE,
                .cra_module      =      THIS_MODULE,
        }
diff --combined crypto/rmd160.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -7,6 -6,12 +7,6 @@@
   * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
   *
   * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  #include <crypto/internal/hash.h>
  #include <linux/init.h>
@@@ -342,6 -347,7 +342,7 @@@ static struct shash_alg alg = 
        .descsize       =       sizeof(struct rmd160_ctx),
        .base           =       {
                .cra_name        =      "rmd160",
+               .cra_driver_name =      "rmd160-generic",
                .cra_blocksize   =      RMD160_BLOCK_SIZE,
                .cra_module      =      THIS_MODULE,
        }
diff --combined crypto/rmd256.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -7,6 -6,12 +7,6 @@@
   * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
   *
   * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  #include <crypto/internal/hash.h>
  #include <linux/init.h>
@@@ -317,6 -322,7 +317,7 @@@ static struct shash_alg alg = 
        .descsize       =       sizeof(struct rmd256_ctx),
        .base           =       {
                .cra_name        =      "rmd256",
+               .cra_driver_name =      "rmd256-generic",
                .cra_blocksize   =      RMD256_BLOCK_SIZE,
                .cra_module      =      THIS_MODULE,
        }
diff --combined crypto/rmd320.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -7,6 -6,12 +7,6 @@@
   * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
   *
   * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  #include <crypto/internal/hash.h>
  #include <linux/init.h>
@@@ -366,6 -371,7 +366,7 @@@ static struct shash_alg alg = 
        .descsize       =       sizeof(struct rmd320_ctx),
        .base           =       {
                .cra_name        =      "rmd320",
+               .cra_driver_name =      "rmd320-generic",
                .cra_blocksize   =      RMD320_BLOCK_SIZE,
                .cra_module      =      THIS_MODULE,
        }
diff --combined crypto/serpent_generic.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
@@@ -10,6 -9,11 +10,6 @@@
   * Added tnepres support:
   *            Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
   *              Based on code by hvr
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
   */
  
  #include <linux/init.h>
        x4 ^= x2;                                       \
        })
  
- static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
+ /*
+  * both gcc and clang have misoptimized this function in the past,
+  * producing horrible object code from spilling temporary variables
+  * on the stack. Forcing this part out of line avoids that.
+  */
+ static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
+                                          u32 r3, u32 r4, u32 *k)
  {
        k += 100;
        S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
@@@ -637,6 -647,7 +643,7 @@@ static struct crypto_alg srp_algs[2] = 
        .cia_decrypt            =       serpent_decrypt } }
  }, {
        .cra_name               =       "tnepres",
+       .cra_driver_name        =       "tnepres-generic",
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       SERPENT_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct serpent_ctx),
diff --combined crypto/skcipher.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Symmetric key cipher operations.
   *
@@@ -7,6 -6,12 +7,6 @@@
   * the kernel is given a chance to schedule us once per page.
   *
   * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  
  #include <crypto/internal/aead.h>
@@@ -837,6 -842,40 +837,40 @@@ static int skcipher_setkey(struct crypt
        return 0;
  }
  
+ int crypto_skcipher_encrypt(struct skcipher_request *req)
+ {
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
+       int ret;
+       crypto_stats_get(alg);
+       if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+               ret = -ENOKEY;
+       else
+               ret = tfm->encrypt(req);
+       crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
+ int crypto_skcipher_decrypt(struct skcipher_request *req)
+ {
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
+       int ret;
+       crypto_stats_get(alg);
+       if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+               ret = -ENOKEY;
+       else
+               ret = tfm->decrypt(req);
+       crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
  static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
  {
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
diff --combined crypto/tea.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /* 
   * Cryptographic API.
   *
   * compatibility with these implementations.
   *
   * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
   */
  
  #include <linux/init.h>
@@@ -216,6 -221,7 +216,7 @@@ static void xeta_decrypt(struct crypto_
  
  static struct crypto_alg tea_algs[3] = { {
        .cra_name               =       "tea",
+       .cra_driver_name        =       "tea-generic",
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       TEA_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof (struct tea_ctx),
        .cia_decrypt            =       tea_decrypt } }
  }, {
        .cra_name               =       "xtea",
+       .cra_driver_name        =       "xtea-generic",
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       XTEA_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof (struct xtea_ctx),
        .cia_decrypt            =       xtea_decrypt } }
  }, {
        .cra_name               =       "xeta",
+       .cra_driver_name        =       "xeta-generic",
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       XTEA_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof (struct xtea_ctx),
diff --combined crypto/testmgr.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Algorithm testing framework and tests.
   *
   *             Gabriele Paoloni <gabriele.paoloni@intel.com>
   *             Tadeusz Struk (tadeusz.struk@intel.com)
   *    Copyright (c) 2010, Intel Corporation.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  
  #include <crypto/aead.h>
@@@ -1032,6 -1037,205 +1032,205 @@@ static void crypto_reenable_simd_for_te
  }
  #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
  
+ static int build_hash_sglist(struct test_sglist *tsgl,
+                            const struct hash_testvec *vec,
+                            const struct testvec_config *cfg,
+                            unsigned int alignmask,
+                            const struct test_sg_division *divs[XBUFSIZE])
+ {
+       struct kvec kv;
+       struct iov_iter input;
+       kv.iov_base = (void *)vec->plaintext;
+       kv.iov_len = vec->psize;
+       iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
+       return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
+                                &input, divs);
+ }
+ static int check_hash_result(const char *type,
+                            const u8 *result, unsigned int digestsize,
+                            const struct hash_testvec *vec,
+                            const char *vec_name,
+                            const char *driver,
+                            const struct testvec_config *cfg)
+ {
+       if (memcmp(result, vec->digest, digestsize) != 0) {
+               pr_err("alg: %s: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+                      type, driver, vec_name, cfg->name);
+               return -EINVAL;
+       }
+       if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
+               pr_err("alg: %s: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
+                      type, driver, vec_name, cfg->name);
+               return -EOVERFLOW;
+       }
+       return 0;
+ }
+ static inline int check_shash_op(const char *op, int err,
+                                const char *driver, const char *vec_name,
+                                const struct testvec_config *cfg)
+ {
+       if (err)
+               pr_err("alg: shash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+                      driver, op, err, vec_name, cfg->name);
+       return err;
+ }
+ static inline const void *sg_data(struct scatterlist *sg)
+ {
+       return page_address(sg_page(sg)) + sg->offset;
+ }
+ /* Test one hash test vector in one configuration, using the shash API */
+ static int test_shash_vec_cfg(const char *driver,
+                             const struct hash_testvec *vec,
+                             const char *vec_name,
+                             const struct testvec_config *cfg,
+                             struct shash_desc *desc,
+                             struct test_sglist *tsgl,
+                             u8 *hashstate)
+ {
+       struct crypto_shash *tfm = desc->tfm;
+       const unsigned int alignmask = crypto_shash_alignmask(tfm);
+       const unsigned int digestsize = crypto_shash_digestsize(tfm);
+       const unsigned int statesize = crypto_shash_statesize(tfm);
+       const struct test_sg_division *divs[XBUFSIZE];
+       unsigned int i;
+       u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
+       int err;
+       /* Set the key, if specified */
+       if (vec->ksize) {
+               err = crypto_shash_setkey(tfm, vec->key, vec->ksize);
+               if (err) {
+                       if (err == vec->setkey_error)
+                               return 0;
+                       pr_err("alg: shash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+                              driver, vec_name, vec->setkey_error, err,
+                              crypto_shash_get_flags(tfm));
+                       return err;
+               }
+               if (vec->setkey_error) {
+                       pr_err("alg: shash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+                              driver, vec_name, vec->setkey_error);
+                       return -EINVAL;
+               }
+       }
+       /* Build the scatterlist for the source data */
+       err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
+       if (err) {
+               pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+                      driver, vec_name, cfg->name);
+               return err;
+       }
+       /* Do the actual hashing */
+       testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
+       testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
+       if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
+           vec->digest_error) {
+               /* Just using digest() */
+               if (tsgl->nents != 1)
+                       return 0;
+               if (cfg->nosimd)
+                       crypto_disable_simd_for_test();
+               err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]),
+                                         tsgl->sgl[0].length, result);
+               if (cfg->nosimd)
+                       crypto_reenable_simd_for_test();
+               if (err) {
+                       if (err == vec->digest_error)
+                               return 0;
+                       pr_err("alg: shash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+                              driver, vec_name, vec->digest_error, err,
+                              cfg->name);
+                       return err;
+               }
+               if (vec->digest_error) {
+                       pr_err("alg: shash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+                              driver, vec_name, vec->digest_error, cfg->name);
+                       return -EINVAL;
+               }
+               goto result_ready;
+       }
+       /* Using init(), zero or more update(), then final() or finup() */
+       if (cfg->nosimd)
+               crypto_disable_simd_for_test();
+       err = crypto_shash_init(desc);
+       if (cfg->nosimd)
+               crypto_reenable_simd_for_test();
+       err = check_shash_op("init", err, driver, vec_name, cfg);
+       if (err)
+               return err;
+       for (i = 0; i < tsgl->nents; i++) {
+               if (i + 1 == tsgl->nents &&
+                   cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
+                       if (divs[i]->nosimd)
+                               crypto_disable_simd_for_test();
+                       err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]),
+                                                tsgl->sgl[i].length, result);
+                       if (divs[i]->nosimd)
+                               crypto_reenable_simd_for_test();
+                       err = check_shash_op("finup", err, driver, vec_name,
+                                            cfg);
+                       if (err)
+                               return err;
+                       goto result_ready;
+               }
+               if (divs[i]->nosimd)
+                       crypto_disable_simd_for_test();
+               err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]),
+                                         tsgl->sgl[i].length);
+               if (divs[i]->nosimd)
+                       crypto_reenable_simd_for_test();
+               err = check_shash_op("update", err, driver, vec_name, cfg);
+               if (err)
+                       return err;
+               if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
+                       /* Test ->export() and ->import() */
+                       testmgr_poison(hashstate + statesize,
+                                      TESTMGR_POISON_LEN);
+                       err = crypto_shash_export(desc, hashstate);
+                       err = check_shash_op("export", err, driver, vec_name,
+                                            cfg);
+                       if (err)
+                               return err;
+                       if (!testmgr_is_poison(hashstate + statesize,
+                                              TESTMGR_POISON_LEN)) {
+                               pr_err("alg: shash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+                                      driver, vec_name, cfg->name);
+                               return -EOVERFLOW;
+                       }
+                       testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
+                       err = crypto_shash_import(desc, hashstate);
+                       err = check_shash_op("import", err, driver, vec_name,
+                                            cfg);
+                       if (err)
+                               return err;
+               }
+       }
+       if (cfg->nosimd)
+               crypto_disable_simd_for_test();
+       err = crypto_shash_final(desc, result);
+       if (cfg->nosimd)
+               crypto_reenable_simd_for_test();
+       err = check_shash_op("final", err, driver, vec_name, cfg);
+       if (err)
+               return err;
+ result_ready:
+       return check_hash_result("shash", result, digestsize, vec, vec_name,
+                                driver, cfg);
+ }
  static int do_ahash_op(int (*op)(struct ahash_request *req),
                       struct ahash_request *req,
                       struct crypto_wait *wait, bool nosimd)
        return crypto_wait_req(err, wait);
  }
  
- static int check_nonfinal_hash_op(const char *op, int err,
-                                 u8 *result, unsigned int digestsize,
-                                 const char *driver, const char *vec_name,
-                                 const struct testvec_config *cfg)
+ static int check_nonfinal_ahash_op(const char *op, int err,
+                                  u8 *result, unsigned int digestsize,
+                                  const char *driver, const char *vec_name,
+                                  const struct testvec_config *cfg)
  {
        if (err) {
-               pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+               pr_err("alg: ahash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
                       driver, op, err, vec_name, cfg->name);
                return err;
        }
        if (!testmgr_is_poison(result, digestsize)) {
-               pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
+               pr_err("alg: ahash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
                       driver, op, vec_name, cfg->name);
                return -EINVAL;
        }
        return 0;
  }
  
- static int test_hash_vec_cfg(const char *driver,
-                            const struct hash_testvec *vec,
-                            const char *vec_name,
-                            const struct testvec_config *cfg,
-                            struct ahash_request *req,
-                            struct test_sglist *tsgl,
-                            u8 *hashstate)
+ /* Test one hash test vector in one configuration, using the ahash API */
+ static int test_ahash_vec_cfg(const char *driver,
+                             const struct hash_testvec *vec,
+                             const char *vec_name,
+                             const struct testvec_config *cfg,
+                             struct ahash_request *req,
+                             struct test_sglist *tsgl,
+                             u8 *hashstate)
  {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        const unsigned int alignmask = crypto_ahash_alignmask(tfm);
        const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
        const struct test_sg_division *divs[XBUFSIZE];
        DECLARE_CRYPTO_WAIT(wait);
-       struct kvec _input;
-       struct iov_iter input;
        unsigned int i;
        struct scatterlist *pending_sgl;
        unsigned int pending_len;
                if (err) {
                        if (err == vec->setkey_error)
                                return 0;
-                       pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+                       pr_err("alg: ahash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
                               driver, vec_name, vec->setkey_error, err,
                               crypto_ahash_get_flags(tfm));
                        return err;
                }
                if (vec->setkey_error) {
-                       pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+                       pr_err("alg: ahash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
                               driver, vec_name, vec->setkey_error);
                        return -EINVAL;
                }
        }
  
        /* Build the scatterlist for the source data */
-       _input.iov_base = (void *)vec->plaintext;
-       _input.iov_len = vec->psize;
-       iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize);
-       err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
-                               &input, divs);
+       err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
        if (err) {
-               pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+               pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
                       driver, vec_name, cfg->name);
                return err;
        }
                if (err) {
                        if (err == vec->digest_error)
                                return 0;
-                       pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+                       pr_err("alg: ahash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
                               driver, vec_name, vec->digest_error, err,
                               cfg->name);
                        return err;
                }
                if (vec->digest_error) {
-                       pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+                       pr_err("alg: ahash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
                               driver, vec_name, vec->digest_error, cfg->name);
                        return -EINVAL;
                }
        ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
        ahash_request_set_crypt(req, NULL, result, 0);
        err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
-       err = check_nonfinal_hash_op("init", err, result, digestsize,
-                                    driver, vec_name, cfg);
+       err = check_nonfinal_ahash_op("init", err, result, digestsize,
+                                     driver, vec_name, cfg);
        if (err)
                return err;
  
                                                pending_len);
                        err = do_ahash_op(crypto_ahash_update, req, &wait,
                                          divs[i]->nosimd);
-                       err = check_nonfinal_hash_op("update", err,
-                                                    result, digestsize,
-                                                    driver, vec_name, cfg);
+                       err = check_nonfinal_ahash_op("update", err,
+                                                     result, digestsize,
+                                                     driver, vec_name, cfg);
                        if (err)
                                return err;
                        pending_sgl = NULL;
                        testmgr_poison(hashstate + statesize,
                                       TESTMGR_POISON_LEN);
                        err = crypto_ahash_export(req, hashstate);
-                       err = check_nonfinal_hash_op("export", err,
-                                                    result, digestsize,
-                                                    driver, vec_name, cfg);
+                       err = check_nonfinal_ahash_op("export", err,
+                                                     result, digestsize,
+                                                     driver, vec_name, cfg);
                        if (err)
                                return err;
                        if (!testmgr_is_poison(hashstate + statesize,
                                               TESTMGR_POISON_LEN)) {
-                               pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+                               pr_err("alg: ahash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
                                       driver, vec_name, cfg->name);
                                return -EOVERFLOW;
                        }
  
                        testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
                        err = crypto_ahash_import(req, hashstate);
-                       err = check_nonfinal_hash_op("import", err,
-                                                    result, digestsize,
-                                                    driver, vec_name, cfg);
+                       err = check_nonfinal_ahash_op("import", err,
+                                                     result, digestsize,
+                                                     driver, vec_name, cfg);
                        if (err)
                                return err;
                }
        if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
                /* finish with update() and final() */
                err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
-               err = check_nonfinal_hash_op("update", err, result, digestsize,
-                                            driver, vec_name, cfg);
+               err = check_nonfinal_ahash_op("update", err, result, digestsize,
+                                             driver, vec_name, cfg);
                if (err)
                        return err;
                err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
                if (err) {
-                       pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
+                       pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
                               driver, err, vec_name, cfg->name);
                        return err;
                }
                /* finish with finup() */
                err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
                if (err) {
-                       pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
+                       pr_err("alg: ahash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
                               driver, err, vec_name, cfg->name);
                        return err;
                }
        }
  
  result_ready:
-       /* Check that the algorithm produced the correct digest */
-       if (memcmp(result, vec->digest, digestsize) != 0) {
-               pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
-                      driver, vec_name, cfg->name);
-               return -EINVAL;
-       }
-       if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
-               pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
-                      driver, vec_name, cfg->name);
-               return -EOVERFLOW;
+       return check_hash_result("ahash", result, digestsize, vec, vec_name,
+                                driver, cfg);
+ }
+ static int test_hash_vec_cfg(const char *driver,
+                            const struct hash_testvec *vec,
+                            const char *vec_name,
+                            const struct testvec_config *cfg,
+                            struct ahash_request *req,
+                            struct shash_desc *desc,
+                            struct test_sglist *tsgl,
+                            u8 *hashstate)
+ {
+       int err;
+       /*
+        * For algorithms implemented as "shash", most bugs will be detected by
+        * both the shash and ahash tests.  Test the shash API first so that the
+        * failures involve less indirection, so are easier to debug.
+        */
+       if (desc) {
+               err = test_shash_vec_cfg(driver, vec, vec_name, cfg, desc, tsgl,
+                                        hashstate);
+               if (err)
+                       return err;
        }
  
-       return 0;
+       return test_ahash_vec_cfg(driver, vec, vec_name, cfg, req, tsgl,
+                                 hashstate);
  }
  
  static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
                         unsigned int vec_num, struct ahash_request *req,
-                        struct test_sglist *tsgl, u8 *hashstate)
+                        struct shash_desc *desc, struct test_sglist *tsgl,
+                        u8 *hashstate)
  {
        char vec_name[16];
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
                err = test_hash_vec_cfg(driver, vec, vec_name,
                                        &default_hash_testvec_configs[i],
-                                       req, tsgl, hashstate);
+                                       req, desc, tsgl, hashstate);
                if (err)
                        return err;
        }
                        generate_random_testvec_config(&cfg, cfgname,
                                                       sizeof(cfgname));
                        err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
-                                               req, tsgl, hashstate);
+                                               req, desc, tsgl, hashstate);
                        if (err)
                                return err;
+                       cond_resched();
                }
        }
  #endif
   * Generate a hash test vector from the given implementation.
   * Assumes the buffers in 'vec' were already allocated.
   */
- static void generate_random_hash_testvec(struct crypto_shash *tfm,
+ static void generate_random_hash_testvec(struct shash_desc *desc,
                                         struct hash_testvec *vec,
                                         unsigned int maxkeysize,
                                         unsigned int maxdatasize,
                                         char *name, size_t max_namelen)
  {
-       SHASH_DESC_ON_STACK(desc, tfm);
        /* Data */
        vec->psize = generate_random_length(maxdatasize);
        generate_random_bytes((u8 *)vec->plaintext, vec->psize);
                        vec->ksize = 1 + (prandom_u32() % maxkeysize);
                generate_random_bytes((u8 *)vec->key, vec->ksize);
  
-               vec->setkey_error = crypto_shash_setkey(tfm, vec->key,
+               vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
                                                        vec->ksize);
                /* If the key couldn't be set, no need to continue to digest. */
                if (vec->setkey_error)
        }
  
        /* Digest */
-       desc->tfm = tfm;
        vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
                                                vec->psize, (u8 *)vec->digest);
  done:
@@@ -1338,6 -1553,7 +1548,7 @@@ static int test_hash_vs_generic_impl(co
                                     const char *generic_driver,
                                     unsigned int maxkeysize,
                                     struct ahash_request *req,
+                                    struct shash_desc *desc,
                                     struct test_sglist *tsgl,
                                     u8 *hashstate)
  {
        const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
        char _generic_driver[CRYPTO_MAX_ALG_NAME];
        struct crypto_shash *generic_tfm = NULL;
+       struct shash_desc *generic_desc = NULL;
        unsigned int i;
        struct hash_testvec vec = { 0 };
        char vec_name[64];
-       struct testvec_config cfg;
+       struct testvec_config *cfg;
        char cfgname[TESTVEC_CONFIG_NAMELEN];
        int err;
  
                return err;
        }
  
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg) {
+               err = -ENOMEM;
+               goto out;
+       }
+       generic_desc = kzalloc(sizeof(*desc) +
+                              crypto_shash_descsize(generic_tfm), GFP_KERNEL);
+       if (!generic_desc) {
+               err = -ENOMEM;
+               goto out;
+       }
+       generic_desc->tfm = generic_tfm;
        /* Check the algorithm properties for consistency. */
  
        if (digestsize != crypto_shash_digestsize(generic_tfm)) {
        }
  
        for (i = 0; i < fuzz_iterations * 8; i++) {
-               generate_random_hash_testvec(generic_tfm, &vec,
+               generate_random_hash_testvec(generic_desc, &vec,
                                             maxkeysize, maxdatasize,
                                             vec_name, sizeof(vec_name));
-               generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+               generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
  
-               err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg,
-                                       req, tsgl, hashstate);
+               err = test_hash_vec_cfg(driver, &vec, vec_name, cfg,
+                                       req, desc, tsgl, hashstate);
                if (err)
                        goto out;
                cond_resched();
        }
        err = 0;
  out:
+       kfree(cfg);
        kfree(vec.key);
        kfree(vec.plaintext);
        kfree(vec.digest);
        crypto_free_shash(generic_tfm);
+       kzfree(generic_desc);
        return err;
  }
  #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
@@@ -1436,6 -1669,7 +1664,7 @@@ static int test_hash_vs_generic_impl(co
                                     const char *generic_driver,
                                     unsigned int maxkeysize,
                                     struct ahash_request *req,
+                                    struct shash_desc *desc,
                                     struct test_sglist *tsgl,
                                     u8 *hashstate)
  {
  }
  #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
  
+ static int alloc_shash(const char *driver, u32 type, u32 mask,
+                      struct crypto_shash **tfm_ret,
+                      struct shash_desc **desc_ret)
+ {
+       struct crypto_shash *tfm;
+       struct shash_desc *desc;
+       tfm = crypto_alloc_shash(driver, type, mask);
+       if (IS_ERR(tfm)) {
+               if (PTR_ERR(tfm) == -ENOENT) {
+                       /*
+                        * This algorithm is only available through the ahash
+                        * API, not the shash API, so skip the shash tests.
+                        */
+                       return 0;
+               }
+               pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n",
+                      driver, PTR_ERR(tfm));
+               return PTR_ERR(tfm);
+       }
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+       if (!desc) {
+               crypto_free_shash(tfm);
+               return -ENOMEM;
+       }
+       desc->tfm = tfm;
+       *tfm_ret = tfm;
+       *desc_ret = desc;
+       return 0;
+ }
  static int __alg_test_hash(const struct hash_testvec *vecs,
                           unsigned int num_vecs, const char *driver,
                           u32 type, u32 mask,
                           const char *generic_driver, unsigned int maxkeysize)
  {
-       struct crypto_ahash *tfm;
+       struct crypto_ahash *atfm = NULL;
        struct ahash_request *req = NULL;
+       struct crypto_shash *stfm = NULL;
+       struct shash_desc *desc = NULL;
        struct test_sglist *tsgl = NULL;
        u8 *hashstate = NULL;
+       unsigned int statesize;
        unsigned int i;
        int err;
  
-       tfm = crypto_alloc_ahash(driver, type, mask);
-       if (IS_ERR(tfm)) {
+       /*
+        * Always test the ahash API.  This works regardless of whether the
+        * algorithm is implemented as ahash or shash.
+        */
+       atfm = crypto_alloc_ahash(driver, type, mask);
+       if (IS_ERR(atfm)) {
                pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
-                      driver, PTR_ERR(tfm));
-               return PTR_ERR(tfm);
+                      driver, PTR_ERR(atfm));
+               return PTR_ERR(atfm);
        }
  
-       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       req = ahash_request_alloc(atfm, GFP_KERNEL);
        if (!req) {
                pr_err("alg: hash: failed to allocate request for %s\n",
                       driver);
                goto out;
        }
  
+       /*
+        * If available also test the shash API, to cover corner cases that may
+        * be missed by testing the ahash API only.
+        */
+       err = alloc_shash(driver, type, mask, &stfm, &desc);
+       if (err)
+               goto out;
        tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL);
        if (!tsgl || init_test_sglist(tsgl) != 0) {
                pr_err("alg: hash: failed to allocate test buffers for %s\n",
                goto out;
        }
  
-       hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN,
-                           GFP_KERNEL);
+       statesize = crypto_ahash_statesize(atfm);
+       if (stfm)
+               statesize = max(statesize, crypto_shash_statesize(stfm));
+       hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL);
        if (!hashstate) {
                pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
                       driver);
        }
  
        for (i = 0; i < num_vecs; i++) {
-               err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate);
+               err = test_hash_vec(driver, &vecs[i], i, req, desc, tsgl,
+                                   hashstate);
                if (err)
                        goto out;
+               cond_resched();
        }
        err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
-                                       tsgl, hashstate);
+                                       desc, tsgl, hashstate);
  out:
        kfree(hashstate);
        if (tsgl) {
                destroy_test_sglist(tsgl);
                kfree(tsgl);
        }
+       kfree(desc);
+       crypto_free_shash(stfm);
        ahash_request_free(req);
-       crypto_free_ahash(tfm);
+       crypto_free_ahash(atfm);
        return err;
  }
  
@@@ -1755,6 -2044,7 +2039,7 @@@ static int test_aead_vec(const char *dr
                                                &cfg, req, tsgls);
                        if (err)
                                return err;
+                       cond_resched();
                }
        }
  #endif
@@@ -1864,7 -2154,7 +2149,7 @@@ static int test_aead_vs_generic_impl(co
        unsigned int i;
        struct aead_testvec vec = { 0 };
        char vec_name[64];
-       struct testvec_config cfg;
+       struct testvec_config *cfg;
        char cfgname[TESTVEC_CONFIG_NAMELEN];
        int err;
  
                return err;
        }
  
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg) {
+               err = -ENOMEM;
+               goto out;
+       }
        generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
        if (!generic_req) {
                err = -ENOMEM;
                generate_random_aead_testvec(generic_req, &vec,
                                             maxkeysize, maxdatasize,
                                             vec_name, sizeof(vec_name));
-               generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+               generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
  
-               err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg,
+               err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, cfg,
                                        req, tsgls);
                if (err)
                        goto out;
-               err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg,
+               err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
                                        req, tsgls);
                if (err)
                        goto out;
        }
        err = 0;
  out:
+       kfree(cfg);
        kfree(vec.key);
        kfree(vec.iv);
        kfree(vec.assoc);
@@@ -1994,6 -2291,7 +2286,7 @@@ static int test_aead(const char *driver
                                    tsgls);
                if (err)
                        return err;
+               cond_resched();
        }
        return 0;
  }
@@@ -2336,6 -2634,7 +2629,7 @@@ static int test_skcipher_vec(const cha
                                                    &cfg, req, tsgls);
                        if (err)
                                return err;
+                       cond_resched();
                }
        }
  #endif
@@@ -2409,7 -2708,7 +2703,7 @@@ static int test_skcipher_vs_generic_imp
        unsigned int i;
        struct cipher_testvec vec = { 0 };
        char vec_name[64];
-       struct testvec_config cfg;
+       struct testvec_config *cfg;
        char cfgname[TESTVEC_CONFIG_NAMELEN];
        int err;
  
                return err;
        }
  
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg) {
+               err = -ENOMEM;
+               goto out;
+       }
        generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
        if (!generic_req) {
                err = -ENOMEM;
        for (i = 0; i < fuzz_iterations * 8; i++) {
                generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
                                               vec_name, sizeof(vec_name));
-               generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+               generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
  
                err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
-                                           &cfg, req, tsgls);
+                                           cfg, req, tsgls);
                if (err)
                        goto out;
                err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
-                                           &cfg, req, tsgls);
+                                           cfg, req, tsgls);
                if (err)
                        goto out;
                cond_resched();
        }
        err = 0;
  out:
+       kfree(cfg);
        kfree(vec.key);
        kfree(vec.iv);
        kfree(vec.ptext);
@@@ -2535,6 -2841,7 +2836,7 @@@ static int test_skcipher(const char *dr
                                        tsgls);
                if (err)
                        return err;
+               cond_resched();
        }
        return 0;
  }
@@@ -4125,6 -4432,7 +4427,7 @@@ static const struct alg_test_desc alg_t
                }
        }, {
                .alg = "ecb(arc4)",
+               .generic_driver = "ecb(arc4)-generic",
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = __VECS(arc4_tv_template)
                .alg = "xts512(paes)",
                .test = alg_test_null,
                .fips_allowed = 1,
+       }, {
+               .alg = "xxhash64",
+               .test = alg_test_hash,
+               .fips_allowed = 1,
+               .suite = {
+                       .hash = __VECS(xxhash64_tv_template)
+               }
        }, {
                .alg = "zlib-deflate",
                .test = alg_test_comp,
diff --combined crypto/testmgr.h
@@@ -1,4 -1,3 +1,4 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * Algorithm testing framework and tests.
   *
   *              Gabriele Paoloni <gabriele.paoloni@intel.com>
   *              Tadeusz Struk (tadeusz.struk@intel.com)
   *     Copyright (c) 2010, Intel Corporation.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
 - *
   */
  #ifndef _CRYPTO_TESTMGR_H
  #define _CRYPTO_TESTMGR_H
@@@ -38,7 -43,7 +38,7 @@@ struct hash_testvec 
        const char *key;
        const char *plaintext;
        const char *digest;
-       unsigned short psize;
+       unsigned int psize;
        unsigned short ksize;
        int setkey_error;
        int digest_error;
@@@ -69,7 -74,7 +69,7 @@@ struct cipher_testvec 
        const char *ctext;
        unsigned char wk; /* weak key flag */
        unsigned short klen;
-       unsigned short len;
+       unsigned int len;
        bool fips_skip;
        bool generates_iv;
        int setkey_error;
@@@ -105,9 -110,9 +105,9 @@@ struct aead_testvec 
        unsigned char novrfy;
        unsigned char wk;
        unsigned char klen;
-       unsigned short plen;
-       unsigned short clen;
-       unsigned short alen;
+       unsigned int plen;
+       unsigned int clen;
+       unsigned int alen;
        int setkey_error;
        int setauthsize_error;
        int crypt_error;
@@@ -33382,6 -33387,112 +33382,112 @@@ static const struct hash_testvec crc32c
        }
  };
  
+ static const struct hash_testvec xxhash64_tv_template[] = {
+       {
+               .psize = 0,
+               .digest = "\x99\xe9\xd8\x51\x37\xdb\x46\xef",
+       },
+       {
+               .plaintext = "\x40",
+               .psize = 1,
+               .digest = "\x20\x5c\x91\xaa\x88\xeb\x59\xd0",
+       },
+       {
+               .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+                            "\x88\xc7\x9a\x09\x1a\x9b",
+               .psize = 14,
+               .digest = "\xa8\xe8\x2b\xa9\x92\xa1\x37\x4a",
+       },
+       {
+               .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+                            "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0"
+                            "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b"
+                            "\x57\x65\x7f\xad\xc3\x7d\xca\x40"
+                            "\x31\x65\x05\xbb\x31\xae\x51\x11"
+                            "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46"
+                            "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e"
+                            "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9"
+                            "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c"
+                            "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67"
+                            "\x57\x20\x94\xf1\x1e\xfd\xce\x39"
+                            "\x99\x57\x69\x39\xa5\xd0\x8d\xd9"
+                            "\x43\xfe\x1d\x66\x04\x3c\x27\x6a"
+                            "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56"
+                            "\xa5\xb3\xec\xd9\x1f\x42\x65\x66"
+                            "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27"
+                            "\x3f\x2f\xa9\x55\x93\x01\x27\x33"
+                            "\x43\x99\x4d\x81\x85\xae\x82\x00"
+                            "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc"
+                            "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e"
+                            "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18"
+                            "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01"
+                            "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76"
+                            "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6"
+                            "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79"
+                            "\x52\xea\xa1\x90\xc3\xaf\x08\x70"
+                            "\x12\x02\x0c\xdb\x94\x00\x38\x95"
+                            "\xed\xfd\x08\xf7\xe8\x04",
+               .psize = 222,
+               .digest = "\x41\xfc\xd4\x29\xfe\xe7\x85\x17",
+       },
+       {
+               .psize = 0,
+               .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+               .ksize = 8,
+               .digest = "\xef\x17\x9b\x92\xa2\xfd\x75\xac",
+       },
+       {
+               .plaintext = "\x40",
+               .psize = 1,
+               .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+               .ksize = 8,
+               .digest = "\xd1\x70\x4f\x14\x02\xc4\x9e\x71",
+       },
+       {
+               .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+                            "\x88\xc7\x9a\x09\x1a\x9b",
+               .psize = 14,
+               .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+               .ksize = 8,
+               .digest = "\xa4\xcd\xfe\x8e\x37\xe2\x1c\x64"
+       },
+       {
+               .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d"
+                            "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0"
+                            "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b"
+                            "\x57\x65\x7f\xad\xc3\x7d\xca\x40"
+                            "\x31\x65\x05\xbb\x31\xae\x51\x11"
+                            "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46"
+                            "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e"
+                            "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9"
+                            "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c"
+                            "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67"
+                            "\x57\x20\x94\xf1\x1e\xfd\xce\x39"
+                            "\x99\x57\x69\x39\xa5\xd0\x8d\xd9"
+                            "\x43\xfe\x1d\x66\x04\x3c\x27\x6a"
+                            "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56"
+                            "\xa5\xb3\xec\xd9\x1f\x42\x65\x66"
+                            "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27"
+                            "\x3f\x2f\xa9\x55\x93\x01\x27\x33"
+                            "\x43\x99\x4d\x81\x85\xae\x82\x00"
+                            "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc"
+                            "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e"
+                            "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18"
+                            "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01"
+                            "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76"
+                            "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6"
+                            "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79"
+                            "\x52\xea\xa1\x90\xc3\xaf\x08\x70"
+                            "\x12\x02\x0c\xdb\x94\x00\x38\x95"
+                            "\xed\xfd\x08\xf7\xe8\x04",
+               .psize = 222,
+               .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00",
+               .ksize = 8,
+               .digest = "\x58\xbc\x55\xf2\x42\x81\x5c\xf0"
+       },
+ };
  static const struct comp_testvec lz4_comp_tv_template[] = {
        {
                .inlen  = 255,
diff --combined crypto/tgr192.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Cryptographic API.
   *
   *
   * Adapted for Linux Kernel Crypto  by Aaron Grothe 
   * ajgrothe@yahoo.com, February 22, 2005
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
   */
  #include <crypto/internal/hash.h>
  #include <linux/init.h>
@@@ -630,9 -635,10 +630,10 @@@ static struct shash_alg tgr_algs[3] = 
        .final          =       tgr192_final,
        .descsize       =       sizeof(struct tgr192_ctx),
        .base           =       {
-               .cra_name       =       "tgr192",
-               .cra_blocksize  =       TGR192_BLOCK_SIZE,
-               .cra_module     =       THIS_MODULE,
+               .cra_name        =      "tgr192",
+               .cra_driver_name =      "tgr192-generic",
+               .cra_blocksize   =      TGR192_BLOCK_SIZE,
+               .cra_module      =      THIS_MODULE,
        }
  }, {
        .digestsize     =       TGR160_DIGEST_SIZE,
        .final          =       tgr160_final,
        .descsize       =       sizeof(struct tgr192_ctx),
        .base           =       {
-               .cra_name       =       "tgr160",
-               .cra_blocksize  =       TGR192_BLOCK_SIZE,
-               .cra_module     =       THIS_MODULE,
+               .cra_name        =      "tgr160",
+               .cra_driver_name =      "tgr160-generic",
+               .cra_blocksize   =      TGR192_BLOCK_SIZE,
+               .cra_module      =      THIS_MODULE,
        }
  }, {
        .digestsize     =       TGR128_DIGEST_SIZE,
        .final          =       tgr128_final,
        .descsize       =       sizeof(struct tgr192_ctx),
        .base           =       {
-               .cra_name       =       "tgr128",
-               .cra_blocksize  =       TGR192_BLOCK_SIZE,
-               .cra_module     =       THIS_MODULE,
+               .cra_name        =      "tgr128",
+               .cra_driver_name =      "tgr128-generic",
+               .cra_blocksize   =      TGR192_BLOCK_SIZE,
+               .cra_module      =      THIS_MODULE,
        }
  } };
  
diff --combined crypto/zstd.c
@@@ -1,8 -1,16 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API.
   *
   * Copyright (c) 2017-present, Facebook, Inc.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published by
 - * the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but WITHOUT
 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 - * more details.
   */
  #include <linux/crypto.h>
  #include <linux/init.h>
@@@ -206,6 -214,7 +206,7 @@@ static int zstd_sdecompress(struct cryp
  
  static struct crypto_alg alg = {
        .cra_name               = "zstd",
+       .cra_driver_name        = "zstd-generic",
        .cra_flags              = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_ctxsize            = sizeof(struct zstd_ctx),
        .cra_module             = THIS_MODULE,
diff --combined drivers/crypto/Kconfig
@@@ -1,4 -1,3 +1,4 @@@
 +# SPDX-License-Identifier: GPL-2.0-only
  
  menuconfig CRYPTO_HW
        bool "Hardware crypto devices"
@@@ -520,10 -519,13 +520,13 @@@ config CRYPTO_DEV_ATMEL_SH
          To compile this driver as a module, choose M here: the module
          will be called atmel-sha.
  
+ config CRYPTO_DEV_ATMEL_I2C
+       tristate
  config CRYPTO_DEV_ATMEL_ECC
        tristate "Support for Microchip / Atmel ECC hw accelerator"
-       depends on ARCH_AT91 || COMPILE_TEST
        depends on I2C
+       select CRYPTO_DEV_ATMEL_I2C
        select CRYPTO_ECDH
        select CRC16
        help
          To compile this driver as a module, choose M here: the module
          will be called atmel-ecc.
  
+ config CRYPTO_DEV_ATMEL_SHA204A
+       tristate "Support for Microchip / Atmel SHA accelerator and RNG"
+       depends on I2C
+       select CRYPTO_DEV_ATMEL_I2C
+       select HW_RANDOM
+       select CRC16
+       help
+         Microchip / Atmel SHA accelerator and RNG.
+         Select this if you want to use the Microchip / Atmel SHA204A
+         module as a random number generator. (Other functions of the
+         chip are currently not exposed by this driver)
+         To compile this driver as a module, choose M here: the module
+         will be called atmel-sha204a.
  config CRYPTO_DEV_CCP
        bool "Support for AMD Secure Processor"
        depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
@@@ -1,10 -1,19 +1,10 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /**
   * AMCC SoC PPC4xx Crypto Driver
   *
   * Copyright (c) 2008 Applied Micro Circuits Corporation.
   * All rights reserved. James Hsiao <jhsiao@amcc.com>
   *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
 - * This program is distributed in the hope that it will be useful,
 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
 - *
   * This file implements the Linux crypto algorithms.
   */
  
@@@ -67,12 -76,16 +67,16 @@@ static void set_dynamic_sa_command_1(st
  }
  
  static inline int crypto4xx_crypt(struct skcipher_request *req,
-                                 const unsigned int ivlen, bool decrypt)
+                                 const unsigned int ivlen, bool decrypt,
+                                 bool check_blocksize)
  {
        struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
        struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
        __le32 iv[AES_IV_SIZE];
  
+       if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+               return -EINVAL;
        if (ivlen)
                crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
  
                ctx->sa_len, 0, NULL);
  }
  
- int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+ {
+       return crypto4xx_crypt(req, 0, false, true);
+ }
+ int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+ {
+       return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+ }
+ int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
  {
-       return crypto4xx_crypt(req, 0, false);
+       return crypto4xx_crypt(req, 0, true, true);
  }
  
- int crypto4xx_encrypt_iv(struct skcipher_request *req)
+ int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
  {
-       return crypto4xx_crypt(req, AES_IV_SIZE, false);
+       return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
  }
  
- int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+ int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
  {
-       return crypto4xx_crypt(req, 0, true);
+       return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
  }
  
- int crypto4xx_decrypt_iv(struct skcipher_request *req)
+ int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
  {
-       return crypto4xx_crypt(req, AES_IV_SIZE, true);
+       return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
  }
  
  /**
@@@ -269,8 -292,8 +283,8 @@@ crypto4xx_ctr_crypt(struct skcipher_req
                return ret;
        }
  
-       return encrypt ? crypto4xx_encrypt_iv(req)
-                      : crypto4xx_decrypt_iv(req);
+       return encrypt ? crypto4xx_encrypt_iv_stream(req)
+                      : crypto4xx_decrypt_iv_stream(req);
  }
  
  static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
@@@ -1,10 -1,19 +1,10 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /**
   * AMCC SoC PPC4xx Crypto Driver
   *
   * Copyright (c) 2008 Applied Micro Circuits Corporation.
   * All rights reserved. James Hsiao <jhsiao@amcc.com>
   *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
 - * This program is distributed in the hope that it will be useful,
 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
 - *
   * This file implements AMCC crypto offload Linux device driver for use with
   * Linux CryptoAPI.
   */
@@@ -182,7 -191,6 +182,6 @@@ static u32 crypto4xx_build_pdr(struct c
                                  dev->pdr_pa);
                return -ENOMEM;
        }
-       memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
        dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
                                   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
                                   &dev->shadow_sa_pool_pa,
@@@ -1210,8 -1218,8 +1209,8 @@@ static struct crypto4xx_alg_common cryp
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_IV_SIZE,
                .setkey = crypto4xx_setkey_aes_cbc,
-               .encrypt = crypto4xx_encrypt_iv,
-               .decrypt = crypto4xx_decrypt_iv,
+               .encrypt = crypto4xx_encrypt_iv_block,
+               .decrypt = crypto4xx_decrypt_iv_block,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
                        .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_IV_SIZE,
                .setkey = crypto4xx_setkey_aes_cfb,
-               .encrypt = crypto4xx_encrypt_iv,
-               .decrypt = crypto4xx_decrypt_iv,
+               .encrypt = crypto4xx_encrypt_iv_stream,
+               .decrypt = crypto4xx_decrypt_iv_stream,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
                        .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
                                CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
                        .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .setkey = crypto4xx_setkey_aes_ecb,
-               .encrypt = crypto4xx_encrypt_noiv,
-               .decrypt = crypto4xx_decrypt_noiv,
+               .encrypt = crypto4xx_encrypt_noiv_block,
+               .decrypt = crypto4xx_decrypt_noiv_block,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
                        .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_KERN_DRIVER_ONLY,
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_ctxsize = sizeof(struct crypto4xx_ctx),
                        .cra_module = THIS_MODULE,
                },
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_IV_SIZE,
                .setkey = crypto4xx_setkey_aes_ofb,
-               .encrypt = crypto4xx_encrypt_iv,
-               .decrypt = crypto4xx_decrypt_iv,
+               .encrypt = crypto4xx_encrypt_iv_stream,
+               .decrypt = crypto4xx_decrypt_iv_stream,
                .init = crypto4xx_sk_init,
                .exit = crypto4xx_sk_exit,
        } },
@@@ -1,10 -1,19 +1,10 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /**
   * AMCC SoC PPC4xx Crypto Driver
   *
   * Copyright (c) 2008 Applied Micro Circuits Corporation.
   * All rights reserved. James Hsiao <jhsiao@amcc.com>
   *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
 - * This program is distributed in the hope that it will be useful,
 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
 - *
   * This is the header file for AMCC Crypto offload Linux device driver for
   * use with Linux CryptoAPI.
  
@@@ -173,10 -182,12 +173,12 @@@ int crypto4xx_setkey_rfc3686(struct cry
                             const u8 *key, unsigned int keylen);
  int crypto4xx_encrypt_ctr(struct skcipher_request *req);
  int crypto4xx_decrypt_ctr(struct skcipher_request *req);
- int crypto4xx_encrypt_iv(struct skcipher_request *req);
- int crypto4xx_decrypt_iv(struct skcipher_request *req);
- int crypto4xx_encrypt_noiv(struct skcipher_request *req);
- int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+ int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+ int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+ int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+ int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+ int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
  int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
  int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
  int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
@@@ -1,6 -1,17 +1,6 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright 2016 Broadcom
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License, version 2, as
 - * published by the Free Software Foundation (the "GPL").
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 - * General Public License version 2 (GPLv2) for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * version 2 (GPLv2) along with this source code.
   */
  
  #include <linux/err.h>
@@@ -85,7 -96,7 +85,7 @@@ MODULE_PARM_DESC(aead_pri, "Priority fo
   * 0x70 - ring 2
   * 0x78 - ring 3
   */
- char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
+ static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
  /*
   * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
   * is set dynamically after reading SPU type from device tree.
@@@ -2083,7 -2094,7 +2083,7 @@@ static int __ahash_init(struct ahash_re
   * Return: true if incremental hashing is not supported
   *         false otherwise
   */
- bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
+ static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
  {
        struct spu_hw *spu = &iproc_priv.spu;
  
@@@ -4809,7 -4820,7 +4809,7 @@@ static int spu_dt_read(struct platform_
        return 0;
  }
  
- int bcm_spu_probe(struct platform_device *pdev)
+ static int bcm_spu_probe(struct platform_device *pdev)
  {
        struct device *dev = &pdev->dev;
        struct spu_hw *spu = &iproc_priv.spu;
@@@ -4853,7 -4864,7 +4853,7 @@@ failure
        return err;
  }
  
- int bcm_spu_remove(struct platform_device *pdev)
+ static int bcm_spu_remove(struct platform_device *pdev)
  {
        int i;
        struct device *dev = &pdev->dev;
@@@ -1,6 -1,17 +1,6 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright 2016 Broadcom
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License, version 2, as
 - * published by the Free Software Foundation (the "GPL").
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 - * General Public License version 2 (GPLv2) for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * version 2 (GPLv2) along with this source code.
   */
  
  /*
@@@ -38,21 -49,21 +38,21 @@@ enum spu2_proto_sel 
        SPU2_DTLS_AEAD = 10
  };
  
- char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
+ static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
        "DES", "3DES"
  };
  
- char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS",
-       "CCM", "GCM"
+ static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
+       "XTS", "CCM", "GCM"
  };
  
- char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
+ static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
        "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
        "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
        "SHA3-384", "SHA3-512"
  };
  
- char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
+ static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
        "Rabin", "CCM", "GCM", "Reserved"
  };
  
@@@ -1,13 -1,15 +1,12 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  
  /*
   * Copyright (C) 2016 Cavium, Inc.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of version 2 of the GNU General Public License
 - * as published by the Free Software Foundation.
   */
  
  #include <crypto/aes.h>
  #include <crypto/algapi.h>
  #include <crypto/authenc.h>
- #include <crypto/crypto_wq.h>
  #include <crypto/des.h>
  #include <crypto/xts.h>
  #include <linux/crypto.h>
@@@ -1,8 -1,8 +1,8 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * AMD Cryptographic Coprocessor (CCP) AES crypto API support
   *
-  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+  * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
   *
   * Author: Tom Lendacky <thomas.lendacky@amd.com>
   */
@@@ -76,8 -76,7 +76,7 @@@ static int ccp_aes_crypt(struct ablkcip
                return -EINVAL;
  
        if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
-            (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
-            (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+            (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
            (req->nbytes & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;
  
@@@ -288,7 -287,7 +287,7 @@@ static struct ccp_aes_def aes_algs[] = 
                .version        = CCP_VERSION(3, 0),
                .name           = "cfb(aes)",
                .driver_name    = "cfb-aes-ccp",
-               .blocksize      = AES_BLOCK_SIZE,
+               .blocksize      = 1,
                .ivsize         = AES_BLOCK_SIZE,
                .alg_defaults   = &ccp_aes_defaults,
        },
@@@ -1,4 -1,4 +1,4 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * AMD Cryptographic Coprocessor (CCP) driver
   *
@@@ -32,56 -32,62 +32,62 @@@ struct ccp_tasklet_data 
  };
  
  /* Human-readable error strings */
+ #define CCP_MAX_ERROR_CODE    64
  static char *ccp_error_codes[] = {
        "",
-       "ERR 01: ILLEGAL_ENGINE",
-       "ERR 02: ILLEGAL_KEY_ID",
-       "ERR 03: ILLEGAL_FUNCTION_TYPE",
-       "ERR 04: ILLEGAL_FUNCTION_MODE",
-       "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
-       "ERR 06: ILLEGAL_FUNCTION_SIZE",
-       "ERR 07: Zlib_MISSING_INIT_EOM",
-       "ERR 08: ILLEGAL_FUNCTION_RSVD",
-       "ERR 09: ILLEGAL_BUFFER_LENGTH",
-       "ERR 10: VLSB_FAULT",
-       "ERR 11: ILLEGAL_MEM_ADDR",
-       "ERR 12: ILLEGAL_MEM_SEL",
-       "ERR 13: ILLEGAL_CONTEXT_ID",
-       "ERR 14: ILLEGAL_KEY_ADDR",
-       "ERR 15: 0xF Reserved",
-       "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
-       "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
-       "ERR 18: CMD_TIMEOUT",
-       "ERR 19: IDMA0_AXI_SLVERR",
-       "ERR 20: IDMA0_AXI_DECERR",
-       "ERR 21: 0x15 Reserved",
-       "ERR 22: IDMA1_AXI_SLAVE_FAULT",
-       "ERR 23: IDMA1_AIXI_DECERR",
-       "ERR 24: 0x18 Reserved",
-       "ERR 25: ZLIBVHB_AXI_SLVERR",
-       "ERR 26: ZLIBVHB_AXI_DECERR",
-       "ERR 27: 0x1B Reserved",
-       "ERR 27: ZLIB_UNEXPECTED_EOM",
-       "ERR 27: ZLIB_EXTRA_DATA",
-       "ERR 30: ZLIB_BTYPE",
-       "ERR 31: ZLIB_UNDEFINED_SYMBOL",
-       "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
-       "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
-       "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
-       "ERR 35: ZLIB_UNCOMPRESSED_LEN",
-       "ERR 36: ZLIB_LIMIT_REACHED",
-       "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
-       "ERR 38: ODMA0_AXI_SLVERR",
-       "ERR 39: ODMA0_AXI_DECERR",
-       "ERR 40: 0x28 Reserved",
-       "ERR 41: ODMA1_AXI_SLVERR",
-       "ERR 42: ODMA1_AXI_DECERR",
-       "ERR 43: LSB_PARITY_ERR",
+       "ILLEGAL_ENGINE",
+       "ILLEGAL_KEY_ID",
+       "ILLEGAL_FUNCTION_TYPE",
+       "ILLEGAL_FUNCTION_MODE",
+       "ILLEGAL_FUNCTION_ENCRYPT",
+       "ILLEGAL_FUNCTION_SIZE",
+       "Zlib_MISSING_INIT_EOM",
+       "ILLEGAL_FUNCTION_RSVD",
+       "ILLEGAL_BUFFER_LENGTH",
+       "VLSB_FAULT",
+       "ILLEGAL_MEM_ADDR",
+       "ILLEGAL_MEM_SEL",
+       "ILLEGAL_CONTEXT_ID",
+       "ILLEGAL_KEY_ADDR",
+       "0xF Reserved",
+       "Zlib_ILLEGAL_MULTI_QUEUE",
+       "Zlib_ILLEGAL_JOBID_CHANGE",
+       "CMD_TIMEOUT",
+       "IDMA0_AXI_SLVERR",
+       "IDMA0_AXI_DECERR",
+       "0x15 Reserved",
+       "IDMA1_AXI_SLAVE_FAULT",
+       "IDMA1_AIXI_DECERR",
+       "0x18 Reserved",
+       "ZLIBVHB_AXI_SLVERR",
+       "ZLIBVHB_AXI_DECERR",
+       "0x1B Reserved",
+       "ZLIB_UNEXPECTED_EOM",
+       "ZLIB_EXTRA_DATA",
+       "ZLIB_BTYPE",
+       "ZLIB_UNDEFINED_SYMBOL",
+       "ZLIB_UNDEFINED_DISTANCE_S",
+       "ZLIB_CODE_LENGTH_SYMBOL",
+       "ZLIB _VHB_ILLEGAL_FETCH",
+       "ZLIB_UNCOMPRESSED_LEN",
+       "ZLIB_LIMIT_REACHED",
+       "ZLIB_CHECKSUM_MISMATCH0",
+       "ODMA0_AXI_SLVERR",
+       "ODMA0_AXI_DECERR",
+       "0x28 Reserved",
+       "ODMA1_AXI_SLVERR",
+       "ODMA1_AXI_DECERR",
  };
  
- void ccp_log_error(struct ccp_device *d, int e)
+ void ccp_log_error(struct ccp_device *d, unsigned int e)
  {
-       dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+       if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+               return;
+       if (e < ARRAY_SIZE(ccp_error_codes))
+               dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+       else
+               dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
  }
  
  /* List of CCPs, CCP count, read-write access lock, and access functions
@@@ -1,4 -1,4 +1,4 @@@
 -/* SPDX-License-Identifier: GPL-2.0 */
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * AMD Cryptographic Coprocessor (CCP) driver
   *
@@@ -629,7 -629,7 +629,7 @@@ struct ccp5_desc 
  void ccp_add_device(struct ccp_device *ccp);
  void ccp_del_device(struct ccp_device *ccp);
  
- extern void ccp_log_error(struct ccp_device *, int);
+ extern void ccp_log_error(struct ccp_device *, unsigned int);
  
  struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
  bool ccp_queues_suspended(struct ccp_device *ccp);
@@@ -1,8 -1,8 +1,8 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * AMD Cryptographic Coprocessor (CCP) driver
   *
-  * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
+  * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
   *
   * Author: Tom Lendacky <thomas.lendacky@amd.com>
   * Author: Gary R Hook <gary.hook@amd.com>
@@@ -890,8 -890,7 +890,7 @@@ static int ccp_run_aes_cmd(struct ccp_c
                return -EINVAL;
  
        if (((aes->mode == CCP_AES_MODE_ECB) ||
-            (aes->mode == CCP_AES_MODE_CBC) ||
-            (aes->mode == CCP_AES_MODE_CFB)) &&
+            (aes->mode == CCP_AES_MODE_CBC)) &&
            (aes->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;
  
@@@ -1264,6 -1263,9 +1263,9 @@@ static int ccp_run_des3_cmd(struct ccp_
        int ret;
  
        /* Error checks */
+       if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
+               return -EINVAL;
        if (!cmd_q->ccp->vdata->perform->des3)
                return -EINVAL;
  
         * passthru option to convert from big endian to little endian.
         */
        if (des3->mode != CCP_DES3_MODE_ECB) {
-               u32 load_mode;
                op.sb_ctx = cmd_q->sb_ctx;
  
                ret = ccp_init_dm_workarea(&ctx, cmd_q,
                if (ret)
                        goto e_ctx;
  
-               if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
-                       load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
-               else
-                       load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
                ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
-                                    load_mode);
+                                    CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_ctx;
                }
  
                /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
-               if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
-                       dm_offset = CCP_SB_BYTES - des3->iv_len;
-               else
-                       dm_offset = 0;
                ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
                                DES3_EDE_BLOCK_SIZE);
        }
@@@ -1,8 -1,12 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Intel IXP4xx NPE-C crypto driver
   *
   * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of version 2 of the GNU General Public License
 - * as published by the Free Software Foundation.
 - *
   */
  
  #include <linux/platform_device.h>
@@@ -100,7 -104,7 +100,7 @@@ struct buffer_desc 
        u16 pkt_len;
        u16 buf_len;
  #endif
-       u32 phys_addr;
+       dma_addr_t phys_addr;
        u32 __reserved[4];
        struct buffer_desc *next;
        enum dma_data_direction dir;
@@@ -117,9 -121,9 +117,9 @@@ struct crypt_ctl 
        u8 mode;                /* NPE_OP_*  operation mode */
  #endif
        u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
-       u32 icv_rev_aes;        /* icv or rev aes */
-       u32 src_buf;
-       u32 dst_buf;
+       dma_addr_t icv_rev_aes; /* icv or rev aes */
+       dma_addr_t src_buf;
+       dma_addr_t dst_buf;
  #ifdef __ARMEB__
        u16 auth_offs;          /* Authentication start offset */
        u16 auth_len;           /* Authentication data length */
@@@ -320,7 -324,8 +320,8 @@@ static struct crypt_ctl *get_crypt_desc
        }
  }
  
- static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
+ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+                          dma_addr_t phys)
  {
        while (buf) {
                struct buffer_desc *buf1;
@@@ -602,7 -607,7 +603,7 @@@ static int register_chain_var(struct cr
        struct buffer_desc *buf;
        int i;
        u8 *pad;
-       u32 pad_phys, buf_phys;
+       dma_addr_t pad_phys, buf_phys;
  
        BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
        pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
@@@ -787,7 -792,7 +788,7 @@@ static struct buffer_desc *chainup_buff
        for (; nbytes > 0; sg = sg_next(sg)) {
                unsigned len = min(nbytes, sg->length);
                struct buffer_desc *next_buf;
-               u32 next_buf_phys;
+               dma_addr_t next_buf_phys;
                void *ptr;
  
                nbytes -= len;
diff --combined drivers/crypto/mxs-dcp.c
@@@ -1,8 -1,14 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Freescale i.MX23/i.MX28 Data Co-Processor driver
   *
   * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 - *
 - * The code contained herein is licensed under the GNU General Public
 - * License. You may obtain a copy of the GNU General Public License
 - * Version 2 or later at the following locations:
 - *
 - * http://www.opensource.org/licenses/gpl-license.html
 - * http://www.gnu.org/copyleft/gpl.html
   */
  
  #include <linux/dma-mapping.h>
@@@ -986,8 -992,6 +986,6 @@@ static int mxs_dcp_probe(struct platfor
        struct device *dev = &pdev->dev;
        struct dcp *sdcp = NULL;
        int i, ret;
-       struct resource *iores;
        int dcp_vmi_irq, dcp_irq;
  
        if (global_sdcp) {
                return -ENODEV;
        }
  
-       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dcp_vmi_irq = platform_get_irq(pdev, 0);
        if (dcp_vmi_irq < 0) {
                dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
                return -ENOMEM;
  
        sdcp->dev = dev;
-       sdcp->base = devm_ioremap_resource(dev, iores);
+       sdcp->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sdcp->base))
                return PTR_ERR(sdcp->base);
  
@@@ -1,8 -1,17 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Driver for IBM PowerNV 842 compression accelerator
   *
   * Copyright (C) 2015 Dan Streetman, IBM Corp
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
 - *
 - * This program is distributed in the hope that it will be useful,
 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@@ -27,8 -36,6 +27,6 @@@ MODULE_ALIAS_CRYPTO("842-nx")
  #define WORKMEM_ALIGN (CRB_ALIGN)
  #define CSB_WAIT_MAX  (5000) /* ms */
  #define VAS_RETRIES   (10)
- /* # of requests allowed per RxFIFO at a time. 0 for unlimited */
- #define MAX_CREDITS_PER_RXFIFO        (1024)
  
  struct nx842_workmem {
        /* Below fields must be properly aligned */
@@@ -812,7 -819,11 +810,11 @@@ static int __init vas_cfg_coproc_info(s
        rxattr.lnotify_lpid = lpid;
        rxattr.lnotify_pid = pid;
        rxattr.lnotify_tid = tid;
-       rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+       /*
+        * Maximum RX window credits can not be more than #CRBs in
+        * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
+        */
+       rxattr.wcreds_max = fifo_size / CRB_SIZE;
  
        /*
         * Open a VAS receice window which is used to configure RxFIFO
diff --combined drivers/crypto/nx/nx.c
@@@ -1,9 -1,21 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /**
   * Routines supporting the Power 7+ Nest Accelerators driver
   *
   * Copyright (C) 2011-2012 International Business Machines Inc.
   *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; version 2 only.
 - *
 - * This program is distributed in the hope that it will be useful,
 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 - *
   * Author: Kent Yoder <yoder1@us.ibm.com>
   */
  
@@@ -569,9 -581,7 +569,7 @@@ static int nx_register_algs(void
  
        memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
  
-       rc = NX_DEBUGFS_INIT(&nx_driver);
-       if (rc)
-               goto out;
+       NX_DEBUGFS_INIT(&nx_driver);
  
        nx_driver.of.status = NX_OKAY;
  
@@@ -1,9 -1,21 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /**
   * debugfs routines supporting the Power 7+ Nest Accelerators driver
   *
   * Copyright (C) 2011-2012 International Business Machines Inc.
   *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; version 2 only.
 - *
 - * This program is distributed in the hope that it will be useful,
 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 - * GNU General Public License for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, write to the Free Software
 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 - *
   * Author: Kent Yoder <yoder1@us.ibm.com>
   */
  
   * Documentation/ABI/testing/debugfs-pfo-nx-crypto
   */
  
int nx_debugfs_init(struct nx_crypto_driver *drv)
void nx_debugfs_init(struct nx_crypto_driver *drv)
  {
-       struct nx_debugfs *dfs = &drv->dfs;
+       struct dentry *root;
  
-       dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
+       root = debugfs_create_dir(NX_NAME, NULL);
+       drv->dfs_root = root;
  
-       dfs->dfs_aes_ops =
-               debugfs_create_u32("aes_ops",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
-       dfs->dfs_sha256_ops =
-               debugfs_create_u32("sha256_ops",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u32 *)&drv->stats.sha256_ops);
-       dfs->dfs_sha512_ops =
-               debugfs_create_u32("sha512_ops",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u32 *)&drv->stats.sha512_ops);
-       dfs->dfs_aes_bytes =
-               debugfs_create_u64("aes_bytes",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u64 *)&drv->stats.aes_bytes);
-       dfs->dfs_sha256_bytes =
-               debugfs_create_u64("sha256_bytes",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u64 *)&drv->stats.sha256_bytes);
-       dfs->dfs_sha512_bytes =
-               debugfs_create_u64("sha512_bytes",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u64 *)&drv->stats.sha512_bytes);
-       dfs->dfs_errors =
-               debugfs_create_u32("errors",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root, (u32 *)&drv->stats.errors);
-       dfs->dfs_last_error =
-               debugfs_create_u32("last_error",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u32 *)&drv->stats.last_error);
-       dfs->dfs_last_error_pid =
-               debugfs_create_u32("last_error_pid",
-                                  S_IRUSR | S_IRGRP | S_IROTH,
-                                  dfs->dfs_root,
-                                  (u32 *)&drv->stats.last_error_pid);
-       return 0;
+       debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u32 *)&drv->stats.aes_ops);
+       debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u32 *)&drv->stats.sha256_ops);
+       debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u32 *)&drv->stats.sha512_ops);
+       debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u64 *)&drv->stats.aes_bytes);
+       debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u64 *)&drv->stats.sha256_bytes);
+       debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u64 *)&drv->stats.sha512_bytes);
+       debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u32 *)&drv->stats.errors);
+       debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u32 *)&drv->stats.last_error);
+       debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH,
+                          root, (u32 *)&drv->stats.last_error_pid);
  }
  
  void
  nx_debugfs_fini(struct nx_crypto_driver *drv)
  {
-       debugfs_remove_recursive(drv->dfs.dfs_root);
+       debugfs_remove_recursive(drv->dfs_root);
  }
  
  #endif
diff --combined drivers/crypto/sahara.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Cryptographic API.
   *
@@@ -8,6 -7,10 +8,6 @@@
   * Copyright (c) 2013 Vista Silicon S.L.
   * Author: Javier Martin <javier.martin@vista-silicon.com>
   *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as published
 - * by the Free Software Foundation.
 - *
   * Based on omap-aes.c and tegra-aes.c
   */
  
@@@ -1384,7 -1387,6 +1384,6 @@@ MODULE_DEVICE_TABLE(of, sahara_dt_ids)
  static int sahara_probe(struct platform_device *pdev)
  {
        struct sahara_dev *dev;
-       struct resource *res;
        u32 version;
        int irq;
        int err;
        platform_set_drvdata(pdev, dev);
  
        /* Get the base address */
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+       dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);
  
@@@ -1,4 -1,3 +1,4 @@@
- obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
 +# SPDX-License-Identifier: GPL-2.0-only
+ obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
  obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
  obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
index 0000000,29d2095..440c9f1
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,387 +1,387 @@@
 - * License terms:  GNU General Public License (GPL), version 2
++// SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * Copyright (C) STMicroelectronics SA 2017
+  * Author: Fabien Dessenne <fabien.dessenne@st.com>
+  */
+ #include <linux/bitrev.h>
+ #include <linux/clk.h>
+ #include <linux/crc32poly.h>
+ #include <linux/module.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <crypto/internal/hash.h>
+ #include <asm/unaligned.h>
+ #define DRIVER_NAME             "stm32-crc32"
+ #define CHKSUM_DIGEST_SIZE      4
+ #define CHKSUM_BLOCK_SIZE       1
+ /* Registers */
+ #define CRC_DR                  0x00000000
+ #define CRC_CR                  0x00000008
+ #define CRC_INIT                0x00000010
+ #define CRC_POL                 0x00000014
+ /* Registers values */
+ #define CRC_CR_RESET            BIT(0)
+ #define CRC_CR_REVERSE          (BIT(7) | BIT(6) | BIT(5))
+ #define CRC_INIT_DEFAULT        0xFFFFFFFF
+ #define CRC_AUTOSUSPEND_DELAY 50
+ struct stm32_crc {
+       struct list_head list;
+       struct device    *dev;
+       void __iomem     *regs;
+       struct clk       *clk;
+       u8               pending_data[sizeof(u32)];
+       size_t           nb_pending_bytes;
+ };
+ struct stm32_crc_list {
+       struct list_head dev_list;
+       spinlock_t       lock; /* protect dev_list */
+ };
+ static struct stm32_crc_list crc_list = {
+       .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
+       .lock     = __SPIN_LOCK_UNLOCKED(crc_list.lock),
+ };
+ struct stm32_crc_ctx {
+       u32 key;
+       u32 poly;
+ };
+ struct stm32_crc_desc_ctx {
+       u32    partial; /* crc32c: partial in first 4 bytes of that struct */
+       struct stm32_crc *crc;
+ };
+ static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
+ {
+       struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+       mctx->key = CRC_INIT_DEFAULT;
+       mctx->poly = CRC32_POLY_LE;
+       return 0;
+ }
+ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
+ {
+       struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
+       mctx->key = CRC_INIT_DEFAULT;
+       mctx->poly = CRC32C_POLY_LE;
+       return 0;
+ }
+ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
+                           unsigned int keylen)
+ {
+       struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
+       if (keylen != sizeof(u32)) {
+               crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       mctx->key = get_unaligned_le32(key);
+       return 0;
+ }
+ static int stm32_crc_init(struct shash_desc *desc)
+ {
+       struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+       struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+       struct stm32_crc *crc;
+       spin_lock_bh(&crc_list.lock);
+       list_for_each_entry(crc, &crc_list.dev_list, list) {
+               ctx->crc = crc;
+               break;
+       }
+       spin_unlock_bh(&crc_list.lock);
+       pm_runtime_get_sync(ctx->crc->dev);
+       /* Reset, set key, poly and configure in bit reverse mode */
+       writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+       writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+       writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+       /* Store partial result */
+       ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
+       ctx->crc->nb_pending_bytes = 0;
+       pm_runtime_mark_last_busy(ctx->crc->dev);
+       pm_runtime_put_autosuspend(ctx->crc->dev);
+       return 0;
+ }
+ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
+                           unsigned int length)
+ {
+       struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+       struct stm32_crc *crc = ctx->crc;
+       u32 *d32;
+       unsigned int i;
+       pm_runtime_get_sync(crc->dev);
+       if (unlikely(crc->nb_pending_bytes)) {
+               while (crc->nb_pending_bytes != sizeof(u32) && length) {
+                       /* Fill in pending data */
+                       crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+                       length--;
+               }
+               if (crc->nb_pending_bytes == sizeof(u32)) {
+                       /* Process completed pending data */
+                       writel_relaxed(*(u32 *)crc->pending_data,
+                                      crc->regs + CRC_DR);
+                       crc->nb_pending_bytes = 0;
+               }
+       }
+       d32 = (u32 *)d8;
+       for (i = 0; i < length >> 2; i++)
+               /* Process 32 bits data */
+               writel_relaxed(*(d32++), crc->regs + CRC_DR);
+       /* Store partial result */
+       ctx->partial = readl_relaxed(crc->regs + CRC_DR);
+       pm_runtime_mark_last_busy(crc->dev);
+       pm_runtime_put_autosuspend(crc->dev);
+       /* Check for pending data (non 32 bits) */
+       length &= 3;
+       if (likely(!length))
+               return 0;
+       if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
+               /* Shall not happen */
+               dev_err(crc->dev, "Pending data overflow\n");
+               return -EINVAL;
+       }
+       d8 = (const u8 *)d32;
+       for (i = 0; i < length; i++)
+               /* Store pending data */
+               crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
+       return 0;
+ }
+ static int stm32_crc_final(struct shash_desc *desc, u8 *out)
+ {
+       struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
+       struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
+       /* Send computed CRC */
+       put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
+                          ~ctx->partial : ctx->partial, out);
+       return 0;
+ }
+ static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
+                          unsigned int length, u8 *out)
+ {
+       return stm32_crc_update(desc, data, length) ?:
+              stm32_crc_final(desc, out);
+ }
+ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
+                           unsigned int length, u8 *out)
+ {
+       return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
+ }
+ static struct shash_alg algs[] = {
+       /* CRC-32 */
+       {
+               .setkey         = stm32_crc_setkey,
+               .init           = stm32_crc_init,
+               .update         = stm32_crc_update,
+               .final          = stm32_crc_final,
+               .finup          = stm32_crc_finup,
+               .digest         = stm32_crc_digest,
+               .descsize       = sizeof(struct stm32_crc_desc_ctx),
+               .digestsize     = CHKSUM_DIGEST_SIZE,
+               .base           = {
+                       .cra_name               = "crc32",
+                       .cra_driver_name        = DRIVER_NAME,
+                       .cra_priority           = 200,
+                       .cra_flags              = CRYPTO_ALG_OPTIONAL_KEY,
+                       .cra_blocksize          = CHKSUM_BLOCK_SIZE,
+                       .cra_alignmask          = 3,
+                       .cra_ctxsize            = sizeof(struct stm32_crc_ctx),
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = stm32_crc32_cra_init,
+               }
+       },
+       /* CRC-32Castagnoli */
+       {
+               .setkey         = stm32_crc_setkey,
+               .init           = stm32_crc_init,
+               .update         = stm32_crc_update,
+               .final          = stm32_crc_final,
+               .finup          = stm32_crc_finup,
+               .digest         = stm32_crc_digest,
+               .descsize       = sizeof(struct stm32_crc_desc_ctx),
+               .digestsize     = CHKSUM_DIGEST_SIZE,
+               .base           = {
+                       .cra_name               = "crc32c",
+                       .cra_driver_name        = DRIVER_NAME,
+                       .cra_priority           = 200,
+                       .cra_flags              = CRYPTO_ALG_OPTIONAL_KEY,
+                       .cra_blocksize          = CHKSUM_BLOCK_SIZE,
+                       .cra_alignmask          = 3,
+                       .cra_ctxsize            = sizeof(struct stm32_crc_ctx),
+                       .cra_module             = THIS_MODULE,
+                       .cra_init               = stm32_crc32c_cra_init,
+               }
+       }
+ };
+ static int stm32_crc_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+       struct stm32_crc *crc;
+       struct resource *res;
+       int ret;
+       crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
+       if (!crc)
+               return -ENOMEM;
+       crc->dev = dev;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       crc->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(crc->regs)) {
+               dev_err(dev, "Cannot map CRC IO\n");
+               return PTR_ERR(crc->regs);
+       }
+       crc->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(crc->clk)) {
+               dev_err(dev, "Could not get clock\n");
+               return PTR_ERR(crc->clk);
+       }
+       ret = clk_prepare_enable(crc->clk);
+       if (ret) {
+               dev_err(crc->dev, "Failed to enable clock\n");
+               return ret;
+       }
+       pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       platform_set_drvdata(pdev, crc);
+       spin_lock(&crc_list.lock);
+       list_add(&crc->list, &crc_list.dev_list);
+       spin_unlock(&crc_list.lock);
+       ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+       if (ret) {
+               dev_err(dev, "Failed to register\n");
+               clk_disable_unprepare(crc->clk);
+               return ret;
+       }
+       dev_info(dev, "Initialized\n");
+       pm_runtime_put_sync(dev);
+       return 0;
+ }
+ static int stm32_crc_remove(struct platform_device *pdev)
+ {
+       struct stm32_crc *crc = platform_get_drvdata(pdev);
+       int ret = pm_runtime_get_sync(crc->dev);
+       if (ret < 0)
+               return ret;
+       spin_lock(&crc_list.lock);
+       list_del(&crc->list);
+       spin_unlock(&crc_list.lock);
+       crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+       pm_runtime_disable(crc->dev);
+       pm_runtime_put_noidle(crc->dev);
+       clk_disable_unprepare(crc->clk);
+       return 0;
+ }
+ #ifdef CONFIG_PM
+ static int stm32_crc_runtime_suspend(struct device *dev)
+ {
+       struct stm32_crc *crc = dev_get_drvdata(dev);
+       clk_disable_unprepare(crc->clk);
+       return 0;
+ }
+ static int stm32_crc_runtime_resume(struct device *dev)
+ {
+       struct stm32_crc *crc = dev_get_drvdata(dev);
+       int ret;
+       ret = clk_prepare_enable(crc->clk);
+       if (ret) {
+               dev_err(crc->dev, "Failed to prepare_enable clock\n");
+               return ret;
+       }
+       return 0;
+ }
+ #endif
+ static const struct dev_pm_ops stm32_crc_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
+                          stm32_crc_runtime_resume, NULL)
+ };
+ static const struct of_device_id stm32_dt_ids[] = {
+       { .compatible = "st,stm32f7-crc", },
+       {},
+ };
+ MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+ static struct platform_driver stm32_crc_driver = {
+       .probe  = stm32_crc_probe,
+       .remove = stm32_crc_remove,
+       .driver = {
+               .name           = DRIVER_NAME,
+               .pm             = &stm32_crc_pm_ops,
+               .of_match_table = stm32_dt_ids,
+       },
+ };
+ module_platform_driver(stm32_crc_driver);
+ MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+ MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
+ MODULE_LICENSE("GPL");
@@@ -1,9 -1,23 +1,9 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * This file is part of STM32 Crypto driver for Linux.
   *
   * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
   * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 - *
 - * License terms: GPL V2.0.
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License version 2 as published by
 - * the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 - * details.
 - *
 - * You should have received a copy of the GNU General Public License along with
 - * this program. If not, see <http://www.gnu.org/licenses/>.
 - *
   */
  
  #include <linux/clk.h>
@@@ -349,7 -363,7 +349,7 @@@ static int stm32_hash_xmit_cpu(struct s
                return -ETIMEDOUT;
  
        if ((hdev->flags & HASH_FLAGS_HMAC) &&
-           (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
+           (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
                hdev->flags |= HASH_FLAGS_HMAC_KEY;
                stm32_hash_write_key(hdev);
                if (stm32_hash_wait_busy(hdev))
@@@ -447,8 -461,8 +447,8 @@@ static int stm32_hash_xmit_dma(struct s
  
        dma_async_issue_pending(hdev->dma_lch);
  
-       if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
-                                                      msecs_to_jiffies(100)))
+       if (!wait_for_completion_timeout(&hdev->dma_completion,
+                                        msecs_to_jiffies(100)))
                err = -ETIMEDOUT;
  
        if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
   *
@@@ -9,10 -8,15 +9,10 @@@
   * Add support also for DES and 3DES in CBC and ECB mode.
   *
   * You could find the datasheet in Documentation/arm/sunxi/README
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License as published by
 - * the Free Software Foundation; either version 2 of the License, or
 - * (at your option) any later version.
   */
  #include "sun4i-ss.h"
  
- static int sun4i_ss_opti_poll(struct skcipher_request *areq)
+ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
  {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
@@@ -114,6 -118,29 +114,29 @@@ release_ss
        return err;
  }
  
+ /* Software-fallback path: run the request through the fallback skcipher
+  * when the hardware cannot process it.  noinline_for_stack keeps the
+  * on-stack subrequest out of the caller's (already large) stack frame.
+  */
+ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
+ {
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+       struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+       struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+       SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+       int err;
+ 
+       /* Mirror the original request (src/dst/len/iv) onto the subrequest. */
+       skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+       skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+                                     NULL);
+       skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+                                  areq->cryptlen, areq->iv);
+       /* Direction is recorded in the request context mode bits. */
+       if (ctx->mode & SS_DECRYPTION)
+               err = crypto_skcipher_decrypt(subreq);
+       else
+               err = crypto_skcipher_encrypt(subreq);
+       /* Wipe the on-stack subrequest (may hold sensitive state). */
+       skcipher_request_zero(subreq);
+       return err;
+ }
  /* Generic function that support SG with size not multiple of 4 */
  static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
  {
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo;    /* offset for in and out */
-       char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
-       char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
        unsigned int ob = 0;    /* offset in buf */
        unsigned int obo = 0;   /* offset in bufo*/
        unsigned int obl = 0;   /* length of data in bufo */
        if (no_chunk == 1 && !need_fallback)
                return sun4i_ss_opti_poll(areq);
  
-       if (need_fallback) {
-               SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
-               skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
-               skcipher_request_set_callback(subreq, areq->base.flags, NULL,
-                                             NULL);
-               skcipher_request_set_crypt(subreq, areq->src, areq->dst,
-                                          areq->cryptlen, areq->iv);
-               if (ctx->mode & SS_DECRYPTION)
-                       err = crypto_skcipher_decrypt(subreq);
-               else
-                       err = crypto_skcipher_encrypt(subreq);
-               skcipher_request_zero(subreq);
-               return err;
-       }
+       if (need_fallback)
+               return sun4i_ss_cipher_poll_fallback(areq);
  
        spin_lock_irqsave(&ss->slock, flags);
  
  
        while (oleft) {
                if (ileft) {
+                       char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
                        /*
                         * todo is the number of consecutive 4byte word that we
                         * can read from current SG
                                oo = 0;
                        }
                } else {
+                       char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
                        /*
                         * read obl bytes in bufo, we read at maximum for
                         * emptying the device
diff --combined drivers/crypto/talitos.c
@@@ -1,4 -1,4 +1,4 @@@
 -// SPDX-License-Identifier: GPL-2.0+
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * talitos - Freescale Integrated Security Engine (SEC) device driver
   *
@@@ -265,11 -265,11 +265,11 @@@ static int init_device(struct device *d
   * callback must check err and feedback in descriptor header
   * for device processing status.
   */
- int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
-                  void (*callback)(struct device *dev,
-                                   struct talitos_desc *desc,
-                                   void *context, int error),
-                  void *context)
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+                         void (*callback)(struct device *dev,
+                                          struct talitos_desc *desc,
+                                          void *context, int error),
+                         void *context)
  {
        struct talitos_private *priv = dev_get_drvdata(dev);
        struct talitos_request *request;
  
        return -EINPROGRESS;
  }
- EXPORT_SYMBOL(talitos_submit);
+ /* Return the header word to inspect for a completed request.
+  * SEC2+: the 32-bit hdr of the descriptor itself.
+  * SEC1 without chaining: the hdr1 field of the descriptor.
+  * SEC1 with a chained next_desc: the hdr1 of the second descriptor,
+  * which lives just past the DMA area of the enclosing talitos_edesc.
+  */
+ static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
+ {
+       struct talitos_edesc *edesc;
+ 
+       if (!is_sec1)
+               return request->desc->hdr;
+ 
+       if (!request->desc->next_desc)
+               return request->desc->hdr1;
+ 
+       edesc = container_of(request->desc, struct talitos_edesc, desc);
+ 
+       return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
+ }
  
  /*
   * process what was done, notify callback of error if not
@@@ -342,12 -356,7 +356,7 @@@ static void flush_channel(struct devic
  
                /* descriptors with their done bits set don't get the error */
                rmb();
-               if (!is_sec1)
-                       hdr = request->desc->hdr;
-               else if (request->desc->next_desc)
-                       hdr = (request->desc + 1)->hdr1;
-               else
-                       hdr = request->desc->hdr1;
+               hdr = get_request_hdr(request, is_sec1);
  
                if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
                        status = 0;
@@@ -477,8 -486,14 +486,14 @@@ static u32 current_desc_hdr(struct devi
                }
        }
  
-       if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
-               return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+       if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
+               struct talitos_edesc *edesc;
+               edesc = container_of(priv->chan[ch].fifo[iter].desc,
+                                    struct talitos_edesc, desc);
+               return ((struct talitos_desc *)
+                       (edesc->buf + edesc->dma_len))->hdr;
+       }
  
        return priv->chan[ch].fifo[iter].desc->hdr;
  }
@@@ -824,7 -839,11 +839,11 @@@ static void talitos_unregister_rng(stru
   * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
   */
  #define TALITOS_CRA_PRIORITY_AEAD_HSNA        (TALITOS_CRA_PRIORITY - 1)
+ #ifdef CONFIG_CRYPTO_DEV_TALITOS2
  #define TALITOS_MAX_KEY_SIZE          (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+ #else
+ #define TALITOS_MAX_KEY_SIZE          (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
+ #endif
  #define TALITOS_MAX_IV_LENGTH         16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
  
  struct talitos_ctx {
@@@ -948,36 -967,6 +967,6 @@@ badkey
        goto out;
  }
  
- /*
-  * talitos_edesc - s/w-extended descriptor
-  * @src_nents: number of segments in input scatterlist
-  * @dst_nents: number of segments in output scatterlist
-  * @icv_ool: whether ICV is out-of-line
-  * @iv_dma: dma address of iv for checking continuity and link table
-  * @dma_len: length of dma mapped link_tbl space
-  * @dma_link_tbl: bus physical address of link_tbl/buf
-  * @desc: h/w descriptor
-  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
-  * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
-  *
-  * if decrypting (with authcheck), or either one of src_nents or dst_nents
-  * is greater than 1, an integrity check value is concatenated to the end
-  * of link_tbl data
-  */
- struct talitos_edesc {
-       int src_nents;
-       int dst_nents;
-       bool icv_ool;
-       dma_addr_t iv_dma;
-       int dma_len;
-       dma_addr_t dma_link_tbl;
-       struct talitos_desc desc;
-       union {
-               struct talitos_ptr link_tbl[0];
-               u8 buf[0];
-       };
- };
  static void talitos_sg_unmap(struct device *dev,
                             struct talitos_edesc *edesc,
                             struct scatterlist *src,
  
  static void ipsec_esp_unmap(struct device *dev,
                            struct talitos_edesc *edesc,
-                           struct aead_request *areq)
+                           struct aead_request *areq, bool encrypt)
  {
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
+       unsigned int authsize = crypto_aead_authsize(aead);
+       unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
        bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
  
                                         DMA_FROM_DEVICE);
        unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
  
-       talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
-                        areq->assoclen);
+       talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
+                        cryptlen + authsize, areq->assoclen);
  
        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                unsigned int dst_nents = edesc->dst_nents ? : 1;
  
                sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
-                                  areq->assoclen + areq->cryptlen - ivsize);
+                                  areq->assoclen + cryptlen - ivsize);
        }
  }
  
@@@ -1043,31 -1034,14 +1034,14 @@@ static void ipsec_esp_encrypt_done(stru
                                   struct talitos_desc *desc, void *context,
                                   int err)
  {
-       struct talitos_private *priv = dev_get_drvdata(dev);
-       bool is_sec1 = has_ftr_sec1(priv);
        struct aead_request *areq = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-       unsigned int authsize = crypto_aead_authsize(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
        struct talitos_edesc *edesc;
-       struct scatterlist *sg;
-       void *icvdata;
  
        edesc = container_of(desc, struct talitos_edesc, desc);
  
-       ipsec_esp_unmap(dev, edesc, areq);
-       /* copy the generated ICV to dst */
-       if (edesc->icv_ool) {
-               if (is_sec1)
-                       icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
-               else
-                       icvdata = &edesc->link_tbl[edesc->src_nents +
-                                                  edesc->dst_nents + 2];
-               sg = sg_last(areq->dst, edesc->dst_nents);
-               memcpy((char *)sg_virt(sg) + sg->length - authsize,
-                      icvdata, authsize);
-       }
+       ipsec_esp_unmap(dev, edesc, areq, true);
  
        dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
  
@@@ -1084,32 -1058,16 +1058,16 @@@ static void ipsec_esp_decrypt_swauth_do
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_edesc *edesc;
-       struct scatterlist *sg;
        char *oicv, *icv;
-       struct talitos_private *priv = dev_get_drvdata(dev);
-       bool is_sec1 = has_ftr_sec1(priv);
  
        edesc = container_of(desc, struct talitos_edesc, desc);
  
-       ipsec_esp_unmap(dev, edesc, req);
+       ipsec_esp_unmap(dev, edesc, req, false);
  
        if (!err) {
                /* auth check */
-               sg = sg_last(req->dst, edesc->dst_nents ? : 1);
-               icv = (char *)sg_virt(sg) + sg->length - authsize;
-               if (edesc->dma_len) {
-                       if (is_sec1)
-                               oicv = (char *)&edesc->dma_link_tbl +
-                                              req->assoclen + req->cryptlen;
-                       else
-                               oicv = (char *)
-                                      &edesc->link_tbl[edesc->src_nents +
-                                                       edesc->dst_nents + 2];
-                       if (edesc->icv_ool)
-                               icv = oicv + authsize;
-               } else
-                       oicv = (char *)&edesc->link_tbl[0];
+               oicv = edesc->buf + edesc->dma_len;
+               icv = oicv - authsize;
  
                err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
        }
@@@ -1128,7 -1086,7 +1086,7 @@@ static void ipsec_esp_decrypt_hwauth_do
  
        edesc = container_of(desc, struct talitos_edesc, desc);
  
-       ipsec_esp_unmap(dev, edesc, req);
+       ipsec_esp_unmap(dev, edesc, req, false);
  
        /* check ICV auth status */
        if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
   * stop at cryptlen bytes
   */
  static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
-                                unsigned int offset, int cryptlen,
+                                unsigned int offset, int datalen, int elen,
                                 struct talitos_ptr *link_tbl_ptr)
  {
-       int n_sg = sg_count;
+       int n_sg = elen ? sg_count + 1 : sg_count;
        int count = 0;
+       int cryptlen = datalen + elen;
  
        while (cryptlen && sg && n_sg--) {
                unsigned int len = sg_dma_len(sg);
                if (len > cryptlen)
                        len = cryptlen;
  
+               if (datalen > 0 && len > datalen) {
+                       to_talitos_ptr(link_tbl_ptr + count,
+                                      sg_dma_address(sg) + offset, datalen, 0);
+                       to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+                       count++;
+                       len -= datalen;
+                       offset += datalen;
+               }
                to_talitos_ptr(link_tbl_ptr + count,
                               sg_dma_address(sg) + offset, len, 0);
                to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
                count++;
                cryptlen -= len;
+               datalen -= len;
                offset = 0;
  
  next:
        /* tag end of link table */
        if (count > 0)
                to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
-                                      DESC_PTR_LNKTBL_RETURN, 0);
+                                      DESC_PTR_LNKTBL_RET, 0);
  
        return count;
  }
  static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
                              unsigned int len, struct talitos_edesc *edesc,
                              struct talitos_ptr *ptr, int sg_count,
-                             unsigned int offset, int tbl_off, int elen)
+                             unsigned int offset, int tbl_off, int elen,
+                             bool force)
  {
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
                return 1;
        }
        to_talitos_ptr_ext_set(ptr, elen, is_sec1);
-       if (sg_count == 1) {
+       if (sg_count == 1 && !force) {
                to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
                return sg_count;
        }
                to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
                return sg_count;
        }
-       sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
+       sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
                                         &edesc->link_tbl[tbl_off]);
-       if (sg_count == 1) {
+       if (sg_count == 1 && !force) {
                /* Only one segment now, so no link tbl needed*/
                copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
                return sg_count;
@@@ -1224,13 -1193,14 +1193,14 @@@ static int talitos_sg_map(struct devic
                          unsigned int offset, int tbl_off)
  {
        return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
-                                 tbl_off, 0);
+                                 tbl_off, 0, false);
  }
  
  /*
   * fill in and submit ipsec_esp descriptor
   */
  static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+                    bool encrypt,
                     void (*callback)(struct device *dev,
                                      struct talitos_desc *desc,
                                      void *context, int error))
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
-       unsigned int cryptlen = areq->cryptlen;
+       unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        int tbl_off = 0;
        int sg_count, ret;
        bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
        struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
+       dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
  
        /* hmac key */
        to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
                elen = authsize;
  
        ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
-                                sg_count, areq->assoclen, tbl_off, elen);
+                                sg_count, areq->assoclen, tbl_off, elen,
+                                false);
  
        if (ret > 1) {
                tbl_off += ret;
                        dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
        }
  
-       ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
-                            sg_count, areq->assoclen, tbl_off);
-       if (is_ipsec_esp)
-               to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
-       /* ICV data */
-       if (ret > 1) {
-               tbl_off += ret;
-               edesc->icv_ool = true;
-               sync_needed = true;
-               if (is_ipsec_esp) {
-                       struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
-                       int offset = (edesc->src_nents + edesc->dst_nents + 2) *
-                                    sizeof(struct talitos_ptr) + authsize;
-                       /* Add an entry to the link table for ICV data */
-                       to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
-                       to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
-                                              is_sec1);
+       if (is_ipsec_esp && encrypt)
+               elen = authsize;
+       else
+               elen = 0;
+       ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+                                sg_count, areq->assoclen, tbl_off, elen,
+                                is_ipsec_esp && !encrypt);
+       tbl_off += ret;
  
-                       /* icv data follows link tables */
-                       to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
-                                      authsize, is_sec1);
-               } else {
-                       dma_addr_t addr = edesc->dma_link_tbl;
+       if (!encrypt && is_ipsec_esp) {
+               struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
  
-                       if (is_sec1)
-                               addr += areq->assoclen + cryptlen;
-                       else
-                               addr += sizeof(struct talitos_ptr) * tbl_off;
+               /* Add an entry to the link table for ICV data */
+               to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
+               to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
  
-                       to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
-               }
+               /* icv data follows link tables */
+               to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
+               to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
+               sync_needed = true;
+       } else if (!encrypt) {
+               to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
+               sync_needed = true;
        } else if (!is_ipsec_esp) {
-               ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
-                                    &desc->ptr[6], sg_count, areq->assoclen +
-                                                             cryptlen,
-                                    tbl_off);
-               if (ret > 1) {
-                       tbl_off += ret;
-                       edesc->icv_ool = true;
-                       sync_needed = true;
-               } else {
-                       edesc->icv_ool = false;
-               }
-       } else {
-               edesc->icv_ool = false;
+               talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
+                              sg_count, areq->assoclen + cryptlen, tbl_off);
        }
  
        /* iv out */
  
        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
-               ipsec_esp_unmap(dev, edesc, areq);
+               ipsec_esp_unmap(dev, edesc, areq, encrypt);
                kfree(edesc);
        }
        return ret;
@@@ -1435,18 -1384,18 +1384,18 @@@ static struct talitos_edesc *talitos_ed
         * and space for two sets of ICVs (stashed and generated)
         */
        alloc_len = sizeof(struct talitos_edesc);
-       if (src_nents || dst_nents) {
+       if (src_nents || dst_nents || !encrypt) {
                if (is_sec1)
                        dma_len = (src_nents ? src_len : 0) +
-                                 (dst_nents ? dst_len : 0);
+                                 (dst_nents ? dst_len : 0) + authsize;
                else
                        dma_len = (src_nents + dst_nents + 2) *
-                                 sizeof(struct talitos_ptr) + authsize * 2;
+                                 sizeof(struct talitos_ptr) + authsize;
                alloc_len += dma_len;
        } else {
                dma_len = 0;
-               alloc_len += icv_stashing ? authsize : 0;
        }
+       alloc_len += icv_stashing ? authsize : 0;
  
        /* if its a ahash, add space for a second desc next to the first one */
        if (is_sec1 && !dst)
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
        edesc->dma_len = dma_len;
-       if (dma_len) {
-               void *addr = &edesc->link_tbl[0];
-               if (is_sec1 && !dst)
-                       addr += sizeof(struct talitos_desc);
-               edesc->dma_link_tbl = dma_map_single(dev, addr,
+       if (dma_len)
+               edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
                                                     edesc->dma_len,
                                                     DMA_BIDIRECTIONAL);
-       }
        return edesc;
  }
  
@@@ -1485,9 -1430,10 +1430,10 @@@ static struct talitos_edesc *aead_edesc
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
+       unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
  
        return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
-                                  iv, areq->assoclen, areq->cryptlen,
+                                  iv, areq->assoclen, cryptlen,
                                   authsize, ivsize, icv_stashing,
                                   areq->base.flags, encrypt);
  }
@@@ -1506,7 -1452,7 +1452,7 @@@ static int aead_encrypt(struct aead_req
        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
  
-       return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+       return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
  }
  
  static int aead_decrypt(struct aead_request *req)
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
        struct talitos_edesc *edesc;
-       struct scatterlist *sg;
        void *icvdata;
  
-       req->cryptlen -= authsize;
        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, req->iv, 1, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);
  
-       if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+       if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+           (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
            ((!edesc->src_nents && !edesc->dst_nents) ||
             priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
  
  
                /* reset integrity check result bits */
  
-               return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+               return ipsec_esp(edesc, req, false,
+                                ipsec_esp_decrypt_hwauth_done);
        }
  
        /* Have to check the ICV with software */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
  
        /* stash incoming ICV for later cmp with ICV generated by the h/w */
-       if (edesc->dma_len)
-               icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
-                                                  edesc->dst_nents + 2];
-       else
-               icvdata = &edesc->link_tbl[0];
+       icvdata = edesc->buf + edesc->dma_len;
  
-       sg = sg_last(req->src, edesc->src_nents ? : 1);
+       sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+                          req->assoclen + req->cryptlen - authsize);
  
-       memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
-       return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+       return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
  }
  
  static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@@ -1605,6 -1545,18 +1545,18 @@@ static int ablkcipher_des3_setkey(struc
        return ablkcipher_setkey(cipher, key, keylen);
  }
  
+ /* AES setkey wrapper: accept only the three valid AES key sizes
+  * (128/192/256 bits), rejecting anything else with BAD_KEY_LEN.
+  */
+ static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+                                 const u8 *key, unsigned int keylen)
+ {
+       if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+           keylen == AES_KEYSIZE_256)
+               return ablkcipher_setkey(cipher, key, keylen);
+ 
+       crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ 
+       return -EINVAL;
+ }
  static void common_nonsnoop_unmap(struct device *dev,
                                  struct talitos_edesc *edesc,
                                  struct ablkcipher_request *areq)
@@@ -1624,11 -1576,15 +1576,15 @@@ static void ablkcipher_done(struct devi
                            int err)
  {
        struct ablkcipher_request *areq = context;
+       struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+       struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+       unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
        struct talitos_edesc *edesc;
  
        edesc = container_of(desc, struct talitos_edesc, desc);
  
        common_nonsnoop_unmap(dev, edesc, areq);
+       memcpy(areq->info, ctx->iv, ivsize);
  
        kfree(edesc);
  
@@@ -1723,6 -1679,14 +1679,14 @@@ static int ablkcipher_encrypt(struct ab
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+       if (!areq->nbytes)
+               return 0;
+       if (areq->nbytes % blocksize)
+               return -EINVAL;
  
        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, true);
@@@ -1740,6 -1704,14 +1704,14 @@@ static int ablkcipher_decrypt(struct ab
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+       if (!areq->nbytes)
+               return 0;
+       if (areq->nbytes % blocksize)
+               return -EINVAL;
  
        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, false);
@@@ -1759,14 -1731,16 +1731,16 @@@ static void common_nonsnoop_hash_unmap(
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        struct talitos_desc *desc = &edesc->desc;
-       struct talitos_desc *desc2 = desc + 1;
+       struct talitos_desc *desc2 = (struct talitos_desc *)
+                                    (edesc->buf + edesc->dma_len);
  
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
        if (desc->next_desc &&
            desc->ptr[5].ptr != desc2->ptr[5].ptr)
                unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
  
-       talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+       if (req_ctx->psrc)
+               talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
  
        /* When using hashctx-in, must unmap it. */
        if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@@ -1833,7 -1807,6 +1807,6 @@@ static void talitos_handle_buggy_hash(s
  
  static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                                struct ahash_request *areq, unsigned int length,
-                               unsigned int offset,
                                void (*callback) (struct device *dev,
                                                  struct talitos_desc *desc,
                                                  void *context, int error))
  
        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
-               sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
-                                  edesc->buf + sizeof(struct talitos_desc),
-                                  length, req_ctx->nbuf);
+               sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
        else if (length)
                sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
                                      DMA_TO_DEVICE);
                                       DMA_TO_DEVICE);
        } else {
                sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
-                                         &desc->ptr[3], sg_count, offset, 0);
+                                         &desc->ptr[3], sg_count, 0, 0);
                if (sg_count > 1)
                        sync_needed = true;
        }
                talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
  
        if (is_sec1 && req_ctx->nbuf && length) {
-               struct talitos_desc *desc2 = desc + 1;
+               struct talitos_desc *desc2 = (struct talitos_desc *)
+                                            (edesc->buf + edesc->dma_len);
                dma_addr_t next_desc;
  
                memset(desc2, 0, sizeof(*desc2));
                                                      DMA_TO_DEVICE);
                copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
                sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
-                                         &desc2->ptr[3], sg_count, offset, 0);
+                                         &desc2->ptr[3], sg_count, 0, 0);
                if (sg_count > 1)
                        sync_needed = true;
                copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
@@@ -2043,7 -2015,6 +2015,6 @@@ static int ahash_process_req(struct aha
        struct device *dev = ctx->dev;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
-       int offset = 0;
        u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
  
        if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
                        sg_chain(req_ctx->bufsl, 2, areq->src);
                req_ctx->psrc = req_ctx->bufsl;
        } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+               int offset;
                if (nbytes_to_hash > blocksize)
                        offset = blocksize - req_ctx->nbuf;
                else
                sg_copy_to_buffer(areq->src, nents,
                                  ctx_buf + req_ctx->nbuf, offset);
                req_ctx->nbuf += offset;
-               req_ctx->psrc = areq->src;
+               req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+                                                offset);
        } else
                req_ctx->psrc = areq->src;
  
        if (ctx->keylen && (req_ctx->first || req_ctx->last))
                edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
  
-       return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
-                                   ahash_done);
+       return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
  }
  
  static int ahash_update(struct ahash_request *areq)
@@@ -2339,7 -2312,7 +2312,7 @@@ static struct talitos_alg_template driv
                        .base = {
                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha1-"
-                                                  "cbc-aes-talitos",
+                                                  "cbc-aes-talitos-hsna",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                                .cra_name = "authenc(hmac(sha1),"
                                            "cbc(des3_ede))",
                                .cra_driver_name = "authenc-hmac-sha1-"
-                                                  "cbc-3des-talitos",
+                                                  "cbc-3des-talitos-hsna",
                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha224-"
-                                                  "cbc-aes-talitos",
+                                                  "cbc-aes-talitos-hsna",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                                .cra_name = "authenc(hmac(sha224),"
                                            "cbc(des3_ede))",
                                .cra_driver_name = "authenc-hmac-sha224-"
-                                                  "cbc-3des-talitos",
+                                                  "cbc-3des-talitos-hsna",
                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                        .base = {
                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha256-"
-                                                  "cbc-aes-talitos",
+                                                  "cbc-aes-talitos-hsna",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                                .cra_name = "authenc(hmac(sha256),"
                                            "cbc(des3_ede))",
                                .cra_driver_name = "authenc-hmac-sha256-"
-                                                  "cbc-3des-talitos",
+                                                  "cbc-3des-talitos-hsna",
                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                        .base = {
                                .cra_name = "authenc(hmac(md5),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-md5-"
-                                                  "cbc-aes-talitos",
+                                                  "cbc-aes-talitos-hsna",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                        .base = {
                                .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
                                .cra_driver_name = "authenc-hmac-md5-"
-                                                  "cbc-3des-talitos",
+                                                  "cbc-3des-talitos-hsna",
                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                                .cra_flags = CRYPTO_ALG_ASYNC,
                        },
                        .cra_ablkcipher = {
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
-                               .ivsize = AES_BLOCK_SIZE,
+                               .setkey = ablkcipher_aes_setkey,
                        }
                },
                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
+                               .setkey = ablkcipher_aes_setkey,
                        }
                },
                .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
                .alg.crypto = {
                        .cra_name = "ctr(aes)",
                        .cra_driver_name = "ctr-aes-talitos",
-                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_blocksize = 1,
                        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                     CRYPTO_ALG_ASYNC,
                        .cra_ablkcipher = {
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
+                               .setkey = ablkcipher_aes_setkey,
                        }
                },
                .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
                        .cra_ablkcipher = {
                                .min_keysize = DES_KEY_SIZE,
                                .max_keysize = DES_KEY_SIZE,
-                               .ivsize = DES_BLOCK_SIZE,
                                .setkey = ablkcipher_des_setkey,
                        }
                },
                        .cra_ablkcipher = {
                                .min_keysize = DES3_EDE_KEY_SIZE,
                                .max_keysize = DES3_EDE_KEY_SIZE,
-                               .ivsize = DES3_EDE_BLOCK_SIZE,
                                .setkey = ablkcipher_des3_setkey,
                        }
                },
@@@ -3270,7 -3243,10 +3243,10 @@@ static struct talitos_crypto_alg *talit
                alg->cra_priority = t_alg->algt.priority;
        else
                alg->cra_priority = TALITOS_CRA_PRIORITY;
-       alg->cra_alignmask = 0;
+       if (has_ftr_sec1(priv))
+               alg->cra_alignmask = 3;
+       else
+               alg->cra_alignmask = 0;
        alg->cra_ctxsize = sizeof(struct talitos_ctx);
        alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
  
@@@ -3418,7 -3394,7 +3394,7 @@@ static int talitos_probe(struct platfor
        if (err)
                goto err_out;
  
-       if (of_device_is_compatible(np, "fsl,sec1.0")) {
+       if (has_ftr_sec1(priv)) {
                if (priv->num_channels == 1)
                        tasklet_init(&priv->done_task[0], talitos1_done_ch0,
                                     (unsigned long)dev);
@@@ -1,4 -1,4 +1,4 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /**
   * AES CBC routines supporting VMX instructions on the Power 8
   *
@@@ -7,64 -7,52 +7,52 @@@
   * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
   */
  
- #include <linux/types.h>
- #include <linux/err.h>
- #include <linux/crypto.h>
- #include <linux/delay.h>
  #include <asm/simd.h>
  #include <asm/switch_to.h>
  #include <crypto/aes.h>
  #include <crypto/internal/simd.h>
- #include <crypto/scatterwalk.h>
- #include <crypto/skcipher.h>
+ #include <crypto/internal/skcipher.h>
  
  #include "aesp8-ppc.h"
  
  struct p8_aes_cbc_ctx {
-       struct crypto_sync_skcipher *fallback;
+       struct crypto_skcipher *fallback;
        struct aes_key enc_key;
        struct aes_key dec_key;
  };
  
- static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+ static int p8_aes_cbc_init(struct crypto_skcipher *tfm)
  {
-       const char *alg = crypto_tfm_alg_name(tfm);
-       struct crypto_sync_skcipher *fallback;
-       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-       fallback = crypto_alloc_sync_skcipher(alg, 0,
-                                             CRYPTO_ALG_NEED_FALLBACK);
+       struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_skcipher *fallback;
  
+       fallback = crypto_alloc_skcipher("cbc(aes)", 0,
+                                        CRYPTO_ALG_NEED_FALLBACK |
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(fallback)) {
-               printk(KERN_ERR
-                      "Failed to allocate transformation for '%s': %ld\n",
-                      alg, PTR_ERR(fallback));
+               pr_err("Failed to allocate cbc(aes) fallback: %ld\n",
+                      PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
  
-       crypto_sync_skcipher_set_flags(
-               fallback,
-               crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+                                   crypto_skcipher_reqsize(fallback));
        ctx->fallback = fallback;
        return 0;
  }
  
- static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
+ static void p8_aes_cbc_exit(struct crypto_skcipher *tfm)
  {
-       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
  
-       if (ctx->fallback) {
-               crypto_free_sync_skcipher(ctx->fallback);
-               ctx->fallback = NULL;
-       }
+       crypto_free_skcipher(ctx->fallback);
  }
  
- static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+ static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
  {
+       struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;
-       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
  
        preempt_disable();
        pagefault_disable();
        pagefault_enable();
        preempt_enable();
  
-       ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+       ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
  
        return ret ? -EINVAL : 0;
  }
  
- static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
-                             struct scatterlist *dst,
-                             struct scatterlist *src, unsigned int nbytes)
+ static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc)
  {
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int ret;
-       struct blkcipher_walk walk;
-       struct p8_aes_cbc_ctx *ctx =
-               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
  
        if (!crypto_simd_usable()) {
-               SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-               skcipher_request_set_sync_tfm(req, ctx->fallback);
-               skcipher_request_set_callback(req, desc->flags, NULL, NULL);
-               skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-               ret = crypto_skcipher_encrypt(req);
-               skcipher_request_zero(req);
-       } else {
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-               ret = blkcipher_walk_virt(desc, &walk);
-               while ((nbytes = walk.nbytes)) {
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_vsx();
-                       aes_p8_cbc_encrypt(walk.src.virt.addr,
-                                          walk.dst.virt.addr,
-                                          nbytes & AES_BLOCK_MASK,
-                                          &ctx->enc_key, walk.iv, 1);
-                       disable_kernel_vsx();
-                       pagefault_enable();
-                       preempt_enable();
-                       nbytes &= AES_BLOCK_SIZE - 1;
-                       ret = blkcipher_walk_done(desc, &walk, nbytes);
-               }
+               struct skcipher_request *subreq = skcipher_request_ctx(req);
+               *subreq = *req;
+               skcipher_request_set_tfm(subreq, ctx->fallback);
+               return enc ? crypto_skcipher_encrypt(subreq) :
+                            crypto_skcipher_decrypt(subreq);
        }
  
+       ret = skcipher_walk_virt(&walk, req, false);
+       while ((nbytes = walk.nbytes) != 0) {
+               preempt_disable();
+               pagefault_disable();
+               enable_kernel_vsx();
+               aes_p8_cbc_encrypt(walk.src.virt.addr,
+                                  walk.dst.virt.addr,
+                                  round_down(nbytes, AES_BLOCK_SIZE),
+                                  enc ? &ctx->enc_key : &ctx->dec_key,
+                                  walk.iv, enc);
+               disable_kernel_vsx();
+               pagefault_enable();
+               preempt_enable();
+               ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+       }
        return ret;
  }
  
- static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
-                             struct scatterlist *dst,
-                             struct scatterlist *src, unsigned int nbytes)
+ static int p8_aes_cbc_encrypt(struct skcipher_request *req)
  {
-       int ret;
-       struct blkcipher_walk walk;
-       struct p8_aes_cbc_ctx *ctx =
-               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
-       if (!crypto_simd_usable()) {
-               SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-               skcipher_request_set_sync_tfm(req, ctx->fallback);
-               skcipher_request_set_callback(req, desc->flags, NULL, NULL);
-               skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-               ret = crypto_skcipher_decrypt(req);
-               skcipher_request_zero(req);
-       } else {
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-               ret = blkcipher_walk_virt(desc, &walk);
-               while ((nbytes = walk.nbytes)) {
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_vsx();
-                       aes_p8_cbc_encrypt(walk.src.virt.addr,
-                                          walk.dst.virt.addr,
-                                          nbytes & AES_BLOCK_MASK,
-                                          &ctx->dec_key, walk.iv, 0);
-                       disable_kernel_vsx();
-                       pagefault_enable();
-                       preempt_enable();
-                       nbytes &= AES_BLOCK_SIZE - 1;
-                       ret = blkcipher_walk_done(desc, &walk, nbytes);
-               }
-       }
-       return ret;
+       return p8_aes_cbc_crypt(req, 1);
  }
  
+ static int p8_aes_cbc_decrypt(struct skcipher_request *req)
+ {
+       return p8_aes_cbc_crypt(req, 0);
+ }
  
- struct crypto_alg p8_aes_cbc_alg = {
-       .cra_name = "cbc(aes)",
-       .cra_driver_name = "p8_aes_cbc",
-       .cra_module = THIS_MODULE,
-       .cra_priority = 2000,
-       .cra_type = &crypto_blkcipher_type,
-       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-       .cra_alignmask = 0,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
-       .cra_init = p8_aes_cbc_init,
-       .cra_exit = p8_aes_cbc_exit,
-       .cra_blkcipher = {
-                         .ivsize = AES_BLOCK_SIZE,
-                         .min_keysize = AES_MIN_KEY_SIZE,
-                         .max_keysize = AES_MAX_KEY_SIZE,
-                         .setkey = p8_aes_cbc_setkey,
-                         .encrypt = p8_aes_cbc_encrypt,
-                         .decrypt = p8_aes_cbc_decrypt,
-       },
+ struct skcipher_alg p8_aes_cbc_alg = {
+       .base.cra_name = "cbc(aes)",
+       .base.cra_driver_name = "p8_aes_cbc",
+       .base.cra_module = THIS_MODULE,
+       .base.cra_priority = 2000,
+       .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+       .base.cra_blocksize = AES_BLOCK_SIZE,
+       .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+       .setkey = p8_aes_cbc_setkey,
+       .encrypt = p8_aes_cbc_encrypt,
+       .decrypt = p8_aes_cbc_decrypt,
+       .init = p8_aes_cbc_init,
+       .exit = p8_aes_cbc_exit,
+       .min_keysize = AES_MIN_KEY_SIZE,
+       .max_keysize = AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
  };
@@@ -1,4 -1,4 +1,4 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /**
   * AES CTR routines supporting VMX instructions on the Power 8
   *
@@@ -7,62 -7,51 +7,51 @@@
   * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
   */
  
- #include <linux/types.h>
- #include <linux/err.h>
- #include <linux/crypto.h>
- #include <linux/delay.h>
  #include <asm/simd.h>
  #include <asm/switch_to.h>
  #include <crypto/aes.h>
  #include <crypto/internal/simd.h>
- #include <crypto/scatterwalk.h>
- #include <crypto/skcipher.h>
+ #include <crypto/internal/skcipher.h>
  
  #include "aesp8-ppc.h"
  
  struct p8_aes_ctr_ctx {
-       struct crypto_sync_skcipher *fallback;
+       struct crypto_skcipher *fallback;
        struct aes_key enc_key;
  };
  
- static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+ static int p8_aes_ctr_init(struct crypto_skcipher *tfm)
  {
-       const char *alg = crypto_tfm_alg_name(tfm);
-       struct crypto_sync_skcipher *fallback;
-       struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_skcipher *fallback;
  
-       fallback = crypto_alloc_sync_skcipher(alg, 0,
-                                             CRYPTO_ALG_NEED_FALLBACK);
+       fallback = crypto_alloc_skcipher("ctr(aes)", 0,
+                                        CRYPTO_ALG_NEED_FALLBACK |
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(fallback)) {
-               printk(KERN_ERR
-                      "Failed to allocate transformation for '%s': %ld\n",
-                      alg, PTR_ERR(fallback));
+               pr_err("Failed to allocate ctr(aes) fallback: %ld\n",
+                      PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
  
-       crypto_sync_skcipher_set_flags(
-               fallback,
-               crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+                                   crypto_skcipher_reqsize(fallback));
        ctx->fallback = fallback;
        return 0;
  }
  
- static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
+ static void p8_aes_ctr_exit(struct crypto_skcipher *tfm)
  {
-       struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
  
-       if (ctx->fallback) {
-               crypto_free_sync_skcipher(ctx->fallback);
-               ctx->fallback = NULL;
-       }
+       crypto_free_skcipher(ctx->fallback);
  }
  
- static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
  {
+       struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;
-       struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
  
        preempt_disable();
        pagefault_disable();
        pagefault_enable();
        preempt_enable();
  
-       ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+       ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
  
        return ret ? -EINVAL : 0;
  }
  
- static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
-                            struct blkcipher_walk *walk)
+ static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
+                            struct skcipher_walk *walk)
  {
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
  }
  
- static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
-                           struct scatterlist *dst,
-                           struct scatterlist *src, unsigned int nbytes)
+ static int p8_aes_ctr_crypt(struct skcipher_request *req)
  {
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int ret;
-       u64 inc;
-       struct blkcipher_walk walk;
-       struct p8_aes_ctr_ctx *ctx =
-               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
  
        if (!crypto_simd_usable()) {
-               SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-               skcipher_request_set_sync_tfm(req, ctx->fallback);
-               skcipher_request_set_callback(req, desc->flags, NULL, NULL);
-               skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-               ret = crypto_skcipher_encrypt(req);
-               skcipher_request_zero(req);
-       } else {
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-               ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-               while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_vsx();
-                       aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
-                                                   walk.dst.virt.addr,
-                                                   (nbytes &
-                                                    AES_BLOCK_MASK) /
-                                                   AES_BLOCK_SIZE,
-                                                   &ctx->enc_key,
-                                                   walk.iv);
-                       disable_kernel_vsx();
-                       pagefault_enable();
-                       preempt_enable();
-                       /* We need to update IV mostly for last bytes/round */
-                       inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
-                       if (inc > 0)
-                               while (inc--)
-                                       crypto_inc(walk.iv, AES_BLOCK_SIZE);
-                       nbytes &= AES_BLOCK_SIZE - 1;
-                       ret = blkcipher_walk_done(desc, &walk, nbytes);
-               }
-               if (walk.nbytes) {
-                       p8_aes_ctr_final(ctx, &walk);
-                       ret = blkcipher_walk_done(desc, &walk, 0);
-               }
+               struct skcipher_request *subreq = skcipher_request_ctx(req);
+               *subreq = *req;
+               skcipher_request_set_tfm(subreq, ctx->fallback);
+               return crypto_skcipher_encrypt(subreq);
        }
  
+       ret = skcipher_walk_virt(&walk, req, false);
+       while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+               preempt_disable();
+               pagefault_disable();
+               enable_kernel_vsx();
+               aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+                                           walk.dst.virt.addr,
+                                           nbytes / AES_BLOCK_SIZE,
+                                           &ctx->enc_key, walk.iv);
+               disable_kernel_vsx();
+               pagefault_enable();
+               preempt_enable();
+               do {
+                       crypto_inc(walk.iv, AES_BLOCK_SIZE);
+               } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE);
+               ret = skcipher_walk_done(&walk, nbytes);
+       }
+       if (nbytes) {
+               p8_aes_ctr_final(ctx, &walk);
+               ret = skcipher_walk_done(&walk, 0);
+       }
        return ret;
  }
  
- struct crypto_alg p8_aes_ctr_alg = {
-       .cra_name = "ctr(aes)",
-       .cra_driver_name = "p8_aes_ctr",
-       .cra_module = THIS_MODULE,
-       .cra_priority = 2000,
-       .cra_type = &crypto_blkcipher_type,
-       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-       .cra_alignmask = 0,
-       .cra_blocksize = 1,
-       .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
-       .cra_init = p8_aes_ctr_init,
-       .cra_exit = p8_aes_ctr_exit,
-       .cra_blkcipher = {
-                         .ivsize = AES_BLOCK_SIZE,
-                         .min_keysize = AES_MIN_KEY_SIZE,
-                         .max_keysize = AES_MAX_KEY_SIZE,
-                         .setkey = p8_aes_ctr_setkey,
-                         .encrypt = p8_aes_ctr_crypt,
-                         .decrypt = p8_aes_ctr_crypt,
-       },
+ struct skcipher_alg p8_aes_ctr_alg = {
+       .base.cra_name = "ctr(aes)",
+       .base.cra_driver_name = "p8_aes_ctr",
+       .base.cra_module = THIS_MODULE,
+       .base.cra_priority = 2000,
+       .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+       .base.cra_blocksize = 1,
+       .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+       .setkey = p8_aes_ctr_setkey,
+       .encrypt = p8_aes_ctr_crypt,
+       .decrypt = p8_aes_ctr_crypt,
+       .init = p8_aes_ctr_init,
+       .exit = p8_aes_ctr_exit,
+       .min_keysize = AES_MIN_KEY_SIZE,
+       .max_keysize = AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
+       .chunksize = AES_BLOCK_SIZE,
  };
@@@ -1,4 -1,4 +1,4 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /**
   * AES XTS routines supporting VMX In-core instructions on Power 8
   *
@@@ -7,67 -7,56 +7,56 @@@
   * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
   */
  
- #include <linux/types.h>
- #include <linux/err.h>
- #include <linux/crypto.h>
- #include <linux/delay.h>
  #include <asm/simd.h>
  #include <asm/switch_to.h>
  #include <crypto/aes.h>
  #include <crypto/internal/simd.h>
- #include <crypto/scatterwalk.h>
+ #include <crypto/internal/skcipher.h>
  #include <crypto/xts.h>
- #include <crypto/skcipher.h>
  
  #include "aesp8-ppc.h"
  
  struct p8_aes_xts_ctx {
-       struct crypto_sync_skcipher *fallback;
+       struct crypto_skcipher *fallback;
        struct aes_key enc_key;
        struct aes_key dec_key;
        struct aes_key tweak_key;
  };
  
- static int p8_aes_xts_init(struct crypto_tfm *tfm)
+ static int p8_aes_xts_init(struct crypto_skcipher *tfm)
  {
-       const char *alg = crypto_tfm_alg_name(tfm);
-       struct crypto_sync_skcipher *fallback;
-       struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct crypto_skcipher *fallback;
  
-       fallback = crypto_alloc_sync_skcipher(alg, 0,
-                                             CRYPTO_ALG_NEED_FALLBACK);
+       fallback = crypto_alloc_skcipher("xts(aes)", 0,
+                                        CRYPTO_ALG_NEED_FALLBACK |
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(fallback)) {
-               printk(KERN_ERR
-                       "Failed to allocate transformation for '%s': %ld\n",
-                       alg, PTR_ERR(fallback));
+               pr_err("Failed to allocate xts(aes) fallback: %ld\n",
+                      PTR_ERR(fallback));
                return PTR_ERR(fallback);
        }
  
-       crypto_sync_skcipher_set_flags(
-               fallback,
-               crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+                                   crypto_skcipher_reqsize(fallback));
        ctx->fallback = fallback;
        return 0;
  }
  
- static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+ static void p8_aes_xts_exit(struct crypto_skcipher *tfm)
  {
-       struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
  
-       if (ctx->fallback) {
-               crypto_free_sync_skcipher(ctx->fallback);
-               ctx->fallback = NULL;
-       }
+       crypto_free_skcipher(ctx->fallback);
  }
  
- static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+ static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
  {
+       struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;
-       struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
  
-       ret = xts_check_key(tfm, key, keylen);
+       ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;
  
        pagefault_enable();
        preempt_enable();
  
-       ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+       ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
  
        return ret ? -EINVAL : 0;
  }
  
- static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
-                           struct scatterlist *dst,
-                           struct scatterlist *src,
-                           unsigned int nbytes, int enc)
+ static int p8_aes_xts_crypt(struct skcipher_request *req, int enc)
  {
-       int ret;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        u8 tweak[AES_BLOCK_SIZE];
-       u8 *iv;
-       struct blkcipher_walk walk;
-       struct p8_aes_xts_ctx *ctx =
-               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+       int ret;
  
        if (!crypto_simd_usable()) {
-               SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
-               skcipher_request_set_sync_tfm(req, ctx->fallback);
-               skcipher_request_set_callback(req, desc->flags, NULL, NULL);
-               skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-               ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
-               skcipher_request_zero(req);
-       } else {
-               blkcipher_walk_init(&walk, dst, src, nbytes);
+               struct skcipher_request *subreq = skcipher_request_ctx(req);
+               *subreq = *req;
+               skcipher_request_set_tfm(subreq, ctx->fallback);
+               return enc ? crypto_skcipher_encrypt(subreq) :
+                            crypto_skcipher_decrypt(subreq);
+       }
+       ret = skcipher_walk_virt(&walk, req, false);
+       if (ret)
+               return ret;
+       preempt_disable();
+       pagefault_disable();
+       enable_kernel_vsx();
  
-               ret = blkcipher_walk_virt(desc, &walk);
+       aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
+       disable_kernel_vsx();
+       pagefault_enable();
+       preempt_enable();
  
+       while ((nbytes = walk.nbytes) != 0) {
                preempt_disable();
                pagefault_disable();
                enable_kernel_vsx();
-               iv = walk.iv;
-               memset(tweak, 0, AES_BLOCK_SIZE);
-               aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+               if (enc)
+                       aes_p8_xts_encrypt(walk.src.virt.addr,
+                                          walk.dst.virt.addr,
+                                          round_down(nbytes, AES_BLOCK_SIZE),
+                                          &ctx->enc_key, NULL, tweak);
+               else
+                       aes_p8_xts_decrypt(walk.src.virt.addr,
+                                          walk.dst.virt.addr,
+                                          round_down(nbytes, AES_BLOCK_SIZE),
+                                          &ctx->dec_key, NULL, tweak);
                disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
  
-               while ((nbytes = walk.nbytes)) {
-                       preempt_disable();
-                       pagefault_disable();
-                       enable_kernel_vsx();
-                       if (enc)
-                               aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                                               nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
-                       else
-                               aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                                               nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
-                       disable_kernel_vsx();
-                       pagefault_enable();
-                       preempt_enable();
-                       nbytes &= AES_BLOCK_SIZE - 1;
-                       ret = blkcipher_walk_done(desc, &walk, nbytes);
-               }
+               ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
        }
        return ret;
  }
  
- static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
-                             struct scatterlist *dst,
-                             struct scatterlist *src, unsigned int nbytes)
+ static int p8_aes_xts_encrypt(struct skcipher_request *req)
  {
-       return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+       return p8_aes_xts_crypt(req, 1);
  }
  
- static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
-                             struct scatterlist *dst,
-                             struct scatterlist *src, unsigned int nbytes)
+ static int p8_aes_xts_decrypt(struct skcipher_request *req)
  {
-       return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+       return p8_aes_xts_crypt(req, 0);
  }
  
- struct crypto_alg p8_aes_xts_alg = {
-       .cra_name = "xts(aes)",
-       .cra_driver_name = "p8_aes_xts",
-       .cra_module = THIS_MODULE,
-       .cra_priority = 2000,
-       .cra_type = &crypto_blkcipher_type,
-       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-       .cra_alignmask = 0,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
-       .cra_init = p8_aes_xts_init,
-       .cra_exit = p8_aes_xts_exit,
-       .cra_blkcipher = {
-                       .ivsize = AES_BLOCK_SIZE,
-                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
-                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
-                       .setkey  = p8_aes_xts_setkey,
-                       .encrypt = p8_aes_xts_encrypt,
-                       .decrypt = p8_aes_xts_decrypt,
-       }
+ struct skcipher_alg p8_aes_xts_alg = {
+       .base.cra_name = "xts(aes)",
+       .base.cra_driver_name = "p8_aes_xts",
+       .base.cra_module = THIS_MODULE,
+       .base.cra_priority = 2000,
+       .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+       .base.cra_blocksize = AES_BLOCK_SIZE,
+       .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+       .setkey = p8_aes_xts_setkey,
+       .encrypt = p8_aes_xts_encrypt,
+       .decrypt = p8_aes_xts_decrypt,
+       .init = p8_aes_xts_init,
+       .exit = p8_aes_xts_exit,
+       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+       .max_keysize = 2 * AES_MAX_KEY_SIZE,
+       .ivsize = AES_BLOCK_SIZE,
  };
diff --combined drivers/crypto/vmx/vmx.c
@@@ -1,4 -1,4 +1,4 @@@
 -// SPDX-License-Identifier: GPL-2.0
 +// SPDX-License-Identifier: GPL-2.0-only
  /**
   * Routines supporting VMX instructions on the Power 8
   *
  #include <linux/crypto.h>
  #include <asm/cputable.h>
  #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
  
  extern struct shash_alg p8_ghash_alg;
  extern struct crypto_alg p8_aes_alg;
- extern struct crypto_alg p8_aes_cbc_alg;
- extern struct crypto_alg p8_aes_ctr_alg;
- extern struct crypto_alg p8_aes_xts_alg;
- static struct crypto_alg *algs[] = {
-       &p8_aes_alg,
-       &p8_aes_cbc_alg,
-       &p8_aes_ctr_alg,
-       &p8_aes_xts_alg,
-       NULL,
- };
+ extern struct skcipher_alg p8_aes_cbc_alg;
+ extern struct skcipher_alg p8_aes_ctr_alg;
+ extern struct skcipher_alg p8_aes_xts_alg;
  
  static int __init p8_init(void)
  {
-       int ret = 0;
-       struct crypto_alg **alg_it;
+       int ret;
  
-       for (alg_it = algs; *alg_it; alg_it++) {
-               ret = crypto_register_alg(*alg_it);
-               printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
-                      (*alg_it)->cra_name, ret);
-               if (ret) {
-                       for (alg_it--; alg_it >= algs; alg_it--)
-                               crypto_unregister_alg(*alg_it);
-                       break;
-               }
-       }
+       ret = crypto_register_shash(&p8_ghash_alg);
        if (ret)
-               return ret;
+               goto err;
  
-       ret = crypto_register_shash(&p8_ghash_alg);
-       if (ret) {
-               for (alg_it = algs; *alg_it; alg_it++)
-                       crypto_unregister_alg(*alg_it);
-       }
+       ret = crypto_register_alg(&p8_aes_alg);
+       if (ret)
+               goto err_unregister_ghash;
+       ret = crypto_register_skcipher(&p8_aes_cbc_alg);
+       if (ret)
+               goto err_unregister_aes;
+       ret = crypto_register_skcipher(&p8_aes_ctr_alg);
+       if (ret)
+               goto err_unregister_aes_cbc;
+       ret = crypto_register_skcipher(&p8_aes_xts_alg);
+       if (ret)
+               goto err_unregister_aes_ctr;
+       return 0;
+ err_unregister_aes_ctr:
+       crypto_unregister_skcipher(&p8_aes_ctr_alg);
+ err_unregister_aes_cbc:
+       crypto_unregister_skcipher(&p8_aes_cbc_alg);
+ err_unregister_aes:
+       crypto_unregister_alg(&p8_aes_alg);
+ err_unregister_ghash:
+       crypto_unregister_shash(&p8_ghash_alg);
+ err:
        return ret;
  }
  
  static void __exit p8_exit(void)
  {
-       struct crypto_alg **alg_it;
-       for (alg_it = algs; *alg_it; alg_it++) {
-               printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
-               crypto_unregister_alg(*alg_it);
-       }
+       crypto_unregister_skcipher(&p8_aes_xts_alg);
+       crypto_unregister_skcipher(&p8_aes_ctr_alg);
+       crypto_unregister_skcipher(&p8_aes_cbc_alg);
+       crypto_unregister_alg(&p8_aes_alg);
        crypto_unregister_shash(&p8_ghash_alg);
  }
  
@@@ -1,8 -1,12 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * Linux I2C core ACPI support code
   *
   * Copyright (C) 2014 Intel Corp, Author: Lan Tianyu <tianyu.lan@intel.com>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option)
 - * any later version.
   */
  
  #include <linux/acpi.h>
@@@ -111,8 -115,7 +111,7 @@@ static int i2c_acpi_do_lookup(struct ac
        struct list_head resource_list;
        int ret;
  
-       if (acpi_bus_get_status(adev) || !adev->status.present ||
-           acpi_device_enumerated(adev))
+       if (acpi_bus_get_status(adev) || !adev->status.present)
                return -EINVAL;
  
        if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0)
@@@ -147,6 -150,9 +146,9 @@@ static int i2c_acpi_get_info(struct acp
        lookup.info = info;
        lookup.index = -1;
  
+       if (acpi_device_enumerated(adev))
+               return -EINVAL;
        ret = i2c_acpi_do_lookup(adev, &lookup);
        if (ret)
                return ret;
diff --combined drivers/net/ppp/Kconfig
@@@ -1,4 -1,3 +1,4 @@@
 +# SPDX-License-Identifier: GPL-2.0-only
  #
  # PPP network device configuration
  #
@@@ -87,8 -86,7 +87,7 @@@ config PPP_MPP
        depends on PPP
        select CRYPTO
        select CRYPTO_SHA1
-       select CRYPTO_ARC4
-       select CRYPTO_ECB
+       select CRYPTO_LIB_ARC4
        ---help---
          Support for the MPPE Encryption protocol, as employed by the
          Microsoft Point-to-Point Tunneling Protocol.
   *                    deprecated in 2.6
   */
  
+ #include <crypto/arc4.h>
  #include <crypto/hash.h>
- #include <crypto/skcipher.h>
  #include <linux/err.h>
+ #include <linux/fips.h>
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
@@@ -63,16 -64,8 +64,9 @@@ MODULE_AUTHOR("Frank Cusack <fcusack@fc
  MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
  MODULE_LICENSE("Dual BSD/GPL");
  MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
 +MODULE_SOFTDEP("pre: arc4");
  MODULE_VERSION("1.0.2");
  
- static unsigned int
- setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
- {
-       sg_set_buf(sg, address, length);
-       return length;
- }
  #define SHA1_PAD_SIZE 40
  
  /*
@@@ -96,7 -89,7 +90,7 @@@ static inline void sha_pad_init(struct 
   * State for an MPPE (de)compressor.
   */
  struct ppp_mppe_state {
-       struct crypto_sync_skcipher *arc4;
+       struct arc4_ctx arc4;
        struct shash_desc *sha1;
        unsigned char *sha1_digest;
        unsigned char master_key[MPPE_MAX_KEY_LEN];
@@@ -155,24 -148,11 +149,11 @@@ static void get_new_key_from_sha(struc
   */
  static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
  {
-       struct scatterlist sg_in[1], sg_out[1];
-       SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
-       skcipher_request_set_sync_tfm(req, state->arc4);
-       skcipher_request_set_callback(req, 0, NULL, NULL);
        get_new_key_from_sha(state);
        if (!initial_key) {
-               crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
-                                           state->keylen);
-               sg_init_table(sg_in, 1);
-               sg_init_table(sg_out, 1);
-               setup_sg(sg_in, state->sha1_digest, state->keylen);
-               setup_sg(sg_out, state->session_key, state->keylen);
-               skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
-                                          NULL);
-               if (crypto_skcipher_encrypt(req))
-                   printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
+               arc4_setkey(&state->arc4, state->sha1_digest, state->keylen);
+               arc4_crypt(&state->arc4, state->session_key, state->sha1_digest,
+                          state->keylen);
        } else {
                memcpy(state->session_key, state->sha1_digest, state->keylen);
        }
                state->session_key[1] = 0x26;
                state->session_key[2] = 0x9e;
        }
-       crypto_sync_skcipher_setkey(state->arc4, state->session_key,
-                                   state->keylen);
-       skcipher_request_zero(req);
+       arc4_setkey(&state->arc4, state->session_key, state->keylen);
  }
  
  /*
@@@ -197,7 -175,8 +176,8 @@@ static void *mppe_alloc(unsigned char *
        unsigned int digestsize;
  
        if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
-           options[0] != CI_MPPE || options[1] != CILEN_MPPE)
+           options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
+           fips_enabled)
                goto out;
  
        state = kzalloc(sizeof(*state), GFP_KERNEL);
                goto out;
  
  
-       state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
-       if (IS_ERR(state->arc4)) {
-               state->arc4 = NULL;
-               goto out_free;
-       }
        shash = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(shash))
                goto out_free;
@@@ -251,7 -224,6 +225,6 @@@ out_free
                crypto_free_shash(state->sha1->tfm);
                kzfree(state->sha1);
        }
-       crypto_free_sync_skcipher(state->arc4);
        kfree(state);
  out:
        return NULL;
@@@ -267,8 -239,7 +240,7 @@@ static void mppe_free(void *arg
                kfree(state->sha1_digest);
                crypto_free_shash(state->sha1->tfm);
                kzfree(state->sha1);
-               crypto_free_sync_skcipher(state->arc4);
-               kfree(state);
+               kzfree(state);
        }
  }
  
@@@ -367,10 -338,7 +339,7 @@@ mppe_compress(void *arg, unsigned char 
              int isize, int osize)
  {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-       SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
        int proto;
-       int err;
-       struct scatterlist sg_in[1], sg_out[1];
  
        /*
         * Check that the protocol is in the range we handle.
        ibuf += 2;              /* skip to proto field */
        isize -= 2;
  
-       /* Encrypt packet */
-       sg_init_table(sg_in, 1);
-       sg_init_table(sg_out, 1);
-       setup_sg(sg_in, ibuf, isize);
-       setup_sg(sg_out, obuf, osize);
-       skcipher_request_set_sync_tfm(req, state->arc4);
-       skcipher_request_set_callback(req, 0, NULL, NULL);
-       skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
-       err = crypto_skcipher_encrypt(req);
-       skcipher_request_zero(req);
-       if (err) {
-               printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
-               return -1;
-       }
+       arc4_crypt(&state->arc4, obuf, ibuf, isize);
  
        state->stats.unc_bytes += isize;
        state->stats.unc_packets++;
@@@ -481,10 -435,8 +436,8 @@@ mppe_decompress(void *arg, unsigned cha
                int osize)
  {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-       SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
        unsigned ccount;
        int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
-       struct scatterlist sg_in[1], sg_out[1];
  
        if (isize <= PPP_HDRLEN + MPPE_OVHD) {
                if (state->debug)
         * Decrypt the first byte in order to check if it is
         * a compressed or uncompressed protocol field.
         */
-       sg_init_table(sg_in, 1);
-       sg_init_table(sg_out, 1);
-       setup_sg(sg_in, ibuf, 1);
-       setup_sg(sg_out, obuf, 1);
-       skcipher_request_set_sync_tfm(req, state->arc4);
-       skcipher_request_set_callback(req, 0, NULL, NULL);
-       skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
-       if (crypto_skcipher_decrypt(req)) {
-               printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
-               osize = DECOMP_ERROR;
-               goto out_zap_req;
-       }
+       arc4_crypt(&state->arc4, obuf, ibuf, 1);
  
        /*
         * Do PFC decompression.
        }
  
        /* And finally, decrypt the rest of the packet. */
-       setup_sg(sg_in, ibuf + 1, isize - 1);
-       setup_sg(sg_out, obuf + 1, osize - 1);
-       skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
-       if (crypto_skcipher_decrypt(req)) {
-               printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
-               osize = DECOMP_ERROR;
-               goto out_zap_req;
-       }
+       arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1);
  
        state->stats.unc_bytes += osize;
        state->stats.unc_packets++;
        /* good packet credit */
        state->sanity_errors >>= 1;
  
- out_zap_req:
-       skcipher_request_zero(req);
        return osize;
  
  sanity_error:
@@@ -729,8 -660,7 +661,7 @@@ static struct compressor ppp_mppe = 
  static int __init ppp_mppe_init(void)
  {
        int answer;
-       if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
-             crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
+       if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
                return -ENODEV;
  
        sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
diff --combined fs/cifs/Kconfig
@@@ -1,4 -1,3 +1,4 @@@
 +# SPDX-License-Identifier: GPL-2.0-only
  config CIFS
        tristate "SMB3 and CIFS support (advanced network filesystem)"
        depends on INET
@@@ -10,7 -9,7 +10,7 @@@
        select CRYPTO_SHA512
        select CRYPTO_CMAC
        select CRYPTO_HMAC
-       select CRYPTO_ARC4
+       select CRYPTO_LIB_ARC4
        select CRYPTO_AEAD2
        select CRYPTO_CCM
        select CRYPTO_ECB
diff --combined fs/cifs/cifsfs.c
@@@ -303,7 -303,6 +303,7 @@@ cifs_alloc_inode(struct super_block *sb
        cifs_inode->uniqueid = 0;
        cifs_inode->createtime = 0;
        cifs_inode->epoch = 0;
 +      spin_lock_init(&cifs_inode->open_file_lock);
        generate_random_uuid(cifs_inode->lease_key);
  
        /*
@@@ -1591,7 -1590,6 +1591,6 @@@ MODULE_DESCRIPTIO
        ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
        "also older servers complying with the SNIA CIFS Specification)");
  MODULE_VERSION(CIFS_VERSION);
- MODULE_SOFTDEP("pre: arc4");
  MODULE_SOFTDEP("pre: des");
  MODULE_SOFTDEP("pre: ecb");
  MODULE_SOFTDEP("pre: hmac");
diff --combined include/crypto/aead.h
@@@ -1,8 -1,13 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * AEAD: Authenticated Encryption with Associated Data
   * 
   * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option) 
 - * any later version.
 - *
   */
  
  #ifndef _CRYPTO_AEAD_H
@@@ -317,21 -322,7 +317,7 @@@ static inline struct crypto_aead *crypt
   *
   * Return: 0 if the cipher operation was successful; < 0 if an error occurred
   */
- static inline int crypto_aead_encrypt(struct aead_request *req)
- {
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct crypto_alg *alg = aead->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
-       int ret;
-       crypto_stats_get(alg);
-       if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-               ret = -ENOKEY;
-       else
-               ret = crypto_aead_alg(aead)->encrypt(req);
-       crypto_stats_aead_encrypt(cryptlen, alg, ret);
-       return ret;
- }
+ int crypto_aead_encrypt(struct aead_request *req);
  
  /**
   * crypto_aead_decrypt() - decrypt ciphertext
   *       integrity of the ciphertext or the associated data was violated);
   *       < 0 if an error occurred.
   */
- static inline int crypto_aead_decrypt(struct aead_request *req)
- {
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       struct crypto_alg *alg = aead->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
-       int ret;
-       crypto_stats_get(alg);
-       if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-               ret = -ENOKEY;
-       else if (req->cryptlen < crypto_aead_authsize(aead))
-               ret = -EINVAL;
-       else
-               ret = crypto_aead_alg(aead)->decrypt(req);
-       crypto_stats_aead_decrypt(cryptlen, alg, ret);
-       return ret;
- }
+ int crypto_aead_decrypt(struct aead_request *req);
  
  /**
   * DOC: Asynchronous AEAD Request Handle
diff --combined include/crypto/algapi.h
@@@ -1,8 -1,13 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * Cryptographic API for algorithms (i.e., low-level API).
   *
   * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option) 
 - * any later version.
 - *
   */
  #ifndef _CRYPTO_ALGAPI_H
  #define _CRYPTO_ALGAPI_H
@@@ -189,7 -194,6 +189,6 @@@ void crypto_init_queue(struct crypto_qu
  int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
  struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
- int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
  static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
  {
        return queue->qlen;
@@@ -371,12 -375,6 +370,6 @@@ static inline void *ablkcipher_request_
        return req->__ctx;
  }
  
- static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
-                                         struct crypto_ablkcipher *tfm)
- {
-       return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
- }
  static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
                                                     u32 type, u32 mask)
  {
@@@ -1,8 -1,13 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * Hash algorithms.
   * 
   * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option) 
 - * any later version.
 - *
   */
  
  #ifndef _CRYPTO_INTERNAL_HASH_H
@@@ -196,12 -201,6 +196,6 @@@ static inline struct ahash_request *aha
        return ahash_request_cast(crypto_dequeue_request(queue));
  }
  
- static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
-                                         struct crypto_ahash *tfm)
- {
-       return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
- }
  static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
  {
        return crypto_tfm_ctx(&tfm->base);
@@@ -1,8 -1,13 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * Symmetric key ciphers.
   * 
   * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option) 
 - * any later version.
 - *
   */
  
  #ifndef _CRYPTO_INTERNAL_SKCIPHER_H
@@@ -200,6 -205,66 +200,66 @@@ static inline unsigned int crypto_skcip
        return alg->max_keysize;
  }
  
+ static inline unsigned int crypto_skcipher_alg_chunksize(
+       struct skcipher_alg *alg)
+ {
+       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+           CRYPTO_ALG_TYPE_BLKCIPHER)
+               return alg->base.cra_blocksize;
+       if (alg->base.cra_ablkcipher.encrypt)
+               return alg->base.cra_blocksize;
+       return alg->chunksize;
+ }
+ static inline unsigned int crypto_skcipher_alg_walksize(
+       struct skcipher_alg *alg)
+ {
+       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+           CRYPTO_ALG_TYPE_BLKCIPHER)
+               return alg->base.cra_blocksize;
+       if (alg->base.cra_ablkcipher.encrypt)
+               return alg->base.cra_blocksize;
+       return alg->walksize;
+ }
+ /**
+  * crypto_skcipher_chunksize() - obtain chunk size
+  * @tfm: cipher handle
+  *
+  * The block size is set to one for ciphers such as CTR.  However,
+  * you still need to provide incremental updates in multiples of
+  * the underlying block size as the IV does not have sub-block
+  * granularity.  This is known in this API as the chunk size.
+  *
+  * Return: chunk size in bytes
+  */
+ static inline unsigned int crypto_skcipher_chunksize(
+       struct crypto_skcipher *tfm)
+ {
+       return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+ }
+ /**
+  * crypto_skcipher_walksize() - obtain walk size
+  * @tfm: cipher handle
+  *
+  * In some cases, algorithms can only perform optimally when operating on
+  * multiple blocks in parallel. This is reflected by the walksize, which
+  * must be a multiple of the chunksize (or equal if the concern does not
+  * apply)
+  *
+  * Return: walk size in bytes
+  */
+ static inline unsigned int crypto_skcipher_walksize(
+       struct crypto_skcipher *tfm)
+ {
+       return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+ }
  /* Helpers for simple block cipher modes of operation */
  struct skcipher_ctx_simple {
        struct crypto_cipher *cipher;   /* underlying block cipher */
@@@ -1,8 -1,13 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * Symmetric key ciphers.
   * 
   * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
 - *
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option) 
 - * any later version.
 - *
   */
  
  #ifndef _CRYPTO_SKCIPHER_H
@@@ -288,66 -293,6 +288,6 @@@ static inline unsigned int crypto_sync_
        return crypto_skcipher_ivsize(&tfm->base);
  }
  
- static inline unsigned int crypto_skcipher_alg_chunksize(
-       struct skcipher_alg *alg)
- {
-       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-           CRYPTO_ALG_TYPE_BLKCIPHER)
-               return alg->base.cra_blocksize;
-       if (alg->base.cra_ablkcipher.encrypt)
-               return alg->base.cra_blocksize;
-       return alg->chunksize;
- }
- static inline unsigned int crypto_skcipher_alg_walksize(
-       struct skcipher_alg *alg)
- {
-       if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-           CRYPTO_ALG_TYPE_BLKCIPHER)
-               return alg->base.cra_blocksize;
-       if (alg->base.cra_ablkcipher.encrypt)
-               return alg->base.cra_blocksize;
-       return alg->walksize;
- }
- /**
-  * crypto_skcipher_chunksize() - obtain chunk size
-  * @tfm: cipher handle
-  *
-  * The block size is set to one for ciphers such as CTR.  However,
-  * you still need to provide incremental updates in multiples of
-  * the underlying block size as the IV does not have sub-block
-  * granularity.  This is known in this API as the chunk size.
-  *
-  * Return: chunk size in bytes
-  */
- static inline unsigned int crypto_skcipher_chunksize(
-       struct crypto_skcipher *tfm)
- {
-       return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
- }
- /**
-  * crypto_skcipher_walksize() - obtain walk size
-  * @tfm: cipher handle
-  *
-  * In some cases, algorithms can only perform optimally when operating on
-  * multiple blocks in parallel. This is reflected by the walksize, which
-  * must be a multiple of the chunksize (or equal if the concern does not
-  * apply)
-  *
-  * Return: walk size in bytes
-  */
- static inline unsigned int crypto_skcipher_walksize(
-       struct crypto_skcipher *tfm)
- {
-       return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
- }
  /**
   * crypto_skcipher_blocksize() - obtain block size of cipher
   * @tfm: cipher handle
@@@ -479,21 -424,7 +419,7 @@@ static inline struct crypto_sync_skciph
   *
   * Return: 0 if the cipher operation was successful; < 0 if an error occurred
   */
- static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
- {
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
-       int ret;
-       crypto_stats_get(alg);
-       if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               ret = -ENOKEY;
-       else
-               ret = tfm->encrypt(req);
-       crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
-       return ret;
- }
+ int crypto_skcipher_encrypt(struct skcipher_request *req);
  
  /**
   * crypto_skcipher_decrypt() - decrypt ciphertext
   *
   * Return: 0 if the cipher operation was successful; < 0 if an error occurred
   */
- static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
- {
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct crypto_alg *alg = tfm->base.__crt_alg;
-       unsigned int cryptlen = req->cryptlen;
-       int ret;
-       crypto_stats_get(alg);
-       if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               ret = -ENOKEY;
-       else
-               ret = tfm->decrypt(req);
-       crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
-       return ret;
- }
+ int crypto_skcipher_decrypt(struct skcipher_request *req);
  
  /**
   * DOC: Symmetric Key Cipher Request Handle
diff --combined include/linux/crypto.h
@@@ -1,4 -1,3 +1,4 @@@
 +/* SPDX-License-Identifier: GPL-2.0-or-later */
  /*
   * Scatterlist Cryptographic API.
   *
@@@ -8,6 -7,12 +8,6 @@@
   *
   * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
   * and Nettle, by Niels Möller.
 - * 
 - * This program is free software; you can redistribute it and/or modify it
 - * under the terms of the GNU General Public License as published by the Free
 - * Software Foundation; either version 2 of the License, or (at your option) 
 - * any later version.
 - *
   */
  #ifndef _LINUX_CRYPTO_H
  #define _LINUX_CRYPTO_H
@@@ -49,7 -54,6 +49,6 @@@
  #define CRYPTO_ALG_TYPE_SCOMPRESS     0x0000000b
  #define CRYPTO_ALG_TYPE_RNG           0x0000000c
  #define CRYPTO_ALG_TYPE_AKCIPHER      0x0000000d
- #define CRYPTO_ALG_TYPE_DIGEST                0x0000000e
  #define CRYPTO_ALG_TYPE_HASH          0x0000000e
  #define CRYPTO_ALG_TYPE_SHASH         0x0000000e
  #define CRYPTO_ALG_TYPE_AHASH         0x0000000f
@@@ -323,6 -327,17 +322,17 @@@ struct cipher_alg 
        void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
  };
  
+ /**
+  * struct compress_alg - compression/decompression algorithm
+  * @coa_compress: Compress a buffer of specified length, storing the resulting
+  *              data in the specified buffer. Return the length of the
+  *              compressed data in dlen.
+  * @coa_decompress: Decompress the source buffer, storing the uncompressed
+  *                data in the specified buffer. The length of the data is
+  *                returned in dlen.
+  *
+  * All fields are mandatory.
+  */
  struct compress_alg {
        int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
                            unsigned int slen, u8 *dst, unsigned int *dlen);
diff --combined lib/scatterlist.c
@@@ -1,8 -1,10 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
   *
   * Scatterlist handling helpers.
 - *
 - * This source code is licensed under the GNU General Public License,
 - * Version 2. See the file COPYING for more details.
   */
  #include <linux/export.h>
  #include <linux/slab.h>
@@@ -676,17 -678,18 +676,18 @@@ static bool sg_miter_get_next_page(stru
  {
        if (!miter->__remaining) {
                struct scatterlist *sg;
-               unsigned long pgoffset;
  
                if (!__sg_page_iter_next(&miter->piter))
                        return false;
  
                sg = miter->piter.sg;
-               pgoffset = miter->piter.sg_pgoffset;
  
-               miter->__offset = pgoffset ? 0 : sg->offset;
+               miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+               miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+               miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
-                               (pgoffset << PAGE_SHIFT) - miter->__offset;
+                                    (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+                                    miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }
diff --combined net/mac80211/Kconfig
@@@ -1,9 -1,8 +1,9 @@@
 +# SPDX-License-Identifier: GPL-2.0-only
  config MAC80211
        tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
        depends on CFG80211
        select CRYPTO
-       select CRYPTO_ARC4
+       select CRYPTO_LIB_ARC4
        select CRYPTO_AES
        select CRYPTO_CCM
        select CRYPTO_GCM
diff --combined net/mac80211/cfg.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * mac80211 configuration hooks for cfg80211
   *
@@@ -6,6 -5,8 +6,6 @@@
   * Copyright 2013-2015  Intel Mobile Communications GmbH
   * Copyright (C) 2015-2017 Intel Deutschland GmbH
   * Copyright (C) 2018 Intel Corporation
 - *
 - * This file is GPLv2 as found in COPYING.
   */
  
  #include <linux/ieee80211.h>
@@@ -14,6 -15,7 +14,7 @@@
  #include <linux/slab.h>
  #include <net/net_namespace.h>
  #include <linux/rcupdate.h>
+ #include <linux/fips.h>
  #include <linux/if_ether.h>
  #include <net/cfg80211.h>
  #include "ieee80211_i.h"
@@@ -402,9 -404,8 +403,8 @@@ static int ieee80211_add_key(struct wip
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_WEP104:
-               if (IS_ERR(local->wep_tx_tfm))
+               if (WARN_ON_ONCE(fips_enabled))
                        return -EINVAL;
-               break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
        case WLAN_CIPHER_SUITE_AES_CMAC:
@@@ -1,4 -1,3 +1,4 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * Copyright 2002-2005, Instant802 Networks, Inc.
   * Copyright 2005, Devicescape Software, Inc.
@@@ -6,6 -5,10 +6,6 @@@
   * Copyright 2007-2010        Johannes Berg <johannes@sipsolutions.net>
   * Copyright 2013-2015  Intel Mobile Communications GmbH
   * Copyright (C) 2018-2019 Intel Corporation
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #ifndef IEEE80211_I_H
@@@ -1255,8 -1258,8 +1255,8 @@@ struct ieee80211_local 
  
        struct rate_control_ref *rate_ctrl;
  
-       struct crypto_cipher *wep_tx_tfm;
-       struct crypto_cipher *wep_rx_tfm;
+       struct arc4_ctx wep_tx_ctx;
+       struct arc4_ctx wep_rx_ctx;
        u32 wep_iv;
  
        /* see iface.c */
@@@ -1432,7 -1435,7 +1432,7 @@@ ieee80211_get_sband(struct ieee80211_su
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
  
 -      if (WARN_ON(!chanctx_conf)) {
 +      if (WARN_ON_ONCE(!chanctx_conf)) {
                rcu_read_unlock();
                return NULL;
        }
@@@ -2034,13 -2037,6 +2034,13 @@@ void __ieee80211_flush_queues(struct ie
  
  static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
  {
 +      /*
 +       * It's unsafe to try to do any work during reconfigure flow.
 +       * When the flow ends the work will be requeued.
 +       */
 +      if (local->in_reconfig)
 +              return false;
 +
        /*
         * If quiescing is set, we are racing with __ieee80211_suspend.
         * __ieee80211_suspend flushes the workers after setting quiescing,
@@@ -2229,9 -2225,6 +2229,9 @@@ void ieee80211_tdls_cancel_channel_swit
                                          const u8 *addr);
  void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
  void ieee80211_tdls_chsw_work(struct work_struct *wk);
 +void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
 +                                    const u8 *peer, u16 reason);
 +const char *ieee80211_get_reason_code_string(u16 reason_code);
  
  extern const struct ethtool_ops ieee80211_ethtool_ops;
  
diff --combined net/mac80211/key.h
@@@ -1,7 -1,10 +1,7 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * Copyright 2002-2004, Instant802 Networks, Inc.
   * Copyright 2005, Devicescape Software, Inc.
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #ifndef IEEE80211_KEY_H
@@@ -11,6 -14,7 +11,7 @@@
  #include <linux/list.h>
  #include <linux/crypto.h>
  #include <linux/rcupdate.h>
+ #include <crypto/arc4.h>
  #include <net/mac80211.h>
  
  #define NUM_DEFAULT_KEYS 4
diff --combined net/mac80211/main.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright 2002-2005, Instant802 Networks, Inc.
   * Copyright 2005-2006, Devicescape Software, Inc.
@@@ -6,10 -5,15 +6,11 @@@
   * Copyright 2013-2014  Intel Mobile Communications GmbH
   * Copyright (C) 2017     Intel Deutschland GmbH
   * Copyright (C) 2018 - 2019 Intel Corporation
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <net/mac80211.h>
  #include <linux/module.h>
+ #include <linux/fips.h>
  #include <linux/init.h>
  #include <linux/netdevice.h>
  #include <linux/types.h>
@@@ -730,8 -734,7 +731,7 @@@ EXPORT_SYMBOL(ieee80211_alloc_hw_nm)
  
  static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
  {
-       bool have_wep = !(IS_ERR(local->wep_tx_tfm) ||
-                         IS_ERR(local->wep_rx_tfm));
+       bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */
        bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE);
        int n_suites = 0, r = 0, w = 0;
        u32 *suites;
@@@ -1298,7 -1301,6 +1298,6 @@@ int ieee80211_register_hw(struct ieee80
   fail_rate:
        rtnl_unlock();
        ieee80211_led_exit(local);
-       ieee80211_wep_free(local);
   fail_flows:
        destroy_workqueue(local->workqueue);
   fail_workqueue:
@@@ -1355,7 -1357,6 +1354,6 @@@ void ieee80211_unregister_hw(struct iee
  
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
-       ieee80211_wep_free(local);
        ieee80211_led_exit(local);
        kfree(local->int_scan_req);
  }
diff --combined net/mac80211/mlme.c
@@@ -1,4 -1,3 +1,4 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * BSS client mode implementation
   * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
@@@ -9,9 -8,14 +9,10 @@@
   * Copyright 2013-2014  Intel Mobile Communications GmbH
   * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
   * Copyright (C) 2018 - 2019 Intel Corporation
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <linux/delay.h>
+ #include <linux/fips.h>
  #include <linux/if_ether.h>
  #include <linux/skbuff.h>
  #include <linux/if_arp.h>
@@@ -2960,7 -2964,7 +2961,7 @@@ static void ieee80211_rx_mgmt_auth(stru
  #define case_WLAN(type) \
        case WLAN_REASON_##type: return #type
  
 -static const char *ieee80211_get_reason_code_string(u16 reason_code)
 +const char *ieee80211_get_reason_code_string(u16 reason_code)
  {
        switch (reason_code) {
        case_WLAN(UNSPECIFIED);
@@@ -3025,11 -3029,6 +3026,11 @@@ static void ieee80211_rx_mgmt_deauth(st
        if (len < 24 + 2)
                return;
  
 +      if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
 +              ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
 +              return;
 +      }
 +
        if (ifmgd->associated &&
            ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
                const u8 *bssid = ifmgd->associated->bssid;
@@@ -3079,11 -3078,6 +3080,11 @@@ static void ieee80211_rx_mgmt_disassoc(
  
        reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
  
 +      if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
 +              ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
 +              return;
 +      }
 +
        sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
                   mgmt->sa, reason_code,
                   ieee80211_get_reason_code_string(reason_code));
@@@ -5045,7 -5039,7 +5046,7 @@@ int ieee80211_mgd_auth(struct ieee80211
                auth_alg = WLAN_AUTH_OPEN;
                break;
        case NL80211_AUTHTYPE_SHARED_KEY:
-               if (IS_ERR(local->wep_tx_tfm))
+               if (fips_enabled)
                        return -EOPNOTSUPP;
                auth_alg = WLAN_AUTH_SHARED_KEY;
                break;
diff --combined net/mac80211/tkip.c
@@@ -1,8 -1,11 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright 2002-2004, Instant802 Networks, Inc.
   * Copyright 2005, Devicescape Software, Inc.
   * Copyright (C) 2016 Intel Deutschland GmbH
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  #include <linux/kernel.h>
  #include <linux/bitops.h>
@@@ -219,7 -222,7 +219,7 @@@ EXPORT_SYMBOL(ieee80211_get_tkip_p2k)
   * @payload_len is the length of payload (_not_ including IV/ICV length).
   * @ta is the transmitter addresses.
   */
- int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
+ int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx,
                                struct ieee80211_key *key,
                                struct sk_buff *skb,
                                u8 *payload, size_t payload_len)
  
        ieee80211_get_tkip_p2k(&key->conf, skb, rc4key);
  
-       return ieee80211_wep_encrypt_data(tfm, rc4key, 16,
+       return ieee80211_wep_encrypt_data(ctx, rc4key, 16,
                                          payload, payload_len);
  }
  
   * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
   * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
   * length of payload, including IV, Ext. IV, MIC, ICV.  */
- int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
+ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx,
                                struct ieee80211_key *key,
                                u8 *payload, size_t payload_len, u8 *ta,
                                u8 *ra, int only_iv, int queue,
  
        tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key);
  
-       res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
+       res = ieee80211_wep_decrypt_data(ctx, rc4key, 16, pos, payload_len - 12);
   done:
        if (res == TKIP_DECRYPT_OK) {
                /*
diff --combined net/mac80211/tkip.h
@@@ -1,6 -1,9 +1,6 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * Copyright 2002-2004, Instant802 Networks, Inc.
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #ifndef TKIP_H
@@@ -10,7 -13,7 +10,7 @@@
  #include <linux/crypto.h>
  #include "key.h"
  
- int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
+ int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx,
                                struct ieee80211_key *key,
                                struct sk_buff *skb,
                                u8 *payload, size_t payload_len);
@@@ -21,7 -24,7 +21,7 @@@ enum 
        TKIP_DECRYPT_INVALID_KEYIDX = -2,
        TKIP_DECRYPT_REPLAY = -3,
  };
- int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
+ int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx,
                                struct ieee80211_key *key,
                                u8 *payload, size_t payload_len, u8 *ta,
                                u8 *ra, int only_iv, int queue,
diff --combined net/mac80211/wep.c
@@@ -1,8 -1,11 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Software WEP encryption implementation
   * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi>
   * Copyright 2003, Instant802 Networks, Inc.
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <linux/netdevice.h>
@@@ -27,30 -30,9 +27,9 @@@ int ieee80211_wep_init(struct ieee80211
        /* start WEP IV from a random value */
        get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN);
  
-       local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, 0);
-       if (IS_ERR(local->wep_tx_tfm)) {
-               local->wep_rx_tfm = ERR_PTR(-EINVAL);
-               return PTR_ERR(local->wep_tx_tfm);
-       }
-       local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, 0);
-       if (IS_ERR(local->wep_rx_tfm)) {
-               crypto_free_cipher(local->wep_tx_tfm);
-               local->wep_tx_tfm = ERR_PTR(-EINVAL);
-               return PTR_ERR(local->wep_rx_tfm);
-       }
        return 0;
  }
  
- void ieee80211_wep_free(struct ieee80211_local *local)
- {
-       if (!IS_ERR(local->wep_tx_tfm))
-               crypto_free_cipher(local->wep_tx_tfm);
-       if (!IS_ERR(local->wep_rx_tfm))
-               crypto_free_cipher(local->wep_rx_tfm);
- }
  static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
  {
        /*
@@@ -128,21 -110,17 +107,17 @@@ static void ieee80211_wep_remove_iv(str
  /* Perform WEP encryption using given key. data buffer must have tailroom
   * for 4-byte ICV. data_len must not include this ICV. Note: this function
   * does _not_ add IV. data = RC4(data | CRC32(data)) */
- int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
+ int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key,
                               size_t klen, u8 *data, size_t data_len)
  {
        __le32 icv;
-       int i;
-       if (IS_ERR(tfm))
-               return -1;
  
        icv = cpu_to_le32(~crc32_le(~0, data, data_len));
        put_unaligned(icv, (__le32 *)(data + data_len));
  
-       crypto_cipher_setkey(tfm, rc4key, klen);
-       for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++)
-               crypto_cipher_encrypt_one(tfm, data + i, data + i);
+       arc4_setkey(ctx, rc4key, klen);
+       arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN);
+       memzero_explicit(ctx, sizeof(*ctx));
  
        return 0;
  }
@@@ -181,7 -159,7 +156,7 @@@ int ieee80211_wep_encrypt(struct ieee80
        /* Add room for ICV */
        skb_put(skb, IEEE80211_WEP_ICV_LEN);
  
-       return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
+       return ieee80211_wep_encrypt_data(&local->wep_tx_ctx, rc4key, keylen + 3,
                                          iv + IEEE80211_WEP_IV_LEN, len);
  }
  
  /* Perform WEP decryption using given key. data buffer includes encrypted
   * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
   * Return 0 on success and -1 on ICV mismatch. */
- int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
+ int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key,
                               size_t klen, u8 *data, size_t data_len)
  {
        __le32 crc;
-       int i;
-       if (IS_ERR(tfm))
-               return -1;
  
-       crypto_cipher_setkey(tfm, rc4key, klen);
-       for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++)
-               crypto_cipher_decrypt_one(tfm, data + i, data + i);
+       arc4_setkey(ctx, rc4key, klen);
+       arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN);
+       memzero_explicit(ctx, sizeof(*ctx));
  
        crc = cpu_to_le32(~crc32_le(~0, data, data_len));
        if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0)
@@@ -253,7 -227,7 +224,7 @@@ static int ieee80211_wep_decrypt(struc
        /* Copy rest of the WEP key (the secret part) */
        memcpy(rc4key + 3, key->conf.key, key->conf.keylen);
  
-       if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
+       if (ieee80211_wep_decrypt_data(&local->wep_rx_ctx, rc4key, klen,
                                       skb->data + hdrlen +
                                       IEEE80211_WEP_IV_LEN, len))
                ret = -1;
diff --combined net/mac80211/wep.h
@@@ -1,8 -1,11 +1,8 @@@
 +/* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * Software WEP encryption implementation
   * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi>
   * Copyright 2003, Instant802 Networks, Inc.
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #ifndef WEP_H
  #include "key.h"
  
  int ieee80211_wep_init(struct ieee80211_local *local);
- void ieee80211_wep_free(struct ieee80211_local *local);
- int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
+ int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key,
                                size_t klen, u8 *data, size_t data_len);
  int ieee80211_wep_encrypt(struct ieee80211_local *local,
                          struct sk_buff *skb,
                          const u8 *key, int keylen, int keyidx);
- int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
+ int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key,
                               size_t klen, u8 *data, size_t data_len);
  
  ieee80211_rx_result
diff --combined net/mac80211/wpa.c
@@@ -1,8 -1,11 +1,8 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * Copyright 2002-2004, Instant802 Networks, Inc.
   * Copyright 2008, Jouni Malinen <j@w1.fi>
   * Copyright (C) 2016-2017 Intel Deutschland GmbH
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation.
   */
  
  #include <linux/netdevice.h>
@@@ -239,7 -242,7 +239,7 @@@ static int tkip_encrypt_skb(struct ieee
        /* Add room for ICV */
        skb_put(skb, IEEE80211_TKIP_ICV_LEN);
  
-       return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm,
+       return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx,
                                           key, skb, pos, len);
  }
  
@@@ -290,7 -293,7 +290,7 @@@ ieee80211_crypto_tkip_decrypt(struct ie
        if (status->flag & RX_FLAG_DECRYPTED)
                hwaccel = 1;
  
-       res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
+       res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx,
                                          key, skb->data + hdrlen,
                                          skb->len - hdrlen, rx->sta->sta.addr,
                                          hdr->addr1, hwaccel, rx->security_idx,
@@@ -1172,7 -1175,7 +1172,7 @@@ ieee80211_crypto_aes_gmac_decrypt(struc
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_key *key = rx->key;
        struct ieee80211_mmie_16 *mmie;
 -      u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
 +      u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  
        if (!ieee80211_is_mgmt(hdr->frame_control))
                memcpy(nonce, hdr->addr2, ETH_ALEN);
                memcpy(nonce + ETH_ALEN, ipn, 6);
  
 +              mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
 +              if (!mic)
 +                      return RX_DROP_UNUSABLE;
                if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
                                       skb->data + 24, skb->len - 24,
                                       mic) < 0 ||
                    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
                        key->u.aes_gmac.icverrors++;
 +                      kfree(mic);
                        return RX_DROP_UNUSABLE;
                }
 +              kfree(mic);
        }
  
        memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
diff --combined net/wireless/Kconfig
@@@ -1,4 -1,3 +1,4 @@@
 +# SPDX-License-Identifier: GPL-2.0-only
  config WIRELESS_EXT
        bool
  
@@@ -213,12 -212,14 +213,14 @@@ config LIB8021
  
  config LIB80211_CRYPT_WEP
        tristate
+       select CRYPTO_LIB_ARC4
  
  config LIB80211_CRYPT_CCMP
        tristate
  
  config LIB80211_CRYPT_TKIP
        tristate
+       select CRYPTO_LIB_ARC4
  
  config LIB80211_DEBUG
        bool "lib80211 debugging messages"
@@@ -1,14 -1,19 +1,15 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * lib80211 crypt: host-based TKIP encryption implementation for lib80211
   *
   * Copyright (c) 2003-2004, Jouni Malinen <j@w1.fi>
   * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation. See README and COPYING for
 - * more details.
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/err.h>
+ #include <linux/fips.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/slab.h>
@@@ -25,6 -30,7 +26,7 @@@
  #include <linux/ieee80211.h>
  #include <net/iw_handler.h>
  
+ #include <crypto/arc4.h>
  #include <crypto/hash.h>
  #include <linux/crypto.h>
  #include <linux/crc32.h>
@@@ -60,9 -66,9 +62,9 @@@ struct lib80211_tkip_data 
  
        int key_idx;
  
-       struct crypto_cipher *rx_tfm_arc4;
+       struct arc4_ctx rx_ctx_arc4;
+       struct arc4_ctx tx_ctx_arc4;
        struct crypto_shash *rx_tfm_michael;
-       struct crypto_cipher *tx_tfm_arc4;
        struct crypto_shash *tx_tfm_michael;
  
        /* scratch buffers for virt_to_page() (crypto API) */
@@@ -89,30 -95,21 +91,21 @@@ static void *lib80211_tkip_init(int key
  {
        struct lib80211_tkip_data *priv;
  
+       if (fips_enabled)
+               return NULL;
        priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
        if (priv == NULL)
                goto fail;
  
        priv->key_idx = key_idx;
  
-       priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0);
-       if (IS_ERR(priv->tx_tfm_arc4)) {
-               priv->tx_tfm_arc4 = NULL;
-               goto fail;
-       }
        priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
        if (IS_ERR(priv->tx_tfm_michael)) {
                priv->tx_tfm_michael = NULL;
                goto fail;
        }
  
-       priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0);
-       if (IS_ERR(priv->rx_tfm_arc4)) {
-               priv->rx_tfm_arc4 = NULL;
-               goto fail;
-       }
        priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
        if (IS_ERR(priv->rx_tfm_michael)) {
                priv->rx_tfm_michael = NULL;
        fail:
        if (priv) {
                crypto_free_shash(priv->tx_tfm_michael);
-               crypto_free_cipher(priv->tx_tfm_arc4);
                crypto_free_shash(priv->rx_tfm_michael);
-               crypto_free_cipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
  
@@@ -138,11 -133,9 +129,9 @@@ static void lib80211_tkip_deinit(void *
        struct lib80211_tkip_data *_priv = priv;
        if (_priv) {
                crypto_free_shash(_priv->tx_tfm_michael);
-               crypto_free_cipher(_priv->tx_tfm_arc4);
                crypto_free_shash(_priv->rx_tfm_michael);
-               crypto_free_cipher(_priv->rx_tfm_arc4);
        }
-       kfree(priv);
+       kzfree(priv);
  }
  
  static inline u16 RotR1(u16 val)
@@@ -341,7 -334,6 +330,6 @@@ static int lib80211_tkip_encrypt(struc
        int len;
        u8 rc4key[16], *pos, *icv;
        u32 crc;
-       int i;
  
        if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
  
-       crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
-       for (i = 0; i < len + 4; i++)
-               crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i);
+       arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16);
+       arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4);
        return 0;
  }
  
@@@ -396,7 -388,6 +384,6 @@@ static int lib80211_tkip_decrypt(struc
        u8 icv[4];
        u32 crc;
        int plen;
-       int i;
  
        hdr = (struct ieee80211_hdr *)skb->data;
  
  
        plen = skb->len - hdr_len - 12;
  
-       crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
-       for (i = 0; i < plen + 4; i++)
-               crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i);
+       arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16);
+       arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4);
  
        crc = ~crc32_le(~0, pos, plen);
        icv[0] = crc;
@@@ -636,17 -626,17 +622,17 @@@ static int lib80211_tkip_set_key(void *
        struct lib80211_tkip_data *tkey = priv;
        int keyidx;
        struct crypto_shash *tfm = tkey->tx_tfm_michael;
-       struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4;
+       struct arc4_ctx *tfm2 = &tkey->tx_ctx_arc4;
        struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4;
+       struct arc4_ctx *tfm4 = &tkey->rx_ctx_arc4;
  
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
        tkey->key_idx = keyidx;
        tkey->tx_tfm_michael = tfm;
-       tkey->tx_tfm_arc4 = tfm2;
+       tkey->tx_ctx_arc4 = *tfm2;
        tkey->rx_tfm_michael = tfm3;
-       tkey->rx_tfm_arc4 = tfm4;
+       tkey->rx_ctx_arc4 = *tfm4;
        if (len == TKIP_KEY_LEN) {
                memcpy(tkey->key, key, TKIP_KEY_LEN);
                tkey->key_set = 1;
@@@ -1,12 -1,17 +1,13 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
  /*
   * lib80211 crypt: host-based WEP encryption implementation for lib80211
   *
   * Copyright (c) 2002-2004, Jouni Malinen <j@w1.fi>
   * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
 - *
 - * This program is free software; you can redistribute it and/or modify
 - * it under the terms of the GNU General Public License version 2 as
 - * published by the Free Software Foundation. See README and COPYING for
 - * more details.
   */
  
  #include <linux/err.h>
+ #include <linux/fips.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/slab.h>
@@@ -18,7 -23,7 +19,7 @@@
  
  #include <net/lib80211.h>
  
- #include <linux/crypto.h>
+ #include <crypto/arc4.h>
  #include <linux/crc32.h>
  
  MODULE_AUTHOR("Jouni Malinen");
@@@ -31,52 -36,31 +32,31 @@@ struct lib80211_wep_data 
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_cipher *tx_tfm;
-       struct crypto_cipher *rx_tfm;
+       struct arc4_ctx tx_ctx;
+       struct arc4_ctx rx_ctx;
  };
  
  static void *lib80211_wep_init(int keyidx)
  {
        struct lib80211_wep_data *priv;
  
+       if (fips_enabled)
+               return NULL;
        priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
        if (priv == NULL)
-               goto fail;
+               return NULL;
        priv->key_idx = keyidx;
  
-       priv->tx_tfm = crypto_alloc_cipher("arc4", 0, 0);
-       if (IS_ERR(priv->tx_tfm)) {
-               priv->tx_tfm = NULL;
-               goto fail;
-       }
-       priv->rx_tfm = crypto_alloc_cipher("arc4", 0, 0);
-       if (IS_ERR(priv->rx_tfm)) {
-               priv->rx_tfm = NULL;
-               goto fail;
-       }
        /* start WEP IV from a random value */
        get_random_bytes(&priv->iv, 4);
  
        return priv;
-       fail:
-       if (priv) {
-               crypto_free_cipher(priv->tx_tfm);
-               crypto_free_cipher(priv->rx_tfm);
-               kfree(priv);
-       }
-       return NULL;
  }
  
  static void lib80211_wep_deinit(void *priv)
  {
-       struct lib80211_wep_data *_priv = priv;
-       if (_priv) {
-               crypto_free_cipher(_priv->tx_tfm);
-               crypto_free_cipher(_priv->rx_tfm);
-       }
-       kfree(priv);
+       kzfree(priv);
  }
  
  /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */
@@@ -128,7 -112,6 +108,6 @@@ static int lib80211_wep_encrypt(struct 
        u32 crc, klen, len;
        u8 *pos, *icv;
        u8 key[WEP_KEY_LEN + 3];
-       int i;
  
        /* other checks are in lib80211_wep_build_iv */
        if (skb_tailroom(skb) < 4)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
  
-       crypto_cipher_setkey(wep->tx_tfm, key, klen);
-       for (i = 0; i < len + 4; i++)
-               crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i);
+       arc4_setkey(&wep->tx_ctx, key, klen);
+       arc4_crypt(&wep->tx_ctx, pos, pos, len + 4);
  
        return 0;
  }
@@@ -177,7 -158,6 +154,6 @@@ static int lib80211_wep_decrypt(struct 
        u32 crc, klen, plen;
        u8 key[WEP_KEY_LEN + 3];
        u8 keyidx, *pos, icv[4];
-       int i;
  
        if (skb->len < hdr_len + 8)
                return -1;
        /* Apply RC4 to data and compute CRC32 over decrypted data */
        plen = skb->len - hdr_len - 8;
  
-       crypto_cipher_setkey(wep->rx_tfm, key, klen);
-       for (i = 0; i < plen + 4; i++)
-               crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i);
+       arc4_setkey(&wep->rx_ctx, key, klen);
+       arc4_crypt(&wep->rx_ctx, pos, pos, plen + 4);
  
        crc = ~crc32_le(~0, pos, plen);
        icv[0] = crc;