-rw-r--r--  Documentation/crypto/api-samples.rst | 1
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c | 2
-rw-r--r--  arch/arm/crypto/chacha-neon-glue.c | 5
-rw-r--r--  arch/arm/crypto/crc32-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/crct10dif-ce-glue.c | 3
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c | 10
-rw-r--r--  arch/arm/crypto/nhpoly1305-neon-glue.c | 3
-rw-r--r--  arch/arm/crypto/sha1-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 5
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/sha256_neon_glue.c | 5
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c | 5
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 7
-rw-r--r--  arch/arm64/crypto/aes-ce-glue.c | 5
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 6
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c | 4
-rw-r--r--  arch/arm64/crypto/chacha-neon-glue.c | 5
-rw-r--r--  arch/arm64/crypto/crct10dif-ce-glue.c | 5
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 17
-rw-r--r--  arch/arm64/crypto/nhpoly1305-neon-glue.c | 3
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sha256-glue.c | 5
-rw-r--r--  arch/arm64/crypto/sha3-ce-glue.c | 5
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sm3-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sm4-ce-glue.c | 5
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c | 4
-rw-r--r--  arch/powerpc/crypto/crct10dif-vpmsum_glue.c | 4
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/s390/crypto/des_s390.c | 21
-rw-r--r--  arch/sparc/crypto/des_glue.c | 11
-rw-r--r--  arch/x86/crypto/aegis128-aesni-glue.c | 157
-rw-r--r--  arch/x86/crypto/aegis128l-aesni-glue.c | 157
-rw-r--r--  arch/x86/crypto/aegis256-aesni-glue.c | 157
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 212
-rw-r--r--  arch/x86/crypto/chacha_glue.c | 6
-rw-r--r--  arch/x86/crypto/crc32-pclmul_glue.c | 5
-rw-r--r--  arch/x86/crypto/crc32c-intel_glue.c | 7
-rw-r--r--  arch/x86/crypto/crct10dif-pclmul_glue.c | 20
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_glue.c | 11
-rw-r--r--  arch/x86/crypto/morus1280-avx2-glue.c | 12
-rw-r--r--  arch/x86/crypto/morus1280-sse2-glue.c | 12
-rw-r--r--  arch/x86/crypto/morus1280_glue.c | 85
-rw-r--r--  arch/x86/crypto/morus640-sse2-glue.c | 12
-rw-r--r--  arch/x86/crypto/morus640_glue.c | 85
-rw-r--r--  arch/x86/crypto/nhpoly1305-avx2-glue.c | 5
-rw-r--r--  arch/x86/crypto/nhpoly1305-sse2-glue.c | 5
-rw-r--r--  arch/x86/crypto/poly1305_glue.c | 4
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c | 7
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c | 7
-rw-r--r--  arch/x86/crypto/sha512_ssse3_glue.c | 10
-rw-r--r--  arch/x86/power/hibernate.c | 1
-rw-r--r--  crypto/842.c | 2
-rw-r--r--  crypto/Kconfig | 85
-rw-r--r--  crypto/Makefile | 10
-rw-r--r--  crypto/adiantum.c | 3
-rw-r--r--  crypto/aegis128.c | 2
-rw-r--r--  crypto/aegis128l.c | 2
-rw-r--r--  crypto/aegis256.c | 2
-rw-r--r--  crypto/aes_generic.c | 10
-rw-r--r--  crypto/akcipher.c | 14
-rw-r--r--  crypto/algboss.c | 8
-rw-r--r--  crypto/ansi_cprng.c | 2
-rw-r--r--  crypto/anubis.c | 2
-rw-r--r--  crypto/arc4.c | 2
-rw-r--r--  crypto/asymmetric_keys/asym_tpm.c | 43
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c | 1
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 105
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c | 1
-rw-r--r--  crypto/asymmetric_keys/x509.asn1 | 2
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 57
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 1
-rw-r--r--  crypto/authenc.c | 2
-rw-r--r--  crypto/authencesn.c | 2
-rw-r--r--  crypto/blowfish_generic.c | 2
-rw-r--r--  crypto/camellia_generic.c | 2
-rw-r--r--  crypto/cast5_generic.c | 2
-rw-r--r--  crypto/cast6_generic.c | 2
-rw-r--r--  crypto/cbc.c | 2
-rw-r--r--  crypto/ccm.c | 46
-rw-r--r--  crypto/cfb.c | 2
-rw-r--r--  crypto/chacha20poly1305.c | 6
-rw-r--r--  crypto/chacha_generic.c | 12
-rw-r--r--  crypto/cmac.c | 2
-rw-r--r--  crypto/crc32_generic.c | 2
-rw-r--r--  crypto/crc32c_generic.c | 2
-rw-r--r--  crypto/crct10dif_generic.c | 13
-rw-r--r--  crypto/cryptd.c | 252
-rw-r--r--  crypto/crypto_null.c | 2
-rw-r--r--  crypto/ctr.c | 2
-rw-r--r--  crypto/cts.c | 20
-rw-r--r--  crypto/deflate.c | 2
-rw-r--r--  crypto/des_generic.c | 13
-rw-r--r--  crypto/dh.c | 2
-rw-r--r--  crypto/drbg.c | 3
-rw-r--r--  crypto/ecb.c | 2
-rw-r--r--  crypto/ecc.c | 417
-rw-r--r--  crypto/ecc.h | 153
-rw-r--r--  crypto/ecc_curve_defs.h | 15
-rw-r--r--  crypto/ecdh.c | 2
-rw-r--r--  crypto/echainiv.c | 2
-rw-r--r--  crypto/ecrdsa.c | 296
-rw-r--r--  crypto/ecrdsa_defs.h | 225
-rw-r--r--  crypto/ecrdsa_params.asn1 | 4
-rw-r--r--  crypto/ecrdsa_pub_key.asn1 | 1
-rw-r--r--  crypto/fcrypt.c | 2
-rw-r--r--  crypto/fips.c | 2
-rw-r--r--  crypto/gcm.c | 36
-rw-r--r--  crypto/ghash-generic.c | 2
-rw-r--r--  crypto/hmac.c | 13
-rw-r--r--  crypto/jitterentropy-kcapi.c | 2
-rw-r--r--  crypto/keywrap.c | 2
-rw-r--r--  crypto/khazad.c | 2
-rw-r--r--  crypto/lrw.c | 6
-rw-r--r--  crypto/lz4.c | 2
-rw-r--r--  crypto/lz4hc.c | 2
-rw-r--r--  crypto/lzo-rle.c | 2
-rw-r--r--  crypto/lzo.c | 2
-rw-r--r--  crypto/md4.c | 2
-rw-r--r--  crypto/md5.c | 2
-rw-r--r--  crypto/michael_mic.c | 2
-rw-r--r--  crypto/morus1280.c | 2
-rw-r--r--  crypto/morus640.c | 2
-rw-r--r--  crypto/nhpoly1305.c | 2
-rw-r--r--  crypto/ofb.c | 2
-rw-r--r--  crypto/pcbc.c | 2
-rw-r--r--  crypto/pcrypt.c | 2
-rw-r--r--  crypto/poly1305_generic.c | 2
-rw-r--r--  crypto/rmd128.c | 2
-rw-r--r--  crypto/rmd160.c | 2
-rw-r--r--  crypto/rmd256.c | 2
-rw-r--r--  crypto/rmd320.c | 2
-rw-r--r--  crypto/rsa-pkcs1pad.c | 33
-rw-r--r--  crypto/rsa.c | 111
-rw-r--r--  crypto/salsa20_generic.c | 13
-rw-r--r--  crypto/scompress.c | 129
-rw-r--r--  crypto/seed.c | 2
-rw-r--r--  crypto/seqiv.c | 2
-rw-r--r--  crypto/serpent_generic.c | 2
-rw-r--r--  crypto/sha1_generic.c | 2
-rw-r--r--  crypto/sha256_generic.c | 2
-rw-r--r--  crypto/sha3_generic.c | 2
-rw-r--r--  crypto/sha512_generic.c | 2
-rw-r--r--  crypto/shash.c | 7
-rw-r--r--  crypto/simd.c | 273
-rw-r--r--  crypto/skcipher.c | 9
-rw-r--r--  crypto/sm3_generic.c | 2
-rw-r--r--  crypto/sm4_generic.c | 2
-rw-r--r--  crypto/streebog_generic.c | 27
-rw-r--r--  crypto/tcrypt.c | 2
-rw-r--r--  crypto/tea.c | 2
-rw-r--r--  crypto/testmgr.c | 1242
-rw-r--r--  crypto/testmgr.h | 181
-rw-r--r--  crypto/tgr192.c | 2
-rw-r--r--  crypto/twofish_generic.c | 2
-rw-r--r--  crypto/vmac.c | 2
-rw-r--r--  crypto/wp512.c | 2
-rw-r--r--  crypto/xcbc.c | 2
-rw-r--r--  crypto/xts.c | 2
-rw-r--r--  crypto/zstd.c | 2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 1
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 2
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 1
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 9
-rw-r--r--  drivers/crypto/Kconfig | 9
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 24
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 48
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 3
-rw-r--r--  drivers/crypto/atmel-tdes.c | 106
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 2
-rw-r--r--  drivers/crypto/bcm/cipher.c | 22
-rw-r--r--  drivers/crypto/bcm/spu.c | 3
-rw-r--r--  drivers/crypto/bcm/util.c | 1
-rw-r--r--  drivers/crypto/caam/caamalg.c | 75
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 66
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 243
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h | 2
-rw-r--r--  drivers/crypto/caam/caampkc.c | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 20
-rw-r--r--  drivers/crypto/caam/error.c | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 4
-rw-r--r--  drivers/crypto/caam/jr.c | 33
-rw-r--r--  drivers/crypto/caam/qi.c | 4
-rw-r--r--  drivers/crypto/caam/regs.h | 11
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_main.c | 2
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_mbox.c | 17
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 6
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 337
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.c | 65
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_req.h | 46
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 8
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 21
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-rsa.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 69
-rw-r--r--  drivers/crypto/ccree/Makefile | 1
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 118
-rw-r--r--  drivers/crypto/ccree/cc_aead.h | 3
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 341
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 585
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h | 3
-rw-r--r--  drivers/crypto/ccree/cc_crypto_ctx.h | 10
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.c | 44
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 120
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 36
-rw-r--r--  drivers/crypto/ccree/cc_fips.c | 29
-rw-r--r--  drivers/crypto/ccree/cc_fips.h | 4
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 64
-rw-r--r--  drivers/crypto/ccree/cc_hash.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_host_regs.h | 123
-rw-r--r--  drivers/crypto/ccree/cc_hw_queue_defs.h | 35
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.c | 11
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_kernel_regs.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_lli_defs.h | 4
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 11
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 116
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.c | 7
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 2
-rw-r--r--  drivers/crypto/hifn_795x.c | 31
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 12
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 11
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 64
-rw-r--r--  drivers/crypto/marvell/cipher.c | 11
-rw-r--r--  drivers/crypto/marvell/hash.c | 3
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c | 3
-rw-r--r--  drivers/crypto/mxc-scc.c | 767
-rw-r--r--  drivers/crypto/mxs-dcp.c | 14
-rw-r--r--  drivers/crypto/n2_core.c | 15
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 6
-rw-r--r--  drivers/crypto/nx/nx-842.c | 3
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 12
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 6
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 6
-rw-r--r--  drivers/crypto/omap-des.c | 29
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/crypto/padlock-sha.c | 5
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 35
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 2
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 22
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 61
-rw-r--r--  drivers/crypto/s5p-sss.c | 1
-rw-r--r--  drivers/crypto/sahara.c | 6
-rw-r--r--  drivers/crypto/stm32/Kconfig | 1
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 74
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 4
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 78
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 19
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 5
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 2
-rw-r--r--  drivers/crypto/talitos.c | 108
-rw-r--r--  drivers/crypto/ux500/cryp/Makefile | 6
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 86
-rw-r--r--  drivers/crypto/vmx/aes.c | 14
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 14
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 10
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 14
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 4
-rw-r--r--  drivers/crypto/vmx/ghash.c | 10
-rw-r--r--  drivers/crypto/vmx/vmx.c | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 1
-rw-r--r--  drivers/md/dm-crypt.c | 3
-rw-r--r--  drivers/md/dm-integrity.c | 2
-rw-r--r--  drivers/net/ppp/ppp_mppe.c | 1
-rw-r--r--  drivers/net/wireless/intersil/orinoco/mic.c | 1
-rw-r--r--  drivers/nfc/s3fwrn5/firmware.c | 1
-rw-r--r--  drivers/staging/ks7010/ks_hostif.c | 1
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_tkip.c | 1
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 1
-rw-r--r--  drivers/thunderbolt/domain.c | 1
-rw-r--r--  fs/cifs/misc.c | 1
-rw-r--r--  fs/crypto/keyinfo.c | 1
-rw-r--r--  fs/ecryptfs/crypto.c | 1
-rw-r--r--  fs/ecryptfs/keystore.c | 1
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/f2fs/f2fs.h | 1
-rw-r--r--  fs/nfsd/nfs4recover.c | 1
-rw-r--r--  fs/ubifs/auth.c | 6
-rw-r--r--  fs/ubifs/replay.c | 2
-rw-r--r--  include/crypto/aes.h | 8
-rw-r--r--  include/crypto/akcipher.h | 54
-rw-r--r--  include/crypto/cryptd.h | 18
-rw-r--r--  include/crypto/des.h | 43
-rw-r--r--  include/crypto/hash.h | 10
-rw-r--r--  include/crypto/internal/simd.h | 44
-rw-r--r--  include/crypto/morus1280_glue.h | 79
-rw-r--r--  include/crypto/morus640_glue.h | 79
-rw-r--r--  include/crypto/public_key.h | 4
-rw-r--r--  include/crypto/streebog.h | 5
-rw-r--r--  include/linux/jbd2.h | 1
-rw-r--r--  include/linux/oid_registry.h | 18
-rw-r--r--  include/linux/psp-sev.h | 3
-rw-r--r--  include/uapi/linux/psp-sev.h | 18
-rw-r--r--  kernel/kexec_file.c | 1
-rw-r--r--  lib/crc-t10dif.c | 1
-rw-r--r--  lib/digsig.c | 1
-rw-r--r--  lib/libcrc32c.c | 1
-rw-r--r--  net/bluetooth/amp.c | 1
-rw-r--r--  net/bluetooth/smp.c | 1
-rw-r--r--  net/sctp/auth.c | 1
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 1
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 1
-rw-r--r--  security/apparmor/crypto.c | 2
-rw-r--r--  security/integrity/digsig_asymmetric.c | 11
-rw-r--r--  security/integrity/evm/evm_crypto.c | 1
-rw-r--r--  security/integrity/ima/ima_crypto.c | 4
-rw-r--r--  security/keys/dh.c | 1
-rw-r--r--  security/keys/encrypted-keys/encrypted.c | 1
-rw-r--r--  security/keys/trusted.c | 1
322 files changed, 5973 insertions, 4248 deletions
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 0f6ca8b7261e..f14afaaf2f32 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -133,7 +133,6 @@ Code Example For Use of Operational State Memory With SHASH
         if (!sdesc)
             return ERR_PTR(-ENOMEM);
         sdesc->shash.tfm = alg;
-        sdesc->shash.flags = 0x0;
         return sdesc;
     }
 
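For context, the complete helper after this documentation fix reads roughly as follows. This is a reconstruction from the surrounding example in api-samples.rst (the struct sdesc wrapper and size computation come from that file), not the verbatim text:

struct sdesc {
	struct shash_desc shash;
	char ctx[];
};

static struct sdesc *init_sdesc(struct crypto_shash *alg)
{
	struct sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return ERR_PTR(-ENOMEM);
	sdesc->shash.tfm = alg;	/* no ->flags: shash_desc lost that field */
	return sdesc;
}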
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 07e31941dc67..617c2c99ebfb 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
 
 	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
 
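The two added lines follow a hardening pattern that recurs in this series: skcipher_walk_virt() can fail, and on failure walk.iv must not be dereferenced. A minimal sketch of the shape, with hypothetical helper names:

static int xts_crypt_sketch(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;		/* walk.iv is not valid here */

	encrypt_tweak(walk.iv);		/* hypothetical tweak step */
	return process_walk(&walk);	/* hypothetical bulk processing */
}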
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
index 9d6fda81986d..48a89537b828 100644
--- a/arch/arm/crypto/chacha-neon-glue.c
+++ b/arch/arm/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
 	u32 state[16];
 	u8 real_iv[16];
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	crypto_chacha_init(state, ctx, req->iv);
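crypto_simd_usable(), from the new <crypto/internal/simd.h>, is the theme of most hunks in this commit: it behaves like may_use_simd() but can additionally be forced to return false by the crypto self-tests, so the generic fallback paths actually get exercised. The conversion keeps the familiar shape; a sketch with placeholder function names:

static int example_stream_crypt(struct skcipher_request *req)
{
	/* small requests and no-SIMD contexts take the generic path */
	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
		return generic_chacha_crypt(req);	/* hypothetical */

	kernel_neon_begin();
	/* ... NEON-accelerated processing ... */
	kernel_neon_end();
	return 0;
}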
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index cd9e93b46c2d..e712c2a7d387 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/hwcap.h>
 #include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
 	u32 *crc = shash_desc_ctx(desc);
 	unsigned int l;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		if ((u32)data % SCALE_F) {
 			l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
 
@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
 	u32 *crc = shash_desc_ctx(desc);
 	unsigned int l;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		if ((u32)data % SCALE_F) {
 			l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
 
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
index 3d6b800b8396..3b24f2872592 100644
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull(*crc, data, length);
 		kernel_neon_end();
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index b7d30b6cf49c..39d1ccec1aab 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -14,6 +14,7 @@
 #include <asm/unaligned.h>
 #include <crypto/cryptd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/gf128mul.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -185,7 +186,6 @@ static int ghash_async_init(struct ahash_request *req)
 	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
 	desc->tfm = child;
-	desc->flags = req->base.flags;
 	return crypto_shash_init(desc);
 }
 
@@ -196,7 +196,7 @@ static int ghash_async_update(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +214,7 @@ static int ghash_async_final(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +232,7 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -242,7 +242,6 @@ static int ghash_async_digest(struct ahash_request *req)
 		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
 		desc->tfm = child;
-		desc->flags = req->base.flags;
 		return shash_ahash_digest(req, desc);
 	}
 }
@@ -255,7 +254,6 @@ static int ghash_async_import(struct ahash_request *req, const void *in)
 	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
 
 	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
-	desc->flags = req->base.flags;
 
 	return crypto_shash_import(desc, in);
 }
diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
index 49aae87cb2bc..ae5aefc44a4d 100644
--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
 
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !may_use_simd())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index b732522e20f8..4c6c6900853c 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return sha1_update_arm(desc, data, len);
 
@@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 			 unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha1_finup_arm(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index d15e0ea2c95e..d6c95c213d42 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -19,6 +19,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return sha1_update_arm(desc, data, len);
 
@@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
 static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha1_finup_arm(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index 1211a5c129fc..a47a9d4b663e 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
 		return crypto_sha256_arm_update(desc, data, len);
 
@@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
 			 unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha256_arm_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 1d82c6cd31a4..f3f6b1624fc3 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -15,6 +15,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/cryptohash.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
 		return crypto_sha256_arm_update(desc, data, len);
 
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 static int sha256_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha256_arm_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 8a5642b41fd6..d33ab59c26c0 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/crypto.h>
@@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
 		return sha512_arm_update(desc, data, len);
 
@@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
 static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha512_arm_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 036ea77f83bc..cb89c80800b5 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -14,6 +14,7 @@
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 
@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
 			   u32 abytes, u32 *macp)
 {
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
 				     num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index e6b3227bbf57..3213843fcb46 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
 		return;
 	}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
 		return;
 	}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 1e676625ef33..f0ceb545bd1e 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(ctx, req);
 
 	return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
 {
 	int rounds = 6 + ctx->key_length / 4;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
 			       enc_after);
@@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
+	mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
 
 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
 
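The cbcmac_final() change above is a behavioural fix, not churn. My reading of the surrounding driver (the patch itself does not explain it):

/*
 * cbcmac_update() XORs message bytes into ctx->dg and runs the block
 * cipher each time a full block has been absorbed. At finalization,
 * dg therefore needs one more pass through the cipher only when a
 * partial block is still pending (ctx->len != 0); hard-coding
 * enc_before = 1 encrypted dg once too often in the block-aligned
 * case. The conditional encodes exactly when the extra encryption
 * is needed.
 */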
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index bf1b321ff4c1..02b65d9eb947 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(&ctx->fallback, req);
 
 	return ctr_encrypt(req);
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
 
 	kernel_neon_begin();
 	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index cb054f51c917..82029cda2e77 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
 	u32 state[16];
 	u8 real_iv[16];
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	crypto_chacha_init(state, ctx, req->iv);
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index e81d5bd555c0..2e0a7d2eee24 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p8(*crc, data, length);
 		kernel_neon_end();
@@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p64(*crc, data, length);
 		kernel_neon_end();
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 4e69bb78ea89..b39ed99b06fb 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -17,6 +17,7 @@
 #include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
 						struct ghash_key const *k,
 						const char *head))
 {
-	if (likely(may_use_simd())) {
+	if (likely(crypto_simd_usable())) {
 		kernel_neon_begin();
 		simd_update(blocks, dg, src, key, head);
 		kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;
 
 		kernel_neon_begin();
@@ -473,9 +474,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;
 
 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +488,9 @@ static int gcm_encrypt(struct aead_request *req)
 
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);
 
-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL, pmull_ghash_update_p64);
 
@@ -563,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;
 
 		kernel_neon_begin();
@@ -609,7 +612,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
 
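The new rounding expression in both GCM fallback paths is easy to misread; a worked example (my arithmetic, not part of the patch):

/*
 * blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2 rounds the block
 * count down to an even number. With AES_BLOCK_SIZE == 16 and
 * walk.nbytes == 112 (7 blocks):
 *
 *   112 / 32 = 3,  3 * 2 = 6 blocks handled this iteration,
 *
 * leaving one block plus any tail for the final pass, which the loop
 * condition (walk.nbytes >= 2 * AES_BLOCK_SIZE) requires.
 */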
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index 38a589044b6c..895d3727c1fb 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
 
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !may_use_simd())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 17fac2889f56..eaa7a8258f1c 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_update(desc, data, len);
 
 	sctx->finalize = 0;
@@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);
 
 	/*
@@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, NULL, 0, out);
 
 	sctx->finalize = 0;
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 261f5195cab7..a725997e55f2 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
 
@@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
@@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha256_base_do_finalize(desc,
 				(sha256_block_fn *)sha256_block_data_order);
 		return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 0cccdb9cc2c0..e62298740e31 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -14,6 +14,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cryptohash.h>
@@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
 
@@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index a336feac0f59..9a4bbfc45f40 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -14,6 +14,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha3.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
 	struct sha3_state *sctx = shash_desc_ctx(desc);
 	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_update(desc, data, len);
 
 	if ((sctx->partial + len) >= sctx->rsiz) {
@@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
 	__le64 *digest = (__le64 *)out;
 	int i;
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_final(desc, out);
 
 	sctx->buf[sctx->partial++] = 0x06;
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index f2c5f28c622a..2369540040aa 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -13,6 +13,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/cpufeature.h>
@@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
 
@@ -46,7 +47,7 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
@@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha512_base_do_finalize(desc,
 				(sha512_block_fn *)sha512_block_data_order);
 		return sha512_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 88938a20d9b2..5d15533799a2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sm3.h>
 #include <crypto/sm3_base.h>
 #include <linux/cpufeature.h>
@@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
 static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_update(desc, data, len);
 
 	kernel_neon_begin();
@@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
 
 static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_finup(desc, NULL, 0, out);
 
 	kernel_neon_begin();
@@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 0c4fc223f225..2754c875d39c 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -3,6 +3,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		crypto_sm4_encrypt(tfm, out, in);
 	} else {
 		kernel_neon_begin();
@@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		crypto_sm4_decrypt(tfm, out, in);
 	} else {
 		kernel_neon_begin();
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index fd1d6c83f0c0..c4fa242dd652 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -1,10 +1,12 @@
 #include <linux/crc32.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/cpufeature.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 
 #define CHKSUM_BLOCK_SIZE	1
@@ -22,7 +24,7 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
 	unsigned int prealign;
 	unsigned int tail;
 
-	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
 		return __crc32c_le(crc, p, len);
 
 	if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
index 02ea277863d1..e27ff16573b5 100644
--- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
@@ -12,11 +12,13 @@
 
 #include <linux/crc-t10dif.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/cpufeature.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 
 #define VMX_ALIGN		16
@@ -32,7 +34,7 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
 	unsigned int tail;
 	u32 crc = crci;
 
-	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
 		return crc_t10dif_generic(crc, p, len);
 
 	if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 36bda391e549..b9f6e72bf4e5 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -10,3 +10,4 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += vtime.h
 generic-y += msi.h
+generic-y += simd.h
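powerpc previously had no <asm/simd.h>; this Kbuild line pulls in the asm-generic version so the two vpmsum drivers above can use crypto_simd_usable(). The generic header is tiny; paraphrased:

/* include/asm-generic/simd.h, paraphrased: with no architecture-
 * specific SIMD state to manage, "usable" means process context. */
static __must_check inline bool may_use_simd(void)
{
	return !in_interrupt();
}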
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0d15383d0ff1..1f9ab24dc048 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -224,24 +224,11 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
 		       unsigned int key_len)
 {
 	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err;
 
-	if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
-	    crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-			  DES_KEY_SIZE)) &&
-	    (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
-
-	/* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
-	if (fips_enabled &&
-	    !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
-	      crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
-			    DES_KEY_SIZE) &&
-	      crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
+	err = __des3_verify_key(&tfm->crt_flags, key);
+	if (unlikely(err))
+		return err;
 
 	memcpy(ctx->key, key, key_len);
 	return 0;
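This hunk and the sparc one below replace open-coded triple-DES weak-key checks with the new __des3_verify_key() helper from <crypto/des.h>. Its logic, paraphrased in a sketch (the real helper also copies the key to a local buffer to avoid alignment issues and wipes it afterwards):

static inline int des3_verify_key_sketch(u32 *flags, const u8 *key)
{
	const u32 *K = (const u32 *)key;

	/* K1 == K2 or K2 == K3 degrades 3DES to single DES */
	if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
	     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
	    (fips_enabled || (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
		goto weak;

	/* in FIPS mode, additionally require K1 != K3 */
	if (fips_enabled && !((K[0] ^ K[4]) | (K[1] ^ K[5])))
		goto weak;

	return 0;

weak:
	*flags |= CRYPTO_TFM_RES_WEAK_KEY;
	return -EINVAL;
}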
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 4884315daff4..453a4cf5492a 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -201,18 +201,15 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
 			    unsigned int keylen)
 {
 	struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
-	const u32 *K = (const u32 *)key;
 	u32 *flags = &tfm->crt_flags;
 	u64 k1[DES_EXPKEY_WORDS / 2];
 	u64 k2[DES_EXPKEY_WORDS / 2];
 	u64 k3[DES_EXPKEY_WORDS / 2];
+	int err;
 
-	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-		     (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
+	err = __des3_verify_key(flags, key);
+	if (unlikely(err))
+		return err;
 
 	des_sparc64_key_expand((const u32 *)key, k1);
 	key += DES_KEY_SIZE;
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 3ea71b871813..bdeee1b830be 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -11,8 +11,8 @@
  * any later version.
  */
 
-#include <crypto/cryptd.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
 {
 }
 
-static int cryptd_aegis128_aesni_setkey(struct crypto_aead *aead,
-					const u8 *key, unsigned int keylen)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis128_aesni_setauthsize(struct crypto_aead *aead,
-					     unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis128_aesni_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis128_aesni_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis128_aesni_init_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__aegis128-aesni", CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-
-static void cryptd_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis128_aesni_alg[] = {
-	{
-		.setkey = crypto_aegis128_aesni_setkey,
-		.setauthsize = crypto_aegis128_aesni_setauthsize,
-		.encrypt = crypto_aegis128_aesni_encrypt,
-		.decrypt = crypto_aegis128_aesni_decrypt,
-		.init = crypto_aegis128_aesni_init_tfm,
-		.exit = crypto_aegis128_aesni_exit_tfm,
-
-		.ivsize = AEGIS128_NONCE_SIZE,
-		.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
-		.chunksize = AEGIS128_BLOCK_SIZE,
-
-		.base = {
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = 1,
-			.cra_ctxsize = sizeof(struct aegis_ctx) +
-				__alignof__(struct aegis_ctx),
-			.cra_alignmask = 0,
-
-			.cra_name = "__aegis128",
-			.cra_driver_name = "__aegis128-aesni",
-
-			.cra_module = THIS_MODULE,
-		}
-	}, {
-		.setkey = cryptd_aegis128_aesni_setkey,
-		.setauthsize = cryptd_aegis128_aesni_setauthsize,
-		.encrypt = cryptd_aegis128_aesni_encrypt,
-		.decrypt = cryptd_aegis128_aesni_decrypt,
-		.init = cryptd_aegis128_aesni_init_tfm,
-		.exit = cryptd_aegis128_aesni_exit_tfm,
-
-		.ivsize = AEGIS128_NONCE_SIZE,
-		.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
-		.chunksize = AEGIS128_BLOCK_SIZE,
-
-		.base = {
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = 1,
-			.cra_ctxsize = sizeof(struct cryptd_aead *),
-			.cra_alignmask = 0,
-
-			.cra_priority = 400,
-
-			.cra_name = "aegis128",
-			.cra_driver_name = "aegis128-aesni",
-
-			.cra_module = THIS_MODULE,
-		}
+static struct aead_alg crypto_aegis128_aesni_alg = {
+	.setkey = crypto_aegis128_aesni_setkey,
+	.setauthsize = crypto_aegis128_aesni_setauthsize,
+	.encrypt = crypto_aegis128_aesni_encrypt,
+	.decrypt = crypto_aegis128_aesni_decrypt,
+	.init = crypto_aegis128_aesni_init_tfm,
+	.exit = crypto_aegis128_aesni_exit_tfm,
+
+	.ivsize = AEGIS128_NONCE_SIZE,
+	.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
+	.chunksize = AEGIS128_BLOCK_SIZE,
+
+	.base = {
+		.cra_flags = CRYPTO_ALG_INTERNAL,
+		.cra_blocksize = 1,
+		.cra_ctxsize = sizeof(struct aegis_ctx) +
+			       __alignof__(struct aegis_ctx),
+		.cra_alignmask = 0,
+		.cra_priority = 400,
+
+		.cra_name = "__aegis128",
+		.cra_driver_name = "__aegis128-aesni",
+
+		.cra_module = THIS_MODULE,
 	}
 };
 
+static struct simd_aead_alg *simd_alg;
+
 static int __init crypto_aegis128_aesni_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128_aesni_module_init(void)
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
-	return crypto_register_aeads(crypto_aegis128_aesni_alg,
-				     ARRAY_SIZE(crypto_aegis128_aesni_alg));
+	return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
+					  &simd_alg);
 }
 
 static void __exit crypto_aegis128_aesni_module_exit(void)
 {
-	crypto_unregister_aeads(crypto_aegis128_aesni_alg,
-				ARRAY_SIZE(crypto_aegis128_aesni_alg));
+	simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
 }
 
 module_init(crypto_aegis128_aesni_module_init);
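All of the deleted cryptd boilerplate above now lives once in crypto/simd.c: simd_register_aeads_compat() registers the internal algorithm and creates a wrapper exposing the public name. The wrapper's encrypt path looks roughly like this (paraphrased from crypto/simd.c in this series; struct simd_aead_ctx is internal to that file):

static int simd_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *child;

	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;	/* defer via cryptd */
	else
		child = cryptd_aead_child(ctx->cryptd_tfm); /* direct call */

	aead_request_set_tfm(subreq, child);
	return crypto_aead_encrypt(subreq);
}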
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 1b1b39c66c5e..80d917f7e467 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -11,8 +11,8 @@
  * any later version.
  */
 
-#include <crypto/cryptd.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
 {
 }
 
-static int cryptd_aegis128l_aesni_setkey(struct crypto_aead *aead,
-					 const u8 *key, unsigned int keylen)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis128l_aesni_setauthsize(struct crypto_aead *aead,
-					      unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis128l_aesni_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis128l_aesni_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis128l_aesni_init_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__aegis128l-aesni", CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-
-static void cryptd_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis128l_aesni_alg[] = {
-	{
-		.setkey = crypto_aegis128l_aesni_setkey,
-		.setauthsize = crypto_aegis128l_aesni_setauthsize,
-		.encrypt = crypto_aegis128l_aesni_encrypt,
-		.decrypt = crypto_aegis128l_aesni_decrypt,
-		.init = crypto_aegis128l_aesni_init_tfm,
-		.exit = crypto_aegis128l_aesni_exit_tfm,
-
-		.ivsize = AEGIS128L_NONCE_SIZE,
-		.maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
-		.chunksize = AEGIS128L_BLOCK_SIZE,
-
-		.base = {
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = 1,
-			.cra_ctxsize = sizeof(struct aegis_ctx) +
-				__alignof__(struct aegis_ctx),
-			.cra_alignmask = 0,
-
-			.cra_name = "__aegis128l",
-			.cra_driver_name = "__aegis128l-aesni",
-
-			.cra_module = THIS_MODULE,
-		}
-	}, {
-		.setkey = cryptd_aegis128l_aesni_setkey,
-		.setauthsize = cryptd_aegis128l_aesni_setauthsize,
-		.encrypt = cryptd_aegis128l_aesni_encrypt,
-		.decrypt = cryptd_aegis128l_aesni_decrypt,
-		.init = cryptd_aegis128l_aesni_init_tfm,
-		.exit = cryptd_aegis128l_aesni_exit_tfm,
-
-		.ivsize = AEGIS128L_NONCE_SIZE,
-		.maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
-		.chunksize = AEGIS128L_BLOCK_SIZE,
-
-		.base = {
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = 1,
-			.cra_ctxsize = sizeof(struct cryptd_aead *),
-			.cra_alignmask = 0,
-
-			.cra_priority = 400,
-
-			.cra_name = "aegis128l",
-			.cra_driver_name = "aegis128l-aesni",
-
-			.cra_module = THIS_MODULE,
-		}
+static struct aead_alg crypto_aegis128l_aesni_alg = {
+	.setkey = crypto_aegis128l_aesni_setkey,
+	.setauthsize = crypto_aegis128l_aesni_setauthsize,
+	.encrypt = crypto_aegis128l_aesni_encrypt,
+	.decrypt = crypto_aegis128l_aesni_decrypt,
+	.init = crypto_aegis128l_aesni_init_tfm,
+	.exit = crypto_aegis128l_aesni_exit_tfm,
+
+	.ivsize = AEGIS128L_NONCE_SIZE,
+	.maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
+	.chunksize = AEGIS128L_BLOCK_SIZE,
+
+	.base = {
+		.cra_flags = CRYPTO_ALG_INTERNAL,
+		.cra_blocksize = 1,
+		.cra_ctxsize = sizeof(struct aegis_ctx) +
+			       __alignof__(struct aegis_ctx),
+		.cra_alignmask = 0,
+		.cra_priority = 400,
+
+		.cra_name = "__aegis128l",
+		.cra_driver_name = "__aegis128l-aesni",
+
+		.cra_module = THIS_MODULE,
 	}
 };
 
+static struct simd_aead_alg *simd_alg;
+
 static int __init crypto_aegis128l_aesni_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128l_aesni_module_init(void)
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
-	return crypto_register_aeads(crypto_aegis128l_aesni_alg,
-				     ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+	return simd_register_aeads_compat(&crypto_aegis128l_aesni_alg, 1,
+					  &simd_alg);
 }
 
 static void __exit crypto_aegis128l_aesni_module_exit(void)
 {
-	crypto_unregister_aeads(crypto_aegis128l_aesni_alg,
-				ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+	simd_unregister_aeads(&crypto_aegis128l_aesni_alg, 1, &simd_alg);
 }
 
 module_init(crypto_aegis128l_aesni_module_init);
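The same conversion repeats for each AEGIS/MORUS glue module in this patch: the hand-written cryptd wrappers go away, only the CRYPTO_ALG_INTERNAL algorithm remains, and the outer async wrapper is generated by the new simd helpers. A minimal sketch of the resulting module shape (all names here are illustrative placeholders, not from this patch):

	#include <crypto/internal/aead.h>
	#include <crypto/internal/simd.h>
	#include <linux/module.h>

	static struct aead_alg example_internal_alg = {
		/* .setkey, .setauthsize, .encrypt, .decrypt, .init, .exit,
		 * .ivsize, .maxauthsize as usual */
		.base = {
			.cra_name		= "__example",
			.cra_driver_name	= "__example-driver",
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_priority		= 400,
			.cra_blocksize		= 1,
			.cra_module		= THIS_MODULE,
		},
	};

	static struct simd_aead_alg *example_simd;

	static int __init example_module_init(void)
	{
		/* Registers the internal alg plus an "example" wrapper that
		 * runs the SIMD code when crypto_simd_usable() and defers to
		 * cryptd otherwise. */
		return simd_register_aeads_compat(&example_internal_alg, 1,
						  &example_simd);
	}

	static void __exit example_module_exit(void)
	{
		simd_unregister_aeads(&example_internal_alg, 1, &example_simd);
	}

	module_init(example_module_init);
	module_exit(example_module_exit);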
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 6227ca3220a0..716eecb66bd5 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -11,8 +11,8 @@
  * any later version.
  */
 
-#include <crypto/cryptd.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
 {
 }
 
-static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead,
-					const u8 *key, unsigned int keylen)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead,
-					     unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis256_aesni_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis256_aesni_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis256_aesni_init_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__aegis256-aesni", CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-
-static void cryptd_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis256_aesni_alg[] = {
-	{
-		.setkey = crypto_aegis256_aesni_setkey,
-		.setauthsize = crypto_aegis256_aesni_setauthsize,
-		.encrypt = crypto_aegis256_aesni_encrypt,
-		.decrypt = crypto_aegis256_aesni_decrypt,
-		.init = crypto_aegis256_aesni_init_tfm,
-		.exit = crypto_aegis256_aesni_exit_tfm,
-
-		.ivsize = AEGIS256_NONCE_SIZE,
-		.maxauthsize = AEGIS256_MAX_AUTH_SIZE,
-		.chunksize = AEGIS256_BLOCK_SIZE,
-
-		.base = {
-			.cra_flags = CRYPTO_ALG_INTERNAL,
-			.cra_blocksize = 1,
-			.cra_ctxsize = sizeof(struct aegis_ctx) +
-				__alignof__(struct aegis_ctx),
-			.cra_alignmask = 0,
-
-			.cra_name = "__aegis256",
-			.cra_driver_name = "__aegis256-aesni",
-
-			.cra_module = THIS_MODULE,
-		}
-	}, {
-		.setkey = cryptd_aegis256_aesni_setkey,
-		.setauthsize = cryptd_aegis256_aesni_setauthsize,
-		.encrypt = cryptd_aegis256_aesni_encrypt,
-		.decrypt = cryptd_aegis256_aesni_decrypt,
-		.init = cryptd_aegis256_aesni_init_tfm,
-		.exit = cryptd_aegis256_aesni_exit_tfm,
-
-		.ivsize = AEGIS256_NONCE_SIZE,
-		.maxauthsize = AEGIS256_MAX_AUTH_SIZE,
-		.chunksize = AEGIS256_BLOCK_SIZE,
-
-		.base = {
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = 1,
-			.cra_ctxsize = sizeof(struct cryptd_aead *),
-			.cra_alignmask = 0,
-
-			.cra_priority = 400,
-
-			.cra_name = "aegis256",
-			.cra_driver_name = "aegis256-aesni",
-
-			.cra_module = THIS_MODULE,
-		}
+static struct aead_alg crypto_aegis256_aesni_alg = {
+	.setkey = crypto_aegis256_aesni_setkey,
+	.setauthsize = crypto_aegis256_aesni_setauthsize,
+	.encrypt = crypto_aegis256_aesni_encrypt,
+	.decrypt = crypto_aegis256_aesni_decrypt,
+	.init = crypto_aegis256_aesni_init_tfm,
+	.exit = crypto_aegis256_aesni_exit_tfm,
+
+	.ivsize = AEGIS256_NONCE_SIZE,
+	.maxauthsize = AEGIS256_MAX_AUTH_SIZE,
+	.chunksize = AEGIS256_BLOCK_SIZE,
+
+	.base = {
+		.cra_flags = CRYPTO_ALG_INTERNAL,
+		.cra_blocksize = 1,
+		.cra_ctxsize = sizeof(struct aegis_ctx) +
+			       __alignof__(struct aegis_ctx),
+		.cra_alignmask = 0,
+		.cra_priority = 400,
+
+		.cra_name = "__aegis256",
+		.cra_driver_name = "__aegis256-aesni",
+
+		.cra_module = THIS_MODULE,
 	}
 };
 
+static struct simd_aead_alg *simd_alg;
+
 static int __init crypto_aegis256_aesni_module_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis256_aesni_module_init(void)
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
-	return crypto_register_aeads(crypto_aegis256_aesni_alg,
-				    ARRAY_SIZE(crypto_aegis256_aesni_alg));
+	return simd_register_aeads_compat(&crypto_aegis256_aesni_alg, 1,
+					  &simd_alg);
 }
 
 static void __exit crypto_aegis256_aesni_module_exit(void)
 {
-	crypto_unregister_aeads(crypto_aegis256_aesni_alg,
-				ARRAY_SIZE(crypto_aegis256_aesni_alg));
+	simd_unregister_aeads(&crypto_aegis256_aesni_alg, 1, &simd_alg);
 }
 
 module_init(crypto_aegis256_aesni_module_init);
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1e3d2102033a..21c246799aa5 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -25,14 +25,13 @@
 #include <linux/err.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
-#include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
 #include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
 #include <asm/crypto/aes.h>
+#include <asm/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/simd.h>
@@ -333,7 +332,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -354,7 +353,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -367,7 +366,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -643,29 +642,6 @@ static int xts_decrypt(struct skcipher_request *req)
 				   aes_ctx(ctx->raw_crypt_ctx));
 }
 
-static int rfc4106_init(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
-				       CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-
-static void rfc4106_exit(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-
 static int
 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
 {
@@ -710,15 +686,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 }
 
-static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
-				  unsigned int key_len)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
-}
-
+/* This is the Integrity Check Value (aka the authentication tag) length and can
+ * be 8, 12 or 16 bytes long. */
 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 				       unsigned int authsize)
 {
@@ -734,17 +703,6 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
 	return 0;
 }
 
-/* This is the Integrity Check Value (aka the authentication tag length and can
- * be 8, 12 or 16 bytes long. */
-static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
-				       unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 				       unsigned int authsize)
 {
@@ -964,38 +922,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
 			      aes_ctx);
 }
-
-static int gcmaes_wrapper_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	tfm = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		tfm = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, tfm);
-
-	return crypto_aead_encrypt(req);
-}
-
-static int gcmaes_wrapper_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	tfm = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		tfm = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, tfm);
-
-	return crypto_aead_decrypt(req);
-}
 #endif
 
 static struct crypto_alg aesni_algs[] = { {
@@ -1148,31 +1074,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
 			      aes_ctx);
 }
 
-static int generic_gcmaes_init(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
-				       CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-
-	return 0;
-}
-
-static void generic_gcmaes_exit(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg aesni_aead_algs[] = { {
+static struct aead_alg aesni_aeads[] = { {
 	.setkey			= common_rfc4106_set_key,
 	.setauthsize		= common_rfc4106_set_authsize,
 	.encrypt		= helper_rfc4106_encrypt,
@@ -1180,8 +1082,9 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.ivsize			= GCM_RFC4106_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
-		.cra_name		= "__gcm-aes-aesni",
-		.cra_driver_name	= "__driver-gcm-aes-aesni",
+		.cra_name		= "__rfc4106(gcm(aes))",
+		.cra_driver_name	= "__rfc4106-gcm-aesni",
+		.cra_priority		= 400,
 		.cra_flags		= CRYPTO_ALG_INTERNAL,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
@@ -1189,24 +1092,6 @@ static struct aead_alg aesni_aead_algs[] = { {
 		.cra_module		= THIS_MODULE,
 	},
 }, {
-	.init			= rfc4106_init,
-	.exit			= rfc4106_exit,
-	.setkey			= gcmaes_wrapper_set_key,
-	.setauthsize		= gcmaes_wrapper_set_authsize,
-	.encrypt		= gcmaes_wrapper_encrypt,
-	.decrypt		= gcmaes_wrapper_decrypt,
-	.ivsize			= GCM_RFC4106_IV_SIZE,
-	.maxauthsize		= 16,
-	.base = {
-		.cra_name		= "rfc4106(gcm(aes))",
-		.cra_driver_name	= "rfc4106-gcm-aesni",
-		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_ASYNC,
-		.cra_blocksize		= 1,
-		.cra_ctxsize		= sizeof(struct cryptd_aead *),
-		.cra_module		= THIS_MODULE,
-	},
-}, {
 	.setkey			= generic_gcmaes_set_key,
 	.setauthsize		= generic_gcmaes_set_authsize,
 	.encrypt		= generic_gcmaes_encrypt,
@@ -1214,38 +1099,21 @@ static struct aead_alg aesni_aead_algs[] = { {
 	.ivsize			= GCM_AES_IV_SIZE,
 	.maxauthsize		= 16,
 	.base = {
-		.cra_name		= "__generic-gcm-aes-aesni",
-		.cra_driver_name	= "__driver-generic-gcm-aes-aesni",
-		.cra_priority		= 0,
+		.cra_name		= "__gcm(aes)",
+		.cra_driver_name	= "__generic-gcm-aesni",
+		.cra_priority		= 400,
 		.cra_flags		= CRYPTO_ALG_INTERNAL,
 		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
 		.cra_alignmask		= AESNI_ALIGN - 1,
 		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.init			= generic_gcmaes_init,
-	.exit			= generic_gcmaes_exit,
-	.setkey			= gcmaes_wrapper_set_key,
-	.setauthsize		= gcmaes_wrapper_set_authsize,
-	.encrypt		= gcmaes_wrapper_encrypt,
-	.decrypt		= gcmaes_wrapper_decrypt,
-	.ivsize			= GCM_AES_IV_SIZE,
-	.maxauthsize		= 16,
-	.base = {
-		.cra_name		= "gcm(aes)",
-		.cra_driver_name	= "generic-gcm-aesni",
-		.cra_priority		= 400,
-		.cra_flags		= CRYPTO_ALG_ASYNC,
-		.cra_blocksize		= 1,
-		.cra_ctxsize		= sizeof(struct cryptd_aead *),
-		.cra_module		= THIS_MODULE,
-	},
 } };
 #else
-static struct aead_alg aesni_aead_algs[0];
+static struct aead_alg aesni_aeads[0];
 #endif
 
+static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
 
 static const struct x86_cpu_id aesni_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_AES),
@@ -1253,23 +1121,9 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
 
-static void aesni_free_simds(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
-		    aesni_simd_skciphers[i]; i++)
-		simd_skcipher_free(aesni_simd_skciphers[i]);
-}
-
 static int __init aesni_init(void)
 {
-	struct simd_skcipher_alg *simd;
-	const char *basename;
-	const char *algname;
-	const char *drvname;
 	int err;
-	int i;
 
 	if (!x86_match_cpu(aesni_cpu_id))
 		return -ENODEV;
@@ -1304,36 +1158,22 @@ static int __init aesni_init(void)
 	if (err)
 		return err;
 
-	err = crypto_register_skciphers(aesni_skciphers,
-					ARRAY_SIZE(aesni_skciphers));
+	err = simd_register_skciphers_compat(aesni_skciphers,
+					     ARRAY_SIZE(aesni_skciphers),
+					     aesni_simd_skciphers);
 	if (err)
 		goto unregister_algs;
 
-	err = crypto_register_aeads(aesni_aead_algs,
-				    ARRAY_SIZE(aesni_aead_algs));
+	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+					 aesni_simd_aeads);
 	if (err)
 		goto unregister_skciphers;
 
-	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
-		algname = aesni_skciphers[i].base.cra_name + 2;
-		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
-		basename = aesni_skciphers[i].base.cra_driver_name;
-		simd = simd_skcipher_create_compat(algname, drvname, basename);
-		err = PTR_ERR(simd);
-		if (IS_ERR(simd))
-			goto unregister_simds;
-
-		aesni_simd_skciphers[i] = simd;
-	}
-
 	return 0;
 
-unregister_simds:
-	aesni_free_simds();
-	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
 unregister_skciphers:
-	crypto_unregister_skciphers(aesni_skciphers,
-				    ARRAY_SIZE(aesni_skciphers));
+	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+				  aesni_simd_skciphers);
 unregister_algs:
 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 	return err;
@@ -1341,10 +1181,10 @@ unregister_algs:
 
 static void __exit aesni_exit(void)
 {
-	aesni_free_simds();
-	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
-	crypto_unregister_skciphers(aesni_skciphers,
-				    ARRAY_SIZE(aesni_skciphers));
+	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+			      aesni_simd_aeads);
+	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+				  aesni_simd_skciphers);
 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 }
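From here on, the irq_fpu_usable() checks in the x86 glue code become crypto_simd_usable(); the guard pattern itself is unchanged. (crypto_simd_usable() is defined elsewhere in this series; it is understood to be may_use_simd() plus a hook that lets the self-tests force the no-SIMD fallback path.) Generic shape of the guard, with placeholder function names:

	#include <crypto/internal/simd.h>
	#include <asm/fpu/api.h>
	#include <asm/simd.h>

	void generic_transform(u8 *dst, const u8 *src, unsigned int len);
	void simd_transform(u8 *dst, const u8 *src, unsigned int len);

	static void do_transform(u8 *dst, const u8 *src, unsigned int len)
	{
		if (!crypto_simd_usable()) {
			generic_transform(dst, src, len);  /* C fallback */
			return;
		}

		kernel_fpu_begin();
		simd_transform(dst, src, len);  /* SSE/AVX implementation */
		kernel_fpu_end();
	}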
 
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 45c1c4143176..4967ad620775 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -12,10 +12,10 @@
 
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
 #include <asm/simd.h>
 
 #define CHACHA_STATE_ALIGN 16
@@ -170,7 +170,7 @@ static int chacha_simd(struct skcipher_request *req)
 	struct skcipher_walk walk;
 	int err;
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	err = skcipher_walk_virt(&walk, req, true);
@@ -193,7 +193,7 @@ static int xchacha_simd(struct skcipher_request *req)
 	u8 real_iv[16];
 	int err;
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	err = skcipher_walk_virt(&walk, req, true);
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index c8d9cdacbf10..cb4ab6645106 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -32,10 +32,11 @@
 #include <linux/kernel.h>
 #include <linux/crc32.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
@@ -54,7 +55,7 @@ static u32 __attribute__((pure))
 	unsigned int iremainder;
 	unsigned int prealign;
 
-	if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
+	if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
 		return crc32_le(crc, p, len);
 
 	if ((long)p & SCALE_F_MASK) {
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 5773e1161072..a58fe217c856 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -29,10 +29,11 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/internal.h>
+#include <asm/simd.h>
 
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
@@ -177,7 +178,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
 	 * use faster PCL version if datasize is large enough to
 	 * overcome kernel fpu state save/restore overhead
 	 */
-	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		*crcp = crc_pcl(data, len, *crcp);
 		kernel_fpu_end();
@@ -189,7 +190,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
 static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
 				u8 *out)
 {
-	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
 		kernel_fpu_end();
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 0e785c0b2354..3c81e15b0873 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -26,12 +26,13 @@
 #include <linux/module.h>
 #include <linux/crc-t10dif.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
-#include <asm/fpu/api.h>
 #include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
+#include <asm/simd.h>
 
 asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
 
@@ -53,7 +54,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	if (length >= 16 && irq_fpu_usable()) {
+	if (length >= 16 && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
 		kernel_fpu_end();
@@ -70,15 +71,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	if (len >= 16 && irq_fpu_usable()) {
+	if (len >= 16 && crypto_simd_usable()) {
 		kernel_fpu_begin();
-		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
 		kernel_fpu_end();
 	} else
-		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -87,15 +87,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
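Passing the CRC by value also fixes chksum_digest(): a one-shot ->digest() call never goes through ->init(), so the old code read an uninitialized ctx->crc from the descriptor context, whereas the new code passes the initial value explicitly. Starting from 0 is correct because init for this shash just zeroes the running CRC; the (unchanged) init function from this file, for reference:

	static int chksum_init(struct shash_desc *desc)
	{
		struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

		ctx->crc = 0;
		return 0;
	}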
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 3582ae885ee1..e3f3e6fd9d65 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -19,8 +19,9 @@
 #include <crypto/cryptd.h>
 #include <crypto/gf128mul.h>
 #include <crypto/internal/hash.h>
-#include <asm/fpu/api.h>
+#include <crypto/internal/simd.h>
 #include <asm/cpu_device_id.h>
+#include <asm/simd.h>
 
 #define GHASH_BLOCK_SIZE	16
 #define GHASH_DIGEST_SIZE	16
@@ -171,7 +172,6 @@ static int ghash_async_init(struct ahash_request *req)
 	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
 	desc->tfm = child;
-	desc->flags = req->base.flags;
 	return crypto_shash_init(desc);
 }
 
@@ -182,7 +182,7 @@ static int ghash_async_update(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -200,7 +200,7 @@ static int ghash_async_final(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -241,7 +241,7 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -251,7 +251,6 @@ static int ghash_async_digest(struct ahash_request *req)
 		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
 		desc->tfm = child;
-		desc->flags = req->base.flags;
 		return shash_ahash_digest(req, desc);
 	}
 }
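The dropped desc->flags assignments (here, and in hibernate.c, adiantum.c and pkcs7_verify.c further down) go with the removal of the flags field from struct shash_desc elsewhere in this series; a synchronous hash invocation now only needs the tfm pointer. A sketch of the resulting one-shot usage, assuming <crypto/hash.h>:

	#include <crypto/hash.h>

	static int one_shot_digest(struct crypto_shash *tfm, const u8 *data,
				   unsigned int len, u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;	/* no desc->flags to set anymore */
		return crypto_shash_digest(desc, data, len, out);
	}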
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index 6634907d6ccd..679627a2a824 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -12,6 +12,7 @@
  */
 
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/morus1280_glue.h>
 #include <linux/module.h>
 #include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src,
 asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
 					    u64 assoclen, u64 cryptlen);
 
-MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
+MORUS1280_DECLARE_ALG(avx2, "morus1280-avx2", 400);
+
+static struct simd_aead_alg *simd_alg;
 
 static int __init crypto_morus1280_avx2_module_init(void)
 {
@@ -44,14 +47,13 @@ static int __init crypto_morus1280_avx2_module_init(void)
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
 		return -ENODEV;
 
-	return crypto_register_aeads(crypto_morus1280_avx2_algs,
-				     ARRAY_SIZE(crypto_morus1280_avx2_algs));
+	return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
+					  &simd_alg);
 }
 
 static void __exit crypto_morus1280_avx2_module_exit(void)
 {
-	crypto_unregister_aeads(crypto_morus1280_avx2_algs,
-				ARRAY_SIZE(crypto_morus1280_avx2_algs));
+	simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &simd_alg);
 }
 
 module_init(crypto_morus1280_avx2_module_init);
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index f40244eaf14d..c35c0638d0bb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -12,6 +12,7 @@
  */
 
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/morus1280_glue.h>
 #include <linux/module.h>
 #include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src,
 asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
 					    u64 assoclen, u64 cryptlen);
 
-MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
+MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);
+
+static struct simd_aead_alg *simd_alg;
 
 static int __init crypto_morus1280_sse2_module_init(void)
 {
@@ -43,14 +46,13 @@ static int __init crypto_morus1280_sse2_module_init(void)
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
-	return crypto_register_aeads(crypto_morus1280_sse2_algs,
-				     ARRAY_SIZE(crypto_morus1280_sse2_algs));
+	return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
+					  &simd_alg);
 }
 
 static void __exit crypto_morus1280_sse2_module_exit(void)
 {
-	crypto_unregister_aeads(crypto_morus1280_sse2_algs,
-				ARRAY_SIZE(crypto_morus1280_sse2_algs));
+	simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
 }
 
 module_init(crypto_morus1280_sse2_module_init);
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 7e600f8bcdad..30fc1bd98ec3 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -11,7 +11,6 @@
  * any later version.
  */
 
-#include <crypto/cryptd.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/morus1280_glue.h>
@@ -205,90 +204,6 @@ void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
 }
 EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
 
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int keylen)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);
-
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
-				      unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);
-
-int cryptd_morus1280_glue_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_encrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);
-
-int cryptd_morus1280_glue_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_decrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);
-
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
-	char internal_name[CRYPTO_MAX_ALG_NAME];
-
-	if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
-			>= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);
-
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
 MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 9afaf8f8565a..32da56b3bdad 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -12,6 +12,7 @@
  */
 
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/morus640_glue.h>
 #include <linux/module.h>
 #include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src,
 asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
 					   u64 assoclen, u64 cryptlen);
 
-MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
+MORUS640_DECLARE_ALG(sse2, "morus640-sse2", 400);
+
+static struct simd_aead_alg *simd_alg;
 
 static int __init crypto_morus640_sse2_module_init(void)
 {
@@ -43,14 +46,13 @@ static int __init crypto_morus640_sse2_module_init(void)
 	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
 		return -ENODEV;
 
-	return crypto_register_aeads(crypto_morus640_sse2_algs,
-				     ARRAY_SIZE(crypto_morus640_sse2_algs));
+	return simd_register_aeads_compat(&crypto_morus640_sse2_alg, 1,
+					  &simd_alg);
 }
 
 static void __exit crypto_morus640_sse2_module_exit(void)
 {
-	crypto_unregister_aeads(crypto_morus640_sse2_algs,
-				ARRAY_SIZE(crypto_morus640_sse2_algs));
+	simd_unregister_aeads(&crypto_morus640_sse2_alg, 1, &simd_alg);
 }
 
 module_init(crypto_morus640_sse2_module_init);
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index cb3a81732016..1dea33d84426 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -11,7 +11,6 @@
  * any later version.
  */
 
-#include <crypto/cryptd.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/morus640_glue.h>
@@ -200,90 +199,6 @@ void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
 }
 EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
 
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
-				unsigned int keylen)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);
-
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
-				     unsigned int authsize)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);
-
-int cryptd_morus640_glue_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_encrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);
-
-int cryptd_morus640_glue_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	struct cryptd_aead *cryptd_tfm = *ctx;
-
-	aead = &cryptd_tfm->base;
-	if (irq_fpu_usable() && (!in_atomic() ||
-				 !cryptd_aead_queued(cryptd_tfm)))
-		aead = cryptd_aead_child(cryptd_tfm);
-
-	aead_request_set_tfm(req, aead);
-
-	return crypto_aead_decrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);
-
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead *cryptd_tfm;
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-	const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
-	char internal_name[CRYPTO_MAX_ALG_NAME];
-
-	if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
-			>= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
-				       CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(cryptd_tfm))
-		return PTR_ERR(cryptd_tfm);
-
-	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);
-
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
-{
-	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
-	cryptd_free_aead(*ctx);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
 MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index 20d815ea4b6a..f7567cbd35b6 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -7,9 +7,10 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
 			u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_avx2_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !irq_fpu_usable())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
index ed68d164ce14..a661ede3b5cf 100644
--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -7,9 +7,10 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
 			u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_sse2_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !irq_fpu_usable())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 88cc01506c84..6eb65b237b3c 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -11,11 +11,11 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/poly1305.h>
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
 #include <asm/simd.h>
 
 struct poly1305_simd_desc_ctx {
@@ -126,7 +126,7 @@ static int poly1305_simd_update(struct shash_desc *desc,
 	unsigned int bytes;
 
 	/* kernel_fpu_begin/end is costly, use fallback for small updates */
-	if (srclen <= 288 || !may_use_simd())
+	if (srclen <= 288 || !crypto_simd_usable())
 		return crypto_poly1305_update(desc, src, srclen);
 
 	kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 7391c7de72c7..42f177afc33a 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -22,6 +22,7 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -29,7 +30,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 typedef void (sha1_transform_fn)(u32 *digest, const char *data,
 				unsigned int rounds);
@@ -39,7 +40,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return crypto_sha1_update(desc, data, len);
 
@@ -57,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
 static int sha1_finup(struct shash_desc *desc, const u8 *data,
 		      unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
 {
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);
 
 	kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 773a873d2b28..73867da3cbee 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -30,6 +30,7 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -37,8 +38,8 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
-#include <asm/fpu/api.h>
 #include <linux/string.h>
+#include <asm/simd.h>
 
 asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
 				       u64 rounds);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
 		return crypto_sha256_update(desc, data, len);
 
@@ -67,7 +68,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 static int sha256_finup(struct shash_desc *desc, const u8 *data,
 	      unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
 {
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		return crypto_sha256_finup(desc, data, len, out);
 
 	kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f1b811b60ba6..458356a3f124 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -28,16 +28,16 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/cryptohash.h>
+#include <linux/string.h>
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
-#include <asm/fpu/api.h>
-
-#include <linux/string.h>
+#include <asm/simd.h>
 
 asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
 				       u64 rounds);
@@ -49,7 +49,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
 		return crypto_sha512_update(desc, data, len);
 
@@ -67,7 +67,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
 	      unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
 {
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		return crypto_sha512_finup(desc, data, len, out);
 
 	kernel_fpu_begin();
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index bcddf09b5aa3..4845b8c7be7f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -90,7 +90,6 @@ static int get_e820_md5(struct e820_table *table, void *buf)
 	}
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	size = offsetof(struct e820_table, entries) +
 		sizeof(struct e820_entry) * table->nr_entries;
diff --git a/crypto/842.c b/crypto/842.c
index bc26dc942821..5f98393b65d1 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -144,7 +144,7 @@ static int __init crypto842_mod_init(void)
 
 	return ret;
 }
-module_init(crypto842_mod_init);
+subsys_initcall(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bbab6bf33519..3d056e7da65f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -27,8 +27,8 @@ config CRYPTO_FIPS
 	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
 	depends on (MODULE_SIG || !MODULES)
 	help
-	  This options enables the fips boot option which is
-	  required if you want to system to operate in a FIPS 200
+	  This option enables the fips boot option which is
+	  required if you want the system to operate under FIPS 200
 	  certification.  You should say no unless you know what
 	  this is.
 
@@ -113,29 +113,6 @@ config CRYPTO_ACOMP
 	select CRYPTO_ALGAPI
 	select CRYPTO_ACOMP2
 
-config CRYPTO_RSA
-	tristate "RSA algorithm"
-	select CRYPTO_AKCIPHER
-	select CRYPTO_MANAGER
-	select MPILIB
-	select ASN1
-	help
-	  Generic implementation of the RSA public key algorithm.
-
-config CRYPTO_DH
-	tristate "Diffie-Hellman algorithm"
-	select CRYPTO_KPP
-	select MPILIB
-	help
-	  Generic implementation of the Diffie-Hellman algorithm.
-
-config CRYPTO_ECDH
-	tristate "ECDH algorithm"
-	select CRYPTO_KPP
-	select CRYPTO_RNG_DEFAULT
-	help
-	  Generic implementation of the ECDH algorithm
-
 config CRYPTO_MANAGER
 	tristate "Cryptographic algorithm manager"
 	select CRYPTO_MANAGER2
@@ -253,6 +230,48 @@ config CRYPTO_GLUE_HELPER_X86
 config CRYPTO_ENGINE
 	tristate
 
+comment "Public-key cryptography"
+
+config CRYPTO_RSA
+	tristate "RSA algorithm"
+	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
+	select MPILIB
+	select ASN1
+	help
+	  Generic implementation of the RSA public key algorithm.
+
+config CRYPTO_DH
+	tristate "Diffie-Hellman algorithm"
+	select CRYPTO_KPP
+	select MPILIB
+	help
+	  Generic implementation of the Diffie-Hellman algorithm.
+
+config CRYPTO_ECC
+	tristate
+
+config CRYPTO_ECDH
+	tristate "ECDH algorithm"
+	select CRYPTO_ECC
+	select CRYPTO_KPP
+	select CRYPTO_RNG_DEFAULT
+	help
+	  Generic implementation of the ECDH algorithm.
+
+config CRYPTO_ECRDSA
+	tristate "EC-RDSA (GOST 34.10) algorithm"
+	select CRYPTO_ECC
+	select CRYPTO_AKCIPHER
+	select CRYPTO_STREEBOG
+	select OID_REGISTRY
+	select ASN1
+	help
+	  Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
+	  RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
+	  standard algorithms (called GOST algorithms). Only signature verification
+	  is implemented.
+
 comment "Authenticated Encryption with Associated Data"
 
 config CRYPTO_CCM
@@ -310,25 +329,25 @@ config CRYPTO_AEGIS128_AESNI_SSE2
 	tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
 	depends on X86 && 64BIT
 	select CRYPTO_AEAD
-	select CRYPTO_CRYPTD
+	select CRYPTO_SIMD
 	help
-	 AESNI+SSE2 implementation of the AEGSI-128 dedicated AEAD algorithm.
+	 AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
 
 config CRYPTO_AEGIS128L_AESNI_SSE2
 	tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
 	depends on X86 && 64BIT
 	select CRYPTO_AEAD
-	select CRYPTO_CRYPTD
+	select CRYPTO_SIMD
 	help
-	 AESNI+SSE2 implementation of the AEGSI-128L dedicated AEAD algorithm.
+	 AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
 
 config CRYPTO_AEGIS256_AESNI_SSE2
 	tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
 	depends on X86 && 64BIT
 	select CRYPTO_AEAD
-	select CRYPTO_CRYPTD
+	select CRYPTO_SIMD
 	help
-	 AESNI+SSE2 implementation of the AEGSI-256 dedicated AEAD algorithm.
+	 AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
 
 config CRYPTO_MORUS640
 	tristate "MORUS-640 AEAD algorithm"
@@ -340,7 +359,7 @@ config CRYPTO_MORUS640_GLUE
 	tristate
 	depends on X86
 	select CRYPTO_AEAD
-	select CRYPTO_CRYPTD
+	select CRYPTO_SIMD
 	help
 	  Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
 	  algorithm.
@@ -363,7 +382,7 @@ config CRYPTO_MORUS1280_GLUE
 	tristate
 	depends on X86
 	select CRYPTO_AEAD
-	select CRYPTO_CRYPTD
+	select CRYPTO_SIMD
 	help
 	  Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
 	  algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index fb5bf2a3a666..266a4cdbb9e2 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -147,12 +147,20 @@ obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
 obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
 obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
 obj-$(CONFIG_CRYPTO_OFB) += ofb.o
+obj-$(CONFIG_CRYPTO_ECC) += ecc.o
 
-ecdh_generic-y := ecc.o
 ecdh_generic-y += ecdh.o
 ecdh_generic-y += ecdh_helper.o
 obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
 
+$(obj)/ecrdsa_params.asn1.o: $(obj)/ecrdsa_params.asn1.c $(obj)/ecrdsa_params.asn1.h
+$(obj)/ecrdsa_pub_key.asn1.o: $(obj)/ecrdsa_pub_key.asn1.c $(obj)/ecrdsa_pub_key.asn1.h
+$(obj)/ecrdsa.o: $(obj)/ecrdsa_params.asn1.h $(obj)/ecrdsa_pub_key.asn1.h
+ecrdsa_generic-y += ecrdsa.o
+ecrdsa_generic-y += ecrdsa_params.asn1.o
+ecrdsa_generic-y += ecrdsa_pub_key.asn1.o
+obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o
+
 #
 # generic algorithms and the async_tx api
 #
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 5564e73266a6..395a3ddd3707 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -265,7 +265,6 @@ static int adiantum_hash_message(struct skcipher_request *req,
 	int err;
 
 	hash_desc->tfm = tctx->hash;
-	hash_desc->flags = 0;
 
 	err = crypto_shash_init(hash_desc);
 	if (err)
@@ -659,7 +658,7 @@ static void __exit adiantum_module_exit(void)
 	crypto_unregister_template(&adiantum_tmpl);
 }
 
-module_init(adiantum_module_init);
+subsys_initcall(adiantum_module_init);
 module_exit(adiantum_module_exit);
 
 MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 3718a8341303..d78f77fc5dd1 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -448,7 +448,7 @@ static void __exit crypto_aegis128_module_exit(void)
 	crypto_unregister_aead(&crypto_aegis128_alg);
 }
 
-module_init(crypto_aegis128_module_init);
+subsys_initcall(crypto_aegis128_module_init);
 module_exit(crypto_aegis128_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 275a8616d71b..9bca3d619a22 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -512,7 +512,7 @@ static void __exit crypto_aegis128l_module_exit(void)
 	crypto_unregister_aead(&crypto_aegis128l_alg);
 }
 
-module_init(crypto_aegis128l_module_init);
+subsys_initcall(crypto_aegis128l_module_init);
 module_exit(crypto_aegis128l_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index ecd6b7f34a2d..b47fd39595ad 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -463,7 +463,7 @@ static void __exit crypto_aegis256_module_exit(void)
 	crypto_unregister_aead(&crypto_aegis256_alg);
 }
 
-module_init(crypto_aegis256_module_init);
+subsys_initcall(crypto_aegis256_module_init);
 module_exit(crypto_aegis256_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 13df33aca463..f217568917e4 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -64,7 +64,7 @@ static inline u8 byte(const u32 x, const unsigned n)
 static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
 
 /* cacheline-aligned to facilitate prefetching into cache */
-__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
 	{
 		0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
 		0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -328,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
 	}
 };
 
-__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
 	{
 		0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
 		0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -592,7 +592,7 @@ __visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
 	}
 };
 
-__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
 	{
 		0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
 		0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -856,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
 	}
 };
 
-__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
 	{
 		0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
 		0x00000030, 0x00000036, 0x000000a5, 0x00000038,
@@ -1470,7 +1470,7 @@ static void __exit aes_fini(void)
 	crypto_unregister_alg(&aes_alg);
 }
 
-module_init(aes_init);
+subsys_initcall(aes_init);
 module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 0cbeae137e0a..780daa436dac 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -119,10 +119,24 @@ static void akcipher_prepare_alg(struct akcipher_alg *alg)
 	base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
 }
 
+static int akcipher_default_op(struct akcipher_request *req)
+{
+	return -ENOSYS;
+}
+
 int crypto_register_akcipher(struct akcipher_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;
 
+	if (!alg->sign)
+		alg->sign = akcipher_default_op;
+	if (!alg->verify)
+		alg->verify = akcipher_default_op;
+	if (!alg->encrypt)
+		alg->encrypt = akcipher_default_op;
+	if (!alg->decrypt)
+		alg->decrypt = akcipher_default_op;
+
 	akcipher_prepare_alg(alg);
 	return crypto_register_alg(base);
 }
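With -ENOSYS defaults installed at registration time, a partial akcipher (such as the verify-only EC-RDSA implementation this series enables in Kconfig above) can leave unsupported callbacks NULL instead of supplying stubs. Sketch with placeholder names:

	static struct akcipher_alg example_verify_only = {
		.verify		= example_verify,	/* the one real op */
		.set_pub_key	= example_set_pub_key,
		.max_size	= example_max_size,
		/* .sign/.encrypt/.decrypt left NULL: they now resolve to
		 * akcipher_default_op() and callers get a clean -ENOSYS. */
		.base = {
			.cra_name	= "example",
			.cra_module	= THIS_MODULE,
		},
	};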
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 527b44d0af21..bb97cfb38836 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -296,7 +296,13 @@ static void __exit cryptomgr_exit(void)
 	BUG_ON(err);
 }
 
-subsys_initcall(cryptomgr_init);
+/*
+ * This is arch_initcall() so that the crypto self-tests are run on algorithms
+ * registered early by subsys_initcall().  subsys_initcall() is needed for
+ * generic implementations so that they're available for comparison tests when
+ * other implementations are registered later by module_init().
+ */
+arch_initcall(cryptomgr_init);
 module_exit(cryptomgr_exit);
 
 MODULE_LICENSE("GPL");
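The resulting initcall ordering, for reference:

	/*
	 * arch_initcall    cryptomgr_init          (self-test manager, above)
	 * subsys_initcall  generic implementations (the module_init ->
	 *                  subsys_initcall conversions in this patch)
	 * module_init      optimized/arch-specific implementations
	 *
	 * so each generic implementation is registered (and self-tested)
	 * before any optimized implementation it will be compared against.
	 */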
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index eff337ce9003..e7c43ea4ce9d 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -472,7 +472,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
 module_param(dbg, int, 0);
 MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-module_init(prng_mod_init);
+subsys_initcall(prng_mod_init);
 module_exit(prng_mod_fini);
 MODULE_ALIAS_CRYPTO("stdrng");
 MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 4bb187c2a902..673927de0eb9 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -699,7 +699,7 @@ static void __exit anubis_mod_fini(void)
 	crypto_unregister_alg(&anubis_alg);
 }
 
-module_init(anubis_mod_init);
+subsys_initcall(anubis_mod_init);
 module_exit(anubis_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 6c93342e3405..2233d36456e2 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -163,7 +163,7 @@ static void __exit arc4_exit(void)
 	crypto_unregister_skcipher(&arc4_skcipher);
 }
 
-module_init(arc4_init);
+subsys_initcall(arc4_init);
 module_exit(arc4_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 5d4c270463f6..76d2ce3a1b5b 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -276,6 +276,10 @@ static int tpm_sign(struct tpm_buf *tb,
 
 	return datalen;
 }
+
+/* Room to fit two u32 zeros for algo id and parameters length. */
+#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2)
+
 /*
  * Maximum buffer size for the BER/DER encoded public key.  The public key
  * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048
@@ -286,8 +290,9 @@ static int tpm_sign(struct tpm_buf *tb,
  *     - 257 bytes of n
  *   - max 2 bytes for INTEGER e type/length
  *     - 3 bytes of e
+ * - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE)
  */
-#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3)
+#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE)
 
 /*
  * Provide a part of a description of the key for /proc/keys.
@@ -364,6 +369,8 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
 	cur = encode_tag_length(cur, 0x02, sizeof(e));
 	memcpy(cur, e, sizeof(e));
 	cur += sizeof(e);
+	/* Zero parameters to satisfy set_pub_key ABI. */
+	memset(cur, 0, SETKEY_PARAMS_SIZE);
 
 	return cur - buf;
 }
@@ -744,12 +751,10 @@ static int tpm_key_verify_signature(const struct key *key,
 	struct crypto_wait cwait;
 	struct crypto_akcipher *tfm;
 	struct akcipher_request *req;
-	struct scatterlist sig_sg, digest_sg;
+	struct scatterlist src_sg[2];
 	char alg_name[CRYPTO_MAX_ALG_NAME];
 	uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
 	uint32_t der_pub_key_len;
-	void *output;
-	unsigned int outlen;
 	int ret;
 
 	pr_devel("==>%s()\n", __func__);
@@ -781,37 +786,17 @@ static int tpm_key_verify_signature(const struct key *key,
 	if (!req)
 		goto error_free_tfm;
 
-	ret = -ENOMEM;
-	outlen = crypto_akcipher_maxsize(tfm);
-	output = kmalloc(outlen, GFP_KERNEL);
-	if (!output)
-		goto error_free_req;
-
-	sg_init_one(&sig_sg, sig->s, sig->s_size);
-	sg_init_one(&digest_sg, output, outlen);
-	akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
-				   outlen);
+	sg_init_table(src_sg, 2);
+	sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+	sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+				   sig->digest_size);
 	crypto_init_wait(&cwait);
 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 				      CRYPTO_TFM_REQ_MAY_SLEEP,
 				      crypto_req_done, &cwait);
-
-	/* Perform the verification calculation.  This doesn't actually do the
-	 * verification, but rather calculates the hash expected by the
-	 * signature and returns that to us.
-	 */
 	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
-	if (ret)
-		goto out_free_output;
-
-	/* Do the actual verification step. */
-	if (req->dst_len != sig->digest_size ||
-	    memcmp(sig->digest, output, sig->digest_size) != 0)
-		ret = -EKEYREJECTED;
 
-out_free_output:
-	kfree(output);
-error_free_req:
 	akcipher_request_free(req);
 error_free_tfm:
 	crypto_free_akcipher(tfm);
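
The rewritten function relies on the new akcipher verify() calling convention assumed throughout this patch: the signature and the expected digest travel together in the source scatterlist, the destination is NULL, and the transform itself rejects a mismatch, so no output buffer or memcmp() is needed. A caller-side sketch, with hypothetical sig/digest buffers:

struct scatterlist src_sg[2];

sg_init_table(src_sg, 2);
sg_set_buf(&src_sg[0], sig, sig_size);		/* raw signature */
sg_set_buf(&src_sg[1], digest, digest_size);	/* expected hash value */
akcipher_request_set_crypt(req, src_sg, NULL, sig_size, digest_size);
err = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
/* err == 0 only if the signature matched the digest */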
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 97c77f66b20d..f7b0980bf02d 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -56,7 +56,6 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
 		goto error_no_desc;
 
 	desc->tfm   = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	/* Digest the message [RFC2315 9.3] */
 	ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index f5d85b47fcc6..77e0ae7840ff 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -45,6 +45,7 @@ void public_key_free(struct public_key *key)
 {
 	if (key) {
 		kfree(key->key);
+		kfree(key->params);
 		kfree(key);
 	}
 }
@@ -94,6 +95,12 @@ int software_key_determine_akcipher(const char *encoding,
 	return -ENOPKG;
 }
 
+static u8 *pkey_pack_u32(u8 *dst, u32 val)
+{
+	memcpy(dst, &val, sizeof(val));
+	return dst + sizeof(val);
+}
+
 /*
  * Query information about a key.
  */
@@ -103,6 +110,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
 	struct crypto_akcipher *tfm;
 	struct public_key *pkey = params->key->payload.data[asym_crypto];
 	char alg_name[CRYPTO_MAX_ALG_NAME];
+	u8 *key, *ptr;
 	int ret, len;
 
 	ret = software_key_determine_akcipher(params->encoding,
@@ -115,14 +123,22 @@ static int software_key_query(const struct kernel_pkey_params *params,
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
+	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+		      GFP_KERNEL);
+	if (!key)
+		goto error_free_tfm;
+	memcpy(key, pkey->key, pkey->keylen);
+	ptr = key + pkey->keylen;
+	ptr = pkey_pack_u32(ptr, pkey->algo);
+	ptr = pkey_pack_u32(ptr, pkey->paramlen);
+	memcpy(ptr, pkey->params, pkey->paramlen);
+
 	if (pkey->key_is_private)
-		ret = crypto_akcipher_set_priv_key(tfm,
-						   pkey->key, pkey->keylen);
+		ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
 	else
-		ret = crypto_akcipher_set_pub_key(tfm,
-						  pkey->key, pkey->keylen);
+		ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
 	if (ret < 0)
-		goto error_free_tfm;
+		goto error_free_key;
 
 	len = crypto_akcipher_maxsize(tfm);
 	info->key_size = len * 8;
@@ -137,6 +153,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
 					KEYCTL_SUPPORTS_SIGN);
 	ret = 0;
 
+error_free_key:
+	kfree(key);
 error_free_tfm:
 	crypto_free_akcipher(tfm);
 	pr_devel("<==%s() = %d\n", __func__, ret);
@@ -155,6 +173,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
 	struct crypto_wait cwait;
 	struct scatterlist in_sg, out_sg;
 	char alg_name[CRYPTO_MAX_ALG_NAME];
+	char *key, *ptr;
 	int ret;
 
 	pr_devel("==>%s()\n", __func__);
@@ -173,14 +192,23 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
 	if (!req)
 		goto error_free_tfm;
 
+	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+		      GFP_KERNEL);
+	if (!key)
+		goto error_free_req;
+
+	memcpy(key, pkey->key, pkey->keylen);
+	ptr = key + pkey->keylen;
+	ptr = pkey_pack_u32(ptr, pkey->algo);
+	ptr = pkey_pack_u32(ptr, pkey->paramlen);
+	memcpy(ptr, pkey->params, pkey->paramlen);
+
 	if (pkey->key_is_private)
-		ret = crypto_akcipher_set_priv_key(tfm,
-						   pkey->key, pkey->keylen);
+		ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
 	else
-		ret = crypto_akcipher_set_pub_key(tfm,
-						  pkey->key, pkey->keylen);
+		ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
 	if (ret)
-		goto error_free_req;
+		goto error_free_key;
 
 	sg_init_one(&in_sg, in, params->in_len);
 	sg_init_one(&out_sg, out, params->out_len);
@@ -210,6 +238,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
 	if (ret == 0)
 		ret = req->dst_len;
 
+error_free_key:
+	kfree(key);
 error_free_req:
 	akcipher_request_free(req);
 error_free_tfm:
@@ -227,10 +257,9 @@ int public_key_verify_signature(const struct public_key *pkey,
 	struct crypto_wait cwait;
 	struct crypto_akcipher *tfm;
 	struct akcipher_request *req;
-	struct scatterlist sig_sg, digest_sg;
+	struct scatterlist src_sg[2];
 	char alg_name[CRYPTO_MAX_ALG_NAME];
-	void *output;
-	unsigned int outlen;
+	char *key, *ptr;
 	int ret;
 
 	pr_devel("==>%s()\n", __func__);
@@ -254,45 +283,37 @@ int public_key_verify_signature(const struct public_key *pkey,
 	if (!req)
 		goto error_free_tfm;
 
+	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+		      GFP_KERNEL);
+	if (!key)
+		goto error_free_req;
+
+	memcpy(key, pkey->key, pkey->keylen);
+	ptr = key + pkey->keylen;
+	ptr = pkey_pack_u32(ptr, pkey->algo);
+	ptr = pkey_pack_u32(ptr, pkey->paramlen);
+	memcpy(ptr, pkey->params, pkey->paramlen);
+
 	if (pkey->key_is_private)
-		ret = crypto_akcipher_set_priv_key(tfm,
-						   pkey->key, pkey->keylen);
+		ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
 	else
-		ret = crypto_akcipher_set_pub_key(tfm,
-						  pkey->key, pkey->keylen);
+		ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
 	if (ret)
-		goto error_free_req;
-
-	ret = -ENOMEM;
-	outlen = crypto_akcipher_maxsize(tfm);
-	output = kmalloc(outlen, GFP_KERNEL);
-	if (!output)
-		goto error_free_req;
+		goto error_free_key;
 
-	sg_init_one(&sig_sg, sig->s, sig->s_size);
-	sg_init_one(&digest_sg, output, outlen);
-	akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
-				   outlen);
+	sg_init_table(src_sg, 2);
+	sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+	sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+				   sig->digest_size);
 	crypto_init_wait(&cwait);
 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 				      CRYPTO_TFM_REQ_MAY_SLEEP,
 				      crypto_req_done, &cwait);
-
-	/* Perform the verification calculation.  This doesn't actually do the
-	 * verification, but rather calculates the hash expected by the
-	 * signature and returns that to us.
-	 */
 	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
-	if (ret)
-		goto out_free_output;
-
-	/* Do the actual verification step. */
-	if (req->dst_len != sig->digest_size ||
-	    memcmp(sig->digest, output, sig->digest_size) != 0)
-		ret = -EKEYREJECTED;
 
-out_free_output:
-	kfree(output);
+error_free_key:
+	kfree(key);
 error_free_req:
 	akcipher_request_free(req);
 error_free_tfm:
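
All three call sites above assemble the same packed key buffer, which a parameter-aware driver (such as the ecrdsa one this series enables) can unpack. A sketch of the layout, with offsets relative to the hypothetical buffer 'key':

/*
 *  key                  key + keylen
 *  |                    |
 *  +--------------------+----------+--------------+---------------+
 *  | raw key material   | algo u32 | paramlen u32 | params (BER)  |
 *  +--------------------+----------+--------------+---------------+
 *
 * Only pkey->keylen is advertised to crypto_akcipher_set_{pub,priv}_key();
 * a driver that needs the parameters reads the two u32s and the parameter
 * blob that sit just past the advertised key length.
 */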
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index d178650fd524..f8e4a932bcfb 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -354,7 +354,6 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
 		goto error_no_desc;
 
 	desc->tfm   = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
 		goto error;
diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1
index aae0cde414e2..5c9f4e4a5231 100644
--- a/crypto/asymmetric_keys/x509.asn1
+++ b/crypto/asymmetric_keys/x509.asn1
@@ -22,7 +22,7 @@ CertificateSerialNumber ::= INTEGER
 
 AlgorithmIdentifier ::= SEQUENCE {
 	algorithm		OBJECT IDENTIFIER ({ x509_note_OID }),
-	parameters		ANY OPTIONAL
+	parameters		ANY OPTIONAL ({ x509_note_params })
 }
 
 Name ::= SEQUENCE OF RelativeDistinguishedName
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 991f4d735a4e..5b7bfd95c334 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -26,6 +26,9 @@ struct x509_parse_context {
 	const void	*cert_start;		/* Start of cert content */
 	const void	*key;			/* Key data */
 	size_t		key_size;		/* Size of key data */
+	const void	*params;		/* Key parameters */
+	size_t		params_size;		/* Size of key parameters */
+	enum OID	key_algo;		/* Public key algorithm */
 	enum OID	last_oid;		/* Last OID encountered */
 	enum OID	algo_oid;		/* Algorithm OID */
 	unsigned char	nr_mpi;			/* Number of MPIs stored */
@@ -109,6 +112,13 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
 
 	cert->pub->keylen = ctx->key_size;
 
+	cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
+	if (!cert->pub->params)
+		goto error_decode;
+
+	cert->pub->paramlen = ctx->params_size;
+	cert->pub->algo = ctx->key_algo;
+
 	/* Grab the signature bits */
 	ret = x509_get_sig_params(cert);
 	if (ret < 0)
@@ -220,6 +230,14 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
 	case OID_sha224WithRSAEncryption:
 		ctx->cert->sig->hash_algo = "sha224";
 		goto rsa_pkcs1;
+
+	case OID_gost2012Signature256:
+		ctx->cert->sig->hash_algo = "streebog256";
+		goto ecrdsa;
+
+	case OID_gost2012Signature512:
+		ctx->cert->sig->hash_algo = "streebog512";
+		goto ecrdsa;
 	}
 
 rsa_pkcs1:
@@ -227,6 +245,11 @@ rsa_pkcs1:
 	ctx->cert->sig->encoding = "pkcs1";
 	ctx->algo_oid = ctx->last_oid;
 	return 0;
+ecrdsa:
+	ctx->cert->sig->pkey_algo = "ecrdsa";
+	ctx->cert->sig->encoding = "raw";
+	ctx->algo_oid = ctx->last_oid;
+	return 0;
 }
 
 /*
@@ -246,7 +269,8 @@ int x509_note_signature(void *context, size_t hdrlen,
 		return -EINVAL;
 	}
 
-	if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+	if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
+	    strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) {
 		/* Discard the BIT STRING metadata */
 		if (vlen < 1 || *(const u8 *)value != 0)
 			return -EBADMSG;
@@ -401,6 +425,27 @@ int x509_note_subject(void *context, size_t hdrlen,
 }
 
 /*
+ * Extract the parameters for the public key
+ */
+int x509_note_params(void *context, size_t hdrlen,
+		     unsigned char tag,
+		     const void *value, size_t vlen)
+{
+	struct x509_parse_context *ctx = context;
+
+	/*
+	 * AlgorithmIdentifier occurs three times in the X.509 certificate: skip
+	 * the first and ignore the third, and use the second one, which comes
+	 * after the subject and before the subjectPublicKey.
+	 */
+	if (!ctx->cert->raw_subject || ctx->key)
+		return 0;
+	ctx->params = value - hdrlen;
+	ctx->params_size = vlen + hdrlen;
+	return 0;
+}
+
+/*
  * Extract the data for the public key algorithm
  */
 int x509_extract_key_data(void *context, size_t hdrlen,
@@ -409,11 +454,15 @@ int x509_extract_key_data(void *context, size_t hdrlen,
 {
 	struct x509_parse_context *ctx = context;
 
-	if (ctx->last_oid != OID_rsaEncryption)
+	ctx->key_algo = ctx->last_oid;
+	if (ctx->last_oid == OID_rsaEncryption)
+		ctx->cert->pub->pkey_algo = "rsa";
+	else if (ctx->last_oid == OID_gost2012PKey256 ||
+		 ctx->last_oid == OID_gost2012PKey512)
+		ctx->cert->pub->pkey_algo = "ecrdsa";
+	else
 		return -ENOPKG;
 
-	ctx->cert->pub->pkey_algo = "rsa";
-
 	/* Discard the BIT STRING metadata */
 	if (vlen < 1 || *(const u8 *)value != 0)
 		return -EBADMSG;
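
For orientation, x509_note_params() above distinguishes the three AlgorithmIdentifier occurrences by their fixed positions in the certificate; a condensed sketch of the RFC 5280 layout:

/*
 * Certificate ::= SEQUENCE {
 *     tbsCertificate     TBSCertificate ::= SEQUENCE {
 *         ...
 *         signature              AlgorithmIdentifier,  -- 1st: skipped
 *         ...
 *         subject                Name,
 *         subjectPublicKeyInfo   SEQUENCE {
 *             algorithm          AlgorithmIdentifier,  -- 2nd: captured
 *             subjectPublicKey   BIT STRING } },
 *     signatureAlgorithm AlgorithmIdentifier,          -- 3rd: ignored
 *     signatureValue     BIT STRING }
 */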
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 9338b4558cdc..bd96683d8cde 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -77,7 +77,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
 		goto error;
 
 	desc->tfm = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
 	if (ret < 0)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4be293a4b5f0..b3eddac7fa3a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -508,7 +508,7 @@ static void __exit crypto_authenc_module_exit(void)
 	crypto_unregister_template(&crypto_authenc_tmpl);
 }
 
-module_init(crypto_authenc_module_init);
+subsys_initcall(crypto_authenc_module_init);
 module_exit(crypto_authenc_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4741fe89ba2c..58074308e535 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -523,7 +523,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
 	crypto_unregister_template(&crypto_authenc_esn_tmpl);
 }
 
-module_init(crypto_authenc_esn_module_init);
+subsys_initcall(crypto_authenc_esn_module_init);
 module_exit(crypto_authenc_esn_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 87b392a77a93..8548ced8b074 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -133,7 +133,7 @@ static void __exit blowfish_mod_fini(void)
 	crypto_unregister_alg(&alg);
 }
 
-module_init(blowfish_mod_init);
+subsys_initcall(blowfish_mod_init);
 module_exit(blowfish_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 32ddd4836ff5..15ce1281f5d9 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1092,7 +1092,7 @@ static void __exit camellia_fini(void)
 	crypto_unregister_alg(&camellia_alg);
 }
 
-module_init(camellia_init);
+subsys_initcall(camellia_init);
 module_exit(camellia_fini);
 
 MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 66169c178314..24bc7d4e33be 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -543,7 +543,7 @@ static void __exit cast5_mod_fini(void)
 	crypto_unregister_alg(&alg);
 }
 
-module_init(cast5_mod_init);
+subsys_initcall(cast5_mod_init);
 module_exit(cast5_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index c8e5ec69790e..edd59cc34991 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -285,7 +285,7 @@ static void __exit cast6_mod_fini(void)
 	crypto_unregister_alg(&alg);
 }
 
-module_init(cast6_mod_init);
+subsys_initcall(cast6_mod_init);
 module_exit(cast6_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index d12efaac9230..129f79d03365 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -98,7 +98,7 @@ static void __exit crypto_cbc_module_exit(void)
 	crypto_unregister_template(&crypto_cbc_tmpl);
 }
 
-module_init(crypto_cbc_module_init);
+subsys_initcall(crypto_cbc_module_init);
 module_exit(crypto_cbc_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 50df8f001c1c..c1ef9d0b4271 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
 
 static int crypto_ccm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *mac_name)
 {
@@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 
 	mac = __crypto_hash_alg_common(mac_alg);
 	err = -EINVAL;
-	if (mac->digestsize != 16)
+	if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
+	    mac->digestsize != 16)
 		goto out_put_mac;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 
 	ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
 
-	/* Not a stream cipher? */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto err_drop_ctr;
 
-	/* We want the real thing! */
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
+	/* ctr and cbcmac must use the same underlying block cipher. */
+	if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
 		goto err_drop_ctr;
 
 	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+		goto err_drop_ctr;
+
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "ccm_base(%s,%s)", ctr->base.cra_driver_name,
 		     mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (mac->base.cra_priority +
 				       ctr->base.cra_priority) / 2;
@@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
 	char mac_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -581,35 +584,24 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					mac_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }
 
 static int crypto_ccm_base_create(struct crypto_template *tmpl,
 				  struct rtattr **tb)
 {
 	const char *ctr_name;
-	const char *cipher_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
+	const char *mac_name;
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
 		return PTR_ERR(ctr_name);
 
-	cipher_name = crypto_attr_alg_name(tb[2]);
-	if (IS_ERR(cipher_name))
-		return PTR_ERR(cipher_name);
-
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
-		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
+	mac_name = crypto_attr_alg_name(tb[2]);
+	if (IS_ERR(mac_name))
+		return PTR_ERR(mac_name);
 
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					cipher_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }
 
 static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1014,7 +1006,7 @@ static void __exit crypto_ccm_module_exit(void)
 				    ARRAY_SIZE(crypto_ccm_tmpls));
 }
 
-module_init(crypto_ccm_module_init);
+subsys_initcall(crypto_ccm_module_init);
 module_exit(crypto_ccm_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 03ac847f6d6a..7b68fbb61732 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -243,7 +243,7 @@ static void __exit crypto_cfb_module_exit(void)
 	crypto_unregister_template(&crypto_cfb_tmpl);
 }
 
-module_init(crypto_cfb_module_init);
+subsys_initcall(crypto_cfb_module_init);
 module_exit(crypto_cfb_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ed2e12e26dd8..e38a2d61819a 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -645,8 +645,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "%s(%s,%s)", name, chacha_name,
-		     poly_name) >= CRYPTO_MAX_ALG_NAME)
+		     "%s(%s,%s)", name, chacha->base.cra_name,
+		     poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->base.cra_driver_name,
@@ -725,7 +725,7 @@ static void __exit chacha20poly1305_module_exit(void)
 				    ARRAY_SIZE(rfc7539_tmpls));
 }
 
-module_init(chacha20poly1305_module_init);
+subsys_initcall(chacha20poly1305_module_init);
 module_exit(chacha20poly1305_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index 35b583101f4f..d2ec04997832 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -22,18 +22,16 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
 	/* aligned to potentially speed up crypto_xor() */
 	u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
 
-	if (dst != src)
-		memcpy(dst, src, bytes);
-
 	while (bytes >= CHACHA_BLOCK_SIZE) {
 		chacha_block(state, stream, nrounds);
-		crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
+		crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
 		bytes -= CHACHA_BLOCK_SIZE;
 		dst += CHACHA_BLOCK_SIZE;
+		src += CHACHA_BLOCK_SIZE;
 	}
 	if (bytes) {
 		chacha_block(state, stream, nrounds);
-		crypto_xor(dst, stream, bytes);
+		crypto_xor_cpy(dst, src, stream, bytes);
 	}
 }
 
@@ -52,7 +50,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
 		unsigned int nbytes = walk.nbytes;
 
 		if (nbytes < walk.total)
-			nbytes = round_down(nbytes, walk.stride);
+			nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
 
 		chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
 			       nbytes, ctx->nrounds);
@@ -203,7 +201,7 @@ static void __exit chacha_generic_mod_fini(void)
 	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
 }
 
-module_init(chacha_generic_mod_init);
+subsys_initcall(chacha_generic_mod_init);
 module_exit(chacha_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 16301f52858c..c60b6c011ec6 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,7 +313,7 @@ static void __exit crypto_cmac_module_exit(void)
 	crypto_unregister_template(&crypto_cmac_tmpl);
 }
 
-module_init(crypto_cmac_module_init);
+subsys_initcall(crypto_cmac_module_init);
 module_exit(crypto_cmac_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 00facd27bcc2..9e97912280bd 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -146,7 +146,7 @@ static void __exit crc32_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(crc32_mod_init);
+subsys_initcall(crc32_mod_init);
 module_exit(crc32_mod_fini);
 
 MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7283066ecc98..ad26f15d4c7b 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -165,7 +165,7 @@ static void __exit crc32c_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(crc32c_mod_init);
+subsys_initcall(crc32c_mod_init);
 module_exit(crc32c_mod_fini);
 
 MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29dc6fc..d90c0070710e 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }
 
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }
 
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }
 
 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }
 
 static struct shash_alg alg = {
@@ -115,7 +112,7 @@ static void __exit crct10dif_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(crct10dif_mod_init);
+subsys_initcall(crct10dif_mod_init);
 module_exit(crct10dif_mod_fini);
 
 MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5640e5db7bdb..b3bb99390ae7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -65,15 +65,6 @@ struct aead_instance_ctx {
 	struct cryptd_queue *queue;
 };
 
-struct cryptd_blkcipher_ctx {
-	atomic_t refcnt;
-	struct crypto_blkcipher *child;
-};
-
-struct cryptd_blkcipher_request_ctx {
-	crypto_completion_t complete;
-};
-
 struct cryptd_skcipher_ctx {
 	atomic_t refcnt;
 	struct crypto_sync_skcipher *child;
@@ -216,129 +207,6 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
 	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
 }
 
-static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
-				   const u8 *key, unsigned int keylen)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
-	struct crypto_blkcipher *child = ctx->child;
-	int err;
-
-	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
-					  CRYPTO_TFM_REQ_MASK);
-	err = crypto_blkcipher_setkey(child, key, keylen);
-	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
-					    CRYPTO_TFM_RES_MASK);
-	return err;
-}
-
-static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
-				   struct crypto_blkcipher *child,
-				   int err,
-				   int (*crypt)(struct blkcipher_desc *desc,
-						struct scatterlist *dst,
-						struct scatterlist *src,
-						unsigned int len))
-{
-	struct cryptd_blkcipher_request_ctx *rctx;
-	struct cryptd_blkcipher_ctx *ctx;
-	struct crypto_ablkcipher *tfm;
-	struct blkcipher_desc desc;
-	int refcnt;
-
-	rctx = ablkcipher_request_ctx(req);
-
-	if (unlikely(err == -EINPROGRESS))
-		goto out;
-
-	desc.tfm = child;
-	desc.info = req->info;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = crypt(&desc, req->dst, req->src, req->nbytes);
-
-	req->base.complete = rctx->complete;
-
-out:
-	tfm = crypto_ablkcipher_reqtfm(req);
-	ctx = crypto_ablkcipher_ctx(tfm);
-	refcnt = atomic_read(&ctx->refcnt);
-
-	local_bh_disable();
-	rctx->complete(&req->base, err);
-	local_bh_enable();
-
-	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
-		crypto_free_ablkcipher(tfm);
-}
-
-static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-
-	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
-			       crypto_blkcipher_crt(child)->encrypt);
-}
-
-static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-
-	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
-			       crypto_blkcipher_crt(child)->decrypt);
-}
-
-static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
-				    crypto_completion_t compl)
-{
-	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct cryptd_queue *queue;
-
-	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
-	rctx->complete = req->base.complete;
-	req->base.complete = compl;
-
-	return cryptd_enqueue_request(queue, &req->base);
-}
-
-static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
-{
-	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
-}
-
-static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
-{
-	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
-}
-
-static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_spawn *spawn = &ictx->spawn;
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_blkcipher *cipher;
-
-	cipher = crypto_spawn_blkcipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-	tfm->crt_ablkcipher.reqsize =
-		sizeof(struct cryptd_blkcipher_request_ctx);
-	return 0;
-}
-
-static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_blkcipher(ctx->child);
-}
-
 static int cryptd_init_instance(struct crypto_instance *inst,
 				struct crypto_alg *alg)
 {
@@ -382,67 +250,6 @@ out_free_inst:
 	goto out;
 }
 
-static int cryptd_create_blkcipher(struct crypto_template *tmpl,
-				   struct rtattr **tb,
-				   struct cryptd_queue *queue)
-{
-	struct cryptd_instance_ctx *ctx;
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
-	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
-	u32 mask = CRYPTO_ALG_TYPE_MASK;
-	int err;
-
-	cryptd_check_internal(tb, &type, &mask);
-
-	alg = crypto_get_attr_alg(tb, type, mask);
-	if (IS_ERR(alg))
-		return PTR_ERR(alg);
-
-	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out_put_alg;
-
-	ctx = crypto_instance_ctx(inst);
-	ctx->queue = queue;
-
-	err = crypto_init_spawn(&ctx->spawn, alg, inst,
-				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
-	if (err)
-		goto out_free_inst;
-
-	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
-		type |= CRYPTO_ALG_INTERNAL;
-	inst->alg.cra_flags = type;
-	inst->alg.cra_type = &crypto_ablkcipher_type;
-
-	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
-	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
-	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
-
-	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
-
-	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
-	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
-
-	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
-	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
-	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		crypto_drop_spawn(&ctx->spawn);
-out_free_inst:
-		kfree(inst);
-	}
-
-out_put_alg:
-	crypto_mod_put(alg);
-	return err;
-}
-
 static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 				  const u8 *key, unsigned int keylen)
 {
@@ -738,7 +545,6 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 		goto out;
 
 	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_init(desc);
 
@@ -830,7 +636,6 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
 		goto out;
 
 	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = shash_ahash_digest(req, desc);
 
@@ -859,7 +664,6 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
 	struct shash_desc *desc = cryptd_shash_desc(req);
 
 	desc->tfm = ctx->child;
-	desc->flags = req->base.flags;
 
 	return crypto_shash_import(desc, in);
 }
@@ -1118,10 +922,6 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
-		    CRYPTO_ALG_TYPE_BLKCIPHER)
-			return cryptd_create_blkcipher(tmpl, tb, &queue);
-
 		return cryptd_create_skcipher(tmpl, tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
 		return cryptd_create_hash(tmpl, tb, &queue);
@@ -1160,58 +960,6 @@ static struct crypto_template cryptd_tmpl = {
 	.module = THIS_MODULE,
 };
 
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
-						  u32 type, u32 mask)
-{
-	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
-	struct cryptd_blkcipher_ctx *ctx;
-	struct crypto_tfm *tfm;
-
-	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
-		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-EINVAL);
-	type = crypto_skcipher_type(type);
-	mask &= ~CRYPTO_ALG_TYPE_MASK;
-	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
-	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
-	if (IS_ERR(tfm))
-		return ERR_CAST(tfm);
-	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
-		crypto_free_tfm(tfm);
-		return ERR_PTR(-EINVAL);
-	}
-
-	ctx = crypto_tfm_ctx(tfm);
-	atomic_set(&ctx->refcnt, 1);
-
-	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-	return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
-	return atomic_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
-
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
-{
-	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
-	if (atomic_dec_and_test(&ctx->refcnt))
-		crypto_free_ablkcipher(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
-
 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
 					      u32 type, u32 mask)
 {
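
Users of the removed cryptd_alloc_ablkcipher() API are expected to move to the skcipher variant that this file keeps; a minimal usage sketch (error handling trimmed, algorithm name illustrative):

struct cryptd_skcipher *ctfm;

ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctfm))
	return PTR_ERR(ctfm);
/* &ctfm->base behaves as an ordinary crypto_skcipher from here on */
cryptd_free_skcipher(ctfm);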
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 01630a9c7e01..9320d4eaa4a8 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -220,7 +220,7 @@ static void __exit crypto_null_mod_fini(void)
 	crypto_unregister_skcipher(&skcipher_null);
 }
 
-module_init(crypto_null_mod_init);
+subsys_initcall(crypto_null_mod_init);
 module_exit(crypto_null_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ec8f8b67473a..52cdf2c5605f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -384,7 +384,7 @@ static void __exit crypto_ctr_module_exit(void)
 				    ARRAY_SIZE(crypto_ctr_tmpls));
 }
 
-module_init(crypto_ctr_module_init);
+subsys_initcall(crypto_ctr_module_init);
 module_exit(crypto_ctr_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index 4e28d83ae37d..6b6087dbb62a 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -152,12 +152,14 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
 	struct skcipher_request *subreq = &rctx->subreq;
 	int bsize = crypto_skcipher_blocksize(tfm);
 	unsigned int nbytes = req->cryptlen;
-	int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
 	unsigned int offset;
 
 	skcipher_request_set_tfm(subreq, ctx->child);
 
-	if (cbc_blocks <= 0) {
+	if (nbytes < bsize)
+		return -EINVAL;
+
+	if (nbytes == bsize) {
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      req->base.complete,
 					      req->base.data);
@@ -166,7 +168,7 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
 		return crypto_skcipher_encrypt(subreq);
 	}
 
-	offset = cbc_blocks * bsize;
+	offset = rounddown(nbytes - 1, bsize);
 	rctx->offset = offset;
 
 	skcipher_request_set_callback(subreq, req->base.flags,
@@ -244,13 +246,15 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
 	struct skcipher_request *subreq = &rctx->subreq;
 	int bsize = crypto_skcipher_blocksize(tfm);
 	unsigned int nbytes = req->cryptlen;
-	int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
 	unsigned int offset;
 	u8 *space;
 
 	skcipher_request_set_tfm(subreq, ctx->child);
 
-	if (cbc_blocks <= 0) {
+	if (nbytes < bsize)
+		return -EINVAL;
+
+	if (nbytes == bsize) {
 		skcipher_request_set_callback(subreq, req->base.flags,
 					      req->base.complete,
 					      req->base.data);
@@ -264,10 +268,10 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
 
 	space = crypto_cts_reqctx_space(req);
 
-	offset = cbc_blocks * bsize;
+	offset = rounddown(nbytes - 1, bsize);
 	rctx->offset = offset;
 
-	if (cbc_blocks <= 1)
+	if (offset <= bsize)
 		memcpy(space, req->iv, bsize);
 	else
 		scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
@@ -419,7 +423,7 @@ static void __exit crypto_cts_module_exit(void)
 	crypto_unregister_template(&crypto_cts_tmpl);
 }
 
-module_init(crypto_cts_module_init);
+subsys_initcall(crypto_cts_module_init);
 module_exit(crypto_cts_module_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 94ec3b36a8e8..aab089cde1bf 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -334,7 +334,7 @@ static void __exit deflate_mod_fini(void)
 	crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
 }
 
-module_init(deflate_mod_init);
+subsys_initcall(deflate_mod_init);
 module_exit(deflate_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1e6621665dd9..d7a88b4fa611 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -862,14 +862,11 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
 		      unsigned int keylen)
 {
-	const u32 *K = (const u32 *)key;
+	int err;
 
-	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-		     (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
+	err = __des3_verify_key(flags, key);
+	if (unlikely(err))
+		return err;
 
 	des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
 	dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
@@ -993,7 +990,7 @@ static void __exit des_generic_mod_fini(void)
 	crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
 }
 
-module_init(des_generic_mod_init);
+subsys_initcall(des_generic_mod_init);
 module_exit(des_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/dh.c b/crypto/dh.c
index 09a44de4209d..ce77fb4ee8b3 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -236,7 +236,7 @@ static void dh_exit(void)
 	crypto_unregister_kpp(&dh);
 }
 
-module_init(dh_init);
+subsys_initcall(dh_init);
 module_exit(dh_exit);
 MODULE_ALIAS_CRYPTO("dh");
 MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index bc52d9562611..2a5b16bb000c 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1587,7 +1587,6 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
 	}
 
 	sdesc->shash.tfm = tfm;
-	sdesc->shash.flags = 0;
 	drbg->priv_data = sdesc;
 
 	return crypto_shash_alignmask(tfm);
@@ -2039,7 +2038,7 @@ static void __exit drbg_exit(void)
 	crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
 }
 
-module_init(drbg_init);
+subsys_initcall(drbg_init);
 module_exit(drbg_exit);
 #ifndef CRYPTO_DRBG_HASH_STRING
 #define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 0732715c8d91..de839129d151 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -101,7 +101,7 @@ static void __exit crypto_ecb_module_exit(void)
 	crypto_unregister_template(&crypto_ecb_tmpl);
 }
 
-module_init(crypto_ecb_module_init);
+subsys_initcall(crypto_ecb_module_init);
 module_exit(crypto_ecb_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index ed1237115066..dfe114bc0c4a 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2013, Kenneth MacKay
- * All rights reserved.
+ * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -24,12 +24,15 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/module.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/swab.h>
 #include <linux/fips.h>
 #include <crypto/ecdh.h>
 #include <crypto/rng.h>
+#include <asm/unaligned.h>
+#include <linux/ratelimit.h>
 
 #include "ecc.h"
 #include "ecc_curve_defs.h"
@@ -112,7 +115,7 @@ static void vli_clear(u64 *vli, unsigned int ndigits)
 }
 
 /* Returns true if vli == 0, false otherwise. */
-static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
+bool vli_is_zero(const u64 *vli, unsigned int ndigits)
 {
 	int i;
 
@@ -123,6 +126,7 @@ static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
 
 	return true;
 }
+EXPORT_SYMBOL(vli_is_zero);
 
 /* Returns nonzero if bit 'bit' of vli is set. */
 static u64 vli_test_bit(const u64 *vli, unsigned int bit)
@@ -130,6 +134,11 @@ static u64 vli_test_bit(const u64 *vli, unsigned int bit)
 	return (vli[bit / 64] & ((u64)1 << (bit % 64)));
 }
 
+static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
+{
+	return vli_test_bit(vli, ndigits * 64 - 1);
+}
+
 /* Counts the number of 64-bit "digits" in vli. */
 static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
 {
@@ -161,6 +170,27 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
 	return ((num_digits - 1) * 64 + i);
 }
 
+/* Set dest from unaligned bit string src. */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
+{
+	int i;
+	const u64 *from = src;
+
+	for (i = 0; i < ndigits; i++)
+		dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
+}
+EXPORT_SYMBOL(vli_from_be64);
+
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
+{
+	int i;
+	const u64 *from = src;
+
+	for (i = 0; i < ndigits; i++)
+		dest[i] = get_unaligned_le64(&from[i]);
+}
+EXPORT_SYMBOL(vli_from_le64);
+
 /* Sets dest = src. */
 static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
 {
@@ -171,7 +201,7 @@ static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
 }
 
 /* Returns sign of left - right. */
-static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
 {
 	int i;
 
@@ -184,6 +214,7 @@ static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
 
 	return 0;
 }
+EXPORT_SYMBOL(vli_cmp);
 
 /* Computes result = in << c, returning carry. Can modify in place
  * (if result == in). 0 < shift < 64.
@@ -239,8 +270,30 @@ static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
 	return carry;
 }
 
+/* Computes result = left + right, returning carry. Can modify in place. */
+static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
+		    unsigned int ndigits)
+{
+	u64 carry = right;
+	int i;
+
+	for (i = 0; i < ndigits; i++) {
+		u64 sum;
+
+		sum = left[i] + carry;
+		if (sum != left[i])
+			carry = (sum < left[i]);
+		else
+			carry = !!carry;
+
+		result[i] = sum;
+	}
+
+	return carry;
+}
+
 /* Computes result = left - right, returning borrow. Can modify in place. */
-static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
 		   unsigned int ndigits)
 {
 	u64 borrow = 0;
@@ -258,9 +311,37 @@ static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
 
 	return borrow;
 }
+EXPORT_SYMBOL(vli_sub);
+
+/* Computes result = left - right, returning borrow. Can modify in place. */
+static u64 vli_usub(u64 *result, const u64 *left, u64 right,
+	     unsigned int ndigits)
+{
+	u64 borrow = right;
+	int i;
+
+	for (i = 0; i < ndigits; i++) {
+		u64 diff;
+
+		diff = left[i] - borrow;
+		if (diff != left[i])
+			borrow = (diff > left[i]);
+
+		result[i] = diff;
+	}
+
+	return borrow;
+}
 
 static uint128_t mul_64_64(u64 left, u64 right)
 {
+	uint128_t result;
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+	unsigned __int128 m = (unsigned __int128)left * right;
+
+	result.m_low  = m;
+	result.m_high = m >> 64;
+#else
 	u64 a0 = left & 0xffffffffull;
 	u64 a1 = left >> 32;
 	u64 b0 = right & 0xffffffffull;
@@ -269,7 +350,6 @@ static uint128_t mul_64_64(u64 left, u64 right)
 	u64 m1 = a0 * b1;
 	u64 m2 = a1 * b0;
 	u64 m3 = a1 * b1;
-	uint128_t result;
 
 	m2 += (m0 >> 32);
 	m2 += m1;
@@ -280,7 +360,7 @@ static uint128_t mul_64_64(u64 left, u64 right)
 
 	result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
 	result.m_high = m3 + (m2 >> 32);
-
+#endif
 	return result;
 }
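
Where CONFIG_ARCH_SUPPORTS_INT128 is available, the branch added above lets the compiler emit a single widening multiply in place of the four 32x32 partial products; a quick check of the two halves, for illustration:

unsigned __int128 m = (unsigned __int128)0xffffffffffffffffULL * 2;
/* (u64)m         == 0xfffffffffffffffe  (m_low)  */
/* (u64)(m >> 64) == 0x1                 (m_high) */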
 
@@ -330,6 +410,28 @@ static void vli_mult(u64 *result, const u64 *left, const u64 *right,
 	result[ndigits * 2 - 1] = r01.m_low;
 }
 
+/* Compute product = left * right, for a small right value. */
+static void vli_umult(u64 *result, const u64 *left, u32 right,
+		      unsigned int ndigits)
+{
+	uint128_t r01 = { 0 };
+	unsigned int k;
+
+	for (k = 0; k < ndigits; k++) {
+		uint128_t product;
+
+		product = mul_64_64(left[k], right);
+		r01 = add_128_128(r01, product);
+		/* no carry */
+		result[k] = r01.m_low;
+		r01.m_low = r01.m_high;
+		r01.m_high = 0;
+	}
+	result[k] = r01.m_low;
+	for (++k; k < ndigits * 2; k++)
+		result[k] = 0;
+}
+
 static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
 {
 	uint128_t r01 = { 0, 0 };
@@ -402,6 +504,170 @@ static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
 		vli_add(result, result, mod, ndigits);
 }
 
+/*
+ * Computes result = product % mod
+ * for special form moduli: p = 2^k-c, for small c (note the minus sign)
+ *
+ * References:
+ * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
+ * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
+ * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
+ */
+static void vli_mmod_special(u64 *result, const u64 *product,
+			      const u64 *mod, unsigned int ndigits)
+{
+	u64 c = -mod[0];
+	u64 t[ECC_MAX_DIGITS * 2];
+	u64 r[ECC_MAX_DIGITS * 2];
+
+	vli_set(r, product, ndigits * 2);
+	while (!vli_is_zero(r + ndigits, ndigits)) {
+		vli_umult(t, r + ndigits, c, ndigits);
+		vli_clear(r + ndigits, ndigits);
+		vli_add(r, r, t, ndigits * 2);
+	}
+	vli_set(t, mod, ndigits);
+	vli_clear(t + ndigits, ndigits);
+	while (vli_cmp(r, t, ndigits * 2) >= 0)
+		vli_sub(r, r, t, ndigits * 2);
+	vli_set(result, r, ndigits);
+}
+
+/*
+ * Computes result = product % mod
+ * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign)
+ * where k-1 falls one bit short of a qword boundary (such as 255).
+ *
+ * References (loosely based on):
+ * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
+ * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
+ * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
+ *
+ * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
+ * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
+ * Algorithm 10.25 Fast reduction for special form moduli
+ */
+static void vli_mmod_special2(u64 *result, const u64 *product,
+			       const u64 *mod, unsigned int ndigits)
+{
+	u64 c2 = mod[0] * 2;
+	u64 q[ECC_MAX_DIGITS];
+	u64 r[ECC_MAX_DIGITS * 2];
+	u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
+	int carry; /* last bit that doesn't fit into q */
+	int i;
+
+	vli_set(m, mod, ndigits);
+	vli_clear(m + ndigits, ndigits);
+
+	vli_set(r, product, ndigits);
+	/* q and carry are top bits */
+	vli_set(q, product + ndigits, ndigits);
+	vli_clear(r + ndigits, ndigits);
+	carry = vli_is_negative(r, ndigits);
+	if (carry)
+		r[ndigits - 1] &= (1ull << 63) - 1;
+	for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
+		u64 qc[ECC_MAX_DIGITS * 2];
+
+		vli_umult(qc, q, c2, ndigits);
+		if (carry)
+			vli_uadd(qc, qc, mod[0], ndigits * 2);
+		vli_set(q, qc + ndigits, ndigits);
+		vli_clear(qc + ndigits, ndigits);
+		carry = vli_is_negative(qc, ndigits);
+		if (carry)
+			qc[ndigits - 1] &= (1ull << 63) - 1;
+		if (i & 1)
+			vli_sub(r, r, qc, ndigits * 2);
+		else
+			vli_add(r, r, qc, ndigits * 2);
+	}
+	while (vli_is_negative(r, ndigits * 2))
+		vli_add(r, r, m, ndigits * 2);
+	while (vli_cmp(r, m, ndigits * 2) >= 0)
+		vli_sub(r, r, m, ndigits * 2);
+
+	vli_set(result, r, ndigits);
+}
+
+/*
+ * Computes result = product % mod, where product is 2N words long.
+ * Reference: Ken MacKay's micro-ecc.
+ * Currently only designed to work for curve_p or curve_n.
+ */
+static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
+			  unsigned int ndigits)
+{
+	u64 mod_m[2 * ECC_MAX_DIGITS];
+	u64 tmp[2 * ECC_MAX_DIGITS];
+	u64 *v[2] = { tmp, product };
+	u64 carry = 0;
+	unsigned int i;
+	/* Shift mod so its highest set bit is at the maximum position. */
+	int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
+	int word_shift = shift / 64;
+	int bit_shift = shift % 64;
+
+	vli_clear(mod_m, word_shift);
+	if (bit_shift > 0) {
+		for (i = 0; i < ndigits; ++i) {
+			mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
+			carry = mod[i] >> (64 - bit_shift);
+		}
+	} else
+		vli_set(mod_m + word_shift, mod, ndigits);
+
+	for (i = 1; shift >= 0; --shift) {
+		u64 borrow = 0;
+		unsigned int j;
+
+		for (j = 0; j < ndigits * 2; ++j) {
+			u64 diff = v[i][j] - mod_m[j] - borrow;
+
+			if (diff != v[i][j])
+				borrow = (diff > v[i][j]);
+			v[1 - i][j] = diff;
+		}
+		i = !(i ^ borrow); /* Swap the index if there was no borrow */
+		vli_rshift1(mod_m, ndigits);
+		mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
+		vli_rshift1(mod_m + ndigits, ndigits);
+	}
+	vli_set(result, v[i], ndigits);
+}
+
+/* Computes result = product % mod using Barrett's reduction, with the
+ * precomputed value mu appended to mod after ndigits; mu = (2^{2w} / mod)
+ * has length ndigits + 1, and mu * (2^w - 1) must not overflow the
+ * ndigits boundary.
+ *
+ * Reference:
+ * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
+ * 2.4.1 Barrett's algorithm. Algorithm 2.5.
+ */
+static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
+			     unsigned int ndigits)
+{
+	u64 q[ECC_MAX_DIGITS * 2];
+	u64 r[ECC_MAX_DIGITS * 2];
+	const u64 *mu = mod + ndigits;
+
+	vli_mult(q, product + ndigits, mu, ndigits);
+	if (mu[ndigits])
+		vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
+	vli_mult(r, mod, q + ndigits, ndigits);
+	vli_sub(r, product, r, ndigits * 2);
+	while (!vli_is_zero(r + ndigits, ndigits) ||
+	       vli_cmp(r, mod, ndigits) != -1) {
+		u64 carry;
+
+		carry = vli_sub(r, r, mod, ndigits);
+		vli_usub(r + ndigits, r + ndigits, carry, ndigits);
+	}
+	vli_set(result, r, ndigits);
+}
+
 /* Computes p_result = p_product % curve_p.
  * See algorithm 5 and 6 from
  * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
@@ -509,14 +775,33 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
 	}
 }
 
-/* Computes result = product % curve_prime
- *  from http://www.nsa.gov/ia/_files/nist-routines.pdf
-*/
+/* Computes result = product % curve_prime for different curve_primes.
+ *
+ * Note that curve_primes are distinguished only by a heuristic check,
+ * not by a complete conformance check.
+ */
 static bool vli_mmod_fast(u64 *result, u64 *product,
 			  const u64 *curve_prime, unsigned int ndigits)
 {
 	u64 tmp[2 * ECC_MAX_DIGITS];
 
+	/* Currently, both NIST primes have -1 in lowest qword. */
+	if (curve_prime[0] != -1ull) {
+		/* Try to handle pseudo-Mersenne primes. */
+		if (curve_prime[ndigits - 1] == -1ull) {
+			vli_mmod_special(result, product, curve_prime,
+					 ndigits);
+			return true;
+		} else if (curve_prime[ndigits - 1] == 1ull << 63 &&
+			   curve_prime[ndigits - 2] == 0) {
+			vli_mmod_special2(result, product, curve_prime,
+					  ndigits);
+			return true;
+		}
+		vli_mmod_barrett(result, product, curve_prime, ndigits);
+		return true;
+	}
+
 	switch (ndigits) {
 	case 3:
 		vli_mmod_fast_192(result, product, curve_prime, tmp);
@@ -525,13 +810,26 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
 		vli_mmod_fast_256(result, product, curve_prime, tmp);
 		break;
 	default:
-		pr_err("unsupports digits size!\n");
+		pr_err_ratelimited("ecc: unsupported digits size!\n");
 		return false;
 	}
 
 	return true;
 }
 
+/* Computes result = (left * right) % mod.
+ * Assumes that mod is a sufficiently large curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+		       const u64 *mod, unsigned int ndigits)
+{
+	u64 product[ECC_MAX_DIGITS * 2];
+
+	vli_mult(product, left, right, ndigits);
+	vli_mmod_slow(result, product, mod, ndigits);
+}
+EXPORT_SYMBOL(vli_mod_mult_slow);
+
 /* Computes result = (left * right) % curve_prime. */
 static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
 			      const u64 *curve_prime, unsigned int ndigits)
@@ -557,7 +855,7 @@ static void vli_mod_square_fast(u64 *result, const u64 *left,
  * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
  * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
  */
-static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
 			unsigned int ndigits)
 {
 	u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
@@ -630,6 +928,7 @@ static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
 
 	vli_set(result, u, ndigits);
 }
+EXPORT_SYMBOL(vli_mod_inv);
 
 /* ------ Point operations ------ */
 
@@ -903,6 +1202,85 @@ static void ecc_point_mult(struct ecc_point *result,
 	vli_set(result->y, ry[0], ndigits);
 }
 
+/* Computes R = P + Q mod p */
+static void ecc_point_add(const struct ecc_point *result,
+		   const struct ecc_point *p, const struct ecc_point *q,
+		   const struct ecc_curve *curve)
+{
+	u64 z[ECC_MAX_DIGITS];
+	u64 px[ECC_MAX_DIGITS];
+	u64 py[ECC_MAX_DIGITS];
+	unsigned int ndigits = curve->g.ndigits;
+
+	vli_set(result->x, q->x, ndigits);
+	vli_set(result->y, q->y, ndigits);
+	vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
+	vli_set(px, p->x, ndigits);
+	vli_set(py, p->y, ndigits);
+	xycz_add(px, py, result->x, result->y, curve->p, ndigits);
+	vli_mod_inv(z, z, curve->p, ndigits);
+	apply_z(result->x, result->y, z, curve->p, ndigits);
+}
+
+/* Computes R = u1P + u2Q mod p using Shamir's trick.
+ * Based on: Kenneth MacKay's micro-ecc (2014).
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+			   const u64 *u1, const struct ecc_point *p,
+			   const u64 *u2, const struct ecc_point *q,
+			   const struct ecc_curve *curve)
+{
+	u64 z[ECC_MAX_DIGITS];
+	u64 sump[2][ECC_MAX_DIGITS];
+	u64 *rx = result->x;
+	u64 *ry = result->y;
+	unsigned int ndigits = curve->g.ndigits;
+	unsigned int num_bits;
+	struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
+	const struct ecc_point *points[4];
+	const struct ecc_point *point;
+	unsigned int idx;
+	int i;
+
+	ecc_point_add(&sum, p, q, curve);
+	points[0] = NULL;
+	points[1] = p;
+	points[2] = q;
+	points[3] = &sum;
+
+	num_bits = max(vli_num_bits(u1, ndigits),
+		       vli_num_bits(u2, ndigits));
+	i = num_bits - 1;
+	idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+	point = points[idx];
+
+	vli_set(rx, point->x, ndigits);
+	vli_set(ry, point->y, ndigits);
+	vli_clear(z + 1, ndigits - 1);
+	z[0] = 1;
+
+	for (--i; i >= 0; i--) {
+		ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
+		idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+		point = points[idx];
+		if (point) {
+			u64 tx[ECC_MAX_DIGITS];
+			u64 ty[ECC_MAX_DIGITS];
+			u64 tz[ECC_MAX_DIGITS];
+
+			vli_set(tx, point->x, ndigits);
+			vli_set(ty, point->y, ndigits);
+			apply_z(tx, ty, z, curve->p, ndigits);
+			vli_mod_sub(tz, rx, tx, curve->p, ndigits);
+			xycz_add(tx, ty, rx, ry, curve->p, ndigits);
+			vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
+		}
+	}
+	vli_mod_inv(z, z, curve->p, ndigits);
+	apply_z(rx, ry, z, curve->p, ndigits);
+}
+EXPORT_SYMBOL(ecc_point_mult_shamir);
+
 static inline void ecc_swap_digits(const u64 *in, u64 *out,
 				   unsigned int ndigits)
 {
@@ -948,6 +1326,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
 
 	return __ecc_is_key_valid(curve, private_key, ndigits);
 }
+EXPORT_SYMBOL(ecc_is_key_valid);
 
 /*
  * ECC private keys are generated using the method of extra random bits,
@@ -1000,6 +1379,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
 
 	return 0;
 }
+EXPORT_SYMBOL(ecc_gen_privkey);
 
 int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
 		     const u64 *private_key, u64 *public_key)
@@ -1036,13 +1416,17 @@ err_free_point:
 out:
 	return ret;
 }
+EXPORT_SYMBOL(ecc_make_pub_key);
 
 /* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
-static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
-				       struct ecc_point *pk)
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+				struct ecc_point *pk)
 {
 	u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
 
+	if (WARN_ON(pk->ndigits != curve->g.ndigits))
+		return -EINVAL;
+
 	/* Check 1: Verify key is not the zero point. */
 	if (ecc_point_is_zero(pk))
 		return -EINVAL;
@@ -1064,8 +1448,8 @@ static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
 		return -EINVAL;
 
 	return 0;
-
 }
+EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
 
 int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
 			      const u64 *private_key, const u64 *public_key,
@@ -1121,3 +1505,6 @@ err_alloc_product:
 out:
 	return ret;
 }
+EXPORT_SYMBOL(crypto_ecdh_shared_secret);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/ecc.h b/crypto/ecc.h
index f75a86baa3bd..ab0eb70b9c09 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -26,13 +26,51 @@
 #ifndef _CRYPTO_ECC_H
 #define _CRYPTO_ECC_H
 
+/* One digit is one u64 qword. */
 #define ECC_CURVE_NIST_P192_DIGITS  3
 #define ECC_CURVE_NIST_P256_DIGITS  4
-#define ECC_MAX_DIGITS              ECC_CURVE_NIST_P256_DIGITS
+#define ECC_MAX_DIGITS             (512 / 64)
 
 #define ECC_DIGITS_TO_BYTES_SHIFT 3
 
 /**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x:		X coordinate in vli form.
+ * @y:		Y coordinate in vli form.
+ * @ndigits:	Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+	u64 *x;
+	u64 *y;
+	u8 ndigits;
+};
+
+#define ECC_POINT_INIT(x, y, ndigits)	(struct ecc_point) { x, y, ndigits }
+
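Editorial note (not part of the patch): ECC_POINT_INIT only wires up the
pointers; the coordinate storage stays with the caller. A minimal sketch of
the intended use, mirroring how ecrdsa.c initializes ctx->pub_key below:

	u64 x[ECC_MAX_DIGITS], y[ECC_MAX_DIGITS];
	struct ecc_point pt = ECC_POINT_INIT(x, y, 256 / 64);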
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name:	Short name of the curve.
+ * @g:		Generator point of the curve.
+ * @p:		Prime number; if Barrett's reduction is used for this curve,
+ *		the pre-calculated value 'mu' is appended to @p after ndigits.
+ *		Whether Barrett's reduction is used is determined heuristically
+ *		in vli_mmod_fast().
+ * @n:		Order of the curve group.
+ * @a:		Curve parameter a.
+ * @b:		Curve parameter b.
+ */
+struct ecc_curve {
+	char *name;
+	struct ecc_point g;
+	u64 *p;
+	u64 *n;
+	u64 *a;
+	u64 *b;
+};
+
+/**
  * ecc_is_key_valid() - Validate a given ECDH private key
  *
  * @curve_id:		id representing the curve to use
@@ -91,4 +129,117 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
 int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
 			      const u64 *private_key, const u64 *public_key,
 			      u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve:		elliptic curve domain parameters
+ * @pk:			public key as a point
+ *
+ * Validate the public key according to SP800-56A section 5.6.2.3.4, ECC
+ * Partial Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+				struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine whether a vli is zero
+ *
+ * @vli:		vli to check.
+ * @ndigits:		length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left:		vli
+ * @right:		vli
+ * @ndigits:		length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result:		where to write result
+ * @left:		vli
+ * @right:		vli
+ * @ndigits:		length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: carry bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+	    unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest:		destination vli
+ * @src:		source array of u64 BE values
+ * @ndigits:		length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest:		destination vli
+ * @src:		source array of u64 LE values
+ * @ndigits:		length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
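Editorial note (not part of the patch): these primitives are meant to be
composed by callers such as ecrdsa_verify() below. A sketch of the typical
pattern, reducing a little-endian digest into 0 < e < n (with n and ndigits
taken from the curve):

	vli_from_le64(e, digest, ndigits);
	if (vli_cmp(e, n, ndigits) == 1)
		vli_sub(e, e, n, ndigits);
	if (vli_is_zero(e, ndigits))
		e[0] = 1;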
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result:		where to write the resulting vli
+ * @input:		vli value to operate on
+ * @mod:		modulus
+ * @ndigits:		length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+		 unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result:		where to write result value
+ * @left:		vli number to multiply with @right
+ * @right:		vli number to multiply with @left
+ * @mod:		modulus
+ * @ndigits:		length of all vlis
+ *
+ * Note: Assumes that @mod is a curve order or a similarly sized modulus.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+		       const u64 *mod, unsigned int ndigits);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result:		resulting point
+ * @x:			scalar to multiply with @p
+ * @p:			point to multiply with @x
+ * @y:			scalar to multiply with @q
+ * @q:			point to multiply with @y
+ * @curve:		curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This works faster than two multiplications and addition.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+			   const u64 *x, const struct ecc_point *p,
+			   const u64 *y, const struct ecc_point *q,
+			   const struct ecc_curve *curve);
 #endif
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 336ab1805639..69be6c7d228f 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -2,21 +2,6 @@
 #ifndef _CRYTO_ECC_CURVE_DEFS_H
 #define _CRYTO_ECC_CURVE_DEFS_H
 
-struct ecc_point {
-	u64 *x;
-	u64 *y;
-	u8 ndigits;
-};
-
-struct ecc_curve {
-	char *name;
-	struct ecc_point g;
-	u64 *p;
-	u64 *n;
-	u64 *a;
-	u64 *b;
-};
-
 /* NIST P-192: a = p - 3 */
 static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
 				0x188DA80EB03090F6ull };
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index bf6300175b9c..890092bd8989 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -166,7 +166,7 @@ static void ecdh_exit(void)
 	crypto_unregister_kpp(&ecdh);
 }
 
-module_init(ecdh_init);
+subsys_initcall(ecdh_init);
 module_exit(ecdh_exit);
 MODULE_ALIAS_CRYPTO("ecdh");
 MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 77e607fdbfb7..e71d1bc8d850 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -174,7 +174,7 @@ static void __exit echainiv_module_exit(void)
 	crypto_unregister_template(&echainiv_tmpl);
 }
 
-module_init(echainiv_module_init);
+subsys_initcall(echainiv_module_init);
 module_exit(echainiv_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
new file mode 100644
index 000000000000..887ec21aee49
--- /dev/null
+++ b/crypto/ecrdsa.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Elliptic Curve (Russian) Digital Signature Algorithm for Cryptographic API
+ *
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * References:
+ * GOST 34.10-2018, GOST R 34.10-2012, RFC 7091, ISO/IEC 14888-3:2018.
+ *
+ * Historical references:
+ * GOST R 34.10-2001, RFC 4357, ISO/IEC 14888-3:2006/Amd 1:2010.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/streebog.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/oid_registry.h>
+#include "ecrdsa_params.asn1.h"
+#include "ecrdsa_pub_key.asn1.h"
+#include "ecc.h"
+#include "ecrdsa_defs.h"
+
+#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
+#define ECRDSA_MAX_DIGITS (512 / 64)
+
+struct ecrdsa_ctx {
+	enum OID algo_oid; /* overall public key oid */
+	enum OID curve_oid; /* parameter */
+	enum OID digest_oid; /* parameter */
+	const struct ecc_curve *curve; /* curve from oid */
+	unsigned int digest_len; /* parameter (bytes) */
+	const char *digest; /* digest name from oid */
+	unsigned int key_len; /* @key length (bytes) */
+	const char *key; /* raw public key */
+	struct ecc_point pub_key;
+	u64 _pubp[2][ECRDSA_MAX_DIGITS]; /* point storage for @pub_key */
+};
+
+static const struct ecc_curve *get_curve_by_oid(enum OID oid)
+{
+	switch (oid) {
+	case OID_gostCPSignA:
+	case OID_gostTC26Sign256B:
+		return &gost_cp256a;
+	case OID_gostCPSignB:
+	case OID_gostTC26Sign256C:
+		return &gost_cp256b;
+	case OID_gostCPSignC:
+	case OID_gostTC26Sign256D:
+		return &gost_cp256c;
+	case OID_gostTC26Sign512A:
+		return &gost_tc512a;
+	case OID_gostTC26Sign512B:
+		return &gost_tc512b;
+	/* The following two aren't implemented: */
+	case OID_gostTC26Sign256A:
+	case OID_gostTC26Sign512C:
+	default:
+		return NULL;
+	}
+}
+
+static int ecrdsa_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	unsigned char sig[ECRDSA_MAX_SIG_SIZE];
+	unsigned char digest[STREEBOG512_DIGEST_SIZE];
+	unsigned int ndigits = req->dst_len / sizeof(u64);
+	u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
+	u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
+	u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
+	u64 e[ECRDSA_MAX_DIGITS]; /* h \mod q */
+	u64 *v = e;		  /* e^{-1} \mod q */
+	u64 z1[ECRDSA_MAX_DIGITS];
+	u64 *z2 = _r;
+	struct ecc_point cc = ECC_POINT_INIT(s, e, ndigits); /* reuse s, e */
+
+	/*
+	 * The digest value, the digest algorithm, and the curve (modulus)
+	 * must all have the same length (256 or 512 bits); the public key
+	 * and the signature are twice that length.
+	 */
+	if (!ctx->curve ||
+	    !ctx->digest ||
+	    !req->src ||
+	    !ctx->pub_key.x ||
+	    req->dst_len != ctx->digest_len ||
+	    req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
+	    ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
+	    req->dst_len * 2 != req->src_len ||
+	    WARN_ON(req->src_len > sizeof(sig)) ||
+	    WARN_ON(req->dst_len > sizeof(digest)))
+		return -EBADMSG;
+
+	sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
+			  sig, req->src_len);
+	sg_pcopy_to_buffer(req->src,
+			   sg_nents_for_len(req->src,
+					    req->src_len + req->dst_len),
+			   digest, req->dst_len, req->src_len);
+
+	vli_from_be64(s, sig, ndigits);
+	vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
+
+	/* Step 1: verify that 0 < r < q, 0 < s < q */
+	if (vli_is_zero(r, ndigits) ||
+	    vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
+	    vli_is_zero(s, ndigits) ||
+	    vli_cmp(s, ctx->curve->n, ndigits) == 1)
+		return -EKEYREJECTED;
+
+	/* Step 2: calculate hash (h) of the message (passed as input) */
+	/* Step 3: calculate e = h \mod q */
+	vli_from_le64(e, digest, ndigits);
+	if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
+		vli_sub(e, e, ctx->curve->n, ndigits);
+	if (vli_is_zero(e, ndigits))
+		e[0] = 1;
+
+	/* Step 4: calculate v = e^{-1} \mod q */
+	vli_mod_inv(v, e, ctx->curve->n, ndigits);
+
+	/* Step 5: calculate z_1 = sv \mod q, z_2 = -rv \mod q */
+	vli_mod_mult_slow(z1, s, v, ctx->curve->n, ndigits);
+	vli_sub(_r, ctx->curve->n, r, ndigits);
+	vli_mod_mult_slow(z2, _r, v, ctx->curve->n, ndigits);
+
+	/* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+	ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+			      ctx->curve);
+	if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
+		vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+
+	/* Step 7: if R == r, the signature is valid */
+	if (!vli_cmp(cc.x, r, ndigits))
+		return 0;
+	else
+		return -EKEYREJECTED;
+}
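Editorial note (not part of the patch): a sketch of why step 7 works, using
the standard GOST R 34.10 algebra. The signer, holding private key d with
Q = dP, picks a nonce k and computes r = (kP).x mod q and s = (rd + ke) mod q.
With v = e^{-1} mod q, the verifier's point is

	C = z1*P + z2*Q = v*s*P - v*r*d*P = v*(s - r*d)*P = v*(k*e)*P = k*P,

so C.x mod q equals r exactly when the signature is genuine.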
+
+int ecrdsa_param_curve(void *context, size_t hdrlen, unsigned char tag,
+		       const void *value, size_t vlen)
+{
+	struct ecrdsa_ctx *ctx = context;
+
+	ctx->curve_oid = look_up_OID(value, vlen);
+	if (!ctx->curve_oid)
+		return -EINVAL;
+	ctx->curve = get_curve_by_oid(ctx->curve_oid);
+	return 0;
+}
+
+/* Optional. If present, it must match the expected digest algorithm OID. */
+int ecrdsa_param_digest(void *context, size_t hdrlen, unsigned char tag,
+			const void *value, size_t vlen)
+{
+	struct ecrdsa_ctx *ctx = context;
+	int digest_oid = look_up_OID(value, vlen);
+
+	if (digest_oid != ctx->digest_oid)
+		return -EINVAL;
+	return 0;
+}
+
+int ecrdsa_parse_pub_key(void *context, size_t hdrlen, unsigned char tag,
+			 const void *value, size_t vlen)
+{
+	struct ecrdsa_ctx *ctx = context;
+
+	ctx->key = value;
+	ctx->key_len = vlen;
+	return 0;
+}
+
+static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
+{
+	memcpy(dst, src, sizeof(u32));
+	return src + sizeof(u32);
+}
+
+/* Parse BER encoded subjectPublicKey. */
+static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+			      unsigned int keylen)
+{
+	struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	unsigned int ndigits;
+	u32 algo, paramlen;
+	u8 *params;
+	int err;
+
+	err = asn1_ber_decoder(&ecrdsa_pub_key_decoder, ctx, key, keylen);
+	if (err < 0)
+		return err;
+
+	/* The key parameters follow the key itself, after keylen bytes. */
+	params = ecrdsa_unpack_u32(&paramlen,
+			  ecrdsa_unpack_u32(&algo, (u8 *)key + keylen));
+
+	if (algo == OID_gost2012PKey256) {
+		ctx->digest	= "streebog256";
+		ctx->digest_oid	= OID_gost2012Digest256;
+		ctx->digest_len	= 256 / 8;
+	} else if (algo == OID_gost2012PKey512) {
+		ctx->digest	= "streebog512";
+		ctx->digest_oid	= OID_gost2012Digest512;
+		ctx->digest_len	= 512 / 8;
+	} else
+		return -ENOPKG;
+	ctx->algo_oid = algo;
+
+	/* Parse SubjectPublicKeyInfo.AlgorithmIdentifier.parameters. */
+	err = asn1_ber_decoder(&ecrdsa_params_decoder, ctx, params, paramlen);
+	if (err < 0)
+		return err;
+	/*
+	 * The digest size (set in digest_len from the algo OID) and the
+	 * curve size must match each other.
+	 */
+	if (!ctx->curve ||
+	    ctx->curve->g.ndigits * sizeof(u64) != ctx->digest_len)
+		return -ENOPKG;
+	/*
+	 * The key is two 256- or 512-bit coordinates, which must match
+	 * the curve size.
+	 */
+	if ((ctx->key_len != (2 * 256 / 8) &&
+	     ctx->key_len != (2 * 512 / 8)) ||
+	    ctx->key_len != ctx->curve->g.ndigits * sizeof(u64) * 2)
+		return -ENOPKG;
+
+	ndigits = ctx->key_len / sizeof(u64) / 2;
+	ctx->pub_key = ECC_POINT_INIT(ctx->_pubp[0], ctx->_pubp[1], ndigits);
+	vli_from_le64(ctx->pub_key.x, ctx->key, ndigits);
+	vli_from_le64(ctx->pub_key.y, ctx->key + ndigits * sizeof(u64),
+		      ndigits);
+
+	if (ecc_is_pubkey_valid_partial(ctx->curve, &ctx->pub_key))
+		return -EKEYREJECTED;
+
+	return 0;
+}
+
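Editorial note (not part of the patch): the raw key layout this parser
expects, pieced together from the code above. The BER decoder leaves ctx->key
pointing at the 2 * ndigits little-endian u64 coordinates, and the caller
appends the algorithm OID and parameters after the key itself:

	key[0 .. keylen-1]        X || Y, little-endian u64 digits
	key[keylen .. keylen+3]   u32: overall algo OID (e.g. OID_gost2012PKey256)
	key[keylen+4 .. keylen+7] u32: paramlen
	key[keylen+8 .. ]         BER-encoded EcrdsaParams (curve, optional digest)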
+static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
+{
+	struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	/*
+	 * Verify doesn't need any output, so it's just informational
+	 * for keyctl to determine the key bit size.
+	 */
+	return ctx->pub_key.ndigits * sizeof(u64);
+}
+
+static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+}
+
+static struct akcipher_alg ecrdsa_alg = {
+	.verify		= ecrdsa_verify,
+	.set_pub_key	= ecrdsa_set_pub_key,
+	.max_size	= ecrdsa_max_size,
+	.exit		= ecrdsa_exit_tfm,
+	.base = {
+		.cra_name	 = "ecrdsa",
+		.cra_driver_name = "ecrdsa-generic",
+		.cra_priority	 = 100,
+		.cra_module	 = THIS_MODULE,
+		.cra_ctxsize	 = sizeof(struct ecrdsa_ctx),
+	},
+};
+
+static int __init ecrdsa_mod_init(void)
+{
+	return crypto_register_akcipher(&ecrdsa_alg);
+}
+
+static void __exit ecrdsa_mod_fini(void)
+{
+	crypto_unregister_akcipher(&ecrdsa_alg);
+}
+
+module_init(ecrdsa_mod_init);
+module_exit(ecrdsa_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h
new file mode 100644
index 000000000000..170baf039007
--- /dev/null
+++ b/crypto/ecrdsa_defs.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Definitions of EC-RDSA Curve Parameters
+ *
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYTO_ECRDSA_DEFS_H
+#define _CRYTO_ECRDSA_DEFS_H
+
+#include "ecc.h"
+
+#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
+#define ECRDSA_MAX_DIGITS (512 / 64)
+
+/*
+ * EC-RDSA uses its own set of curves.
+ *
+ * The cp256{a,b,c} curves were first defined for GOST R 34.10-2001 in RFC
+ * 4357 (as the 256-bit {A,B,C}-ParamSets), then inherited by GOST R
+ * 34.10-2012 and proposed for use in R 50.1.114-2016 and RFC 7836 as its
+ * 256-bit curves.
+ */
+/* OID_gostCPSignA 1.2.643.2.2.35.1 */
+static u64 cp256a_g_x[] = {
+	0x0000000000000001ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256a_g_y[] = {
+	0x22ACC99C9E9F1E14ull, 0x35294F2DDF23E3B1ull,
+	0x27DF505A453F2B76ull, 0x8D91E471E0989CDAull, };
+static u64 cp256a_p[] = { /* p = 2^256 - 617 */
+	0xFFFFFFFFFFFFFD97ull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_n[] = {
+	0x45841B09B761B893ull, 0x6C611070995AD100ull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_a[] = { /* a = p - 3 */
+	0xFFFFFFFFFFFFFD94ull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_b[] = {
+	0x00000000000000a6ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull };
+
+static struct ecc_curve gost_cp256a = {
+	.name = "cp256a",
+	.g = {
+		.x = cp256a_g_x,
+		.y = cp256a_g_y,
+		.ndigits = 256 / 64,
+	},
+	.p = cp256a_p,
+	.n = cp256a_n,
+	.a = cp256a_a,
+	.b = cp256a_b
+};
+
+/* OID_gostCPSignB 1.2.643.2.2.35.2 */
+static u64 cp256b_g_x[] = {
+	0x0000000000000001ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256b_g_y[] = {
+	0x744BF8D717717EFCull, 0xC545C9858D03ECFBull,
+	0xB83D1C3EB2C070E5ull, 0x3FA8124359F96680ull, };
+static u64 cp256b_p[] = { /* p = 2^255 + 3225 */
+	0x0000000000000C99ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 cp256b_n[] = {
+	0xE497161BCC8A198Full, 0x5F700CFFF1A624E5ull,
+	0x0000000000000001ull, 0x8000000000000000ull, };
+static u64 cp256b_a[] = { /* a = p - 3 */
+	0x0000000000000C96ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 cp256b_b[] = {
+	0x2F49D4CE7E1BBC8Bull, 0xE979259373FF2B18ull,
+	0x66A7D3C25C3DF80Aull, 0x3E1AF419A269A5F8ull, };
+
+static struct ecc_curve gost_cp256b = {
+	.name = "cp256b",
+	.g = {
+		.x = cp256b_g_x,
+		.y = cp256b_g_y,
+		.ndigits = 256 / 64,
+	},
+	.p = cp256b_p,
+	.n = cp256b_n,
+	.a = cp256b_a,
+	.b = cp256b_b
+};
+
+/* OID_gostCPSignC 1.2.643.2.2.35.3 */
+static u64 cp256c_g_x[] = {
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256c_g_y[] = {
+	0x366E550DFDB3BB67ull, 0x4D4DC440D4641A8Full,
+	0x3CBF3783CD08C0EEull, 0x41ECE55743711A8Cull, };
+static u64 cp256c_p[] = {
+	0x7998F7B9022D759Bull, 0xCF846E86789051D3ull,
+	0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull,
+	/* pre-computed value for Barrett's reduction */
+	0xedc283cdd217b5a2ull, 0xbac48fc06398ae59ull,
+	0x405384d55f9f3b73ull, 0xa51f176161f1d734ull,
+	0x0000000000000001ull, };
+static u64 cp256c_n[] = {
+	0xF02F3A6598980BB9ull, 0x582CA3511EDDFB74ull,
+	0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
+static u64 cp256c_a[] = { /* a = p - 3 */
+	0x7998F7B9022D7598ull, 0xCF846E86789051D3ull,
+	0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
+static u64 cp256c_b[] = {
+	0x000000000000805aull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull, };
+
+static struct ecc_curve gost_cp256c = {
+	.name = "cp256c",
+	.g = {
+		.x = cp256c_g_x,
+		.y = cp256c_g_y,
+		.ndigits = 256 / 64,
+	},
+	.p = cp256c_p,
+	.n = cp256c_n,
+	.a = cp256c_a,
+	.b = cp256c_b
+};
+
+/* The tc512{a,b} curves were first recommended in 2013 and then standardized
+ * in R 50.1.114-2016 and RFC 7836 for use with GOST R 34.10-2012 (as the
+ * TC26 512-bit ParamSet{A,B}).
+ */
+/* OID_gostTC26Sign512A 1.2.643.7.1.2.1.2.1 */
+static u64 tc512a_g_x[] = {
+	0x0000000000000003ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 tc512a_g_y[] = {
+	0x89A589CB5215F2A4ull, 0x8028FE5FC235F5B8ull,
+	0x3D75E6A50E3A41E9ull, 0xDF1626BE4FD036E9ull,
+	0x778064FDCBEFA921ull, 0xCE5E1C93ACF1ABC1ull,
+	0xA61B8816E25450E6ull, 0x7503CFE87A836AE3ull, };
+static u64 tc512a_p[] = { /* p = 2^512 - 569 */
+	0xFFFFFFFFFFFFFDC7ull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_n[] = {
+	0xCACDB1411F10B275ull, 0x9B4B38ABFAD2B85Dull,
+	0x6FF22B8D4E056060ull, 0x27E69532F48D8911ull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_a[] = { /* a = p - 3 */
+	0xFFFFFFFFFFFFFDC4ull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+	0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_b[] = {
+	0x503190785A71C760ull, 0x862EF9D4EBEE4761ull,
+	0x4CB4574010DA90DDull, 0xEE3CB090F30D2761ull,
+	0x79BD081CFD0B6265ull, 0x34B82574761CB0E8ull,
+	0xC1BD0B2B6667F1DAull, 0xE8C2505DEDFC86DDull, };
+
+static struct ecc_curve gost_tc512a = {
+	.name = "tc512a",
+	.g = {
+		.x = tc512a_g_x,
+		.y = tc512a_g_y,
+		.ndigits = 512 / 64,
+	},
+	.p = tc512a_p,
+	.n = tc512a_n,
+	.a = tc512a_a,
+	.b = tc512a_b
+};
+
+/* OID_gostTC26Sign512B 1.2.643.7.1.2.1.2.2 */
+static u64 tc512b_g_x[] = {
+	0x0000000000000002ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 tc512b_g_y[] = {
+	0x7E21340780FE41BDull, 0x28041055F94CEEECull,
+	0x152CBCAAF8C03988ull, 0xDCB228FD1EDF4A39ull,
+	0xBE6DD9E6C8EC7335ull, 0x3C123B697578C213ull,
+	0x2C071E3647A8940Full, 0x1A8F7EDA389B094Cull, };
+static u64 tc512b_p[] = { /* p = 2^511 + 111 */
+	0x000000000000006Full, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_n[] = {
+	0xC6346C54374F25BDull, 0x8B996712101BEA0Eull,
+	0xACFDB77BD9D40CFAull, 0x49A1EC142565A545ull,
+	0x0000000000000001ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_a[] = { /* a = p - 3 */
+	0x000000000000006Cull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x0000000000000000ull,
+	0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_b[] = {
+	0xFB8CCBC7C5140116ull, 0x50F78BEE1FA3106Eull,
+	0x7F8B276FAD1AB69Cull, 0x3E965D2DB1416D21ull,
+	0xBF85DC806C4B289Full, 0xB97C7D614AF138BCull,
+	0x7E3E06CF6F5E2517ull, 0x687D1B459DC84145ull, };
+
+static struct ecc_curve gost_tc512b = {
+	.name = "tc512b",
+	.g = {
+		.x = tc512b_g_x,
+		.y = tc512b_g_y,
+		.ndigits = 512 / 64,
+	},
+	.p = tc512b_p,
+	.n = tc512b_n,
+	.a = tc512b_a,
+	.b = tc512b_b
+};
+
+#endif
diff --git a/crypto/ecrdsa_params.asn1 b/crypto/ecrdsa_params.asn1
new file mode 100644
index 000000000000..aba99c3763cf
--- /dev/null
+++ b/crypto/ecrdsa_params.asn1
@@ -0,0 +1,4 @@
+EcrdsaParams ::= SEQUENCE {
+	curve	OBJECT IDENTIFIER ({ ecrdsa_param_curve }),
+	digest	OBJECT IDENTIFIER OPTIONAL ({ ecrdsa_param_digest })
+}
diff --git a/crypto/ecrdsa_pub_key.asn1 b/crypto/ecrdsa_pub_key.asn1
new file mode 100644
index 000000000000..048cb646bce4
--- /dev/null
+++ b/crypto/ecrdsa_pub_key.asn1
@@ -0,0 +1 @@
+EcrdsaPubKey ::= OCTET STRING ({ ecrdsa_parse_pub_key })
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 77286ea28865..4e8704405a3b 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -414,7 +414,7 @@ static void __exit fcrypt_mod_fini(void)
 	crypto_unregister_alg(&fcrypt_alg);
 }
 
-module_init(fcrypt_mod_init);
+subsys_initcall(fcrypt_mod_init);
 module_exit(fcrypt_mod_fini);
 
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 9d627c1cf8bc..9dfed122d6da 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -74,5 +74,5 @@ static void __exit fips_exit(void)
 	crypto_proc_fips_exit();
 }
 
-module_init(fips_init);
+subsys_initcall(fips_init);
 module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e1a11f529d25..33f45a980967 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
 
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *ghash_name)
 {
@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	err = -EINVAL;
-	if (ghash->digestsize != 16)
+	if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+	    ghash->digestsize != 16)
 		goto err_drop_ghash;
 
 	crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 
 	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
 
-	/* We only support 16-byte blocks. */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto out_put_ctr;
 
-	/* Not a stream cipher? */
-	if (ctr->base.cra_blocksize != 1)
+	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;
 
-	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
 		     ghash_alg->cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;
 
-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = (ghash->base.cra_flags |
 				    ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	    CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, "ghash");
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
 }
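Editorial note (not part of the patch): the snprintf above derives the
instance name from the resolved CTR algorithm instead of a caller-supplied
full_name. For example:

	ctr->base.cra_name     = "ctr(aes)"
	ctr->base.cra_name + 4 =      "aes)"
	"gcm(%s" + "aes)"      = "gcm(aes)"

Together with the new cra_name checks, this guarantees that an instance
registered as "gcm(aes)" really is built from "ctr(aes)" and "ghash".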
 
 static int crypto_gcm_base_create(struct crypto_template *tmpl,
@@ -732,7 +726,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 {
 	const char *ctr_name;
 	const char *ghash_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
@@ -742,12 +735,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 	if (IS_ERR(ghash_name))
 		return PTR_ERR(ghash_name);
 
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
-		     ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, ghash_name);
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
 }
 
 static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1258,7 +1246,7 @@ static void __exit crypto_gcm_module_exit(void)
 				    ARRAY_SIZE(crypto_gcm_tmpls));
 }
 
-module_init(crypto_gcm_module_init);
+subsys_initcall(crypto_gcm_module_init);
 module_exit(crypto_gcm_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index d9f192b953b2..e6307935413c 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -149,7 +149,7 @@ static void __exit ghash_mod_exit(void)
 	crypto_unregister_shash(&ghash_alg);
 }
 
-module_init(ghash_mod_init);
+subsys_initcall(ghash_mod_init);
 module_exit(ghash_mod_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e74730224f0a..a68c1266121f 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -57,8 +57,6 @@ static int hmac_setkey(struct crypto_shash *parent,
 	unsigned int i;
 
 	shash->tfm = hash;
-	shash->flags = crypto_shash_get_flags(parent)
-		& CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (keylen > bs) {
 		int err;
@@ -91,8 +89,6 @@ static int hmac_export(struct shash_desc *pdesc, void *out)
 {
 	struct shash_desc *desc = shash_desc_ctx(pdesc);
 
-	desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
 	return crypto_shash_export(desc, out);
 }
 
@@ -102,7 +98,6 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
 	struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
 
 	desc->tfm = ctx->hash;
-	desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	return crypto_shash_import(desc, in);
 }
@@ -117,8 +112,6 @@ static int hmac_update(struct shash_desc *pdesc,
 {
 	struct shash_desc *desc = shash_desc_ctx(pdesc);
 
-	desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
 	return crypto_shash_update(desc, data, nbytes);
 }
 
@@ -130,8 +123,6 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out)
 	char *opad = crypto_shash_ctx_aligned(parent) + ss;
 	struct shash_desc *desc = shash_desc_ctx(pdesc);
 
-	desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
 	return crypto_shash_final(desc, out) ?:
 	       crypto_shash_import(desc, opad) ?:
 	       crypto_shash_finup(desc, out, ds, out);
@@ -147,8 +138,6 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
 	char *opad = crypto_shash_ctx_aligned(parent) + ss;
 	struct shash_desc *desc = shash_desc_ctx(pdesc);
 
-	desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
 	return crypto_shash_finup(desc, data, nbytes, out) ?:
 	       crypto_shash_import(desc, opad) ?:
 	       crypto_shash_finup(desc, out, ds, out);
@@ -268,7 +257,7 @@ static void __exit hmac_module_exit(void)
 	crypto_unregister_template(&hmac_tmpl);
 }
 
-module_init(hmac_module_init);
+subsys_initcall(hmac_module_init);
 module_exit(hmac_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 787dccca3715..6ea1a270b8dc 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -198,7 +198,7 @@ static void __exit jent_mod_exit(void)
 	crypto_unregister_rng(&jent_alg);
 }
 
-module_init(jent_mod_init);
+subsys_initcall(jent_mod_init);
 module_exit(jent_mod_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index a5cfe610d8f4..a155c88105ea 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -310,7 +310,7 @@ static void __exit crypto_kw_exit(void)
 	crypto_unregister_template(&crypto_kw_tmpl);
 }
 
-module_init(crypto_kw_init);
+subsys_initcall(crypto_kw_init);
 module_exit(crypto_kw_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 873eb5ded6d7..b50aa8a3ab4c 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -875,7 +875,7 @@ static void __exit khazad_mod_fini(void)
 }
 
 
-module_init(khazad_mod_init);
+subsys_initcall(khazad_mod_init);
 module_exit(khazad_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 08a0e458bc3e..fa302f3f161e 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -162,8 +162,10 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 	}
 
 	err = skcipher_walk_virt(&w, req, false);
-	iv = (__be32 *)w.iv;
+	if (err)
+		return err;
 
+	iv = (__be32 *)w.iv;
 	counter[0] = be32_to_cpu(iv[3]);
 	counter[1] = be32_to_cpu(iv[2]);
 	counter[2] = be32_to_cpu(iv[1]);
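Editorial note (not part of the patch): the fix above matters because
skcipher_walk_virt() can fail, in which case the walk state, including w.iv,
is not initialized; dereferencing it anyway was the bug. The pattern is:

	err = skcipher_walk_virt(&w, req, false);
	if (err)
		return err;
	/* only now are w.iv and the rest of the walk state valid */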
@@ -435,7 +437,7 @@ static void __exit crypto_module_exit(void)
 	crypto_unregister_template(&crypto_tmpl);
 }
 
-module_init(crypto_module_init);
+subsys_initcall(crypto_module_init);
 module_exit(crypto_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index c160dfdbf2e0..1e35134d0a98 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -164,7 +164,7 @@ static void __exit lz4_mod_fini(void)
 	crypto_unregister_scomp(&scomp);
 }
 
-module_init(lz4_mod_init);
+subsys_initcall(lz4_mod_init);
 module_exit(lz4_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 583b5e013d7a..4a220b628fe7 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -165,7 +165,7 @@ static void __exit lz4hc_mod_fini(void)
 	crypto_unregister_scomp(&scomp);
 }
 
-module_init(lz4hc_mod_init);
+subsys_initcall(lz4hc_mod_init);
 module_exit(lz4hc_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index ea9c75b1db49..4c82bf18440f 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -167,7 +167,7 @@ static void __exit lzorle_mod_fini(void)
 	crypto_unregister_scomp(&scomp);
 }
 
-module_init(lzorle_mod_init);
+subsys_initcall(lzorle_mod_init);
 module_exit(lzorle_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 218567d717d6..4a6ac8f247d0 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -167,7 +167,7 @@ static void __exit lzo_mod_fini(void)
 	crypto_unregister_scomp(&scomp);
 }
 
-module_init(lzo_mod_init);
+subsys_initcall(lzo_mod_init);
 module_exit(lzo_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 9965ec40d9f9..9a1a228a0c69 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -232,7 +232,7 @@ static void __exit md4_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(md4_mod_init);
+subsys_initcall(md4_mod_init);
 module_exit(md4_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 94dd78144ba3..221c2c0932f8 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -244,7 +244,7 @@ static void __exit md5_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(md5_mod_init);
+subsys_initcall(md5_mod_init);
 module_exit(md5_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 46195e0d0f4d..538ae7933795 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -178,7 +178,7 @@ static void __exit michael_mic_exit(void)
 }
 
 
-module_init(michael_mic_init);
+subsys_initcall(michael_mic_init);
 module_exit(michael_mic_exit);
 
 MODULE_LICENSE("GPL v2");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 0747732d5b78..f8734c6576af 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -532,7 +532,7 @@ static void __exit crypto_morus1280_module_exit(void)
 	crypto_unregister_aead(&crypto_morus1280_alg);
 }
 
-module_init(crypto_morus1280_module_init);
+subsys_initcall(crypto_morus1280_module_init);
 module_exit(crypto_morus1280_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1617a1eb8be1..ae5aa9482cb4 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -523,7 +523,7 @@ static void __exit crypto_morus640_module_exit(void)
 	crypto_unregister_aead(&crypto_morus640_alg);
 }
 
-module_init(crypto_morus640_module_init);
+subsys_initcall(crypto_morus640_module_init);
 module_exit(crypto_morus640_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index ec831a5594d8..9ab4e07cde4d 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -244,7 +244,7 @@ static void __exit nhpoly1305_mod_exit(void)
 	crypto_unregister_shash(&nhpoly1305_alg);
 }
 
-module_init(nhpoly1305_mod_init);
+subsys_initcall(nhpoly1305_mod_init);
 module_exit(nhpoly1305_mod_exit);
 
 MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 34b6e1f426f7..133ff4c7f2c6 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -95,7 +95,7 @@ static void __exit crypto_ofb_module_exit(void)
 	crypto_unregister_template(&crypto_ofb_tmpl);
 }
 
-module_init(crypto_ofb_module_init);
+subsys_initcall(crypto_ofb_module_init);
 module_exit(crypto_ofb_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 2fa03fc576fe..31b3ce948474 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -191,7 +191,7 @@ static void __exit crypto_pcbc_module_exit(void)
 	crypto_unregister_template(&crypto_pcbc_tmpl);
 }
 
-module_init(crypto_pcbc_module_init);
+subsys_initcall(crypto_pcbc_module_init);
 module_exit(crypto_pcbc_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index d47cfc47b1b1..0e9ce329fd47 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -512,7 +512,7 @@ static void __exit pcrypt_exit(void)
 	crypto_unregister_template(&pcrypt_tmpl);
 }
 
-module_init(pcrypt_init);
+subsys_initcall(pcrypt_init);
 module_exit(pcrypt_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2a06874204e8..adc40298c749 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -318,7 +318,7 @@ static void __exit poly1305_mod_exit(void)
 	crypto_unregister_shash(&poly1305_alg);
 }
 
-module_init(poly1305_mod_init);
+subsys_initcall(poly1305_mod_init);
 module_exit(poly1305_mod_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 5f4472256e27..faf4252c4b85 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -318,7 +318,7 @@ static void __exit rmd128_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(rmd128_mod_init);
+subsys_initcall(rmd128_mod_init);
 module_exit(rmd128_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 737645344d1c..b33309916d4f 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -362,7 +362,7 @@ static void __exit rmd160_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(rmd160_mod_init);
+subsys_initcall(rmd160_mod_init);
 module_exit(rmd160_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 0e9d30676a01..2a643250c9a5 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -337,7 +337,7 @@ static void __exit rmd256_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(rmd256_mod_init);
+subsys_initcall(rmd256_mod_init);
 module_exit(rmd256_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 3ae1df5bb48c..2f062574fc8c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -386,7 +386,7 @@ static void __exit rmd320_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(rmd320_mod_init);
+subsys_initcall(rmd320_mod_init);
 module_exit(rmd320_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 0a6680ca8cb6..29c336068dc0 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -429,7 +429,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
 	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
 				   req->dst, ctx->key_size - 1, req->dst_len);
 
-	err = crypto_akcipher_sign(&req_ctx->child_req);
+	err = crypto_akcipher_decrypt(&req_ctx->child_req);
 	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_encrypt_sign_complete(req, err);
 
@@ -488,14 +488,21 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
 
 	err = 0;
 
-	if (req->dst_len < dst_len - pos)
-		err = -EOVERFLOW;
-	req->dst_len = dst_len - pos;
-
-	if (!err)
-		sg_copy_from_buffer(req->dst,
-				sg_nents_for_len(req->dst, req->dst_len),
-				out_buf + pos, req->dst_len);
+	if (req->dst_len != dst_len - pos) {
+		err = -EKEYREJECTED;
+		req->dst_len = dst_len - pos;
+		goto done;
+	}
+	/* Extract appended digest. */
+	sg_pcopy_to_buffer(req->src,
+			   sg_nents_for_len(req->src,
+					    req->src_len + req->dst_len),
+			   req_ctx->out_buf + ctx->key_size,
+			   req->dst_len, ctx->key_size);
+	/* Do the actual verification step. */
+	if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
+		   req->dst_len) != 0)
+		err = -EKEYREJECTED;
 done:
 	kzfree(req_ctx->out_buf);
 
@@ -532,10 +539,12 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
 	int err;
 
-	if (!ctx->key_size || req->src_len < ctx->key_size)
+	if (WARN_ON(req->dst) ||
+	    WARN_ON(!req->dst_len) ||
+	    !ctx->key_size || req->src_len < ctx->key_size)
 		return -EINVAL;
 
-	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
+	req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
@@ -551,7 +560,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
 				   req_ctx->out_sg, req->src_len,
 				   ctx->key_size);
 
-	err = crypto_akcipher_verify(&req_ctx->child_req);
+	err = crypto_akcipher_encrypt(&req_ctx->child_req);
 	if (err != -EINPROGRESS && err != -EBUSY)
 		return pkcs1pad_verify_complete(req, err);
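Editorial note (not part of the patch): the two substitutions above lean on
RSA's symmetry: RSASP1 (sign, m^d mod n) is the same modular exponentiation
as RSADP (decrypt), and RSAVP1 (verify, s^e mod n) the same as RSAEP
(encrypt). Since pkcs1pad now does the PKCS#1 framing itself and, per the
hunk above, compares the recovered digest against the one appended to
req->src, the bare "rsa" akcipher no longer needs sign/verify entry points
of its own, which is why rsa.c drops them below.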
 
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 4167980c243d..dcbb03431778 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -50,34 +50,6 @@ static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
 	return mpi_powm(m, c, key->d, key->n);
 }
 
-/*
- * RSASP1 function [RFC3447 sec 5.2.1]
- * s = m^d mod n
- */
-static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
-{
-	/* (1) Validate 0 <= m < n */
-	if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
-		return -EINVAL;
-
-	/* (2) s = m^d mod n */
-	return mpi_powm(s, m, key->d, key->n);
-}
-
-/*
- * RSAVP1 function [RFC3447 sec 5.2.2]
- * m = s^e mod n;
- */
-static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
-{
-	/* (1) Validate 0 <= s < n */
-	if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
-		return -EINVAL;
-
-	/* (2) m = s^e mod n */
-	return mpi_powm(m, s, key->e, key->n);
-}
-
 static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
 {
 	return akcipher_tfm_ctx(tfm);
@@ -160,85 +132,6 @@ err_free_m:
 	return ret;
 }
 
-static int rsa_sign(struct akcipher_request *req)
-{
-	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
-	MPI m, s = mpi_alloc(0);
-	int ret = 0;
-	int sign;
-
-	if (!s)
-		return -ENOMEM;
-
-	if (unlikely(!pkey->n || !pkey->d)) {
-		ret = -EINVAL;
-		goto err_free_s;
-	}
-
-	ret = -ENOMEM;
-	m = mpi_read_raw_from_sgl(req->src, req->src_len);
-	if (!m)
-		goto err_free_s;
-
-	ret = _rsa_sign(pkey, s, m);
-	if (ret)
-		goto err_free_m;
-
-	ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
-	if (ret)
-		goto err_free_m;
-
-	if (sign < 0)
-		ret = -EBADMSG;
-
-err_free_m:
-	mpi_free(m);
-err_free_s:
-	mpi_free(s);
-	return ret;
-}
-
-static int rsa_verify(struct akcipher_request *req)
-{
-	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-	const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
-	MPI s, m = mpi_alloc(0);
-	int ret = 0;
-	int sign;
-
-	if (!m)
-		return -ENOMEM;
-
-	if (unlikely(!pkey->n || !pkey->e)) {
-		ret = -EINVAL;
-		goto err_free_m;
-	}
-
-	s = mpi_read_raw_from_sgl(req->src, req->src_len);
-	if (!s) {
-		ret = -ENOMEM;
-		goto err_free_m;
-	}
-
-	ret = _rsa_verify(pkey, m, s);
-	if (ret)
-		goto err_free_s;
-
-	ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
-	if (ret)
-		goto err_free_s;
-
-	if (sign < 0)
-		ret = -EBADMSG;
-
-err_free_s:
-	mpi_free(s);
-err_free_m:
-	mpi_free(m);
-	return ret;
-}
-
 static void rsa_free_mpi_key(struct rsa_mpi_key *key)
 {
 	mpi_free(key->d);
@@ -353,8 +246,6 @@ static void rsa_exit_tfm(struct crypto_akcipher *tfm)
 static struct akcipher_alg rsa = {
 	.encrypt = rsa_enc,
 	.decrypt = rsa_dec,
-	.sign = rsa_sign,
-	.verify = rsa_verify,
 	.set_priv_key = rsa_set_priv_key,
 	.set_pub_key = rsa_set_pub_key,
 	.max_size = rsa_max_size,
@@ -391,7 +282,7 @@ static void rsa_exit(void)
 	crypto_unregister_akcipher(&rsa);
 }
 
-module_init(rsa_init);
+subsys_initcall(rsa_init);
 module_exit(rsa_exit);
 MODULE_ALIAS_CRYPTO("rsa");
 MODULE_LICENSE("GPL");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 00fce32ae17a..c81a44404086 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -86,18 +86,17 @@ static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
 {
 	__le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
 
-	if (dst != src)
-		memcpy(dst, src, bytes);
-
 	while (bytes >= SALSA20_BLOCK_SIZE) {
 		salsa20_block(state, stream);
-		crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+		crypto_xor_cpy(dst, src, (const u8 *)stream,
+			       SALSA20_BLOCK_SIZE);
 		bytes -= SALSA20_BLOCK_SIZE;
 		dst += SALSA20_BLOCK_SIZE;
+		src += SALSA20_BLOCK_SIZE;
 	}
 	if (bytes) {
 		salsa20_block(state, stream);
-		crypto_xor(dst, (const u8 *)stream, bytes);
+		crypto_xor_cpy(dst, src, (const u8 *)stream, bytes);
 	}
 }
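Editorial note (not part of the patch): crypto_xor_cpy(dst, src, ks, n)
computes dst = src ^ ks in a single pass, so the old two-step dance becomes
unnecessary, and src must now be advanced alongside dst:

	/* before: */ memcpy(dst, src, n); crypto_xor(dst, (const u8 *)stream, n);
	/* after:  */ crypto_xor_cpy(dst, src, (const u8 *)stream, n);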
 
@@ -161,7 +160,7 @@ static int salsa20_crypt(struct skcipher_request *req)
 
 	err = skcipher_walk_virt(&walk, req, false);
 
-	salsa20_init(state, ctx, walk.iv);
+	salsa20_init(state, ctx, req->iv);
 
 	while (walk.nbytes > 0) {
 		unsigned int nbytes = walk.nbytes;
@@ -204,7 +203,7 @@ static void __exit salsa20_generic_mod_fini(void)
 	crypto_unregister_skcipher(&alg);
 }
 
-module_init(salsa20_generic_mod_init);
+subsys_initcall(salsa20_generic_mod_init);
 module_exit(salsa20_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 6f8305f8c300..712b4c2ea021 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -29,9 +29,17 @@
 #include <crypto/internal/scompress.h>
 #include "internal.h"
 
+struct scomp_scratch {
+	spinlock_t	lock;
+	void		*src;
+	void		*dst;
+};
+
+static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
+	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
+};
+
 static const struct crypto_type crypto_scomp_type;
-static void * __percpu *scomp_src_scratches;
-static void * __percpu *scomp_dst_scratches;
 static int scomp_scratch_users;
 static DEFINE_MUTEX(scomp_lock);
 
@@ -62,76 +70,53 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_puts(m, "type         : scomp\n");
 }
 
-static void crypto_scomp_free_scratches(void * __percpu *scratches)
+static void crypto_scomp_free_scratches(void)
 {
+	struct scomp_scratch *scratch;
 	int i;
 
-	if (!scratches)
-		return;
-
-	for_each_possible_cpu(i)
-		vfree(*per_cpu_ptr(scratches, i));
+	for_each_possible_cpu(i) {
+		scratch = per_cpu_ptr(&scomp_scratch, i);
 
-	free_percpu(scratches);
+		vfree(scratch->src);
+		vfree(scratch->dst);
+		scratch->src = NULL;
+		scratch->dst = NULL;
+	}
 }
 
-static void * __percpu *crypto_scomp_alloc_scratches(void)
+static int crypto_scomp_alloc_scratches(void)
 {
-	void * __percpu *scratches;
+	struct scomp_scratch *scratch;
 	int i;
 
-	scratches = alloc_percpu(void *);
-	if (!scratches)
-		return NULL;
-
 	for_each_possible_cpu(i) {
-		void *scratch;
-
-		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
-		if (!scratch)
-			goto error;
-		*per_cpu_ptr(scratches, i) = scratch;
-	}
-
-	return scratches;
-
-error:
-	crypto_scomp_free_scratches(scratches);
-	return NULL;
-}
+		void *mem;
 
-static void crypto_scomp_free_all_scratches(void)
-{
-	if (!--scomp_scratch_users) {
-		crypto_scomp_free_scratches(scomp_src_scratches);
-		crypto_scomp_free_scratches(scomp_dst_scratches);
-		scomp_src_scratches = NULL;
-		scomp_dst_scratches = NULL;
-	}
-}
+		scratch = per_cpu_ptr(&scomp_scratch, i);
 
-static int crypto_scomp_alloc_all_scratches(void)
-{
-	if (!scomp_scratch_users++) {
-		scomp_src_scratches = crypto_scomp_alloc_scratches();
-		if (!scomp_src_scratches)
-			return -ENOMEM;
-		scomp_dst_scratches = crypto_scomp_alloc_scratches();
-		if (!scomp_dst_scratches) {
-			crypto_scomp_free_scratches(scomp_src_scratches);
-			scomp_src_scratches = NULL;
-			return -ENOMEM;
-		}
+		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+		if (!mem)
+			goto error;
+		scratch->src = mem;
+		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+		if (!mem)
+			goto error;
+		scratch->dst = mem;
 	}
 	return 0;
+error:
+	crypto_scomp_free_scratches();
+	return -ENOMEM;
 }
 
 static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
 {
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&scomp_lock);
-	ret = crypto_scomp_alloc_all_scratches();
+	if (!scomp_scratch_users++)
+		ret = crypto_scomp_alloc_scratches();
 	mutex_unlock(&scomp_lock);
 
 	return ret;
@@ -143,42 +128,41 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
 	void **tfm_ctx = acomp_tfm_ctx(tfm);
 	struct crypto_scomp *scomp = *tfm_ctx;
 	void **ctx = acomp_request_ctx(req);
-	const int cpu = get_cpu();
-	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
-	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+	struct scomp_scratch *scratch;
 	int ret;
 
-	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+		return -EINVAL;
 
-	if (req->dst && !req->dlen) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (req->dst && !req->dlen)
+		return -EINVAL;
 
 	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
 		req->dlen = SCOMP_SCRATCH_SIZE;
 
-	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+	scratch = raw_cpu_ptr(&scomp_scratch);
+	spin_lock(&scratch->lock);
+
+	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
 	if (dir)
-		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
-					    scratch_dst, &req->dlen, *ctx);
+		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
+					    scratch->dst, &req->dlen, *ctx);
 	else
-		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
-					      scratch_dst, &req->dlen, *ctx);
+		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
+					      scratch->dst, &req->dlen, *ctx);
 	if (!ret) {
 		if (!req->dst) {
 			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
-			if (!req->dst)
+			if (!req->dst) {
+				ret = -ENOMEM;
 				goto out;
+			}
 		}
-		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
 					 1);
 	}
 out:
-	put_cpu();
+	spin_unlock(&scratch->lock);
 	return ret;
 }
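Editorial note (not part of the patch): the rework replaces two bare per-CPU
pointer arrays with one struct scomp_scratch per CPU, serialized by a
spinlock instead of get_cpu()/put_cpu() pinning. The resulting pattern is:

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);
	/* ... compress or decompress via scratch->src / scratch->dst ... */
	spin_unlock(&scratch->lock);

If the task migrates between raw_cpu_ptr() and spin_lock(), it simply keeps
using the old CPU's scratch area; the lock still serializes against any
other user of that buffer.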
 
@@ -199,7 +183,8 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
 	crypto_free_scomp(*ctx);
 
 	mutex_lock(&scomp_lock);
-	crypto_scomp_free_all_scratches();
+	if (!--scomp_scratch_users)
+		crypto_scomp_free_scratches();
 	mutex_unlock(&scomp_lock);
 }
 
diff --git a/crypto/seed.c b/crypto/seed.c
index c6ba8438be43..a75ac50fa4fd 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -470,7 +470,7 @@ static void __exit seed_fini(void)
 	crypto_unregister_alg(&seed_alg);
 }
 
-module_init(seed_init);
+subsys_initcall(seed_init);
 module_exit(seed_fini);
 
 MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ed1b0e9f2436..3f2fad615d26 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -211,7 +211,7 @@ static void __exit seqiv_module_exit(void)
 	crypto_unregister_template(&seqiv_tmpl);
 }
 
-module_init(seqiv_module_init);
+subsys_initcall(seqiv_module_init);
 module_exit(seqiv_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7c3382facc82..ec4ec89ad108 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -664,7 +664,7 @@ static void __exit serpent_mod_fini(void)
 	crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
 }
 
-module_init(serpent_mod_init);
+subsys_initcall(serpent_mod_init);
 module_exit(serpent_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 2af64ef81f40..1b806d4584b2 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -92,7 +92,7 @@ static void __exit sha1_generic_mod_fini(void)
 	crypto_unregister_shash(&alg);
 }
 
-module_init(sha1_generic_mod_init);
+subsys_initcall(sha1_generic_mod_init);
 module_exit(sha1_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 1e5ba6649e8d..5844e9a469e8 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -301,7 +301,7 @@ static void __exit sha256_generic_mod_fini(void)
 	crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
 }
 
-module_init(sha256_generic_mod_init);
+subsys_initcall(sha256_generic_mod_init);
 module_exit(sha256_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7ed98367d4fb..60fd2be609d8 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -294,7 +294,7 @@ static void __exit sha3_generic_mod_fini(void)
 	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
 }
 
-module_init(sha3_generic_mod_init);
+subsys_initcall(sha3_generic_mod_init);
 module_exit(sha3_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 4097cd555eb6..0193ecb8ae10 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -223,7 +223,7 @@ static void __exit sha512_generic_mod_fini(void)
 	crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
 }
 
-module_init(sha512_generic_mod_init);
+subsys_initcall(sha512_generic_mod_init);
 module_exit(sha512_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 15b369c4745f..e55c1f558bc3 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -238,7 +238,6 @@ static int shash_async_init(struct ahash_request *req)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return crypto_shash_init(desc);
 }
@@ -293,7 +292,6 @@ static int shash_async_finup(struct ahash_request *req)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return shash_ahash_finup(req, desc);
 }
@@ -307,14 +305,13 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 
 	if (nbytes &&
 	    (sg = req->src, offset = sg->offset,
-	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
+	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;
 
 		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes,
 					  req->result);
 		kunmap_atomic(data);
-		crypto_yield(desc->flags);
 	} else
 		err = crypto_shash_init(desc) ?:
 		      shash_ahash_finup(req, desc);
@@ -329,7 +326,6 @@ static int shash_async_digest(struct ahash_request *req)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return shash_ahash_digest(req, desc);
 }
@@ -345,7 +341,6 @@ static int shash_async_import(struct ahash_request *req, const void *in)
 	struct shash_desc *desc = ahash_request_ctx(req);
 
 	desc->tfm = *ctx;
-	desc->flags = req->base.flags;
 
 	return crypto_shash_import(desc, in);
 }
diff --git a/crypto/simd.c b/crypto/simd.c
index 78e8d037ae2b..3e3b1d1a6b1f 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -3,6 +3,7 @@
  *
  * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
  * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
  *
  * Based on aesni-intel_glue.c by:
  *  Copyright (C) 2008, Intel Corp.
@@ -20,10 +21,26 @@
  *
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Shared crypto SIMD helpers.  These functions dynamically create and register
+ * an skcipher or AEAD algorithm that wraps another, internal algorithm.  The
+ * wrapper ensures that the internal algorithm is only executed in a context
+ * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
+ * If SIMD is already usable, the wrapper directly calls the internal algorithm.
+ * Otherwise it defers execution to a workqueue via cryptd.
  *
+ * This is an alternative to having the internal algorithm implement a fallback
+ * for the !may_use_simd() case itself.
+ *
+ * Note that the wrapper algorithm is asynchronous, i.e. it has the
+ * CRYPTO_ALG_ASYNC flag set.  Therefore it won't be found by users who
+ * explicitly allocate a synchronous algorithm.
  */
 
 #include <crypto/cryptd.h>
+#include <crypto/internal/aead.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
@@ -31,6 +48,8 @@
 #include <linux/preempt.h>
 #include <asm/simd.h>
 
+/* skcipher support */
+
 struct simd_skcipher_alg {
 	const char *ialg_name;
 	struct skcipher_alg alg;
@@ -66,7 +85,7 @@ static int simd_skcipher_encrypt(struct skcipher_request *req)
 	subreq = skcipher_request_ctx(req);
 	*subreq = *req;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
 		child = &ctx->cryptd_tfm->base;
 	else
@@ -87,7 +106,7 @@ static int simd_skcipher_decrypt(struct skcipher_request *req)
 	subreq = skcipher_request_ctx(req);
 	*subreq = *req;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
 		child = &ctx->cryptd_tfm->base;
 	else
@@ -272,4 +291,254 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
 }
 EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
 
+/* AEAD support */
+
+struct simd_aead_alg {
+	const char *ialg_name;
+	struct aead_alg alg;
+};
+
+struct simd_aead_ctx {
+	struct cryptd_aead *cryptd_tfm;
+};
+
+static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+				unsigned int key_len)
+{
+	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_aead *child = &ctx->cryptd_tfm->base;
+	int err;
+
+	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
+				     CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(child, key, key_len);
+	crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
+				   CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_aead *child = &ctx->cryptd_tfm->base;
+
+	return crypto_aead_setauthsize(child, authsize);
+}
+
+static int simd_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_request *subreq;
+	struct crypto_aead *child;
+
+	subreq = aead_request_ctx(req);
+	*subreq = *req;
+
+	if (!crypto_simd_usable() ||
+	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
+		child = &ctx->cryptd_tfm->base;
+	else
+		child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	aead_request_set_tfm(subreq, child);
+
+	return crypto_aead_encrypt(subreq);
+}
+
+static int simd_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_request *subreq;
+	struct crypto_aead *child;
+
+	subreq = aead_request_ctx(req);
+	*subreq = *req;
+
+	if (!crypto_simd_usable() ||
+	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
+		child = &ctx->cryptd_tfm->base;
+	else
+		child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	aead_request_set_tfm(subreq, child);
+
+	return crypto_aead_decrypt(subreq);
+}
+
+static void simd_aead_exit(struct crypto_aead *tfm)
+{
+	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+	cryptd_free_aead(ctx->cryptd_tfm);
+}
+
+static int simd_aead_init(struct crypto_aead *tfm)
+{
+	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct cryptd_aead *cryptd_tfm;
+	struct simd_aead_alg *salg;
+	struct aead_alg *alg;
+	unsigned reqsize;
+
+	alg = crypto_aead_alg(tfm);
+	salg = container_of(alg, struct simd_aead_alg, alg);
+
+	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
+				       CRYPTO_ALG_INTERNAL);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+
+	ctx->cryptd_tfm = cryptd_tfm;
+
+	reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
+	reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
+	reqsize += sizeof(struct aead_request);
+
+	crypto_aead_set_reqsize(tfm, reqsize);
+
+	return 0;
+}
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+					      const char *drvname,
+					      const char *basename)
+{
+	struct simd_aead_alg *salg;
+	struct crypto_aead *tfm;
+	struct aead_alg *ialg;
+	struct aead_alg *alg;
+	int err;
+
+	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
+				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+
+	ialg = crypto_aead_alg(tfm);
+
+	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+	if (!salg) {
+		salg = ERR_PTR(-ENOMEM);
+		goto out_put_tfm;
+	}
+
+	salg->ialg_name = basename;
+	alg = &salg->alg;
+
+	err = -ENAMETOOLONG;
+	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
+	    CRYPTO_MAX_ALG_NAME)
+		goto out_free_salg;
+
+	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		     drvname) >= CRYPTO_MAX_ALG_NAME)
+		goto out_free_salg;
+
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+	alg->base.cra_priority = ialg->base.cra_priority;
+	alg->base.cra_blocksize = ialg->base.cra_blocksize;
+	alg->base.cra_alignmask = ialg->base.cra_alignmask;
+	alg->base.cra_module = ialg->base.cra_module;
+	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);
+
+	alg->ivsize = ialg->ivsize;
+	alg->maxauthsize = ialg->maxauthsize;
+	alg->chunksize = ialg->chunksize;
+
+	alg->init = simd_aead_init;
+	alg->exit = simd_aead_exit;
+
+	alg->setkey = simd_aead_setkey;
+	alg->setauthsize = simd_aead_setauthsize;
+	alg->encrypt = simd_aead_encrypt;
+	alg->decrypt = simd_aead_decrypt;
+
+	err = crypto_register_aead(alg);
+	if (err)
+		goto out_free_salg;
+
+out_put_tfm:
+	crypto_free_aead(tfm);
+	return salg;
+
+out_free_salg:
+	kfree(salg);
+	salg = ERR_PTR(err);
+	goto out_put_tfm;
+}
+EXPORT_SYMBOL_GPL(simd_aead_create_compat);
+
+struct simd_aead_alg *simd_aead_create(const char *algname,
+				       const char *basename)
+{
+	char drvname[CRYPTO_MAX_ALG_NAME];
+
+	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+	    CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	return simd_aead_create_compat(algname, drvname, basename);
+}
+EXPORT_SYMBOL_GPL(simd_aead_create);
+
+void simd_aead_free(struct simd_aead_alg *salg)
+{
+	crypto_unregister_aead(&salg->alg);
+	kfree(salg);
+}
+EXPORT_SYMBOL_GPL(simd_aead_free);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+			       struct simd_aead_alg **simd_algs)
+{
+	int err;
+	int i;
+	const char *algname;
+	const char *drvname;
+	const char *basename;
+	struct simd_aead_alg *simd;
+
+	err = crypto_register_aeads(algs, count);
+	if (err)
+		return err;
+
+	for (i = 0; i < count; i++) {
+		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
+		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
+		algname = algs[i].base.cra_name + 2;
+		drvname = algs[i].base.cra_driver_name + 2;
+		basename = algs[i].base.cra_driver_name;
+		simd = simd_aead_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto err_unregister;
+		simd_algs[i] = simd;
+	}
+	return 0;
+
+err_unregister:
+	simd_unregister_aeads(algs, count, simd_algs);
+	return err;
+}
+EXPORT_SYMBOL_GPL(simd_register_aeads_compat);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+			   struct simd_aead_alg **simd_algs)
+{
+	int i;
+
+	crypto_unregister_aeads(algs, count);
+
+	for (i = 0; i < count; i++) {
+		if (simd_algs[i]) {
+			simd_aead_free(simd_algs[i]);
+			simd_algs[i] = NULL;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(simd_unregister_aeads);
+
 MODULE_LICENSE("GPL");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bcf13d95f54a..2e66f312e2c4 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -131,8 +131,13 @@ unmap_src:
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (WARN_ON(err)) {
-			/* unexpected case; didn't process all bytes */
+		if (err) {
+			/*
+			 * Didn't process all bytes.  Either the algorithm is
+			 * broken, or this was the last step and it turned out
+			 * the message wasn't evenly divisible into blocks but
+			 * the algorithm requires it.
+			 */
 			err = -EINVAL;
 			goto finish;
 		}
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index c0cf87ae7ef6..e227bcada2a2 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -199,7 +199,7 @@ static void __exit sm3_generic_mod_fini(void)
 	crypto_unregister_shash(&sm3_alg);
 }
 
-module_init(sm3_generic_mod_init);
+subsys_initcall(sm3_generic_mod_init);
 module_exit(sm3_generic_mod_fini);
 
 MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index c18eebfd5edd..71ffb343709a 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -237,7 +237,7 @@ static void __exit sm4_fini(void)
 	crypto_unregister_alg(&sm4_alg);
 }
 
-module_init(sm4_init);
+subsys_initcall(sm4_init);
 module_exit(sm4_fini);
 
 MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index 5a2eafed9c29..63663c3bab7e 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -996,7 +996,7 @@ static void streebog_add512(const struct streebog_uint512 *x,
 
 static void streebog_g(struct streebog_uint512 *h,
 		       const struct streebog_uint512 *N,
-		       const u8 *m)
+		       const struct streebog_uint512 *m)
 {
 	struct streebog_uint512 Ki, data;
 	unsigned int i;
@@ -1005,7 +1005,7 @@ static void streebog_g(struct streebog_uint512 *h,
 
 	/* Starting E() */
 	Ki = data;
-	streebog_xlps(&Ki, (const struct streebog_uint512 *)&m[0], &data);
+	streebog_xlps(&Ki, m, &data);
 
 	for (i = 0; i < 11; i++)
 		streebog_round(i, &Ki, &data);
@@ -1015,16 +1015,19 @@ static void streebog_g(struct streebog_uint512 *h,
 	/* E() done */
 
 	streebog_xor(&data, h, &data);
-	streebog_xor(&data, (const struct streebog_uint512 *)&m[0], h);
+	streebog_xor(&data, m, h);
 }
 
 static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
 {
-	streebog_g(&ctx->h, &ctx->N, data);
+	struct streebog_uint512 m;
+
+	memcpy(&m, data, sizeof(m));
+
+	streebog_g(&ctx->h, &ctx->N, &m);
 
 	streebog_add512(&ctx->N, &buffer512, &ctx->N);
-	streebog_add512(&ctx->Sigma, (const struct streebog_uint512 *)data,
-			&ctx->Sigma);
+	streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
 }
 
 static void streebog_stage3(struct streebog_state *ctx)
@@ -1034,13 +1037,11 @@ static void streebog_stage3(struct streebog_state *ctx)
 	buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
 	streebog_pad(ctx);
 
-	streebog_g(&ctx->h, &ctx->N, (const u8 *)&ctx->buffer);
+	streebog_g(&ctx->h, &ctx->N, &ctx->m);
 	streebog_add512(&ctx->N, &buf, &ctx->N);
-	streebog_add512(&ctx->Sigma,
-			(const struct streebog_uint512 *)&ctx->buffer[0],
-			&ctx->Sigma);
-	streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->N);
-	streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->Sigma);
+	streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+	streebog_g(&ctx->h, &buffer0, &ctx->N);
+	streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
 	memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
 }
 
@@ -1127,7 +1128,7 @@ static void __exit streebog_mod_fini(void)
 	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
 }
 
-module_init(streebog_mod_init);
+subsys_initcall(streebog_mod_init);
 module_exit(streebog_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ea2d5007ff5..798253f05203 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -3053,7 +3053,7 @@ err_free_tv:
  */
 static void __exit tcrypt_mod_fini(void) { }
 
-module_init(tcrypt_mod_init);
+subsys_initcall(tcrypt_mod_init);
 module_exit(tcrypt_mod_fini);
 
 module_param(alg, charp, 0);
diff --git a/crypto/tea.c b/crypto/tea.c
index b70b441c7d1e..786b589e1399 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -274,7 +274,7 @@ MODULE_ALIAS_CRYPTO("tea");
 MODULE_ALIAS_CRYPTO("xtea");
 MODULE_ALIAS_CRYPTO("xeta");
 
-module_init(tea_mod_init);
+subsys_initcall(tea_mod_init);
 module_exit(tea_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8386038d67c7..c9e67c2bd725 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -37,6 +37,7 @@
 #include <crypto/akcipher.h>
 #include <crypto/kpp.h>
 #include <crypto/acompress.h>
+#include <crypto/internal/simd.h>
 
 #include "internal.h"
 
@@ -44,6 +45,9 @@ static bool notests;
 module_param(notests, bool, 0644);
 MODULE_PARM_DESC(notests, "disable crypto self-tests");
 
+static bool panic_on_fail;
+module_param(panic_on_fail, bool, 0444);
+
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
 static bool noextratests;
 module_param(noextratests, bool, 0644);
@@ -52,6 +56,9 @@ MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
 static unsigned int fuzz_iterations = 100;
 module_param(fuzz_iterations, uint, 0644);
 MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+
+DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
+EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
 #endif
 
 #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
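
Since testmgr.c is built into the cryptomgr module, these knobs are module
parameters under that prefix. Assuming the usual parameter naming, they could
be set on the kernel command line like this (hypothetical values):

	cryptomgr.panic_on_fail=1 cryptomgr.noextratests=0 cryptomgr.fuzz_iterations=200

panic_on_fail is declared read-only (0444), while notests, noextratests and
fuzz_iterations (0644) should also be adjustable at runtime through
/sys/module/cryptomgr/parameters/.
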
@@ -121,6 +128,7 @@ struct kpp_test_suite {
 
 struct alg_test_desc {
 	const char *alg;
+	const char *generic_driver;
 	int (*test)(const struct alg_test_desc *desc, const char *driver,
 		    u32 type, u32 mask);
 	int fips_allowed;	/* set if alg is allowed in fips mode */
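
The new generic_driver field lets a test descriptor name the generic
implementation explicitly, for cases where the default naming convention
(implemented by build_generic_driver_name() later in this patch) would not
resolve to a registered algorithm. A hypothetical entry:

	{
		.alg = "xts(aes)",
		.generic_driver = "xts(ecb(aes-generic))", /* overrides the convention */
		.test = alg_test_skcipher,
		/* ... */
	}

When .generic_driver is left NULL, the convention-derived name
("xts(aes-generic)" in this example) is tried instead.
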
@@ -230,12 +238,14 @@ enum finalization_type {
  *				  @offset
  * @flush_type: for hashes, whether an update() should be done now vs.
  *		continuing to accumulate data
+ * @nosimd: if doing the pending update(), do it with SIMD disabled?
  */
 struct test_sg_division {
 	unsigned int proportion_of_total;
 	unsigned int offset;
 	bool offset_relative_to_alignmask;
 	enum flush_type flush_type;
+	bool nosimd;
 };
 
 /**
@@ -255,6 +265,7 @@ struct test_sg_division {
  * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
  *				     the @iv_offset
  * @finalization_type: what finalization function to use for hashes
+ * @nosimd: execute with SIMD disabled?  Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
  */
 struct testvec_config {
 	const char *name;
@@ -265,6 +276,7 @@ struct testvec_config {
 	unsigned int iv_offset;
 	bool iv_offset_relative_to_alignmask;
 	enum finalization_type finalization_type;
+	bool nosimd;
 };
 
 #define TESTVEC_CONFIG_NAMELEN	192
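
Combined with the existing fields, a configuration that runs the whole
operation with SIMD disabled might look like this (hypothetical; proportions
are out of TEST_SG_TOTAL == 10000):

	static const struct testvec_config nosimd_cfg = {
		.name = "one even split, nosimd",
		.src_divs = { { .proportion_of_total = 10000 } },
		.finalization_type = FINALIZATION_TYPE_FINAL,
		.nosimd = true,	/* valid: req_flags lacks CRYPTO_TFM_REQ_MAY_SLEEP */
	};
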
@@ -416,8 +428,11 @@ static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
 	return ndivs;
 }
 
+#define SGDIVS_HAVE_FLUSHES	BIT(0)
+#define SGDIVS_HAVE_NOSIMD	BIT(1)
+
 static bool valid_sg_divisions(const struct test_sg_division *divs,
-			       unsigned int count, bool *any_flushes_ret)
+			       unsigned int count, int *flags_ret)
 {
 	unsigned int total = 0;
 	unsigned int i;
@@ -428,7 +443,9 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
 			return false;
 		total += divs[i].proportion_of_total;
 		if (divs[i].flush_type != FLUSH_TYPE_NONE)
-			*any_flushes_ret = true;
+			*flags_ret |= SGDIVS_HAVE_FLUSHES;
+		if (divs[i].nosimd)
+			*flags_ret |= SGDIVS_HAVE_NOSIMD;
 	}
 	return total == TEST_SG_TOTAL &&
 		memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
@@ -441,19 +458,18 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
  */
 static bool valid_testvec_config(const struct testvec_config *cfg)
 {
-	bool any_flushes = false;
+	int flags = 0;
 
 	if (cfg->name == NULL)
 		return false;
 
 	if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
-				&any_flushes))
+				&flags))
 		return false;
 
 	if (cfg->dst_divs[0].proportion_of_total) {
 		if (!valid_sg_divisions(cfg->dst_divs,
-					ARRAY_SIZE(cfg->dst_divs),
-					&any_flushes))
+					ARRAY_SIZE(cfg->dst_divs), &flags))
 			return false;
 	} else {
 		if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
@@ -466,7 +482,12 @@ static bool valid_testvec_config(const struct testvec_config *cfg)
 	    MAX_ALGAPI_ALIGNMASK + 1)
 		return false;
 
-	if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+	if ((flags & (SGDIVS_HAVE_FLUSHES | SGDIVS_HAVE_NOSIMD)) &&
+	    cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+		return false;
+
+	if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
+	    (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
 		return false;
 
 	return true;
@@ -725,15 +746,101 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
 }
 
 #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+
+/* Generate a random length in range [0, max_len], but prefer smaller values */
+static unsigned int generate_random_length(unsigned int max_len)
+{
+	unsigned int len = prandom_u32() % (max_len + 1);
+
+	switch (prandom_u32() % 4) {
+	case 0:
+		return len % 64;
+	case 1:
+		return len % 256;
+	case 2:
+		return len % 1024;
+	default:
+		return len;
+	}
+}
+
+/* Sometimes make some random changes to the given data buffer */
+static void mutate_buffer(u8 *buf, size_t count)
+{
+	size_t num_flips;
+	size_t i;
+	size_t pos;
+
+	/* Sometimes flip some bits */
+	if (prandom_u32() % 4 == 0) {
+		num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count * 8);
+		for (i = 0; i < num_flips; i++) {
+			pos = prandom_u32() % (count * 8);
+			buf[pos / 8] ^= 1 << (pos % 8);
+		}
+	}
+
+	/* Sometimes flip some bytes */
+	if (prandom_u32() % 4 == 0) {
+		num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count);
+		for (i = 0; i < num_flips; i++)
+			buf[prandom_u32() % count] ^= 0xff;
+	}
+}
+
+/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
+static void generate_random_bytes(u8 *buf, size_t count)
+{
+	u8 b;
+	u8 increment;
+	size_t i;
+
+	if (count == 0)
+		return;
+
+	switch (prandom_u32() % 8) { /* Choose a generation strategy */
+	case 0:
+	case 1:
+		/* All the same byte, plus optional mutations */
+		switch (prandom_u32() % 4) {
+		case 0:
+			b = 0x00;
+			break;
+		case 1:
+			b = 0xff;
+			break;
+		default:
+			b = (u8)prandom_u32();
+			break;
+		}
+		memset(buf, b, count);
+		mutate_buffer(buf, count);
+		break;
+	case 2:
+		/* Ascending or descending bytes, plus optional mutations */
+		increment = (u8)prandom_u32();
+		b = (u8)prandom_u32();
+		for (i = 0; i < count; i++, b += increment)
+			buf[i] = b;
+		mutate_buffer(buf, count);
+		break;
+	default:
+		/* Fully random bytes */
+		for (i = 0; i < count; i++)
+			buf[i] = (u8)prandom_u32();
+	}
+}
+
 static char *generate_random_sgl_divisions(struct test_sg_division *divs,
 					   size_t max_divs, char *p, char *end,
-					   bool gen_flushes)
+					   bool gen_flushes, u32 req_flags)
 {
 	struct test_sg_division *div = divs;
 	unsigned int remaining = TEST_SG_TOTAL;
 
 	do {
 		unsigned int this_len;
+		const char *flushtype_str;
 
 		if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
 			this_len = remaining;
@@ -762,11 +869,31 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
 			}
 		}
 
+		if (div->flush_type != FLUSH_TYPE_NONE &&
+		    !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+		    prandom_u32() % 2 == 0)
+			div->nosimd = true;
+
+		switch (div->flush_type) {
+		case FLUSH_TYPE_FLUSH:
+			if (div->nosimd)
+				flushtype_str = "<flush,nosimd>";
+			else
+				flushtype_str = "<flush>";
+			break;
+		case FLUSH_TYPE_REIMPORT:
+			if (div->nosimd)
+				flushtype_str = "<reimport,nosimd>";
+			else
+				flushtype_str = "<reimport>";
+			break;
+		default:
+			flushtype_str = "";
+			break;
+		}
+
 		BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
-		p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s",
-			       div->flush_type == FLUSH_TYPE_NONE ? "" :
-			       div->flush_type == FLUSH_TYPE_FLUSH ?
-			       "<flush> " : "<reimport> ",
+		p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
 			       this_len / 100, this_len % 100,
 			       div->offset_relative_to_alignmask ?
 					"alignmask" : "",
@@ -816,18 +943,26 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
 		break;
 	}
 
+	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+	    prandom_u32() % 2 == 0) {
+		cfg->nosimd = true;
+		p += scnprintf(p, end - p, " nosimd");
+	}
+
 	p += scnprintf(p, end - p, " src_divs=[");
 	p = generate_random_sgl_divisions(cfg->src_divs,
 					  ARRAY_SIZE(cfg->src_divs), p, end,
 					  (cfg->finalization_type !=
-					   FINALIZATION_TYPE_DIGEST));
+					   FINALIZATION_TYPE_DIGEST),
+					  cfg->req_flags);
 	p += scnprintf(p, end - p, "]");
 
 	if (!cfg->inplace && prandom_u32() % 2 == 0) {
 		p += scnprintf(p, end - p, " dst_divs=[");
 		p = generate_random_sgl_divisions(cfg->dst_divs,
 						  ARRAY_SIZE(cfg->dst_divs),
-						  p, end, false);
+						  p, end, false,
+						  cfg->req_flags);
 		p += scnprintf(p, end - p, "]");
 	}
 
@@ -838,21 +973,100 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
 
 	WARN_ON_ONCE(!valid_testvec_config(cfg));
 }
-#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
+static void crypto_disable_simd_for_test(void)
+{
+	preempt_disable();
+	__this_cpu_write(crypto_simd_disabled_for_test, true);
+}
+
+static void crypto_reenable_simd_for_test(void)
+{
+	__this_cpu_write(crypto_simd_disabled_for_test, false);
+	preempt_enable();
+}
+
+/*
+ * Given an algorithm name, build the name of the generic implementation of that
+ * algorithm, assuming the usual naming convention.  Specifically, this appends
+ * "-generic" to every part of the name that is not a template name.  Examples:
+ *
+ *	aes => aes-generic
+ *	cbc(aes) => cbc(aes-generic)
+ *	cts(cbc(aes)) => cts(cbc(aes-generic))
+ *	rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic)
+ *
+ * Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
+ */
+static int build_generic_driver_name(const char *algname,
+				     char driver_name[CRYPTO_MAX_ALG_NAME])
+{
+	const char *in = algname;
+	char *out = driver_name;
+	size_t len = strlen(algname);
+
+	if (len >= CRYPTO_MAX_ALG_NAME)
+		goto too_long;
+	do {
+		const char *in_saved = in;
+
+		while (*in && *in != '(' && *in != ')' && *in != ',')
+			*out++ = *in++;
+		if (*in != '(' && in > in_saved) {
+			len += 8;
+			if (len >= CRYPTO_MAX_ALG_NAME)
+				goto too_long;
+			memcpy(out, "-generic", 8);
+			out += 8;
+		}
+	} while ((*out++ = *in++) != '\0');
+	return 0;
+
+too_long:
+	pr_err("alg: generic driver name for \"%s\" would be too long\n",
+	       algname);
+	return -ENAMETOOLONG;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static void crypto_disable_simd_for_test(void)
+{
+}
+
+static void crypto_reenable_simd_for_test(void)
+{
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
+static int do_ahash_op(int (*op)(struct ahash_request *req),
+		       struct ahash_request *req,
+		       struct crypto_wait *wait, bool nosimd)
+{
+	int err;
+
+	if (nosimd)
+		crypto_disable_simd_for_test();
+
+	err = op(req);
+
+	if (nosimd)
+		crypto_reenable_simd_for_test();
+
+	return crypto_wait_req(err, wait);
+}
 
 static int check_nonfinal_hash_op(const char *op, int err,
 				  u8 *result, unsigned int digestsize,
-				  const char *driver, unsigned int vec_num,
+				  const char *driver, const char *vec_name,
 				  const struct testvec_config *cfg)
 {
 	if (err) {
-		pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n",
-		       driver, op, err, vec_num, cfg->name);
+		pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+		       driver, op, err, vec_name, cfg->name);
 		return err;
 	}
 	if (!testmgr_is_poison(result, digestsize)) {
-		pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return -EINVAL;
 	}
 	return 0;
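
The disable/reenable helpers above pair with the per-CPU
crypto_simd_disabled_for_test flag defined earlier in this file. They only
have an effect because crypto_simd_usable() -- the check the SIMD glue code
now uses in place of a bare may_use_simd() -- consults that flag. Roughly, in
<crypto/internal/simd.h> (a sketch; the exact config guards may differ):

	#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
	DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);

	static inline bool crypto_simd_usable(void)
	{
		return may_use_simd() &&
		       !this_cpu_read(crypto_simd_disabled_for_test);
	}
	#else
	static inline bool crypto_simd_usable(void)
	{
		return may_use_simd();
	}
	#endif

Note that crypto_disable_simd_for_test() takes preempt_disable(): the flag is
per-CPU, so the task must stay on the same CPU between setting and clearing
it, and the algorithm under test runs on that CPU as well.
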
@@ -860,7 +1074,7 @@ static int check_nonfinal_hash_op(const char *op, int err,
 
 static int test_hash_vec_cfg(const char *driver,
 			     const struct hash_testvec *vec,
-			     unsigned int vec_num,
+			     const char *vec_name,
 			     const struct testvec_config *cfg,
 			     struct ahash_request *req,
 			     struct test_sglist *tsgl,
@@ -885,11 +1099,18 @@ static int test_hash_vec_cfg(const char *driver,
 	if (vec->ksize) {
 		err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
 		if (err) {
-			pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n",
-			       driver, err, vec_num,
+			if (err == vec->setkey_error)
+				return 0;
+			pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+			       driver, vec_name, vec->setkey_error, err,
 			       crypto_ahash_get_flags(tfm));
 			return err;
 		}
+		if (vec->setkey_error) {
+			pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+			       driver, vec_name, vec->setkey_error);
+			return -EINVAL;
+		}
 	}
 
 	/* Build the scatterlist for the source data */
@@ -899,8 +1120,8 @@ static int test_hash_vec_cfg(const char *driver,
 	err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
 				&input, divs);
 	if (err) {
-		pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n",
-		       driver, vec_num, cfg->name);
+		pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+		       driver, vec_name, cfg->name);
 		return err;
 	}
 
@@ -909,17 +1130,26 @@ static int test_hash_vec_cfg(const char *driver,
 	testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
 	testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
 
-	if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) {
+	if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
+	    vec->digest_error) {
 		/* Just using digest() */
 		ahash_request_set_callback(req, req_flags, crypto_req_done,
 					   &wait);
 		ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
-		err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+		err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
 		if (err) {
-			pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n",
-			       driver, err, vec_num, cfg->name);
+			if (err == vec->digest_error)
+				return 0;
+			pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+			       driver, vec_name, vec->digest_error, err,
+			       cfg->name);
 			return err;
 		}
+		if (vec->digest_error) {
+			pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+			       driver, vec_name, vec->digest_error, cfg->name);
+			return -EINVAL;
+		}
 		goto result_ready;
 	}
 
@@ -927,9 +1157,9 @@ static int test_hash_vec_cfg(const char *driver,
 
 	ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
 	ahash_request_set_crypt(req, NULL, result, 0);
-	err = crypto_wait_req(crypto_ahash_init(req), &wait);
+	err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
 	err = check_nonfinal_hash_op("init", err, result, digestsize,
-				     driver, vec_num, cfg);
+				     driver, vec_name, cfg);
 	if (err)
 		return err;
 
@@ -943,10 +1173,11 @@ static int test_hash_vec_cfg(const char *driver,
 						   crypto_req_done, &wait);
 			ahash_request_set_crypt(req, pending_sgl, result,
 						pending_len);
-			err = crypto_wait_req(crypto_ahash_update(req), &wait);
+			err = do_ahash_op(crypto_ahash_update, req, &wait,
+					  divs[i]->nosimd);
 			err = check_nonfinal_hash_op("update", err,
 						     result, digestsize,
-						     driver, vec_num, cfg);
+						     driver, vec_name, cfg);
 			if (err)
 				return err;
 			pending_sgl = NULL;
@@ -959,13 +1190,13 @@ static int test_hash_vec_cfg(const char *driver,
 			err = crypto_ahash_export(req, hashstate);
 			err = check_nonfinal_hash_op("export", err,
 						     result, digestsize,
-						     driver, vec_num, cfg);
+						     driver, vec_name, cfg);
 			if (err)
 				return err;
 			if (!testmgr_is_poison(hashstate + statesize,
 					       TESTMGR_POISON_LEN)) {
-				pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n",
-				       driver, vec_num, cfg->name);
+				pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+				       driver, vec_name, cfg->name);
 				return -EOVERFLOW;
 			}
 
@@ -973,7 +1204,7 @@ static int test_hash_vec_cfg(const char *driver,
 			err = crypto_ahash_import(req, hashstate);
 			err = check_nonfinal_hash_op("import", err,
 						     result, digestsize,
-						     driver, vec_num, cfg);
+						     driver, vec_name, cfg);
 			if (err)
 				return err;
 		}
@@ -986,23 +1217,23 @@ static int test_hash_vec_cfg(const char *driver,
 	ahash_request_set_crypt(req, pending_sgl, result, pending_len);
 	if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
 		/* finish with update() and final() */
-		err = crypto_wait_req(crypto_ahash_update(req), &wait);
+		err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
 		err = check_nonfinal_hash_op("update", err, result, digestsize,
-					     driver, vec_num, cfg);
+					     driver, vec_name, cfg);
 		if (err)
 			return err;
-		err = crypto_wait_req(crypto_ahash_final(req), &wait);
+		err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
 		if (err) {
-			pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n",
-			       driver, err, vec_num, cfg->name);
+			pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
+			       driver, err, vec_name, cfg->name);
 			return err;
 		}
 	} else {
 		/* finish with finup() */
-		err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+		err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
 		if (err) {
-			pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n",
-			       driver, err, vec_num, cfg->name);
+			pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
+			       driver, err, vec_name, cfg->name);
 			return err;
 		}
 	}
@@ -1010,13 +1241,13 @@ static int test_hash_vec_cfg(const char *driver,
 result_ready:
 	/* Check that the algorithm produced the correct digest */
 	if (memcmp(result, vec->digest, digestsize) != 0) {
-		pr_err("alg: hash: %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
-		       driver, vec_num, cfg->name);
+		pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+		       driver, vec_name, cfg->name);
 		return -EINVAL;
 	}
 	if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
-		pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n",
-		       driver, vec_num, cfg->name);
+		pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
+		       driver, vec_name, cfg->name);
 		return -EOVERFLOW;
 	}
 
@@ -1027,11 +1258,14 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
 			 unsigned int vec_num, struct ahash_request *req,
 			 struct test_sglist *tsgl, u8 *hashstate)
 {
+	char vec_name[16];
 	unsigned int i;
 	int err;
 
+	sprintf(vec_name, "%u", vec_num);
+
 	for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
-		err = test_hash_vec_cfg(driver, vec, vec_num,
+		err = test_hash_vec_cfg(driver, vec, vec_name,
 					&default_hash_testvec_configs[i],
 					req, tsgl, hashstate);
 		if (err)
@@ -1046,7 +1280,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
 		for (i = 0; i < fuzz_iterations; i++) {
 			generate_random_testvec_config(&cfg, cfgname,
 						       sizeof(cfgname));
-			err = test_hash_vec_cfg(driver, vec, vec_num, &cfg,
+			err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
 						req, tsgl, hashstate);
 			if (err)
 				return err;
@@ -1056,9 +1290,168 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
 	return 0;
 }
 
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate a hash test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_hash_testvec(struct crypto_shash *tfm,
+					 struct hash_testvec *vec,
+					 unsigned int maxkeysize,
+					 unsigned int maxdatasize,
+					 char *name, size_t max_namelen)
+{
+	SHASH_DESC_ON_STACK(desc, tfm);
+
+	/* Data */
+	vec->psize = generate_random_length(maxdatasize);
+	generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+
+	/*
+	 * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
+	 * If algorithm is unkeyed, then maxkeysize == 0 and set ksize = 0.
+	 */
+	vec->setkey_error = 0;
+	vec->ksize = 0;
+	if (maxkeysize) {
+		vec->ksize = maxkeysize;
+		if (prandom_u32() % 4 == 0)
+			vec->ksize = 1 + (prandom_u32() % maxkeysize);
+		generate_random_bytes((u8 *)vec->key, vec->ksize);
+
+		vec->setkey_error = crypto_shash_setkey(tfm, vec->key,
+							vec->ksize);
+		/* If the key couldn't be set, no need to continue to digest. */
+		if (vec->setkey_error)
+			goto done;
+	}
+
+	/* Digest */
+	desc->tfm = tfm;
+	vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
+						vec->psize, (u8 *)vec->digest);
+done:
+	snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
+		 vec->psize, vec->ksize);
+}
+
+/*
+ * Test the hash algorithm represented by @req against the corresponding generic
+ * implementation, if one is available.
+ */
+static int test_hash_vs_generic_impl(const char *driver,
+				     const char *generic_driver,
+				     unsigned int maxkeysize,
+				     struct ahash_request *req,
+				     struct test_sglist *tsgl,
+				     u8 *hashstate)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	const unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	const unsigned int blocksize = crypto_ahash_blocksize(tfm);
+	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+	const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
+	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+	struct crypto_shash *generic_tfm = NULL;
+	unsigned int i;
+	struct hash_testvec vec = { 0 };
+	char vec_name[64];
+	struct testvec_config cfg;
+	char cfgname[TESTVEC_CONFIG_NAMELEN];
+	int err;
+
+	if (noextratests)
+		return 0;
+
+	if (!generic_driver) { /* Use default naming convention? */
+		err = build_generic_driver_name(algname, _generic_driver);
+		if (err)
+			return err;
+		generic_driver = _generic_driver;
+	}
+
+	if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+		return 0;
+
+	generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+	if (IS_ERR(generic_tfm)) {
+		err = PTR_ERR(generic_tfm);
+		if (err == -ENOENT) {
+			pr_warn("alg: hash: skipping comparison tests for %s because %s is unavailable\n",
+				driver, generic_driver);
+			return 0;
+		}
+		pr_err("alg: hash: error allocating %s (generic impl of %s): %d\n",
+		       generic_driver, algname, err);
+		return err;
+	}
+
+	/* Check the algorithm properties for consistency. */
+
+	if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+		pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, digestsize,
+		       crypto_shash_digestsize(generic_tfm));
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+		pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, blocksize, crypto_shash_blocksize(generic_tfm));
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Now generate test vectors using the generic implementation, and test
+	 * the other implementation against them.
+	 */
+
+	vec.key = kmalloc(maxkeysize, GFP_KERNEL);
+	vec.plaintext = kmalloc(maxdatasize, GFP_KERNEL);
+	vec.digest = kmalloc(digestsize, GFP_KERNEL);
+	if (!vec.key || !vec.plaintext || !vec.digest) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < fuzz_iterations * 8; i++) {
+		generate_random_hash_testvec(generic_tfm, &vec,
+					     maxkeysize, maxdatasize,
+					     vec_name, sizeof(vec_name));
+		generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+		err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg,
+					req, tsgl, hashstate);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	err = 0;
+out:
+	kfree(vec.key);
+	kfree(vec.plaintext);
+	kfree(vec.digest);
+	crypto_free_shash(generic_tfm);
+	return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_hash_vs_generic_impl(const char *driver,
+				     const char *generic_driver,
+				     unsigned int maxkeysize,
+				     struct ahash_request *req,
+				     struct test_sglist *tsgl,
+				     u8 *hashstate)
+{
+	return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
 static int __alg_test_hash(const struct hash_testvec *vecs,
 			   unsigned int num_vecs, const char *driver,
-			   u32 type, u32 mask)
+			   u32 type, u32 mask,
+			   const char *generic_driver, unsigned int maxkeysize)
 {
 	struct crypto_ahash *tfm;
 	struct ahash_request *req = NULL;
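
When no explicit .generic_driver is given, test_hash_vs_generic_impl() derives
one from the algorithm's cra_name via build_generic_driver_name(). A quick
usage sketch, following the examples in that function's comment:

	char generic[CRYPTO_MAX_ALG_NAME];

	if (build_generic_driver_name("cts(cbc(aes))", generic) == 0) {
		/* generic now holds "cts(cbc(aes-generic))" */
	}
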
@@ -1106,7 +1499,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
 		if (err)
 			goto out;
 	}
-	err = 0;
+	err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
+					tsgl, hashstate);
 out:
 	kfree(hashstate);
 	if (tsgl) {
@@ -1124,6 +1518,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
 	const struct hash_testvec *template = desc->suite.hash.vecs;
 	unsigned int tcount = desc->suite.hash.count;
 	unsigned int nr_unkeyed, nr_keyed;
+	unsigned int maxkeysize = 0;
 	int err;
 
 	/*
@@ -1142,23 +1537,27 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
 			       "unkeyed ones must come first\n", desc->alg);
 			return -EINVAL;
 		}
+		maxkeysize = max_t(unsigned int, maxkeysize,
+				   template[nr_unkeyed + nr_keyed].ksize);
 	}
 
 	err = 0;
 	if (nr_unkeyed) {
-		err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
+		err = __alg_test_hash(template, nr_unkeyed, driver, type, mask,
+				      desc->generic_driver, maxkeysize);
 		template += nr_unkeyed;
 	}
 
 	if (!err && nr_keyed)
-		err = __alg_test_hash(template, nr_keyed, driver, type, mask);
+		err = __alg_test_hash(template, nr_keyed, driver, type, mask,
+				      desc->generic_driver, maxkeysize);
 
 	return err;
 }
 
 static int test_aead_vec_cfg(const char *driver, int enc,
 			     const struct aead_testvec *vec,
-			     unsigned int vec_num,
+			     const char *vec_name,
 			     const struct testvec_config *cfg,
 			     struct aead_request *req,
 			     struct cipher_test_sglists *tsgls)
@@ -1175,6 +1574,7 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 		 cfg->iv_offset +
 		 (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
 	struct kvec input[2];
+	int expected_error;
 	int err;
 
 	/* Set the key */
@@ -1183,26 +1583,33 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 	else
 		crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
 	err = crypto_aead_setkey(tfm, vec->key, vec->klen);
-	if (err) {
-		if (vec->fail) /* expectedly failed to set key? */
-			return 0;
-		pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n",
-		       driver, err, vec_num, crypto_aead_get_flags(tfm));
+	if (err && err != vec->setkey_error) {
+		pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+		       driver, vec_name, vec->setkey_error, err,
+		       crypto_aead_get_flags(tfm));
 		return err;
 	}
-	if (vec->fail) {
-		pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n",
-		       driver, vec_num);
+	if (!err && vec->setkey_error) {
+		pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+		       driver, vec_name, vec->setkey_error);
 		return -EINVAL;
 	}
 
 	/* Set the authentication tag size */
 	err = crypto_aead_setauthsize(tfm, authsize);
-	if (err) {
-		pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n",
-		       driver, err, vec_num);
+	if (err && err != vec->setauthsize_error) {
+		pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
+		       driver, vec_name, vec->setauthsize_error, err);
 		return err;
 	}
+	if (!err && vec->setauthsize_error) {
+		pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
+		       driver, vec_name, vec->setauthsize_error);
+		return -EINVAL;
+	}
+
+	if (vec->setkey_error || vec->setauthsize_error)
+		return 0;
 
 	/* The IV must be copied to a buffer, as the algorithm may modify it */
 	if (WARN_ON(ivsize > MAX_IVLEN))
@@ -1224,8 +1631,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 						     vec->plen),
 					input, 2);
 	if (err) {
-		pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return err;
 	}
 
@@ -1235,23 +1642,12 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 	aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
 			       enc ? vec->plen : vec->clen, iv);
 	aead_request_set_ad(req, vec->alen);
-	err = crypto_wait_req(enc ? crypto_aead_encrypt(req) :
-			      crypto_aead_decrypt(req), &wait);
-
-	aead_request_set_tfm(req, tfm); /* TODO: get rid of this */
-
-	if (err) {
-		if (err == -EBADMSG && vec->novrfy)
-			return 0;
-		pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
-		       driver, op, err, vec_num, cfg->name);
-		return err;
-	}
-	if (vec->novrfy) {
-		pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
-		return -EINVAL;
-	}
+	if (cfg->nosimd)
+		crypto_disable_simd_for_test();
+	err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+	if (cfg->nosimd)
+		crypto_reenable_simd_for_test();
+	err = crypto_wait_req(err, &wait);
 
 	/* Check that the algorithm didn't overwrite things it shouldn't have */
 	if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
@@ -1263,8 +1659,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 	    req->base.complete != crypto_req_done ||
 	    req->base.flags != req_flags ||
 	    req->base.data != &wait) {
-		pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: aead: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		if (req->cryptlen != (enc ? vec->plen : vec->clen))
 			pr_err("alg: aead: changed 'req->cryptlen'\n");
 		if (req->assoclen != vec->alen)
@@ -1286,14 +1682,29 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 		return -EINVAL;
 	}
 	if (is_test_sglist_corrupted(&tsgls->src)) {
-		pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: aead: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return -EINVAL;
 	}
 	if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
 	    is_test_sglist_corrupted(&tsgls->dst)) {
-		pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: aead: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
+		return -EINVAL;
+	}
+
+	/* Check for success or failure */
+	expected_error = vec->novrfy ? -EBADMSG : vec->crypt_error;
+	if (err) {
+		if (err == expected_error)
+			return 0;
+		pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+		       driver, op, vec_name, expected_error, err, cfg->name);
+		return err;
+	}
+	if (expected_error) {
+		pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+		       driver, op, vec_name, expected_error, cfg->name);
 		return -EINVAL;
 	}
 
@@ -1302,13 +1713,13 @@ static int test_aead_vec_cfg(const char *driver, int enc,
 				    enc ? vec->clen : vec->plen,
 				    vec->alen, enc || !cfg->inplace);
 	if (err == -EOVERFLOW) {
-		pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return err;
 	}
 	if (err) {
-		pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: aead: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return err;
 	}
 
@@ -1320,14 +1731,17 @@ static int test_aead_vec(const char *driver, int enc,
 			 struct aead_request *req,
 			 struct cipher_test_sglists *tsgls)
 {
+	char vec_name[16];
 	unsigned int i;
 	int err;
 
 	if (enc && vec->novrfy)
 		return 0;
 
+	sprintf(vec_name, "%u", vec_num);
+
 	for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
-		err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+		err = test_aead_vec_cfg(driver, enc, vec, vec_name,
 					&default_cipher_testvec_configs[i],
 					req, tsgls);
 		if (err)
@@ -1342,7 +1756,7 @@ static int test_aead_vec(const char *driver, int enc,
 		for (i = 0; i < fuzz_iterations; i++) {
 			generate_random_testvec_config(&cfg, cfgname,
 						       sizeof(cfgname));
-			err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+			err = test_aead_vec_cfg(driver, enc, vec, vec_name,
 						&cfg, req, tsgls);
 			if (err)
 				return err;
@@ -1352,6 +1766,226 @@ static int test_aead_vec(const char *driver, int enc,
 	return 0;
 }
 
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate an AEAD test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_aead_testvec(struct aead_request *req,
+					 struct aead_testvec *vec,
+					 unsigned int maxkeysize,
+					 unsigned int maxdatasize,
+					 char *name, size_t max_namelen)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	const unsigned int ivsize = crypto_aead_ivsize(tfm);
+	unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+	unsigned int authsize;
+	unsigned int total_len;
+	int i;
+	struct scatterlist src[2], dst;
+	u8 iv[MAX_IVLEN];
+	DECLARE_CRYPTO_WAIT(wait);
+
+	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+	vec->klen = maxkeysize;
+	if (prandom_u32() % 4 == 0)
+		vec->klen = prandom_u32() % (maxkeysize + 1);
+	generate_random_bytes((u8 *)vec->key, vec->klen);
+	vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
+
+	/* IV */
+	generate_random_bytes((u8 *)vec->iv, ivsize);
+
+	/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
+	authsize = maxauthsize;
+	if (prandom_u32() % 4 == 0)
+		authsize = prandom_u32() % (maxauthsize + 1);
+	if (WARN_ON(authsize > maxdatasize))
+		authsize = maxdatasize;
+	maxdatasize -= authsize;
+	vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
+
+	/* Plaintext and associated data */
+	total_len = generate_random_length(maxdatasize);
+	if (prandom_u32() % 4 == 0)
+		vec->alen = 0;
+	else
+		vec->alen = generate_random_length(total_len);
+	vec->plen = total_len - vec->alen;
+	generate_random_bytes((u8 *)vec->assoc, vec->alen);
+	generate_random_bytes((u8 *)vec->ptext, vec->plen);
+
+	vec->clen = vec->plen + authsize;
+
+	/*
+	 * If the key or authentication tag size couldn't be set, no need to
+	 * continue to encrypt.
+	 */
+	if (vec->setkey_error || vec->setauthsize_error)
+		goto done;
+
+	/* Ciphertext */
+	sg_init_table(src, 2);
+	i = 0;
+	if (vec->alen)
+		sg_set_buf(&src[i++], vec->assoc, vec->alen);
+	if (vec->plen)
+		sg_set_buf(&src[i++], vec->ptext, vec->plen);
+	sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
+	memcpy(iv, vec->iv, ivsize);
+	aead_request_set_callback(req, 0, crypto_req_done, &wait);
+	aead_request_set_crypt(req, src, &dst, vec->plen, iv);
+	aead_request_set_ad(req, vec->alen);
+	vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait);
+	if (vec->crypt_error == 0)
+		memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
+done:
+	snprintf(name, max_namelen,
+		 "\"random: alen=%u plen=%u authsize=%u klen=%u\"",
+		 vec->alen, vec->plen, authsize, vec->klen);
+}
+
+/*
+ * Test the AEAD algorithm represented by @req against the corresponding generic
+ * implementation, if one is available.
+ */
+static int test_aead_vs_generic_impl(const char *driver,
+				     const struct alg_test_desc *test_desc,
+				     struct aead_request *req,
+				     struct cipher_test_sglists *tsgls)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	const unsigned int ivsize = crypto_aead_ivsize(tfm);
+	const unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+	const unsigned int blocksize = crypto_aead_blocksize(tfm);
+	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+	const char *algname = crypto_aead_alg(tfm)->base.cra_name;
+	const char *generic_driver = test_desc->generic_driver;
+	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+	struct crypto_aead *generic_tfm = NULL;
+	struct aead_request *generic_req = NULL;
+	unsigned int maxkeysize;
+	unsigned int i;
+	struct aead_testvec vec = { 0 };
+	char vec_name[64];
+	struct testvec_config cfg;
+	char cfgname[TESTVEC_CONFIG_NAMELEN];
+	int err;
+
+	if (noextratests)
+		return 0;
+
+	if (!generic_driver) { /* Use default naming convention? */
+		err = build_generic_driver_name(algname, _generic_driver);
+		if (err)
+			return err;
+		generic_driver = _generic_driver;
+	}
+
+	if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+		return 0;
+
+	generic_tfm = crypto_alloc_aead(generic_driver, 0, 0);
+	if (IS_ERR(generic_tfm)) {
+		err = PTR_ERR(generic_tfm);
+		if (err == -ENOENT) {
+			pr_warn("alg: aead: skipping comparison tests for %s because %s is unavailable\n",
+				driver, generic_driver);
+			return 0;
+		}
+		pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n",
+		       generic_driver, algname, err);
+		return err;
+	}
+
+	generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
+	if (!generic_req) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* Check the algorithm properties for consistency. */
+
+	if (maxauthsize != crypto_aead_alg(generic_tfm)->maxauthsize) {
+		pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, maxauthsize,
+		       crypto_aead_alg(generic_tfm)->maxauthsize);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (ivsize != crypto_aead_ivsize(generic_tfm)) {
+		pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, ivsize, crypto_aead_ivsize(generic_tfm));
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (blocksize != crypto_aead_blocksize(generic_tfm)) {
+		pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, blocksize, crypto_aead_blocksize(generic_tfm));
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Now generate test vectors using the generic implementation, and test
+	 * the other implementation against them.
+	 */
+
+	maxkeysize = 0;
+	for (i = 0; i < test_desc->suite.aead.count; i++)
+		maxkeysize = max_t(unsigned int, maxkeysize,
+				   test_desc->suite.aead.vecs[i].klen);
+
+	vec.key = kmalloc(maxkeysize, GFP_KERNEL);
+	vec.iv = kmalloc(ivsize, GFP_KERNEL);
+	vec.assoc = kmalloc(maxdatasize, GFP_KERNEL);
+	vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
+	vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
+	if (!vec.key || !vec.iv || !vec.assoc || !vec.ptext || !vec.ctext) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < fuzz_iterations * 8; i++) {
+		generate_random_aead_testvec(generic_req, &vec,
+					     maxkeysize, maxdatasize,
+					     vec_name, sizeof(vec_name));
+		generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+		err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg,
+					req, tsgls);
+		if (err)
+			goto out;
+		err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg,
+					req, tsgls);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	err = 0;
+out:
+	kfree(vec.key);
+	kfree(vec.iv);
+	kfree(vec.assoc);
+	kfree(vec.ptext);
+	kfree(vec.ctext);
+	crypto_free_aead(generic_tfm);
+	aead_request_free(generic_req);
+	return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_aead_vs_generic_impl(const char *driver,
+				     const struct alg_test_desc *test_desc,
+				     struct aead_request *req,
+				     struct cipher_test_sglists *tsgls)
+{
+	return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
 static int test_aead(const char *driver, int enc,
 		     const struct aead_test_suite *suite,
 		     struct aead_request *req,
@@ -1411,6 +2045,10 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
 		goto out;
 
 	err = test_aead(driver, DECRYPT, suite, req, tsgls);
+	if (err)
+		goto out;
+
+	err = test_aead_vs_generic_impl(driver, desc, req, tsgls);
 out:
 	free_cipher_test_sglists(tsgls);
 	aead_request_free(req);
@@ -1462,13 +2100,20 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
 
 		ret = crypto_cipher_setkey(tfm, template[i].key,
 					   template[i].klen);
-		if (template[i].fail == !ret) {
-			printk(KERN_ERR "alg: cipher: setkey failed "
-			       "on test %d for %s: flags=%x\n", j,
-			       algo, crypto_cipher_get_flags(tfm));
+		if (ret) {
+			if (ret == template[i].setkey_error)
+				continue;
+			pr_err("alg: cipher: %s setkey failed on test vector %u; expected_error=%d, actual_error=%d, flags=%#x\n",
+			       algo, j, template[i].setkey_error, ret,
+			       crypto_cipher_get_flags(tfm));
 			goto out;
-		} else if (ret)
-			continue;
+		}
+		if (template[i].setkey_error) {
+			pr_err("alg: cipher: %s setkey unexpectedly succeeded on test vector %u; expected_error=%d\n",
+			       algo, j, template[i].setkey_error);
+			ret = -EINVAL;
+			goto out;
+		}
 
 		for (k = 0; k < template[i].len;
 		     k += crypto_cipher_blocksize(tfm)) {
@@ -1500,7 +2145,7 @@ out_nobuf:
 
 static int test_skcipher_vec_cfg(const char *driver, int enc,
 				 const struct cipher_testvec *vec,
-				 unsigned int vec_num,
+				 const char *vec_name,
 				 const struct testvec_config *cfg,
 				 struct skcipher_request *req,
 				 struct cipher_test_sglists *tsgls)
@@ -1526,15 +2171,16 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 					    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
 	err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
 	if (err) {
-		if (vec->fail) /* expectedly failed to set key? */
+		if (err == vec->setkey_error)
 			return 0;
-		pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n",
-		       driver, err, vec_num, crypto_skcipher_get_flags(tfm));
+		pr_err("alg: skcipher: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+		       driver, vec_name, vec->setkey_error, err,
+		       crypto_skcipher_get_flags(tfm));
 		return err;
 	}
-	if (vec->fail) {
-		pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n",
-		       driver, vec_num);
+	if (vec->setkey_error) {
+		pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+		       driver, vec_name, vec->setkey_error);
 		return -EINVAL;
 	}
 
@@ -1550,8 +2196,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 			memset(iv, 0, ivsize);
 	} else {
 		if (vec->generates_iv) {
-			pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n",
-			       driver, vec_num);
+			pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
+			       driver, vec_name);
 			return -EINVAL;
 		}
 		iv = NULL;
@@ -1563,8 +2209,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 	err = build_cipher_test_sglists(tsgls, cfg, alignmask,
 					vec->len, vec->len, &input, 1);
 	if (err) {
-		pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return err;
 	}
 
@@ -1573,13 +2219,12 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 	skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
 	skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
 				   vec->len, iv);
-	err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
-			      crypto_skcipher_decrypt(req), &wait);
-	if (err) {
-		pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
-		       driver, op, err, vec_num, cfg->name);
-		return err;
-	}
+	if (cfg->nosimd)
+		crypto_disable_simd_for_test();
+	err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
+	if (cfg->nosimd)
+		crypto_reenable_simd_for_test();
+	err = crypto_wait_req(err, &wait);
 
 	/* Check that the algorithm didn't overwrite things it shouldn't have */
 	if (req->cryptlen != vec->len ||
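Two behavioral changes here: a cfg->nosimd configuration brackets the operation with crypto_disable_simd_for_test()/crypto_reenable_simd_for_test() so the implementation's non-SIMD fallback path gets exercised, and the result is no longer checked immediately, so the request-corruption checks below still run; the expected-vs-actual error comparison happens further down against vec->crypt_error. A hypothetical wrapper showing the bracketing pattern:

	/* Sketch only; assumes cfg and req as used in test_skcipher_vec_cfg(). */
	static int do_crypt(struct skcipher_request *req,
			    const struct testvec_config *cfg, bool enc)
	{
		int err;

		if (cfg->nosimd)
			crypto_disable_simd_for_test();
		err = enc ? crypto_skcipher_encrypt(req)
			  : crypto_skcipher_decrypt(req);
		if (cfg->nosimd)
			crypto_reenable_simd_for_test();
		return err;
	}
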
@@ -1590,8 +2235,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 	    req->base.complete != crypto_req_done ||
 	    req->base.flags != req_flags ||
 	    req->base.data != &wait) {
-		pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		if (req->cryptlen != vec->len)
 			pr_err("alg: skcipher: changed 'req->cryptlen'\n");
 		if (req->iv != iv)
@@ -1611,14 +2256,28 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 		return -EINVAL;
 	}
 	if (is_test_sglist_corrupted(&tsgls->src)) {
-		pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return -EINVAL;
 	}
 	if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
 	    is_test_sglist_corrupted(&tsgls->dst)) {
-		pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
+		return -EINVAL;
+	}
+
+	/* Check for success or failure */
+	if (err) {
+		if (err == vec->crypt_error)
+			return 0;
+		pr_err("alg: skcipher: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+		       driver, op, vec_name, vec->crypt_error, err, cfg->name);
+		return err;
+	}
+	if (vec->crypt_error) {
+		pr_err("alg: skcipher: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+		       driver, op, vec_name, vec->crypt_error, cfg->name);
 		return -EINVAL;
 	}
 
@@ -1626,20 +2285,20 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
 	err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
 				    vec->len, 0, true);
 	if (err == -EOVERFLOW) {
-		pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return err;
 	}
 	if (err) {
-		pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		return err;
 	}
 
 	/* If applicable, check that the algorithm generated the correct IV */
 	if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
-		pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n",
-		       driver, op, vec_num, cfg->name);
+		pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %s, cfg=\"%s\"\n",
+		       driver, op, vec_name, cfg->name);
 		hexdump(iv, ivsize);
 		return -EINVAL;
 	}
@@ -1653,14 +2312,17 @@ static int test_skcipher_vec(const char *driver, int enc,
 			     struct skcipher_request *req,
 			     struct cipher_test_sglists *tsgls)
 {
+	char vec_name[16];
 	unsigned int i;
 	int err;
 
 	if (fips_enabled && vec->fips_skip)
 		return 0;
 
+	sprintf(vec_name, "%u", vec_num);
+
 	for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
-		err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+		err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
 					    &default_cipher_testvec_configs[i],
 					    req, tsgls);
 		if (err)
@@ -1675,7 +2337,7 @@ static int test_skcipher_vec(const char *driver, int enc,
 		for (i = 0; i < fuzz_iterations; i++) {
 			generate_random_testvec_config(&cfg, cfgname,
 						       sizeof(cfgname));
-			err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+			err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
 						    &cfg, req, tsgls);
 			if (err)
 				return err;
@@ -1685,6 +2347,186 @@ static int test_skcipher_vec(const char *driver, int enc,
 	return 0;
 }
 
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate a symmetric cipher test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_cipher_testvec(struct skcipher_request *req,
+					   struct cipher_testvec *vec,
+					   unsigned int maxdatasize,
+					   char *name, size_t max_namelen)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const unsigned int maxkeysize = tfm->keysize;
+	const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct scatterlist src, dst;
+	u8 iv[MAX_IVLEN];
+	DECLARE_CRYPTO_WAIT(wait);
+
+	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+	vec->klen = maxkeysize;
+	if (prandom_u32() % 4 == 0)
+		vec->klen = prandom_u32() % (maxkeysize + 1);
+	generate_random_bytes((u8 *)vec->key, vec->klen);
+	vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+
+	/* IV */
+	generate_random_bytes((u8 *)vec->iv, ivsize);
+
+	/* Plaintext */
+	vec->len = generate_random_length(maxdatasize);
+	generate_random_bytes((u8 *)vec->ptext, vec->len);
+
+	/* If the key couldn't be set, no need to continue to encrypt. */
+	if (vec->setkey_error)
+		goto done;
+
+	/* Ciphertext */
+	sg_init_one(&src, vec->ptext, vec->len);
+	sg_init_one(&dst, vec->ctext, vec->len);
+	memcpy(iv, vec->iv, ivsize);
+	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+	skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
+	vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+done:
+	snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
+		 vec->len, vec->klen);
+}
+
+/*
+ * Test the skcipher algorithm represented by @req against the corresponding
+ * generic implementation, if one is available.
+ */
+static int test_skcipher_vs_generic_impl(const char *driver,
+					 const char *generic_driver,
+					 struct skcipher_request *req,
+					 struct cipher_test_sglists *tsgls)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
+	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+	const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
+	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+	struct crypto_skcipher *generic_tfm = NULL;
+	struct skcipher_request *generic_req = NULL;
+	unsigned int i;
+	struct cipher_testvec vec = { 0 };
+	char vec_name[64];
+	struct testvec_config cfg;
+	char cfgname[TESTVEC_CONFIG_NAMELEN];
+	int err;
+
+	if (noextratests)
+		return 0;
+
+	/* Keywrap isn't supported here yet as it handles its IV differently. */
+	if (strncmp(algname, "kw(", 3) == 0)
+		return 0;
+
+	if (!generic_driver) { /* Use default naming convention? */
+		err = build_generic_driver_name(algname, _generic_driver);
+		if (err)
+			return err;
+		generic_driver = _generic_driver;
+	}
+
+	if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+		return 0;
+
+	generic_tfm = crypto_alloc_skcipher(generic_driver, 0, 0);
+	if (IS_ERR(generic_tfm)) {
+		err = PTR_ERR(generic_tfm);
+		if (err == -ENOENT) {
+			pr_warn("alg: skcipher: skipping comparison tests for %s because %s is unavailable\n",
+				driver, generic_driver);
+			return 0;
+		}
+		pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n",
+		       generic_driver, algname, err);
+		return err;
+	}
+
+	generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
+	if (!generic_req) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* Check the algorithm properties for consistency. */
+
+	if (tfm->keysize != generic_tfm->keysize) {
+		pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, tfm->keysize, generic_tfm->keysize);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (ivsize != crypto_skcipher_ivsize(generic_tfm)) {
+		pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, ivsize, crypto_skcipher_ivsize(generic_tfm));
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (blocksize != crypto_skcipher_blocksize(generic_tfm)) {
+		pr_err("alg: skcipher: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+		       driver, blocksize,
+		       crypto_skcipher_blocksize(generic_tfm));
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Now generate test vectors using the generic implementation, and test
+	 * the other implementation against them.
+	 */
+
+	vec.key = kmalloc(tfm->keysize, GFP_KERNEL);
+	vec.iv = kmalloc(ivsize, GFP_KERNEL);
+	vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
+	vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
+	if (!vec.key || !vec.iv || !vec.ptext || !vec.ctext) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < fuzz_iterations * 8; i++) {
+		generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+					       vec_name, sizeof(vec_name));
+		generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+		err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
+					    &cfg, req, tsgls);
+		if (err)
+			goto out;
+		err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
+					    &cfg, req, tsgls);
+		if (err)
+			goto out;
+		cond_resched();
+	}
+	err = 0;
+out:
+	kfree(vec.key);
+	kfree(vec.iv);
+	kfree(vec.ptext);
+	kfree(vec.ctext);
+	crypto_free_skcipher(generic_tfm);
+	skcipher_request_free(generic_req);
+	return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_skcipher_vs_generic_impl(const char *driver,
+					 const char *generic_driver,
+					 struct skcipher_request *req,
+					 struct cipher_test_sglists *tsgls)
+{
+	return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
 static int test_skcipher(const char *driver, int enc,
 			 const struct cipher_test_suite *suite,
 			 struct skcipher_request *req,
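test_skcipher_vs_generic_impl() fuzz-tests an implementation against a generic reference: each iteration has the generic transform produce a random vector (key, IV, plaintext, expected ciphertext and expected errors, via generate_random_cipher_testvec() above) and replays it against the driver under test, in both directions, with a random testvec_config. When the generic driver name cannot be derived from cra_name by build_generic_driver_name() (presumably by tagging the components with "-generic"), the alg_test_desc supplies it explicitly, as many entries in the table below now do; a sketch of such an entry:

	{
		.alg = "xts(aes)",
		.generic_driver = "xts(ecb(aes-generic))", /* template spelled out */
		.test = alg_test_skcipher,
		.fips_allowed = 1,
		.suite = { /* ... vectors ... */ }
	},
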
@@ -1744,6 +2586,11 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
 		goto out;
 
 	err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
+	if (err)
+		goto out;
+
+	err = test_skcipher_vs_generic_impl(driver, desc->generic_driver, req,
+					    tsgls);
 out:
 	free_cipher_test_sglists(tsgls);
 	skcipher_request_free(req);
@@ -2179,7 +3026,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
 		u32 *ctx = (u32 *)shash_desc_ctx(shash);
 
 		shash->tfm = tfm;
-		shash->flags = 0;
 
 		*ctx = 420553207;
 		err = crypto_shash_final(shash, (u8 *)&val);
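The dropped assignment reflects the removal of the flags field from struct shash_desc elsewhere in this series; its only defined flag, CRYPTO_TFM_REQ_MAY_SLEEP, never had a consistent effect, so every 'desc->flags = 0;' initialization in drivers disappears the same way (see the drbd, artpec6 and bcm hunks further down). Descriptor setup is now just (a sketch, assuming tfm, data, len and out from context):

	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	/* no desc->flags assignment needed any more */
	err = crypto_shash_digest(desc, data, len, out);
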
@@ -2493,6 +3339,12 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
 	return err;
 }
 
+static u8 *test_pack_u32(u8 *dst, u32 val)
+{
+	memcpy(dst, &val, sizeof(val));
+	return dst + sizeof(val);
+}
+
 static int test_akcipher_one(struct crypto_akcipher *tfm,
 			     const struct akcipher_testvec *vecs)
 {
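test_pack_u32() appends a native-endian u32 to the key blob. As the next hunk shows, the buffer handed to crypto_akcipher_set_{pub,priv}_key() becomes key || algo || param_len || params, letting algorithms such as the new "ecrdsa" receive their curve parameters alongside the raw key. The layout, using the field names from the hunk below:

	/*
	 *  +---------------+------------+-----------------+--------------------+
	 *  | key (key_len) | algo (u32) | param_len (u32) | params (param_len) |
	 *  +---------------+------------+-----------------+--------------------+
	 */
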
@@ -2503,10 +3355,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 	struct crypto_wait wait;
 	unsigned int out_len_max, out_len = 0;
 	int err = -ENOMEM;
-	struct scatterlist src, dst, src_tab[2];
+	struct scatterlist src, dst, src_tab[3];
 	const char *m, *c;
 	unsigned int m_size, c_size;
 	const char *op;
+	u8 *key, *ptr;
 
 	if (testmgr_alloc_buf(xbuf))
 		return err;
@@ -2517,22 +3370,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 
 	crypto_init_wait(&wait);
 
+	key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
+		      GFP_KERNEL);
+	if (!key)
+		goto free_xbuf;
+	memcpy(key, vecs->key, vecs->key_len);
+	ptr = key + vecs->key_len;
+	ptr = test_pack_u32(ptr, vecs->algo);
+	ptr = test_pack_u32(ptr, vecs->param_len);
+	memcpy(ptr, vecs->params, vecs->param_len);
+
 	if (vecs->public_key_vec)
-		err = crypto_akcipher_set_pub_key(tfm, vecs->key,
-						  vecs->key_len);
+		err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
 	else
-		err = crypto_akcipher_set_priv_key(tfm, vecs->key,
-						   vecs->key_len);
+		err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
 	if (err)
 		goto free_req;
 
-	err = -ENOMEM;
-	out_len_max = crypto_akcipher_maxsize(tfm);
-
 	/*
 	 * First run the tests which do not require a private key, such as
 	 * encrypt or verify.
 	 */
+	err = -ENOMEM;
+	out_len_max = crypto_akcipher_maxsize(tfm);
 	outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
 	if (!outbuf_enc)
 		goto free_req;
@@ -2558,12 +3418,20 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 		goto free_all;
 	memcpy(xbuf[0], m, m_size);
 
-	sg_init_table(src_tab, 2);
+	sg_init_table(src_tab, 3);
 	sg_set_buf(&src_tab[0], xbuf[0], 8);
 	sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
-	sg_init_one(&dst, outbuf_enc, out_len_max);
-	akcipher_request_set_crypt(req, src_tab, &dst, m_size,
-				   out_len_max);
+	if (vecs->siggen_sigver_test) {
+		if (WARN_ON(c_size > PAGE_SIZE))
+			goto free_all;
+		memcpy(xbuf[1], c, c_size);
+		sg_set_buf(&src_tab[2], xbuf[1], c_size);
+		akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
+	} else {
+		sg_init_one(&dst, outbuf_enc, out_len_max);
+		akcipher_request_set_crypt(req, src_tab, &dst, m_size,
+					   out_len_max);
+	}
 	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				      crypto_req_done, &wait);
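For signature vectors (siggen_sigver_test), both inputs now travel on the source scatterlist, split across src_tab[], with a NULL destination: crypto_akcipher_verify() checks the signature internally and reports the verdict through its return code, instead of the test manager comparing a decrypted output buffer (m and c are swapped for sign/verify vectors earlier in this function, outside this excerpt). A sketch of the verify-side setup, with in0/in1 as hypothetical placeholders for the two inputs:

	sg_init_table(src_tab, 3);
	sg_set_buf(&src_tab[0], in0, 8);
	sg_set_buf(&src_tab[1], in0 + 8, in0_len - 8);
	sg_set_buf(&src_tab[2], in1, in1_len);
	akcipher_request_set_crypt(req, src_tab, NULL, in0_len, in1_len);
	err = crypto_wait_req(crypto_akcipher_verify(req), &wait);
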
 
@@ -2576,18 +3444,21 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
 		pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
 		goto free_all;
 	}
-	if (req->dst_len != c_size) {
-		pr_err("alg: akcipher: %s test failed. Invalid output len\n",
-		       op);
-		err = -EINVAL;
-		goto free_all;
-	}
-	/* verify that encrypted message is equal to expected */
-	if (memcmp(c, outbuf_enc, c_size)) {
-		pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
-		hexdump(outbuf_enc, c_size);
-		err = -EINVAL;
-		goto free_all;
+	if (!vecs->siggen_sigver_test) {
+		if (req->dst_len != c_size) {
+			pr_err("alg: akcipher: %s test failed. Invalid output len\n",
+			       op);
+			err = -EINVAL;
+			goto free_all;
+		}
+		/* verify that encrypted message is equal to expected */
+		if (memcmp(c, outbuf_enc, c_size) != 0) {
+			pr_err("alg: akcipher: %s test failed. Invalid output\n",
+			       op);
+			hexdump(outbuf_enc, c_size);
+			err = -EINVAL;
+			goto free_all;
+		}
 	}
 
 	/*
@@ -2642,6 +3513,7 @@ free_all:
 	kfree(outbuf_enc);
 free_req:
 	akcipher_request_free(req);
+	kfree(key);
 free_xbuf:
 	testmgr_free_buf(xbuf);
 	return err;
@@ -2699,12 +3571,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
 static const struct alg_test_desc alg_test_descs[] = {
 	{
 		.alg = "adiantum(xchacha12,aes)",
+		.generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
 		},
 	}, {
 		.alg = "adiantum(xchacha20,aes)",
+		.generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@@ -2921,6 +3795,12 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.test = alg_test_null,
 		.fips_allowed = 1,
 	}, {
+		/* Same as cbc(sm4) except the key is stored in
+		 * hardware secure memory which we reference by index
+		 */
+		.alg = "cbc(psm4)",
+		.test = alg_test_null,
+	}, {
 		.alg = "cbc(serpent)",
 		.test = alg_test_skcipher,
 		.suite = {
@@ -2947,6 +3827,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "ccm(aes)",
+		.generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
 		.suite = {
@@ -3055,6 +3936,13 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.test = alg_test_null,
 		.fips_allowed = 1,
 	}, {
+
+		/* Same as ctr(sm4) except the key is stored in
+		 * hardware secure memory which we reference by index
+		 */
+		.alg = "ctr(psm4)",
+		.test = alg_test_null,
+	}, {
 		.alg = "ctr(serpent)",
 		.test = alg_test_skcipher,
 		.suite = {
@@ -3080,6 +3968,13 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.cipher = __VECS(cts_mode_tv_template)
 		}
 	}, {
+		/* Same as cts(cbc(aes)) except the key is stored in
+		 * hardware secure memory which we reference by index
+		 */
+		.alg = "cts(cbc(paes))",
+		.test = alg_test_null,
+		.fips_allowed = 1,
+	}, {
 		.alg = "deflate",
 		.test = alg_test_comp,
 		.fips_allowed = 1,
@@ -3358,7 +4253,14 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.kpp = __VECS(ecdh_tv_template)
 		}
 	}, {
+		.alg = "ecrdsa",
+		.test = alg_test_akcipher,
+		.suite = {
+			.akcipher = __VECS(ecrdsa_tv_template)
+		}
+	}, {
 		.alg = "gcm(aes)",
+		.generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
 		.suite = {
@@ -3477,30 +4379,35 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "lrw(aes)",
+		.generic_driver = "lrw(ecb(aes-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(aes_lrw_tv_template)
 		}
 	}, {
 		.alg = "lrw(camellia)",
+		.generic_driver = "lrw(ecb(camellia-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(camellia_lrw_tv_template)
 		}
 	}, {
 		.alg = "lrw(cast6)",
+		.generic_driver = "lrw(ecb(cast6-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(cast6_lrw_tv_template)
 		}
 	}, {
 		.alg = "lrw(serpent)",
+		.generic_driver = "lrw(ecb(serpent-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(serpent_lrw_tv_template)
 		}
 	}, {
 		.alg = "lrw(twofish)",
+		.generic_driver = "lrw(ecb(twofish-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(tf_lrw_tv_template)
@@ -3625,6 +4532,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "rfc4106(gcm(aes))",
+		.generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
 		.suite = {
@@ -3632,6 +4540,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "rfc4309(ccm(aes))",
+		.generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
 		.suite = {
@@ -3639,6 +4548,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "rfc4543(gcm(aes))",
+		.generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
 		.test = alg_test_aead,
 		.suite = {
 			.aead = __VECS(aes_gcm_rfc4543_tv_template)
@@ -3835,6 +4745,7 @@ static const struct alg_test_desc alg_test_descs[] = {
 		},
 	}, {
 		.alg = "xts(aes)",
+		.generic_driver = "xts(ecb(aes-generic))",
 		.test = alg_test_skcipher,
 		.fips_allowed = 1,
 		.suite = {
@@ -3842,12 +4753,14 @@ static const struct alg_test_desc alg_test_descs[] = {
 		}
 	}, {
 		.alg = "xts(camellia)",
+		.generic_driver = "xts(ecb(camellia-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(camellia_xts_tv_template)
 		}
 	}, {
 		.alg = "xts(cast6)",
+		.generic_driver = "xts(ecb(cast6-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(cast6_xts_tv_template)
@@ -3861,12 +4774,14 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.fips_allowed = 1,
 	}, {
 		.alg = "xts(serpent)",
+		.generic_driver = "xts(ecb(serpent-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(serpent_xts_tv_template)
 		}
 	}, {
 		.alg = "xts(twofish)",
+		.generic_driver = "xts(ecb(twofish-generic))",
 		.test = alg_test_skcipher,
 		.suite = {
 			.cipher = __VECS(tf_xts_tv_template)
@@ -4020,8 +4935,9 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 					     type, mask);
 
 test_done:
-	if (fips_enabled && rc)
-		panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
+	if (rc && (fips_enabled || panic_on_fail))
+		panic("alg: self-tests for %s (%s) failed in %s mode!\n",
+		      driver, alg, fips_enabled ? "fips" : "panic_on_fail");
 
 	if (fips_enabled && !rc)
 		pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d18a37629f05..b6daae1f6a1d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25,6 +25,8 @@
 #ifndef _CRYPTO_TESTMGR_H
 #define _CRYPTO_TESTMGR_H
 
+#include <linux/oid_registry.h>
+
 #define MAX_IVLEN		32
 
 /*
@@ -34,6 +36,8 @@
  * @digest:	Pointer to expected digest
  * @psize:	Length of source data in bytes
  * @ksize:	Length of @key in bytes (0 if no key)
+ * @setkey_error: Expected error from setkey()
+ * @digest_error: Expected error from digest()
  */
 struct hash_testvec {
 	const char *key;
@@ -41,6 +45,8 @@ struct hash_testvec {
 	const char *digest;
 	unsigned short psize;
 	unsigned short ksize;
+	int setkey_error;
+	int digest_error;
 };
 
 /*
@@ -52,12 +58,13 @@ struct hash_testvec {
  * @ptext:	Pointer to plaintext
  * @ctext:	Pointer to ciphertext
  * @len:	Length of @ptext and @ctext in bytes
- * @fail:	If set to one, the test need to fail
  * @wk:		Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
  * 		( e.g. test needs to fail due to a weak key )
  * @fips_skip:	Skip the test vector in FIPS mode
  * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
  *		  Decryption takes @iv_out.  Needed for AES Keywrap ("kw(aes)").
+ * @setkey_error: Expected error from setkey()
+ * @crypt_error: Expected error from encrypt() and decrypt()
  */
 struct cipher_testvec {
 	const char *key;
@@ -65,12 +72,13 @@ struct cipher_testvec {
 	const char *iv_out;
 	const char *ptext;
 	const char *ctext;
-	bool fail;
 	unsigned char wk; /* weak key flag */
-	unsigned char klen;
+	unsigned short klen;
 	unsigned short len;
 	bool fips_skip;
 	bool generates_iv;
+	int setkey_error;
+	int crypt_error;
 };
 
 /*
@@ -82,7 +90,6 @@ struct cipher_testvec {
  * @ctext:	Pointer to the full authenticated ciphertext.  For AEADs that
  *		produce a separate "ciphertext" and "authentication tag", these
  *		two parts are concatenated: ciphertext || tag.
- * @fail:	setkey() failure expected?
  * @novrfy:	Decryption verification failure expected?
  * @wk:		Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
  *		(e.g. setkey() needs to fail due to a weak key)
@@ -90,6 +97,9 @@ struct cipher_testvec {
  * @plen:	Length of @ptext in bytes
  * @alen:	Length of @assoc in bytes
  * @clen:	Length of @ctext in bytes
+ * @setkey_error: Expected error from setkey()
+ * @setauthsize_error: Expected error from setauthsize()
+ * @crypt_error: Expected error from encrypt() and decrypt()
  */
 struct aead_testvec {
 	const char *key;
@@ -97,13 +107,15 @@ struct aead_testvec {
 	const char *ptext;
 	const char *assoc;
 	const char *ctext;
-	bool fail;
 	unsigned char novrfy;
 	unsigned char wk;
 	unsigned char klen;
 	unsigned short plen;
 	unsigned short clen;
 	unsigned short alen;
+	int setkey_error;
+	int setauthsize_error;
+	int crypt_error;
 };
 
 struct cprng_testvec {
@@ -135,13 +147,16 @@ struct drbg_testvec {
 
 struct akcipher_testvec {
 	const unsigned char *key;
+	const unsigned char *params;
 	const unsigned char *m;
 	const unsigned char *c;
 	unsigned int key_len;
+	unsigned int param_len;
 	unsigned int m_size;
 	unsigned int c_size;
 	bool public_key_vec;
 	bool siggen_sigver_test;
+	enum OID algo;
 };
 
 struct kpp_testvec {
@@ -551,6 +566,160 @@ static const struct akcipher_testvec rsa_tv_template[] = {
 };
 
 /*
+ * EC-RDSA test vectors are generated by gost-engine.
+ */
+static const struct akcipher_testvec ecrdsa_tv_template[] = {
+	{
+	.key =
+	"\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05"
+	"\x3d\x2f\xec\xb5\x00\x34\xf5\x51\x6d\x3b\x90\x4b\x23\x28\x6f\x1d"
+	"\xc8\x36\x61\x60\x36\xec\xbb\xb4\x0b\x95\x4e\x54\x4f\x15\x21\x05"
+	"\xd8\x52\x66\x44\x31\x7e\x5d\xc5\xd1\x26\x00\x5f\x60\xd8\xf0\xc7"
+	"\x27\xfc",
+	.key_len = 66,
+	.params = /* OID_gostCPSignA */
+	"\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x01\x06\x08\x2a\x85\x03"
+	"\x07\x01\x01\x02\x02",
+	.param_len = 21,
+	.c =
+	"\x41\x32\x09\x73\xa4\xc1\x38\xd6\x63\x7d\x8b\xf7\x50\x3f\xda\x9f"
+	"\x68\x48\xc1\x50\xe3\x42\x3a\x9b\x2b\x28\x12\x2a\xa7\xc2\x75\x31"
+	"\x65\x77\x8c\x3c\x9e\x0d\x56\xb2\xf9\xdc\x04\x33\x3e\xb0\x9e\xf9"
+	"\x74\x4e\x59\xb3\x83\xf2\x91\x27\xda\x5e\xc7\x33\xc0\xc1\x8f\x41",
+	.c_size = 64,
+	.algo = OID_gost2012PKey256,
+	.m =
+	"\x75\x1b\x9b\x40\x25\xb9\x96\xd2\x9b\x00\x41\xb3\x58\xbf\x23\x14"
+	"\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9",
+	.m_size = 32,
+	.public_key_vec = true,
+	.siggen_sigver_test = true,
+	},
+	{
+	.key =
+	"\x04\x40\x66\x6f\xd6\xb7\x06\xd0\xf5\xa5\x6f\x69\x5c\xa5\x13\x45"
+	"\x14\xdd\xcb\x12\x9c\x1b\xf5\x28\x64\x7a\x49\x48\x29\x14\x66\x42"
+	"\xb8\x1b\x5c\xf9\x56\x6d\x08\x3b\xce\xbb\x62\x2f\xc2\x3c\xc5\x49"
+	"\x93\x27\x70\x20\xcc\x79\xeb\xdc\x76\x8e\x48\x6e\x04\x96\xc3\x29"
+	"\xa0\x73",
+	.key_len = 66,
+	.params = /* OID_gostCPSignB */
+	"\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x02\x06\x08\x2a\x85\x03"
+	"\x07\x01\x01\x02\x02",
+	.param_len = 21,
+	.c =
+	"\x45\x6d\x4a\x03\x1d\x5c\x0b\x17\x79\xe7\x19\xdb\xbf\x81\x9f\x82"
+	"\xae\x06\xda\xf5\x47\x00\x05\x80\xc3\x16\x06\x9a\x8e\x7c\xb2\x8e"
+	"\x7f\x74\xaa\xec\x6b\x7b\x7f\x8b\xc6\x0b\x10\x42\x4e\x91\x2c\xdf"
+	"\x7b\x8b\x15\xf4\x9e\x59\x0f\xc7\xa4\x68\x2e\xce\x89\xdf\x84\xe9",
+	.c_size = 64,
+	.algo = OID_gost2012PKey256,
+	.m =
+	"\xd0\x54\x00\x27\x6a\xeb\xce\x6c\xf5\xf6\xfb\x57\x18\x18\x21\x13"
+	"\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40",
+	.m_size = 32,
+	.public_key_vec = true,
+	.siggen_sigver_test = true,
+	},
+	{
+	.key =
+	"\x04\x40\x05\x91\xa9\x7d\xcb\x87\xdc\x98\xa1\xbf\xff\xdd\x20\x61"
+	"\xaa\x58\x3b\x2d\x8e\x9c\x41\x9d\x4f\xc6\x23\x17\xf9\xca\x60\x65"
+	"\xbc\x97\x97\xf6\x6b\x24\xe8\xac\xb1\xa7\x61\x29\x3c\x71\xdc\xad"
+	"\xcb\x20\xbe\x96\xe8\xf4\x44\x2e\x49\xd5\x2c\xb9\xc9\x3b\x9c\xaa"
+	"\xba\x15",
+	.key_len = 66,
+	.params = /* OID_gostCPSignC */
+	"\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x03\x06\x08\x2a\x85\x03"
+	"\x07\x01\x01\x02\x02",
+	.param_len = 21,
+	.c =
+	"\x3b\x2e\x2e\x74\x74\x47\xda\xea\x93\x90\x6a\xe2\xf5\xf5\xe6\x46"
+	"\x11\xfc\xab\xdc\x52\xbc\x58\xdb\x45\x44\x12\x4a\xf7\xd0\xab\xc9"
+	"\x73\xba\x64\xab\x0d\xac\x4e\x72\x10\xa8\x04\xf6\x1e\xe0\x48\x6a"
+	"\xcd\xe8\xe3\x78\x73\x77\x82\x24\x8d\xf1\xd3\xeb\x4c\x25\x7e\xc0",
+	.c_size = 64,
+	.algo = OID_gost2012PKey256,
+	.m =
+	"\x52\x33\xf4\x3f\x7b\x5d\xcf\x20\xee\xe4\x5c\xab\x0b\x3f\x14\xd6"
+	"\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39",
+	.m_size = 32,
+	.public_key_vec = true,
+	.siggen_sigver_test = true,
+	},
+	{
+	.key =
+	"\x04\x81\x80\x85\x46\x8f\x16\xf8\x7a\x7e\x4a\xc3\x81\x9e\xf1\x6e"
+	"\x94\x1e\x5d\x02\x87\xea\xfa\xa0\x0a\x17\x70\x49\x64\xad\x95\x68"
+	"\x60\x0a\xf0\x57\x29\x41\x79\x30\x3c\x61\x69\xf2\xa6\x94\x87\x17"
+	"\x54\xfa\x97\x2c\xe6\x1e\x0a\xbb\x55\x10\x57\xbe\xf7\xc1\x77\x2b"
+	"\x11\x74\x0a\x50\x37\x14\x10\x2a\x45\xfc\x7a\xae\x1c\x4c\xce\x08"
+	"\x05\xb7\xa4\x50\xc8\x3d\x39\x3d\xdc\x5c\x8f\x96\x6c\xe7\xfc\x21"
+	"\xc3\x2d\x1e\x9f\x11\xb3\xec\x22\x18\x8a\x8c\x08\x6b\x8b\xed\xf5"
+	"\xc5\x47\x3c\x7e\x73\x59\x44\x1e\x77\x83\x84\x52\x9e\x3b\x7d\xff"
+	"\x9d\x86\x1a",
+	.key_len = 131,
+	.params = /* OID_gostTC26Sign512A */
+	"\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x01",
+	.param_len = 13,
+	.c =
+	"\x92\x81\x74\x5f\x95\x48\x38\x87\xd9\x8f\x5e\xc8\x8a\xbb\x01\x4e"
+	"\xb0\x75\x3c\x2f\xc7\x5a\x08\x4c\x68\xab\x75\x01\x32\x75\x75\xb5"
+	"\x37\xe0\x74\x6d\x94\x84\x31\x2a\x6b\xf4\xf7\xb7\xa7\x39\x7b\x46"
+	"\x07\xf0\x98\xbd\x33\x18\xa1\x72\xb2\x6d\x54\xe3\xde\x91\xc2\x2e"
+	"\x4f\x6a\xf8\xb7\xec\xa8\x83\xc9\x8f\xd9\xce\x7c\x45\x06\x02\xf4"
+	"\x4f\x21\xb5\x24\x3d\xb4\xb5\xd8\x58\x42\xbe\x2d\x29\xae\x93\xc0"
+	"\x13\x41\x96\x35\x08\x69\xe8\x36\xc7\xd1\x83\x81\xd7\xca\xfb\xc0"
+	"\xd2\xb7\x78\x32\x3e\x30\x1a\x1e\xce\xdc\x34\x35\xc6\xad\x68\x24",
+	.c_size = 128,
+	.algo = OID_gost2012PKey512,
+	.m =
+	"\x1f\x70\xb5\xe9\x55\x12\xd6\x88\xcc\x55\xb9\x0c\x7f\xc4\x94\xf2"
+	"\x04\x77\x41\x12\x02\xd6\xf1\x1f\x83\x56\xe9\xd6\x5a\x6a\x72\xb9"
+	"\x6e\x8e\x24\x2a\x84\xf1\xba\x67\xe8\xbf\xff\xc1\xd3\xde\xfb\xc6"
+	"\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f",
+	.m_size = 64,
+	.public_key_vec = true,
+	.siggen_sigver_test = true,
+	},
+	{
+	.key =
+	"\x04\x81\x80\x28\xf3\x2b\x92\x04\x32\xea\x66\x20\xde\xa0\x2f\x74"
+	"\xbf\x2d\xf7\xb5\x30\x76\xb1\xc8\xee\x38\x9f\xea\xe5\xad\xc6\xa3"
+	"\x28\x1e\x51\x3d\x67\xa3\x41\xcc\x6b\x81\xe2\xe2\x9e\x82\xf3\x78"
+	"\x56\xd7\x2e\xb2\xb5\xbe\xb4\x50\x21\x05\xe5\x29\x82\xef\x15\x1b"
+	"\xc0\xd7\x30\xd6\x2f\x96\xe8\xff\x99\x4c\x25\xcf\x9a\xfc\x54\x30"
+	"\xce\xdf\x59\xe9\xc6\x45\xce\xe4\x22\xe8\x01\xd5\xcd\x2f\xaa\x78"
+	"\x99\xc6\x04\x1e\x6f\x4c\x25\x6a\x76\xad\xff\x48\xf3\xb3\xb4\xd6"
+	"\x14\x5c\x2c\x0e\xea\xa2\x4b\xb9\x7e\x89\x77\x02\x3a\x29\xc8\x16"
+	"\x8e\x78\x48",
+	.key_len = 131,
+	.params = /* OID_gostTC26Sign512B */
+	"\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x02",
+	.param_len = 13,
+	.c =
+	"\x0a\xed\xb6\x27\xea\xa7\xa6\x7e\x2f\xc1\x02\x21\x74\xce\x27\xd2"
+	"\xee\x8a\x92\x4d\xa9\x43\x2d\xa4\x5b\xdc\x23\x02\xfc\x3a\xf3\xb2"
+	"\x10\x93\x0b\x40\x1b\x75\x95\x3e\x39\x41\x37\xb9\xab\x51\x09\xeb"
+	"\xf1\xb9\x49\x58\xec\x58\xc7\xf9\x2e\xb9\xc9\x40\xf2\x00\x39\x7e"
+	"\x3f\xde\x72\xe3\x85\x67\x06\xbe\xd8\xb8\xc1\x81\x1e\xe3\x0a\xfe"
+	"\xce\xd3\x77\x92\x56\x8c\x58\xf9\x37\x60\x2d\xe6\x8b\x66\xa3\xdd"
+	"\xd2\xf0\xf8\xda\x1b\x20\xbc\x9c\xec\x29\x5d\xd1\x8f\xcc\x37\xd1"
+	"\x3b\x8d\xb7\xc1\xe0\xb8\x3b\xef\x14\x1b\x87\xbc\xc1\x03\x9a\x93",
+	.c_size = 128,
+	.algo = OID_gost2012PKey512,
+	.m =
+	"\x11\x24\x21\x27\xf2\x42\x9f\xce\x5a\xf9\x01\x70\xe0\x07\x2b\x57"
+	"\xfb\x7d\x77\x5e\x74\x66\xe6\xa5\x40\x4c\x1a\x85\x18\xff\xd0\x63"
+	"\xe0\x39\xd3\xd6\xe5\x17\xf8\xc3\x4b\xc6\x1c\x33\x1a\xca\xa6\x66"
+	"\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc",
+	.m_size = 64,
+	.public_key_vec = true,
+	.siggen_sigver_test = true,
+	},
+};
+
+/*
  * PKCS#1 RSA test vectors. Obtained from CAVS testing.
  */
 static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
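On the encoding of the EC-RDSA vectors above: each .key wraps the raw public-key point in a short header (key_len 66 = 2 + 64 bytes for the 256-bit curves, 131 = 3 + 128 bytes for the 512-bit ones, consistent with ASN.1 octet-string framing of the leading bytes), .params is the DER-encoded parameter-set OID named in each comment, and .algo carries the matching enum OID constant from linux/oid_registry.h, pulled in by the new include at the top of this header. Reading the leading bytes, as an assumption:

	"\x04\x40..."		/* tag 0x04, length 0x40 = 64-byte point (256 bit) */
	"\x04\x81\x80..."	/* tag 0x04, long-form length 0x80 = 128-byte point (512 bit) */
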
@@ -7084,7 +7253,7 @@ static const struct cipher_testvec des_tv_template[] = {
 			  "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
 		.len	= 24,
 	}, { /* Weak key */
-		.fail	= true,
+		.setkey_error = -EINVAL,
 		.wk	= 1,
 		.key	= "\x01\x01\x01\x01\x01\x01\x01\x01",
 		.klen	= 8,
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index f8e1d9f9938f..40020f8adc46 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -677,7 +677,7 @@ MODULE_ALIAS_CRYPTO("tgr192");
 MODULE_ALIAS_CRYPTO("tgr160");
 MODULE_ALIAS_CRYPTO("tgr128");
 
-module_init(tgr192_mod_init);
+subsys_initcall(tgr192_mod_init);
 module_exit(tgr192_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 07e62433fbfb..dbac6e233285 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -205,7 +205,7 @@ static void __exit twofish_mod_fini(void)
 	crypto_unregister_alg(&alg);
 }
 
-module_init(twofish_mod_init);
+subsys_initcall(twofish_mod_init);
 module_exit(twofish_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 5f436dfdfc61..f50a85060b39 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -690,7 +690,7 @@ static void __exit vmac_module_exit(void)
 	crypto_unregister_template(&vmac64_tmpl);
 }
 
-module_init(vmac_module_init);
+subsys_initcall(vmac_module_init);
 module_exit(vmac_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 149e577fb772..1b8e502d999f 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1168,7 +1168,7 @@ MODULE_ALIAS_CRYPTO("wp512");
 MODULE_ALIAS_CRYPTO("wp384");
 MODULE_ALIAS_CRYPTO("wp256");
 
-module_init(wp512_mod_init);
+subsys_initcall(wp512_mod_init);
 module_exit(wp512_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index c055f57fab11..94ca694ef091 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -282,7 +282,7 @@ static void __exit crypto_xcbc_module_exit(void)
 	crypto_unregister_template(&crypto_xcbc_tmpl);
 }
 
-module_init(crypto_xcbc_module_init);
+subsys_initcall(crypto_xcbc_module_init);
 module_exit(crypto_xcbc_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/xts.c b/crypto/xts.c
index 2f948328cabb..33cf726df4ac 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -363,7 +363,7 @@ static void __exit crypto_module_exit(void)
 	crypto_unregister_template(&crypto_tmpl);
 }
 
-module_init(crypto_module_init);
+subsys_initcall(crypto_module_init);
 module_exit(crypto_module_exit);
 
 MODULE_LICENSE("GPL");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 9a76b3ed8b8b..2c04055e407f 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -257,7 +257,7 @@ static void __exit zstd_mod_fini(void)
 	crypto_unregister_scomp(&scomp);
 }
 
-module_init(zstd_mod_init);
+subsys_initcall(zstd_mod_init);
 module_exit(zstd_mod_fini);
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7ad88d91a09..843a9b9b3d74 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5443,7 +5443,6 @@ static int drbd_do_auth(struct drbd_connection *connection)
 	rcu_read_unlock();
 
 	desc->tfm = connection->cram_hmac_tfm;
-	desc->flags = 0;
 
 	rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
 	if (rv) {
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 268ef0c5d4ab..6781bcf3ec26 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -304,7 +304,6 @@ void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req,
 	void *src;
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	crypto_shash_init(desc);
 
@@ -332,7 +331,6 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
 	struct bvec_iter iter;
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	crypto_shash_init(desc);
 
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b65ff6962899..e9b6ac61fb7f 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
 	priv->rng.read = omap_rng_do_read;
 	priv->rng.init = omap_rng_init;
 	priv->rng.cleanup = omap_rng_cleanup;
+	priv->rng.quality = 900;
 
 	priv->rng.priv = (unsigned long)priv;
 	platform_set_drvdata(pdev, priv);
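Setting .quality gives the hwrng core an entropy estimate in bits per 1024 bits of raw output (900 claims roughly 0.88 bits of entropy per output bit); a non-zero value is what lets the core's feeding thread credit entropy for the data it pushes into the input pool. A registration sketch with the field in place (example_rng_read is hypothetical):

	static struct hwrng example_rng = {
		.name    = "example-rng",
		.read    = example_rng_read,
		.quality = 900,	/* bits of entropy per 1024 output bits */
	};

	ret = devm_hwrng_register(dev, &example_rng);
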
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 042860d97b15..0ef5b6a3f560 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -161,6 +161,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
 #endif
 	priv->rng.read = stm32_rng_read,
 	priv->rng.priv = (unsigned long) dev;
+	priv->rng.quality = 900;
 
 	pm_runtime_set_autosuspend_delay(dev, 100);
 	pm_runtime_use_autosuspend(dev);
@@ -169,6 +170,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
 	return devm_hwrng_register(dev, &priv->rng);
 }
 
+static int stm32_rng_remove(struct platform_device *ofdev)
+{
+	pm_runtime_disable(&ofdev->dev);
+
+	return 0;
+}
+
 #ifdef CONFIG_PM
 static int stm32_rng_runtime_suspend(struct device *dev)
 {
@@ -210,6 +218,7 @@ static struct platform_driver stm32_rng_driver = {
 		.of_match_table = stm32_rng_match,
 	},
 	.probe = stm32_rng_probe,
+	.remove = stm32_rng_remove,
 };
 
 module_platform_driver(stm32_rng_driver);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0be55fcc19ba..177b7713bd2d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -404,15 +404,6 @@ config CRYPTO_DEV_SAHARA
 	  This option enables support for the SAHARA HW crypto accelerator
 	  found in some Freescale i.MX chips.
 
-config CRYPTO_DEV_MXC_SCC
-	tristate "Support for Freescale Security Controller (SCC)"
-	depends on ARCH_MXC && OF
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_DES
-	help
-	  This option enables support for the Security Controller (SCC)
-	  found in Freescale i.MX25 chips.
-
 config CRYPTO_DEV_EXYNOS_RNG
 	tristate "EXYNOS HW pseudo random number generator support"
 	depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 8e7e225d2446..a23a7197fcd7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
 n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4092c2aad8e2..307f5cfa9ba4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	/* Setup SA */
 	sa = ctx->sa_in;
 
-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
-				 SA_SAVE_IV : SA_NOT_SAVE_IV),
-				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_NOT_SAVE_IV : SA_SAVE_IV),
+				 SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
 				 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
 				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
 				 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
 	sa = ctx->sa_out;
 	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+	/*
+	 * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+	 * It's the DIR_(IN|OUT)BOUND that matters.
+	 */
+	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
 
 	return 0;
 }
@@ -258,10 +264,10 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
 	 * overflow.
 	 */
 	if (counter + nblks < counter) {
-		struct skcipher_request *subreq = skcipher_request_ctx(req);
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
 		int ret;
 
-		skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+		skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
 		skcipher_request_set_callback(subreq, req->base.flags,
 			NULL, NULL);
 		skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -283,14 +289,14 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
 {
 	int rc;
 
-	crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+	crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
 				    CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+	crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
 		crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
-	rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+	rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
 	crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
 	crypto_skcipher_set_flags(cipher,
-		crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+		crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
 			CRYPTO_TFM_RES_MASK);
 
 	return rc;
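The software fallback becomes a crypto_sync_skcipher, whose request size is fixed and known at compile time, so it can live on the stack via SYNC_SKCIPHER_REQUEST_ON_STACK(); that is also why the crypto4xx_sk_init() hunk in crypto4xx_core.c below can drop the crypto_skcipher_set_reqsize() bookkeeping. The usual shape of such a fallback invocation, as a sketch:

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);

	skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	ret = enc ? crypto_skcipher_encrypt(subreq)
		  : crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);
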
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 06574a884715..3934c2523762 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -539,7 +539,7 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
 
 	req = skcipher_request_cast(pd_uinfo->async_req);
 
-	if (pd_uinfo->using_sd) {
+	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
 		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
 					  req->cryptlen, req->dst);
 	} else {
@@ -593,7 +593,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
 	u32 icv[AES_BLOCK_SIZE];
 	int err = 0;
 
-	if (pd_uinfo->using_sd) {
+	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
 		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
 					  pd->pd_ctl_len.bf.pkt_len,
 					  dst);
@@ -714,7 +714,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	size_t offset_to_sr_ptr;
 	u32 gd_idx = 0;
 	int tmp;
-	bool is_busy;
+	bool is_busy, force_sd;
+
+	/*
+	 * There's a very subtle/disguised "bug" in the hardware that
+	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+	 * of the hardware spec:
+	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+	 * operation modes for >>> "Block ciphers" <<<.
+	 *
+	 * To work around this issue and stop the hardware from causing
+	 * "overran dst buffer" on ciphertexts that are not a multiple
+	 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
+	 * scatter buffers.
+	 */
+	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+		&& (datalen % AES_BLOCK_SIZE);
 
 	/* figure how many gd are needed */
 	tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -732,7 +748,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	}
 
 	/* figure how many sd are needed */
-	if (sg_is_last(dst)) {
+	if (sg_is_last(dst) && force_sd == false) {
 		num_sd = 0;
 	} else {
 		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -807,9 +823,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	pd->sa_len = sa_len;
 
 	pd_uinfo = &dev->pdr_uinfo[pd_entry];
-	pd_uinfo->async_req = req;
 	pd_uinfo->num_gd = num_gd;
 	pd_uinfo->num_sd = num_sd;
+	pd_uinfo->dest_va = dst;
+	pd_uinfo->async_req = req;
 
 	if (iv_len)
 		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -828,7 +845,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		/* get first gd we are going to use */
 		gd_idx = fst_gd;
 		pd_uinfo->first_gd = fst_gd;
-		pd_uinfo->num_gd = num_gd;
 		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
 		pd->src = gd_dma;
 		/* enable gather */
@@ -865,17 +881,13 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		 * Indicate gather array is not used
 		 */
 		pd_uinfo->first_gd = 0xffffffff;
-		pd_uinfo->num_gd = 0;
 	}
-	if (sg_is_last(dst)) {
+	if (!num_sd) {
 		/*
 		 * we know the application gave us dst as one whole piece of memory,
 		 * so there is no need to use the scatter ring.
 		 */
-		pd_uinfo->using_sd = 0;
 		pd_uinfo->first_sd = 0xffffffff;
-		pd_uinfo->num_sd = 0;
-		pd_uinfo->dest_va = dst;
 		sa->sa_command_0.bf.scatter = 0;
 		pd->dest = (u32)dma_map_page(dev->core_dev->device,
 					     sg_page(dst), dst->offset,
@@ -888,10 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		u32 sd_idx = fst_sd;
 		nbytes = datalen;
 		sa->sa_command_0.bf.scatter = 1;
-		pd_uinfo->using_sd = 1;
-		pd_uinfo->dest_va = dst;
 		pd_uinfo->first_sd = fst_sd;
-		pd_uinfo->num_sd = num_sd;
 		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 		pd->dest = sd_dma;
 		/* setup scatter descriptor */
@@ -954,15 +963,10 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
 
 	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
 		ctx->sw_cipher.cipher =
-			crypto_alloc_skcipher(alg->base.cra_name, 0,
-					      CRYPTO_ALG_NEED_FALLBACK |
-					      CRYPTO_ALG_ASYNC);
+			crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+					      CRYPTO_ALG_NEED_FALLBACK);
 		if (IS_ERR(ctx->sw_cipher.cipher))
 			return PTR_ERR(ctx->sw_cipher.cipher);
-
-		crypto_skcipher_set_reqsize(sk,
-			sizeof(struct skcipher_request) + 32 +
-			crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
 	}
 
 	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
@@ -981,7 +985,7 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
 
 	crypto4xx_common_exit(ctx);
 	if (ctx->sw_cipher.cipher)
-		crypto_free_skcipher(ctx->sw_cipher.cipher);
+		crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
 }
 
 static int crypto4xx_aead_init(struct crypto_aead *tfm)
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 18df695ca6b1..c624f8cd3d2e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -64,7 +64,6 @@ union shadow_sa_buf {
 struct pd_uinfo {
 	struct crypto4xx_device *dev;
 	u32   state;
-	u32 using_sd;
 	u32 first_gd;		/* first gather descriptor
 				used by this packet */
 	u32 num_gd;             /* number of gather descriptors
@@ -131,7 +130,7 @@ struct crypto4xx_ctx {
 	__le32 iv_nonce;
 	u32 sa_len;
 	union {
-		struct crypto_skcipher *cipher;
+		struct crypto_sync_skcipher *cipher;
 		struct crypto_aead *aead;
 	} sw_cipher;
 };
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 65bf1a299562..fa76620281e8 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -800,20 +800,14 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-	const char *alg_name;
-
-	alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+	u32 flags;
+	int err;
 
-	/*
-	 * HW bug in cfb 3-keys mode.
-	 */
-	if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
-			&& (keylen != 2*DES_KEY_SIZE)) {
-		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	} else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
-		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
+	flags = crypto_ablkcipher_get_flags(tfm);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(tfm, flags);
+		return err;
 	}
 
 	memcpy(ctx->key, key, keylen);
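__des3_verify_key() centralizes the 3DES weak-key policy the drivers used to open-code: it rejects keys whose halves repeat (K1 == K2 or K2 == K3, which degrade 3DES toward single DES) when weak keys are forbidden, and reports the outcome through the flags word it is handed. The resulting setkey idiom, as in the hunk above:

	u32 flags = crypto_ablkcipher_get_flags(tfm);
	int err = __des3_verify_key(&flags, key);

	if (unlikely(err)) {
		/* push the updated result flags back to the tfm */
		crypto_ablkcipher_set_flags(tfm, flags);
		return err;
	}
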
@@ -1060,7 +1054,7 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
 	.cra_u.ablkcipher = {
-		.min_keysize	= 2 * DES_KEY_SIZE,
+		.min_keysize	= 3 * DES_KEY_SIZE,
 		.max_keysize	= 3 * DES_KEY_SIZE,
 		.setkey		= atmel_tdes_setkey,
 		.encrypt	= atmel_tdes_ecb_encrypt,
@@ -1079,7 +1073,7 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
 	.cra_u.ablkcipher = {
-		.min_keysize	= 2*DES_KEY_SIZE,
+		.min_keysize	= 3*DES_KEY_SIZE,
 		.max_keysize	= 3*DES_KEY_SIZE,
 		.ivsize		= DES_BLOCK_SIZE,
 		.setkey		= atmel_tdes_setkey,
@@ -1088,86 +1082,6 @@ static struct crypto_alg tdes_algs[] = {
 	}
 },
 {
-	.cra_name		= "cfb(des3_ede)",
-	.cra_driver_name	= "atmel-cfb-tdes",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= DES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask		= 0x7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize	= 2*DES_KEY_SIZE,
-		.max_keysize	= 2*DES_KEY_SIZE,
-		.ivsize		= DES_BLOCK_SIZE,
-		.setkey		= atmel_tdes_setkey,
-		.encrypt	= atmel_tdes_cfb_encrypt,
-		.decrypt	= atmel_tdes_cfb_decrypt,
-	}
-},
-{
-	.cra_name		= "cfb8(des3_ede)",
-	.cra_driver_name	= "atmel-cfb8-tdes",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= CFB8_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize	= 2*DES_KEY_SIZE,
-		.max_keysize	= 2*DES_KEY_SIZE,
-		.ivsize		= DES_BLOCK_SIZE,
-		.setkey		= atmel_tdes_setkey,
-		.encrypt	= atmel_tdes_cfb8_encrypt,
-		.decrypt	= atmel_tdes_cfb8_decrypt,
-	}
-},
-{
-	.cra_name		= "cfb16(des3_ede)",
-	.cra_driver_name	= "atmel-cfb16-tdes",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= CFB16_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask		= 0x1,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize	= 2*DES_KEY_SIZE,
-		.max_keysize	= 2*DES_KEY_SIZE,
-		.ivsize		= DES_BLOCK_SIZE,
-		.setkey		= atmel_tdes_setkey,
-		.encrypt	= atmel_tdes_cfb16_encrypt,
-		.decrypt	= atmel_tdes_cfb16_decrypt,
-	}
-},
-{
-	.cra_name		= "cfb32(des3_ede)",
-	.cra_driver_name	= "atmel-cfb32-tdes",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= CFB32_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
-	.cra_alignmask		= 0x3,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= atmel_tdes_cra_init,
-	.cra_u.ablkcipher = {
-		.min_keysize	= 2*DES_KEY_SIZE,
-		.max_keysize	= 2*DES_KEY_SIZE,
-		.ivsize		= DES_BLOCK_SIZE,
-		.setkey		= atmel_tdes_setkey,
-		.encrypt	= atmel_tdes_cfb32_encrypt,
-		.decrypt	= atmel_tdes_cfb32_decrypt,
-	}
-},
-{
 	.cra_name		= "ofb(des3_ede)",
 	.cra_driver_name	= "atmel-ofb-tdes",
 	.cra_priority		= 100,
@@ -1179,7 +1093,7 @@ static struct crypto_alg tdes_algs[] = {
 	.cra_module		= THIS_MODULE,
 	.cra_init		= atmel_tdes_cra_init,
 	.cra_u.ablkcipher = {
-		.min_keysize	= 2*DES_KEY_SIZE,
+		.min_keysize	= 3*DES_KEY_SIZE,
 		.max_keysize	= 3*DES_KEY_SIZE,
 		.ivsize		= DES_BLOCK_SIZE,
 		.setkey		= atmel_tdes_setkey,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 57e5dca3253f..d2fb72811442 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2247,8 +2247,6 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
 		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
 
 		hdesc->tfm = tfm_ctx->child_hash;
-		hdesc->flags = crypto_ahash_get_flags(tfm) &
-			       CRYPTO_TFM_REQ_MAY_SLEEP;
 
 		tfm_ctx->hmac_key_length = blocksize;
 		ret = crypto_shash_digest(hdesc, key, keylen,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 28f592f7e1b7..25f8d3913ceb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1840,13 +1840,14 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
 
 	if (keylen == (DES_KEY_SIZE * 3)) {
-		const u32 *K = (const u32 *)key;
-		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+		u32 flags;
+		int ret;
 
-		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+		flags = crypto_ablkcipher_get_flags(cipher);
+		ret = __des3_verify_key(&flags, key);
+		if (unlikely(ret)) {
 			crypto_ablkcipher_set_flags(cipher, flags);
-			return -EINVAL;
+			return ret;
 		}
 
 		ctx->cipher_type = CIPHER_TYPE_3DES;
@@ -2139,7 +2140,6 @@ static int ahash_init(struct ahash_request *req)
 			goto err_hash;
 		}
 		ctx->shash->tfm = hash;
-		ctx->shash->flags = 0;
 
 		/* Set the key using data we already have from setkey */
 		if (ctx->authkeylen > 0) {
@@ -2885,13 +2885,13 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)keys.enckey;
-			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+			u32 flags;
 
-			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-			    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+			flags = crypto_aead_get_flags(cipher);
+			ret = __des3_verify_key(&flags, keys.enckey);
+			if (unlikely(ret)) {
 				crypto_aead_set_flags(cipher, flags);
-				return -EINVAL;
+				return ret;
 			}
 
 			ctx->cipher_type = CIPHER_TYPE_3DES;
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index dbb5c03dde49..2baf6d7f2c1d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -22,9 +22,6 @@
 #include "spum.h"
 #include "cipher.h"
 
-/* This array is based on the hash algo type supported in spu.h */
-char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
-
 char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
 	"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
 
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d8cda5fb75ad..91ec56399d84 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -242,7 +242,6 @@ int do_shash(unsigned char *name, unsigned char *result,
 		goto do_shash_err;
 	}
 	sdesc->shash.tfm = hash;
-	sdesc->shash.flags = 0x0;
 
 	if (key_len > 0) {
 		rc = crypto_shash_setkey(hash, key, key_len);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 579578498deb..3e23d4b2cce2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -638,6 +638,39 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static int gcm_setkey(struct crypto_aead *aead,
 		      const u8 *key, unsigned int keylen)
 {
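des3_aead_setkey() above first splits the authenc()-style key blob with crypto_authenc_extractkeys(), which parses the rtattr header carrying enckeylen and leaves keys.authkey/keys.enckey pointing into the caller's buffer, then applies the 3DES check to the encryption half only before delegating to the regular aead_setkey(). Minimal usage sketch (consume_keys() is a hypothetical consumer):

	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		return err;
	/* keys.enckey/keys.enckeylen: cipher key; keys.authkey/keys.authkeylen: MAC key */
	err = consume_keys(&keys);
	memzero_explicit(&keys, sizeof(keys));
	return err;
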
@@ -2457,7 +2490,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2479,7 +2512,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2502,7 +2535,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2525,7 +2558,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2548,7 +2581,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2571,7 +2604,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2594,7 +2627,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2617,7 +2650,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2640,7 +2673,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2663,7 +2696,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2686,7 +2719,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2709,7 +2742,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -3460,7 +3493,7 @@ static int __init caam_algapi_init(void)
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
 	u32 arc4_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
-	bool registered = false;
+	bool registered = false, gcm_support;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	if (!dev_node) {
@@ -3493,7 +3526,7 @@ static int __init caam_algapi_init(void)
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
 	 */
 	if (priv->era < 10) {
-		u32 cha_vid, cha_inst;
+		u32 cha_vid, cha_inst, aes_rn;
 
 		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
 		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
@@ -3508,6 +3541,10 @@ static int __init caam_algapi_init(void)
 			    CHA_ID_LS_ARC4_SHIFT;
 		ccha_inst = 0;
 		ptha_inst = 0;
+
+		aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
+			 CHA_ID_LS_AES_MASK;
+		gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
 	} else {
 		u32 aesa, mdha;
 
@@ -3523,6 +3560,8 @@ static int __init caam_algapi_init(void)
 		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
 		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
 		arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+		gcm_support = aesa & CHA_VER_MISC_AES_GCM;
 	}
 
 	/* If MD is present, limit digest size based on LP256 */
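[Editor's note] An illustrative condensation (not part of the patch) of the two detection paths above: pre-era-10 parts are assumed to support GCM unless the AES unit is the low-power variant below revision 8, while era 10+ parts advertise GCM explicitly in AESA_MISC:

	/* Hypothetical helper mirroring the gcm_support logic above. */
	static bool sketch_gcm_supported(bool era_ge_10, u32 aes_vid,
					 u32 aes_rn, u32 aesa_misc)
	{
		if (era_ge_10)
			return aesa_misc & CHA_VER_MISC_AES_GCM;

		return !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
	}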
@@ -3595,11 +3634,9 @@ static int __init caam_algapi_init(void)
 		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
 			continue;
 
-		/*
-		 * Check support for AES algorithms not available
-		 * on LP devices.
-		 */
-		if (aes_vid  == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+		/* Skip GCM algorithms if not supported by device */
+		if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+		    alg_aai == OP_ALG_AAI_GCM && !gcm_support)
 			continue;
 
 		/*
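[Editor's note] The point of routing the 3DES algorithms through des3_aead_setkey() is the __des3_verify_key() call, which (when the transform forbids weak keys) rejects keys that degrade EDE to single DES. A standalone sketch of that policy, an approximation rather than the kernel implementation:

	#include <stdint.h>
	#include <string.h>

	#define DES_KEY_SIZE		8
	#define DES3_EDE_KEY_SIZE	(3 * DES_KEY_SIZE)

	/* A 3DES key K1|K2|K3 with K1 == K2 or K2 == K3 collapses
	 * encrypt-decrypt-encrypt to a single DES operation. */
	static int sketch_des3_key_is_degenerate(const uint8_t *key)
	{
		return !memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
		       !memcmp(key + DES_KEY_SIZE,
			       key + 2 * DES_KEY_SIZE, DES_KEY_SIZE);
	}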
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c61921d32489..70af211d2d01 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -292,6 +292,39 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -667,6 +700,13 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return unlikely(des3_verify_key(skcipher, key)) ?:
+	       skcipher_setkey(skcipher, key, keylen);
+}
+
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
@@ -1382,7 +1422,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_driver_name = "cbc-3des-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = skcipher_setkey,
+			.setkey = des3_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
 			.decrypt = skcipher_decrypt,
 			.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1798,7 +1838,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1820,7 +1860,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1843,7 +1883,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1866,7 +1906,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1889,7 +1929,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1912,7 +1952,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1935,7 +1975,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1958,7 +1998,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1981,7 +2021,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2004,7 +2044,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2027,7 +2067,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2050,7 +2090,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index c2c1abc68f81..33a4df6b81de 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -323,6 +323,39 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					   bool encrypt)
 {
@@ -938,6 +971,13 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	return 0;
 }
 
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+				const u8 *key, unsigned int keylen)
+{
+	return unlikely(des3_verify_key(skcipher, key)) ?:
+	       skcipher_setkey(skcipher, key, keylen);
+}
+
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
@@ -1484,7 +1524,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_driver_name = "cbc-3des-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = skcipher_setkey,
+			.setkey = des3_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
 			.decrypt = skcipher_decrypt,
 			.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1916,7 +1956,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1938,7 +1978,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1961,7 +2001,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -1984,7 +2024,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2007,7 +2047,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2030,7 +2070,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2053,7 +2093,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2076,7 +2116,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2099,7 +2139,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2122,7 +2162,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2145,7 +2185,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2168,7 +2208,7 @@ static struct caam_aead_alg driver_aeads[] = {
 						   "cbc-des3_ede-caam-qi2",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = aead_setkey,
+			.setkey = des3_aead_setkey,
 			.setauthsize = aead_setauthsize,
 			.encrypt = aead_encrypt,
 			.decrypt = aead_decrypt,
@@ -2854,6 +2894,7 @@ struct caam_hash_state {
 	struct caam_request caam_req;
 	dma_addr_t buf_dma;
 	dma_addr_t ctx_dma;
+	int ctx_dma_len;
 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	int buflen_0;
 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2927,6 +2968,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
 				   struct caam_hash_state *state, int ctx_len,
 				   struct dpaa2_sg_entry *qm_sg, u32 flag)
 {
+	state->ctx_dma_len = ctx_len;
 	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
 	if (dma_mapping_error(dev, state->ctx_dma)) {
 		dev_err(dev, "unable to map ctx\n");
@@ -3018,13 +3060,13 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
 }
 
 /* Digest the key if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-			   u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+			   u32 digestsize)
 {
 	struct caam_request *req_ctx;
 	u32 *desc;
 	struct split_key_sh_result result;
-	dma_addr_t src_dma, dst_dma;
+	dma_addr_t key_dma;
 	struct caam_flc *flc;
 	dma_addr_t flc_dma;
 	int ret = -ENOMEM;
@@ -3041,17 +3083,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 	if (!flc)
 		goto err_flc;
 
-	src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->dev, src_dma)) {
-		dev_err(ctx->dev, "unable to map key input memory\n");
-		goto err_src_dma;
-	}
-	dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
-				 DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, dst_dma)) {
-		dev_err(ctx->dev, "unable to map key output memory\n");
-		goto err_dst_dma;
+	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(ctx->dev, key_dma)) {
+		dev_err(ctx->dev, "unable to map key memory\n");
+		goto err_key_dma;
 	}
 
 	desc = flc->sh_desc;
@@ -3076,14 +3111,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(in_fle, src_dma);
+	dpaa2_fl_set_addr(in_fle, key_dma);
 	dpaa2_fl_set_len(in_fle, *keylen);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, dst_dma);
+	dpaa2_fl_set_addr(out_fle, key_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
-			     DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
 	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
 			     1);
@@ -3103,17 +3138,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 		wait_for_completion(&result.completion);
 		ret = result.err;
 		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+				     DUMP_PREFIX_ADDRESS, 16, 4, key,
 				     digestsize, 1);
 	}
 
 	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
 			 DMA_TO_DEVICE);
 err_flc_dma:
-	dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-err_dst_dma:
-	dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-err_src_dma:
+	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
 	kfree(flc);
 err_flc:
 	kfree(req_ctx);
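[Editor's note] hash_digest_key() above now maps a single buffer DMA_BIDIRECTIONAL so the engine reads the key and writes its digest in place, replacing the separate DMA_TO_DEVICE/DMA_FROM_DEVICE mappings. A minimal sketch of the pattern, with the engine-specific steps elided:

	#include <linux/dma-mapping.h>

	/* One mapping serves as both source and destination. */
	static int sketch_run_in_place(struct device *dev, void *buf,
				       size_t len)
	{
		dma_addr_t dma;

		dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/*
		 * ... point both the input and output frame-list entries
		 * at 'dma', enqueue the job and wait for completion ...
		 */

		dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
		return 0;
	}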
@@ -3135,12 +3168,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
 
 	if (keylen > blocksize) {
-		hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
-					   GFP_KERNEL | GFP_DMA);
+		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
 		if (!hashed_key)
 			return -ENOMEM;
-		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
-				      digestsize);
+		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
 		if (ret)
 			goto bad_free_key;
 		key = hashed_key;
@@ -3165,14 +3196,12 @@ bad_free_key:
 }
 
 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
-			       struct ahash_request *req, int dst_len)
+			       struct ahash_request *req)
 {
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (edesc->src_nents)
 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
-	if (edesc->dst_dma)
-		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
 	if (edesc->qm_sg_bytes)
 		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3187,18 +3216,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
 
 static inline void ahash_unmap_ctx(struct device *dev,
 				   struct ahash_edesc *edesc,
-				   struct ahash_request *req, int dst_len,
-				   u32 flag)
+				   struct ahash_request *req, u32 flag)
 {
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (state->ctx_dma) {
-		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
 		state->ctx_dma = 0;
 	}
-	ahash_unmap(dev, edesc, req, dst_len);
+	ahash_unmap(dev, edesc, req);
 }
 
 static void ahash_done(void *cbk_ctx, u32 status)
@@ -3219,16 +3245,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
 
 	req->base.complete(&req->base, ecode);
 }
@@ -3250,7 +3273,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	switch_buf(state);
 	qi_cache_free(edesc);
 
@@ -3283,16 +3306,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	qi_cache_free(edesc);
 
 	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 			     ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
-				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-				     digestsize, 1);
 
 	req->base.complete(&req->base, ecode);
 }
@@ -3314,7 +3334,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
 		ecode = -EIO;
 	}
 
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	switch_buf(state);
 	qi_cache_free(edesc);
 
@@ -3452,7 +3472,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3484,7 +3504,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-			       DMA_TO_DEVICE);
+			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -3503,22 +3523,13 @@ static int ahash_final_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
-
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3533,7 +3544,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 		return ret;
 
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3586,7 +3597,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
-			       DMA_TO_DEVICE);
+			       DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -3605,22 +3616,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
-
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3635,7 +3637,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 		return ret;
 
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3704,18 +3706,19 @@ static int ahash_digest(struct ahash_request *req)
 		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
 	}
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		goto unmap;
 	}
 
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_len(in_fle, req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -3729,7 +3732,7 @@ static int ahash_digest(struct ahash_request *req)
 		return ret;
 
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3755,27 +3758,39 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	if (!edesc)
 		return ret;
 
-	state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(ctx->dev, state->buf_dma)) {
-		dev_err(ctx->dev, "unable to map src\n");
-		goto unmap;
+	if (buflen) {
+		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+			dev_err(ctx->dev, "unable to map src\n");
+			goto unmap;
+		}
 	}
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		goto unmap;
 	}
 
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
-	dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(in_fle, state->buf_dma);
-	dpaa2_fl_set_len(in_fle, buflen);
+	/*
+	 * The crypto engine requires the input entry to be present when
+	 * a "frame list" FD is used. Since the engine does not support
+	 * FMT=2'b11 (unused entry type), leaving in_fle zeroized (except
+	 * for the "Final" flag) is the best option.
+	 */
+	if (buflen) {
+		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+		dpaa2_fl_set_addr(in_fle, state->buf_dma);
+		dpaa2_fl_set_len(in_fle, buflen);
+	}
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -3790,7 +3805,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 		return ret;
 
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3870,6 +3885,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		}
 		edesc->qm_sg_bytes = qm_sg_bytes;
 
+		state->ctx_dma_len = ctx->ctx_len;
 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 						ctx->ctx_len, DMA_FROM_DEVICE);
 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3918,7 +3934,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -3983,11 +3999,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	}
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
-	edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+	state->ctx_dma_len = digestsize;
+	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
 					DMA_FROM_DEVICE);
-	if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
-		dev_err(ctx->dev, "unable to map dst\n");
-		edesc->dst_dma = 0;
+	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+		dev_err(ctx->dev, "unable to map ctx\n");
+		state->ctx_dma = 0;
 		ret = -ENOMEM;
 		goto unmap;
 	}
@@ -3998,7 +4015,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 	dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
 	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-	dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
 	dpaa2_fl_set_len(out_fle, digestsize);
 
 	req_ctx->flc = &ctx->flc[DIGEST];
@@ -4013,7 +4030,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 
 	return ret;
 unmap:
-	ahash_unmap(ctx->dev, edesc, req, digestsize);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
 	qi_cache_free(edesc);
 	return -ENOMEM;
 }
@@ -4100,6 +4117,7 @@ static int ahash_update_first(struct ahash_request *req)
 			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
 						 *next_buflen, 0);
 
+		state->ctx_dma_len = ctx->ctx_len;
 		state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
 						ctx->ctx_len, DMA_FROM_DEVICE);
 		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4143,7 +4161,7 @@ static int ahash_update_first(struct ahash_request *req)
 
 	return ret;
 unmap_ctx:
-	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
 	qi_cache_free(edesc);
 	return ret;
 }
@@ -4162,6 +4180,7 @@ static int ahash_init(struct ahash_request *req)
 	state->final = ahash_final_no_ctx;
 
 	state->ctx_dma = 0;
+	state->ctx_dma_len = 0;
 	state->current_buf = 0;
 	state->buf_dma = 0;
 	state->buflen_0 = 0;
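[Editor's note] The common thread in the ahash changes above: the engine now always DMAs the digest into the driver-owned state->caam_ctx buffer, and req->result is filled by memcpy() after unmapping, so caller-supplied result buffers are never DMA-mapped. A sketch of the completion-side pattern, using the names from the patch:

	/* Illustrative completion helper, assuming the patched
	 * caam_hash_state layout (ctx_dma, ctx_dma_len, caam_ctx). */
	static void sketch_copy_digest(struct device *dev,
				       struct caam_hash_state *state,
				       struct ahash_request *req,
				       int digestsize)
	{
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len,
				 DMA_FROM_DEVICE);
		state->ctx_dma = 0;
		memcpy(req->result, state->caam_ctx, digestsize);
	}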
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 20890780fb82..be5085451053 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
 
 /*
  * ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
  * @qm_sg_dma: I/O virtual address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @qm_sg_bytes: length of dma mapped qm_sg space
  * @sgt: pointer to h/w link table
  */
 struct ahash_edesc {
-	dma_addr_t dst_dma;
 	dma_addr_t qm_sg_dma;
 	int src_nents;
 	int qm_sg_bytes;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 58285642306e..fe24485274e1 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -994,8 +994,6 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
 static struct akcipher_alg caam_rsa = {
 	.encrypt = caam_rsa_enc,
 	.decrypt = caam_rsa_dec,
-	.sign = caam_rsa_dec,
-	.verify = caam_rsa_enc,
 	.set_pub_key = caam_rsa_set_pub_key,
 	.set_priv_key = caam_rsa_set_priv_key,
 	.max_size = caam_rsa_max_size,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 858bdc9ab4a3..e2ba3d202da5 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -468,6 +468,24 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
 		return caam_get_era_from_hw(ctrl);
 }
 
+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This is not a problem when running single descriptors, but can
+ * be when running multiple concurrent descriptors. Reworking the driver
+ * to throttle to single requests is impractical, so the workaround is to
+ * limit the AXI pipeline to a depth of 1 (from its default of 4) to
+ * preclude this situation from occurring.
+ */
+static void handle_imx6_err005766(u32 *mcr)
+{
+	if (of_machine_is_compatible("fsl,imx6q") ||
+	    of_machine_is_compatible("fsl,imx6dl") ||
+	    of_machine_is_compatible("fsl,imx6qp"))
+		clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+			      1 << MCFGR_AXIPIPE_SHIFT);
+}
+
 static const struct of_device_id caam_match[] = {
 	{
 		.compatible = "fsl,sec-v4.0",
@@ -640,6 +658,8 @@ static int caam_probe(struct platform_device *pdev)
 			      (sizeof(dma_addr_t) == sizeof(u64) ?
 			       MCFGR_LONG_PTR : 0));
 
+	handle_imx6_err005766(&ctrl->mcr);
+
 	/*
 	 *  Read the Compile Time parameters and SCFGR to determine
 	 * if Virtualization is enabled for this platform
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 21a70fd32f5d..a4129a35a330 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -138,7 +138,7 @@ static const struct {
 	{ 0x46, "Annotation length exceeds offset (reuse mode)"},
 	{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
 	{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
-	{ 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
+	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
 	{ 0x51, "Unsupported IF reuse mode"},
 	{ 0x52, "Unsupported FL use mode"},
 	{ 0x53, "Unsupported RJD use mode"},
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5869ad58d497..3392615dc91b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -49,13 +49,11 @@ struct caam_drv_private_jr {
 	atomic_t tfm_count ____cacheline_aligned;
 
 	/* Job ring info */
-	int ringsize;	/* Size of rings (assume input = output) */
 	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
 	spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
-	int inp_ring_write_index;	/* Input index "tail" */
+	u32 inpring_avail;	/* Number of free entries in input ring */
 	int head;			/* entinfo (s/w ring) head index */
 	dma_addr_t *inpring;	/* Base of input ring, alloc DMA-safe */
-	spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
 	int out_ring_read_index;	/* Output index "tail" */
 	int tail;			/* entinfo (s/w ring) tail index */
 	struct jr_outentry *outring;	/* Base of output ring, DMA-safe */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d50085a03597..044a69b526f7 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -170,13 +170,13 @@ static void caam_jr_dequeue(unsigned long devarg)
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
 	void *userarg;
+	u32 outring_used = 0;
 
-	while (rd_reg32(&jrp->rregs->outring_used)) {
+	while (outring_used ||
+	       (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
 
 		head = READ_ONCE(jrp->head);
 
-		spin_lock(&jrp->outlock);
-
 		sw_idx = tail = jrp->tail;
 		hw_idx = jrp->out_ring_read_index;
 
@@ -199,7 +199,7 @@ static void caam_jr_dequeue(unsigned long devarg)
 		/* mark completed, avoid matching on a recycled desc addr */
 		jrp->entinfo[sw_idx].desc_addr_dma = 0;
 
-		/* Stash callback params for use outside of lock */
+		/* Stash callback params */
 		usercall = jrp->entinfo[sw_idx].callbk;
 		userarg = jrp->entinfo[sw_idx].cbkarg;
 		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
@@ -213,7 +213,7 @@ static void caam_jr_dequeue(unsigned long devarg)
 		mb();
 
 		/* set done */
-		wr_reg32(&jrp->rregs->outring_rmvd, 1);
+		wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);
 
 		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
 					   (JOBR_DEPTH - 1);
@@ -232,10 +232,9 @@ static void caam_jr_dequeue(unsigned long devarg)
 			jrp->tail = tail;
 		}
 
-		spin_unlock(&jrp->outlock);
-
 		/* Finally, execute user's callback */
 		usercall(dev, userdesc, userstatus, userarg);
+		outring_used--;
 	}
 
 	/* reenable / unmask IRQs */
@@ -345,7 +344,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 	head = jrp->head;
 	tail = READ_ONCE(jrp->tail);
 
-	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+	if (!jrp->inpring_avail ||
 	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
 		spin_unlock_bh(&jrp->inplock);
 		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -359,7 +358,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 	head_entry->cbkarg = areq;
 	head_entry->desc_addr_dma = desc_dma;
 
-	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+	jrp->inpring[head] = cpu_to_caam_dma(desc_dma);
 
 	/*
 	 * Guarantee that the descriptor's DMA address has been written to
@@ -368,18 +367,22 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 	 */
 	smp_wmb();
 
-	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
-				    (JOBR_DEPTH - 1);
 	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
 	/*
 	 * Ensure that all job information has been written before
-	 * notifying CAAM that a new job was added to the input ring.
+	 * notifying CAAM that a new job was added to the input ring
+	 * using a memory barrier. wr_reg32() uses the iowrite32() API
+	 * to do the register write, and iowrite32() issues a memory
+	 * barrier before the write operation.
 	 */
-	wmb();
 
 	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
+	jrp->inpring_avail--;
+	if (!jrp->inpring_avail)
+		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
+
 	spin_unlock_bh(&jrp->inplock);
 
 	return 0;
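[Editor's note] The enqueue path above now trades one MMIO read per job for a cached software counter: inpring_avail is decremented locally and re-read from the hardware register only when it reaches zero. The shape of the pattern, with hypothetical names:

	/* Sketch: refresh the cached free-slot count lazily. */
	struct sketch_ring {
		u32 avail_cached;	/* s/w copy of free entries */
		u32 __iomem *avail_reg;	/* h/w free-entry counter */
	};

	static bool sketch_ring_take_slot(struct sketch_ring *r)
	{
		if (!r->avail_cached)
			r->avail_cached = readl(r->avail_reg);
		if (!r->avail_cached)
			return false;	/* ring genuinely full */
		r->avail_cached--;
		return true;
	}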
@@ -431,7 +434,6 @@ static int caam_jr_init(struct device *dev)
 		jrp->entinfo[i].desc_addr_dma = !0;
 
 	/* Setup rings */
-	jrp->inp_ring_write_index = 0;
 	jrp->out_ring_read_index = 0;
 	jrp->head = 0;
 	jrp->tail = 0;
@@ -441,10 +443,9 @@ static int caam_jr_init(struct device *dev)
 	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
 	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
 
-	jrp->ringsize = JOBR_DEPTH;
+	jrp->inpring_avail = JOBR_DEPTH;
 
 	spin_lock_init(&jrp->inplock);
-	spin_lock_init(&jrp->outlock);
 
 	/* Select interrupt coalescing parameters */
 	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7cb8b1755e57..9f08f84cca59 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
 	/* Create a new req FQ in parked state */
 	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
 				    drv_ctx->context_a, 0);
-	if (IS_ERR_OR_NULL(new_fq)) {
+	if (IS_ERR(new_fq)) {
 		dev_err(qidev, "FQ allocation for shdesc update failed\n");
 		return PTR_ERR(new_fq);
 	}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
 	/* Attach request FQ */
 	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
 					     QMAN_INITFQ_FLAG_SCHED);
-	if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
+	if (IS_ERR(drv_ctx->req_fq)) {
 		dev_err(qidev, "create_caam_req_fq failed\n");
 		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
 		kfree(drv_ctx);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3cd0822ea819..c1fa1ec701d9 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -96,6 +96,14 @@ cpu_to_caam(16)
 cpu_to_caam(32)
 cpu_to_caam(64)
 
+static inline void wr_reg32_relaxed(void __iomem *reg, u32 data)
+{
+	if (caam_little_end)
+		writel_relaxed(data, reg);
+	else
+		writel_relaxed(cpu_to_be32(data), reg);
+}
+
 static inline void wr_reg32(void __iomem *reg, u32 data)
 {
 	if (caam_little_end)
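[Editor's note] The _relaxed variant exists because, on weakly ordered architectures, writel() implies a barrier ordering the store after prior memory accesses, while writel_relaxed() is a bare store. That makes the relaxed form suitable only for writes with no such dependency, e.g. the outring_rmvd ack in caam_jr_dequeue() above. An assumed-usage contrast, ignoring the endianness wrapper:

	/* Job data the device will DMA must be visible before the
	 * doorbell: use the ordered write. */
	writel(1, &jrp->rregs->inpring_jobadd);

	/* A pure "I consumed one entry" ack has no prior-data
	 * dependency: the relaxed write avoids the barrier cost. */
	writel_relaxed(1, &jrp->rregs->outring_rmvd);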
@@ -253,6 +261,9 @@ struct version_regs {
 #define CHA_VER_VID_SHIFT	24
 #define CHA_VER_VID_MASK	(0xffull << CHA_VER_VID_SHIFT)
 
+/* CHA Miscellaneous Information - AESA_MISC specific */
+#define CHA_VER_MISC_AES_GCM	BIT(1 + CHA_VER_MISC_SHIFT)
+
 /*
  * caam_perfmon - Performance Monitor/Secure Memory Status/
  *                CAAM Global Status/Component Version IDs
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 600336d169a9..9810ad8ac519 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -10,7 +10,6 @@
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
-#include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
 #include <crypto/des.h>
 #include <crypto/xts.h>
@@ -327,27 +326,36 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 			       u32 keylen)
 {
+	u32 flags = crypto_ablkcipher_get_flags(cipher);
+	int err;
+
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
+
 	return cvm_setkey(cipher, key, keylen, DES3_CBC);
 }
 
 static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 			       u32 keylen)
 {
+	u32 flags = crypto_ablkcipher_get_flags(cipher);
+	int err;
+
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
+
 	return cvm_setkey(cipher, key, keylen, DES3_ECB);
 }
 
 static int cvm_enc_dec_init(struct crypto_tfm *tfm)
 {
-	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	memset(ctx, 0, sizeof(*ctx));
-	tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
-					sizeof(struct ablkcipher_request);
-	/* Additional memory for ablkcipher_request is
-	 * allocated since the cryptd daemon uses
-	 * this memory for request_ctx information
-	 */
-
+	tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
 	return 0;
 }
 
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 2ca431ed1db8..88a0166f5477 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -641,7 +641,7 @@ static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
 	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
 }
 
-void cptvf_device_init(struct cpt_vf *cptvf)
+static void cptvf_device_init(struct cpt_vf *cptvf)
 {
 	u64 base_addr = 0;
 
diff --git a/drivers/crypto/cavium/cpt/cptvf_mbox.c b/drivers/crypto/cavium/cpt/cptvf_mbox.c
index d5ec3b8a9e61..4f438eceb506 100644
--- a/drivers/crypto/cavium/cpt/cptvf_mbox.c
+++ b/drivers/crypto/cavium/cpt/cptvf_mbox.c
@@ -17,23 +17,6 @@ static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
 			mbx->data);
 }
 
-/* ACKs PF's mailbox message
- */
-void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
-	mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
-	cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
-/* NACKs PF's mailbox message that VF is not able to
- * complete the action
- */
-void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
-	mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
-	cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
 /* Interrupt handler to handle mailbox messages from VFs */
 void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
 {
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index ca549c5dc08e..f16f61504241 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -223,7 +223,7 @@ scatter_gather_clean:
 	return ret;
 }
 
-int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
+static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
 		     u32 qno)
 {
 	struct pci_dev *pdev = cptvf->pdev;
@@ -270,7 +270,7 @@ int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
 	return ret;
 }
 
-void do_request_cleanup(struct cpt_vf *cptvf,
+static void do_request_cleanup(struct cpt_vf *cptvf,
 			struct cpt_info_buffer *info)
 {
 	int i;
@@ -316,7 +316,7 @@ void do_request_cleanup(struct cpt_vf *cptvf,
 	kzfree(info);
 }
 
-void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
+static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
 {
 	struct pci_dev *pdev = cptvf->pdev;
 
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 4f43eacd2557..e4841eb2a09f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -18,26 +18,6 @@
 
 #define GCM_AES_SALT_SIZE	4
 
-/**
- * struct nitrox_crypt_params - Params to set nitrox crypto request.
- * @cryptlen: Encryption/Decryption data length
- * @authlen: Assoc data length + Cryptlen
- * @srclen: Input buffer length
- * @dstlen: Output buffer length
- * @iv: IV data
- * @ivsize: IV data length
- * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
- */
-struct nitrox_crypt_params {
-	unsigned int cryptlen;
-	unsigned int authlen;
-	unsigned int srclen;
-	unsigned int dstlen;
-	u8 *iv;
-	int ivsize;
-	u8 ctrl_arg;
-};
-
 union gph_p3 {
 	struct {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -94,36 +74,40 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
 	return 0;
 }
 
-static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
+static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
+			    struct scatterlist *src, char *iv, int ivsize,
 			    int buflen)
 {
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-	int nents = sg_nents_for_len(areq->src, buflen) + 1;
+	int nents = sg_nents_for_len(src, buflen);
 	int ret;
 
 	if (nents < 0)
 		return nents;
 
+	/* IV entry */
+	nents += 1;
 	/* Allocate buffer to hold IV and input scatterlist array */
 	ret = alloc_src_req_buf(nkreq, nents, ivsize);
 	if (ret)
 		return ret;
 
 	nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
-	nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
+	nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);
 
 	return 0;
 }
 
-static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
+static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq,
+			    struct scatterlist *dst, int ivsize, int buflen)
 {
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-	int nents = sg_nents_for_len(areq->dst, buflen) + 3;
+	int nents = sg_nents_for_len(dst, buflen);
 	int ret;
 
 	if (nents < 0)
 		return nents;
 
+	/* IV, ORH, COMPLETION entries */
+	nents += 3;
 	/* Allocate buffer to hold ORH, COMPLETION and output scatterlist
 	 * array
 	 */
@@ -133,61 +117,54 @@ static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
 
 	nitrox_creq_set_orh(nkreq);
 	nitrox_creq_set_comp(nkreq);
-	nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
+	nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen);
 
 	return 0;
 }
 
-static void free_src_sglist(struct aead_request *areq)
+static void free_src_sglist(struct nitrox_kcrypt_request *nkreq)
 {
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
 	kfree(nkreq->src);
 }
 
-static void free_dst_sglist(struct aead_request *areq)
+static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq)
 {
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
 	kfree(nkreq->dst);
 }
 
-static int nitrox_set_creq(struct aead_request *areq,
-			   struct nitrox_crypt_params *params)
+static int nitrox_set_creq(struct nitrox_aead_rctx *rctx)
 {
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-	struct se_crypto_request *creq = &nkreq->creq;
-	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct se_crypto_request *creq = &rctx->nkreq.creq;
 	union gph_p3 param3;
-	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
 	int ret;
 
-	creq->flags = areq->base.flags;
-	creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		GFP_KERNEL : GFP_ATOMIC;
+	creq->flags = rctx->flags;
+	creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+							       GFP_ATOMIC;
 
 	creq->ctrl.value = 0;
 	creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
-	creq->ctrl.s.arg = params->ctrl_arg;
+	creq->ctrl.s.arg = rctx->ctrl_arg;
 
-	creq->gph.param0 = cpu_to_be16(params->cryptlen);
-	creq->gph.param1 = cpu_to_be16(params->authlen);
-	creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
+	creq->gph.param0 = cpu_to_be16(rctx->cryptlen);
+	creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen);
+	creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen);
 	param3.iv_offset = 0;
-	param3.auth_offset = params->ivsize;
+	param3.auth_offset = rctx->ivsize;
 	creq->gph.param3 = cpu_to_be16(param3.param);
 
-	creq->ctx_handle = nctx->u.ctx_handle;
+	creq->ctx_handle = rctx->ctx_handle;
 	creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
 
-	ret = alloc_src_sglist(areq, params->iv, params->ivsize,
-			       params->srclen);
+	ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize,
+			       rctx->srclen);
 	if (ret)
 		return ret;
 
-	ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
+	ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize,
+			       rctx->dstlen);
 	if (ret) {
-		free_src_sglist(areq);
+		free_src_sglist(&rctx->nkreq);
 		return ret;
 	}
 
@@ -197,9 +174,10 @@ static int nitrox_set_creq(struct aead_request *areq,
 static void nitrox_aead_callback(void *arg, int err)
 {
 	struct aead_request *areq = arg;
+	struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
 
-	free_src_sglist(areq);
-	free_dst_sglist(areq);
+	free_src_sglist(&rctx->nkreq);
+	free_dst_sglist(&rctx->nkreq);
 	if (err) {
 		pr_err_ratelimited("request failed status 0x%0x\n", err);
 		err = -EINVAL;
@@ -212,23 +190,25 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-	struct se_crypto_request *creq = &nkreq->creq;
+	struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+	struct se_crypto_request *creq = &rctx->nkreq.creq;
 	struct flexi_crypto_context *fctx = nctx->u.fctx;
-	struct nitrox_crypt_params params;
 	int ret;
 
 	memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
 
-	memset(&params, 0, sizeof(params));
-	params.cryptlen = areq->cryptlen;
-	params.authlen = areq->assoclen + params.cryptlen;
-	params.srclen = params.authlen;
-	params.dstlen = params.srclen + aead->authsize;
-	params.iv = &areq->iv[GCM_AES_SALT_SIZE];
-	params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
-	params.ctrl_arg = ENCRYPT;
-	ret = nitrox_set_creq(areq, &params);
+	rctx->cryptlen = areq->cryptlen;
+	rctx->assoclen = areq->assoclen;
+	rctx->srclen = areq->assoclen + areq->cryptlen;
+	rctx->dstlen = rctx->srclen + aead->authsize;
+	rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+	rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+	rctx->flags = areq->base.flags;
+	rctx->ctx_handle = nctx->u.ctx_handle;
+	rctx->src = areq->src;
+	rctx->dst = areq->dst;
+	rctx->ctrl_arg = ENCRYPT;
+	ret = nitrox_set_creq(rctx);
 	if (ret)
 		return ret;
 
@@ -241,23 +221,25 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
-	struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-	struct se_crypto_request *creq = &nkreq->creq;
+	struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+	struct se_crypto_request *creq = &rctx->nkreq.creq;
 	struct flexi_crypto_context *fctx = nctx->u.fctx;
-	struct nitrox_crypt_params params;
 	int ret;
 
 	memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
 
-	memset(&params, 0, sizeof(params));
-	params.cryptlen = areq->cryptlen - aead->authsize;
-	params.authlen = areq->assoclen + params.cryptlen;
-	params.srclen = areq->cryptlen + areq->assoclen;
-	params.dstlen = params.srclen - aead->authsize;
-	params.iv = &areq->iv[GCM_AES_SALT_SIZE];
-	params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
-	params.ctrl_arg = DECRYPT;
-	ret = nitrox_set_creq(areq, &params);
+	rctx->cryptlen = areq->cryptlen - aead->authsize;
+	rctx->assoclen = areq->assoclen;
+	rctx->srclen = areq->cryptlen + areq->assoclen;
+	rctx->dstlen = rctx->srclen - aead->authsize;
+	rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+	rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+	rctx->flags = areq->base.flags;
+	rctx->ctx_handle = nctx->u.ctx_handle;
+	rctx->src = areq->src;
+	rctx->dst = areq->dst;
+	rctx->ctrl_arg = DECRYPT;
+	ret = nitrox_set_creq(rctx);
 	if (ret)
 		return ret;
 
@@ -290,7 +272,7 @@ static int nitrox_aead_init(struct crypto_aead *aead)
 	return 0;
 }
 
-static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+static int nitrox_gcm_common_init(struct crypto_aead *aead)
 {
 	int ret;
 	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
@@ -308,8 +290,20 @@ static int nitrox_aes_gcm_init(struct crypto_aead *aead)
 	flags->w0.auth_input_type = 1;
 	flags->f = be64_to_cpu(flags->f);
 
-	crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
-				sizeof(struct nitrox_kcrypt_request));
+	return 0;
+}
+
+static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+{
+	int ret;
+
+	ret = nitrox_gcm_common_init(aead);
+	if (ret)
+		return ret;
+
+	crypto_aead_set_reqsize(aead,
+				sizeof(struct aead_request) +
+					sizeof(struct nitrox_aead_rctx));
 
 	return 0;
 }
@@ -332,6 +326,166 @@ static void nitrox_aead_exit(struct crypto_aead *aead)
 	nctx->ndev = NULL;
 }
 
+static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key,
+				 unsigned int keylen)
+{
+	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+	struct flexi_crypto_context *fctx = nctx->u.fctx;
+	int ret;
+
+	if (keylen < GCM_AES_SALT_SIZE)
+		return -EINVAL;
+
+	keylen -= GCM_AES_SALT_SIZE;
+	ret = nitrox_aes_gcm_setkey(aead, key, keylen);
+	if (ret)
+		return ret;
+
+	memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE);
+	return 0;
+}
+
+static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
+				      unsigned int authsize)
+{
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return nitrox_aead_setauthsize(aead, authsize);
+}
+
+static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
+{
+	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+	unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+	struct scatterlist *sg;
+
+	if (areq->assoclen != 16 && areq->assoclen != 20)
+		return -EINVAL;
+
+	scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
+	sg_init_table(rctx->src, 3);
+	sg_set_buf(rctx->src, rctx->assoc, assoclen);
+	sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
+	if (sg != rctx->src + 1)
+		sg_chain(rctx->src, 2, sg);
+
+	if (areq->src != areq->dst) {
+		sg_init_table(rctx->dst, 3);
+		sg_set_buf(rctx->dst, rctx->assoc, assoclen);
+		sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
+		if (sg != rctx->dst + 1)
+			sg_chain(rctx->dst, 2, sg);
+	}
+
+	aead_rctx->src = rctx->src;
+	aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;
+
+	return 0;
+}
+
+static void nitrox_rfc4106_callback(void *arg, int err)
+{
+	struct aead_request *areq = arg;
+	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+	struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;
+
+	free_src_sglist(nkreq);
+	free_dst_sglist(nkreq);
+	if (err) {
+		pr_err_ratelimited("request failed status 0x%0x\n", err);
+		err = -EINVAL;
+	}
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int nitrox_rfc4106_enc(struct aead_request *areq)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+	struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+	int ret;
+
+	aead_rctx->cryptlen = areq->cryptlen;
+	aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+	aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen;
+	aead_rctx->dstlen = aead_rctx->srclen + aead->authsize;
+	aead_rctx->iv = areq->iv;
+	aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+	aead_rctx->flags = areq->base.flags;
+	aead_rctx->ctx_handle = nctx->u.ctx_handle;
+	aead_rctx->ctrl_arg = ENCRYPT;
+
+	ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+	if (ret)
+		return ret;
+
+	ret = nitrox_set_creq(aead_rctx);
+	if (ret)
+		return ret;
+
+	/* send the crypto request */
+	return nitrox_process_se_request(nctx->ndev, creq,
+					 nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_dec(struct aead_request *areq)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+	struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+	struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+	struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+	int ret;
+
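+	/* The input still carries the auth tag; the output drops it */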
+	aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
+	aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+	aead_rctx->srclen =
+		areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
+	aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
+	aead_rctx->iv = areq->iv;
+	aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+	aead_rctx->flags = areq->base.flags;
+	aead_rctx->ctx_handle = nctx->u.ctx_handle;
+	aead_rctx->ctrl_arg = DECRYPT;
+
+	ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+	if (ret)
+		return ret;
+
+	ret = nitrox_set_creq(aead_rctx);
+	if (ret)
+		return ret;
+
+	/* send the crypto request */
+	return nitrox_process_se_request(nctx->ndev, creq,
+					 nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_init(struct crypto_aead *aead)
+{
+	int ret;
+
+	ret = nitrox_gcm_common_init(aead);
+	if (ret)
+		return ret;
+
+	crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
+				sizeof(struct nitrox_rfc4106_rctx));
+
+	return 0;
+}
+
 static struct aead_alg nitrox_aeads[] = { {
 	.base = {
 		.cra_name = "gcm(aes)",
@@ -351,6 +505,25 @@ static struct aead_alg nitrox_aeads[] = { {
 	.exit = nitrox_aead_exit,
 	.ivsize = GCM_AES_IV_SIZE,
 	.maxauthsize = AES_BLOCK_SIZE,
+}, {
+	.base = {
+		.cra_name = "rfc4106(gcm(aes))",
+		.cra_driver_name = "n5_rfc4106",
+		.cra_priority = PRIO,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+	},
+	.setkey = nitrox_rfc4106_setkey,
+	.setauthsize = nitrox_rfc4106_setauthsize,
+	.encrypt = nitrox_rfc4106_enc,
+	.decrypt = nitrox_rfc4106_dec,
+	.init = nitrox_rfc4106_init,
+	.exit = nitrox_aead_exit,
+	.ivsize = GCM_RFC4106_IV_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
 } };
 
 int nitrox_register_aeads(void)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index c08d9f33a3b1..3f0df60267a9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -437,6 +437,45 @@ void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
 	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
 }
 
+static const char *get_core_option(u8 se_cores, u8 ae_cores)
+{
+	const char *option = "";
+
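+	/* Map the SE/AE core counts to the CNN55xx core-option suffix */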
+	if (ae_cores == AE_MAX_CORES) {
+		switch (se_cores) {
+		case SE_MAX_CORES:
+			option = "60";
+			break;
+		case 40:
+			option = "60s";
+			break;
+		}
+	} else if (ae_cores == (AE_MAX_CORES / 2)) {
+		option = "30";
+	} else {
+		option = "60i";
+	}
+
+	return option;
+}
+
+static const char *get_feature_option(u8 zip_cores, int core_freq)
+{
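+	/* The ZIP core count and core clock select the -Cxx feature code */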
+	if (zip_cores == 0)
+		return "";
+	else if (zip_cores < ZIP_MAX_CORES)
+		return "-C15";
+
+	if (core_freq >= 850)
+		return "-C45";
+	else if (core_freq >= 750)
+		return "-C35";
+	else if (core_freq >= 550)
+		return "-C25";
+
+	return "";
+}
+
 void nitrox_get_hwinfo(struct nitrox_device *ndev)
 {
 	union emu_fuse_map emu_fuse;
@@ -469,24 +508,14 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
 		ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
 	}
 
-	/* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/
-	if (ndev->hw.ae_cores == AE_MAX_CORES) {
-		switch (ndev->hw.se_cores) {
-		case SE_MAX_CORES:
-			i = snprintf(name, sizeof(name), "CNN5560");
-			break;
-		case 40:
-			i = snprintf(name, sizeof(name), "CNN5560s");
-			break;
-		}
-	} else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
-		i = snprintf(name, sizeof(name), "CNN5530");
-	} else {
-		i = snprintf(name, sizeof(name), "CNN5560i");
-	}
-
-	snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
-		 ndev->hw.freq, ndev->hw.revision_id);
+	/* determine the partname
+	 * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
+	 */
+	snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
+		 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
+		 ndev->hw.freq,
+		 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
+		 ndev->hw.revision_id);
 
 	/* copy partname */
 	strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index 76c0f0be7233..efdbd0fc3e3b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -212,6 +212,50 @@ struct nitrox_kcrypt_request {
 };
 
 /**
+ * struct nitrox_aead_rctx - AEAD request context
+ * @nkreq: Base request context
+ * @cryptlen: Encryption/Decryption data length
+ * @assoclen: AAD length
+ * @srclen: Input buffer length
+ * @dstlen: Output buffer length
+ * @iv: IV data
+ * @ivsize: IV data length
+ * @flags: AEAD req flags
+ * @ctx_handle: Device context handle
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
+ */
+struct nitrox_aead_rctx {
+	struct nitrox_kcrypt_request nkreq;
+	unsigned int cryptlen;
+	unsigned int assoclen;
+	unsigned int srclen;
+	unsigned int dstlen;
+	u8 *iv;
+	int ivsize;
+	u32 flags;
+	u64 ctx_handle;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	u8 ctrl_arg;
+};
+
+/**
+ * struct nitrox_rfc4106_rctx - rfc4106 cipher request context
+ * @base: AEAD request context
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @assoc: AAD
+ */
+struct nitrox_rfc4106_rctx {
+	struct nitrox_aead_rctx base;
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
+	u8 assoc[20];
+};
+
+/**
  * struct pkt_instr_hdr - Packet Instruction Header
  * @g: Gather used
  *   When [G] is set and [GSZ] != 0, the instruction is
@@ -512,7 +556,7 @@ static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
 	struct scatterlist *sg = to_sg;
 	unsigned int sglen;
 
-	for (; buflen; buflen -= sglen) {
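+	/* Guard against the source sglist ending before buflen is consumed */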
+	for (; buflen && from_sg; buflen -= sglen) {
 		sglen = from_sg->length;
 		if (sglen > buflen)
 			sglen = buflen;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index d4935d6cefdd..7e4a5e69085e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -257,12 +257,8 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
 static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
 			      const u8 *key, unsigned int keylen)
 {
-	if (keylen != DES3_EDE_KEY_SIZE) {
-		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
-	return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+	return unlikely(des3_verify_key(cipher, key)) ?:
+	       nitrox_skcipher_setkey(cipher, 0, key, keylen);
 }
 
 static int nitrox_3des_encrypt(struct skcipher_request *skreq)
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index b92b6e7e100f..4985bc812b0e 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -69,7 +69,7 @@ static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
 	zip_ops->csum	      = 1; /* Adler checksum desired */
 }
 
-int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
 {
 	struct zip_operation  *comp_ctx   = &zip_ctx->zip_comp;
 	struct zip_operation  *decomp_ctx = &zip_ctx->zip_decomp;
@@ -107,7 +107,7 @@ err_comp_input:
 	return -ENOMEM;
 }
 
-void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
 {
 	struct zip_operation  *comp_ctx   = &zip_ctx->zip_comp;
 	struct zip_operation  *dec_ctx = &zip_ctx->zip_decomp;
@@ -119,7 +119,7 @@ void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
 	zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
 }
 
-int zip_compress(const u8 *src, unsigned int slen,
+static int zip_compress(const u8 *src, unsigned int slen,
 		 u8 *dst, unsigned int *dlen,
 		 struct zip_kernel_ctx *zip_ctx)
 {
@@ -155,7 +155,7 @@ int zip_compress(const u8 *src, unsigned int slen,
 	return ret;
 }
 
-int zip_decompress(const u8 *src, unsigned int slen,
+static int zip_decompress(const u8 *src, unsigned int slen,
 		   u8 *dst, unsigned int *dlen,
 		   struct zip_kernel_ctx *zip_ctx)
 {
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c2ff551d215b..91482ffcac59 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -43,24 +43,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	struct ccp_crypto_ablkcipher_alg *alg =
 		ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
 	u32 *flags = &tfm->base.crt_flags;
+	int err;
 
-
-	/* From des_generic.c:
-	 *
-	 * RFC2451:
-	 *   If the first two or last two independent 64-bit keys are
-	 *   equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
-	 *   same as DES.  Implementers MUST reject keys that exhibit this
-	 *   property.
-	 */
-	const u32 *K = (const u32 *)key;
-
-	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-		     (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
+	err = __des3_verify_key(flags, key);
+	if (unlikely(err))
+		return err;
 
 	/* It's not clear that there is any support for a keysize of 112.
 	 * If needed, the caller should make K1 == K3
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 05850dfd7940..a2570c0c8cdc 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -37,10 +37,9 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
 		if (buf[nskip])
 			break;
 	*kplen = sz - nskip;
-	*kpbuf = kzalloc(*kplen, GFP_KERNEL);
+	*kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
 	if (!*kpbuf)
 		return -ENOMEM;
-	memcpy(*kpbuf, buf + nskip, *kplen);
 
 	return 0;
 }
@@ -214,8 +213,6 @@ static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
 static struct akcipher_alg ccp_rsa_defaults = {
 	.encrypt = ccp_rsa_encrypt,
 	.decrypt = ccp_rsa_decrypt,
-	.sign = ccp_rsa_decrypt,
-	.verify = ccp_rsa_encrypt,
 	.set_pub_key = ccp_rsa_setpubkey,
 	.set_priv_key = ccp_rsa_setprivkey,
 	.max_size = ccp_rsa_maxsize,
@@ -248,7 +245,8 @@ static struct ccp_rsa_def rsa_algs[] = {
 	}
 };
 
-int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+static int ccp_register_rsa_alg(struct list_head *head,
+			        const struct ccp_rsa_def *def)
 {
 	struct ccp_crypto_akcipher_alg *ccp_alg;
 	struct akcipher_alg *alg;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 10a61cd54fce..3e10573f589e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,8 +293,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (key_len > block_size) {
 		/* Must hash the input key */
 		sdesc->tfm = shash;
-		sdesc->flags = crypto_ahash_get_flags(tfm) &
-			CRYPTO_TFM_REQ_MAY_SLEEP;
 
 		ret = crypto_shash_digest(sdesc, key, key_len,
 					  ctx->u.sha.key);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fadf859a14b8..656838433f2f 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -583,6 +583,69 @@ e_free:
 	return ret;
 }
 
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+	struct sev_user_data_get_id2 input;
+	struct sev_data_get_id *data;
+	void *id_blob = NULL;
+	int ret;
+
+	/* SEV GET_ID is available from SEV API v0.16 and up */
+	if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+		return -ENOTSUPP;
+
+	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+		return -EFAULT;
+
+	/* Check if we have write access to the userspace buffer */
+	if (input.address &&
+	    input.length &&
+	    !access_ok(input.address, input.length))
+		return -EFAULT;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
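+	/*
+	 * A zero-length request is valid; firmware then only reports the
+	 * required ID length back through input.length.
+	 */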
+	if (input.address && input.length) {
+		id_blob = kmalloc(input.length, GFP_KERNEL);
+		if (!id_blob) {
+			kfree(data);
+			return -ENOMEM;
+		}
+
+		data->address = __psp_pa(id_blob);
+		data->len = input.length;
+	}
+
+	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+	/*
+	 * Firmware will return the length of the ID value (either the minimum
+	 * required length or the actual length written); return it to the user.
+	 */
+	input.length = data->len;
+
+	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	if (id_blob) {
+		if (copy_to_user((void __user *)input.address,
+				 id_blob, data->len)) {
+			ret = -EFAULT;
+			goto e_free;
+		}
+	}
+
+e_free:
+	kfree(id_blob);
+	kfree(data);
+
+	return ret;
+}
+
 static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
 {
 	struct sev_data_get_id *data;
@@ -761,8 +824,12 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 		ret = sev_ioctl_do_pdh_export(&input);
 		break;
 	case SEV_GET_ID:
+		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
 		ret = sev_ioctl_do_get_id(&input);
 		break;
+	case SEV_GET_ID2:
+		ret = sev_ioctl_do_get_id2(&input);
+		break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -997,7 +1064,7 @@ void psp_pci_init(void)
 	rc = sev_platform_init(&error);
 	if (rc) {
 		dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
-		goto err;
+		return;
 	}
 
 	dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc27970f95f..145e50bdbf16 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
 ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..7aa4cbe19a86 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -23,12 +23,8 @@
 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
 
-#define AES_CCM_RFC4309_NONCE_SIZE 3
 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
 
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
 struct cc_aead_handle {
 	cc_sram_addr_t sram_workspace_addr;
 	struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
+	/* BACKLOG notification */
+	if (err == -EINPROGRESS)
+		goto done;
+
 	cc_unmap_aead_request(dev, areq);
 
 	/* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 /* This function prepares the user key so it can be passed to the HMAC
  * processing (copied to an internal buffer, or hashed if the key is longer
  * than the block size
  */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 				 unsigned int keylen)
 {
 	dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	unsigned int hashmode;
 	unsigned int idx = 0;
 	int rc = 0;
+	u8 *key = NULL;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	dma_addr_t padded_authkey_dma_addr =
 		ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	}
 
 	if (keylen != 0) {
+
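+		/* Copy the key to a DMA-safe buffer before mapping it */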
+		key = kmemdup(authkey, keylen, GFP_KERNEL);
+		if (!key)
+			return -ENOMEM;
+
 		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
 					      DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 				key, keylen);
+			kzfree(key);
 			return -ENOMEM;
 		}
 		if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	if (key_dma_addr)
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 
+	kzfree(key);
+
 	return rc;
 }
 
@@ -650,6 +659,39 @@ setkey_error:
 	return rc;
 }
 
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+			       unsigned int keylen)
+{
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
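+	/* Reject 3DES keys that collapse to single DES (RFC 2451) */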
+	flags = crypto_aead_get_flags(aead);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(aead, flags);
+		goto out;
+	}
+
+	err = cc_aead_setkey(aead, key, keylen);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
 static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
 				 unsigned int keylen)
 {
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
 		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 		hw_desc_init(&desc[idx]);
 		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
-			     areq->assoclen, NS_BIT);
+			     areq_ctx->assoclen, NS_BIT);
 		set_flow_mode(&desc[idx], flow_mode);
 		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 		    areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
 				struct cc_hw_desc desc[],
 				unsigned int *seq_size)
 {
+	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int idx = *seq_size;
+
 	/* Hash associated data */
-	if (req->assoclen > 0)
+	if (areq_ctx->assoclen > 0)
 		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
 
 	/* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
-	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
-	    !req_ctx->is_single_pass) {
+	    !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
 		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
 			(unsigned int)ctx->drvdata->mlli_sram_addr,
 			req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
-	unsigned int assoclen = req->assoclen;
+	unsigned int assoclen = areq_ctx->assoclen;
 	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
 			(req->cryptlen - ctx->authsize) : req->cryptlen;
 
@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
 	idx++;
 
 	/* process assoc data */
-	if (req->assoclen > 0) {
+	if (req_ctx->assoclen > 0) {
 		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
 	} else {
 		hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
 	 * NIST Special Publication 800-38C
 	 */
 	*b0 |= (8 * ((m - 2) / 2));
-	if (req->assoclen > 0)
+	if (req_ctx->assoclen > 0)
 		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
 
 	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
 	 /* END of "taken from crypto/ccm.c" */
 
 	/* l(a) - size of associated data. */
-	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
 
 	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
 	req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
 	       CCM_BLOCK_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
-	req->assoclen -= CCM_BLOCK_IV_SIZE;
+	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
 }
 
 static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
 	// for gcm and rfc4106.
 	cc_set_ghash_desc(req, desc, seq_size);
 	/* process(ghash) assoc data */
-	if (req->assoclen > 0)
+	if (req_ctx->assoclen > 0)
 		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
 	cc_set_gctr_desc(req, desc, seq_size);
 	/* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
 				(req->cryptlen - ctx->authsize);
 	__be32 counter = cpu_to_be32(2);
 
-	dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
-		__func__, cryptlen, req->assoclen, ctx->authsize);
+	dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+		__func__, cryptlen, req_ctx->assoclen, ctx->authsize);
 
 	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
 
@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
 	if (!req_ctx->plaintext_authenticate_only) {
 		__be64 temp64;
 
-		temp64 = cpu_to_be64(req->assoclen * 8);
+		temp64 = cpu_to_be64(req_ctx->assoclen * 8);
 		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 		temp64 = cpu_to_be64(cryptlen * 8);
 		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
 		 */
 		__be64 temp64;
 
-		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
-				      cryptlen) * 8);
+		temp64 = cpu_to_be64((req_ctx->assoclen +
+				      GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
 		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 		temp64 = 0;
 		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
 	       GCM_BLOCK_RFC4_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
-	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+	areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 }
 
 static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
 	/* Check data length according to mode */
 	if (validate_data_size(ctx, direct, req)) {
 		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
-			req->cryptlen, req->assoclen);
+			req->cryptlen, areq_ctx->assoclen);
 		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
 		return -EINVAL;
 	}
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 
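+	/* Clear stale fields before reusing the request context */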
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = false;
 
@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
 		goto out;
 	}
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = true;
 
@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 	areq_ctx->is_gcm4543 = false;
 
@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 		goto out;
 	}
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 
 	areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 		goto out;
 	}
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 
 	areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	//plaintext is not encrypted with rfc4543
 	areq_ctx->plaintext_authenticate_only = true;
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 
 	cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 		goto out;
 	}
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 
 	areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc;
 
+	memset(areq_ctx, 0, sizeof(*areq_ctx));
+
 	//plaintext is not decrypted with rfc4543
 	areq_ctx->plaintext_authenticate_only = true;
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
+	areq_ctx->assoclen = req->assoclen;
 	areq_ctx->backup_giv = NULL;
 
 	cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
 		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.template_aead = {
-			.setkey = cc_aead_setkey,
+			.setkey = cc_des3_aead_setkey,
 			.setauthsize = cc_aead_setauthsize,
 			.encrypt = cc_aead_encrypt,
 			.decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
 		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
 		.blocksize = DES3_EDE_BLOCK_SIZE,
 		.template_aead = {
-			.setkey = cc_aead_setkey,
+			.setkey = cc_des3_aead_setkey,
 			.setauthsize = cc_aead_setauthsize,
 			.encrypt = cc_aead_encrypt,
 			.decrypt = cc_aead_decrypt,
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..e51724b96c56 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_aead.h
  * ARM CryptoCell AEAD Crypto API
@@ -67,6 +67,7 @@ struct aead_req_ctx {
 	u8 backup_mac[MAX_MAC_SIZE];
 	u8 *backup_iv; /*store iv for generated IV flow*/
 	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+	u32 assoclen; /* internal assoclen */
 	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
 	/* buffer for internal ccm configurations */
 	dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..c81ad33f9115 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <crypto/internal/aead.h>
 #include <crypto/authenc.h>
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 skip = req->assoclen + req->cryptlen;
+	u32 skip = areq_ctx->assoclen + req->cryptlen;
 
 	if (areq_ctx->is_gcm4543)
 		skip += crypto_aead_ivsize(tfm);
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
-				     unsigned int nbytes, u32 *lbytes,
-				     bool *is_chained)
+				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;
 
 	while (nbytes && sg_list) {
-		if (sg_list->length) {
-			nents++;
-			/* get the number of bytes in the last entry */
-			*lbytes = nbytes;
-			nbytes -= (sg_list->length > nbytes) ?
-					nbytes : sg_list->length;
-			sg_list = sg_next(sg_list);
-		} else {
-			sg_list = (struct scatterlist *)sg_page(sg_list);
-			if (is_chained)
-				*is_chained = true;
-		}
+		nents++;
+		/* get the number of bytes in the last entry */
+		*lbytes = nbytes;
+		nbytes -= (sg_list->length > nbytes) ?
+				nbytes : sg_list->length;
+		sg_list = sg_next(sg_list);
 	}
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
@@ -140,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 {
-	u32 nents, lbytes;
+	u32 nents;
 
-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+	nents = sg_nents_for_len(sg, end);
 	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 	sgl_data->num_of_buffers++;
 }
 
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-			 enum dma_data_direction direction)
-{
-	u32 i, j;
-	struct scatterlist *l_sg = sg;
-
-	for (i = 0; i < nents; i++) {
-		if (!l_sg)
-			break;
-		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_page() sg buffer failed\n");
-			goto err;
-		}
-		l_sg = sg_next(l_sg);
-	}
-	return nents;
-
-err:
-	/* Restore mapped parts */
-	for (j = 0; j < i; j++) {
-		if (!sg)
-			break;
-		dma_unmap_sg(dev, sg, 1, direction);
-		sg = sg_next(sg);
-	}
-	return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	bool is_chained = false;
-
 	if (sg_is_last(sg)) {
 		/* One entry only case -set to DLLI */
 		if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		*nents = 1;
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-					  &is_chained);
+		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			dev_err(dev, "Too many fragments. current %d max %d\n",
 				*nents, max_sg_nents);
 			return -ENOMEM;
 		}
-		if (!is_chained) {
-			/* In case of mmu the number of mapped nents might
-			 * be changed from the original sgl nents
-			 */
-			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (*mapped_nents == 0) {
-				*nents = 0;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
-		} else {
-			/*In this case the driver maps entry by entry so it
-			 * must have the same nents before and after map
-			 */
-			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-						      direction);
-			if (*mapped_nents != *nents) {
-				*nents = *mapped_nents;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
+		/* In case of mmu the number of mapped nents might
+		 * be changed from the original sgl nents
+		 */
+		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+		if (*mapped_nents == 0) {
+			*nents = 0;
+			dev_err(dev, "dma_map_sg() sg buffer failed\n");
+			return -ENOMEM;
 		}
 	}
 
@@ -457,7 +406,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
 		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
 		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-				 ivsize, DMA_TO_DEVICE);
+				 ivsize, DMA_BIDIRECTIONAL);
 	}
 	/* Release pool */
 	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +448,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		dump_byte_array("iv", (u8 *)info, ivsize);
 		req_ctx->gen_ctx.iv_dma_addr =
 			dma_map_single(dev, (void *)info,
-				       ivsize, DMA_TO_DEVICE);
+				       ivsize, DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 				ivsize, info);
@@ -568,11 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-	u32 dummy;
-	bool chained;
-	u32 size_to_unmap = 0;
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
+		kzfree(areq_ctx->gen_ctx.iv);
 	}
 
 	/* Release pool */
@@ -628,23 +574,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 
 	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
-		req->assoclen, req->cryptlen);
-	size_to_unmap = req->assoclen + req->cryptlen;
-	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-		size_to_unmap += areq_ctx->req_authsize;
-	if (areq_ctx->is_gcm4543)
-		size_to_unmap += crypto_aead_ivsize(tfm);
+		areq_ctx->assoclen, req->cryptlen);
 
-	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
-				      &dummy, &chained),
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst,
-			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy, &chained),
+		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
 			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
@@ -658,55 +594,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	}
 }
 
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
-				 unsigned int sgl_nents, unsigned int authsize,
-				 u32 last_entry_data_size,
-				 bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+			   u32 last_entry_data_size)
 {
-	unsigned int icv_max_size = 0;
-	unsigned int icv_required_size = authsize > last_entry_data_size ?
-					(authsize - last_entry_data_size) :
-					authsize;
-	unsigned int nents;
-	unsigned int i;
-
-	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
-		*is_icv_fragmented = false;
-		return 0;
-	}
-
-	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-		if (!sgl)
-			break;
-		sgl = sg_next(sgl);
-	}
-
-	if (sgl)
-		icv_max_size = sgl->length;
-
-	if (last_entry_data_size > authsize) {
-		/* ICV attached to data in last entry (not fragmented!) */
-		nents = 0;
-		*is_icv_fragmented = false;
-	} else if (last_entry_data_size == authsize) {
-		/* ICV placed in whole last entry (not fragmented!) */
-		nents = 1;
-		*is_icv_fragmented = false;
-	} else if (icv_max_size > icv_required_size) {
-		nents = 1;
-		*is_icv_fragmented = true;
-	} else if (icv_max_size == icv_required_size) {
-		nents = 2;
-		*is_icv_fragmented = true;
-	} else {
-		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
-			MAX_ICV_NENTS_SUPPORTED);
-		nents = -1; /*unsupported*/
-	}
-	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
-		(*is_icv_fragmented ? "true" : "false"), nents);
-
-	return nents;
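+	/* The ICV is fragmented unless it fits wholly in the last sg entry */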
+	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 }
 
 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -717,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct device *dev = drvdata_to_dev(drvdata);
+	gfp_t flags = cc_gfp_flags(&req->base);
 	int rc = 0;
 
 	if (!req->iv) {
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
+		areq_ctx->gen_ctx.iv = NULL;
 		goto chain_iv_exit;
 	}
 
-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-						       hw_iv_size,
-						       DMA_BIDIRECTIONAL);
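+	/* Duplicate the IV into DMA-safe memory; req->iv may be on the stack */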
+	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+	if (!areq_ctx->gen_ctx.iv)
+		return -ENOMEM;
+
+	areq_ctx->gen_ctx.iv_dma_addr =
+		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
+		kzfree(areq_ctx->gen_ctx.iv);
+		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
 	}
@@ -760,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
-	u32 mapped_nents = 0;
-	struct scatterlist *current_sg = req->src;
+	int mapped_nents = 0;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned int sg_index = 0;
-	u32 size_of_assoc = req->assoclen;
+	unsigned int size_of_assoc = areq_ctx->assoclen;
 	struct device *dev = drvdata_to_dev(drvdata);
 
 	if (areq_ctx->is_gcm4543)
@@ -775,7 +672,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 		goto chain_assoc_exit;
 	}
 
-	if (req->assoclen == 0) {
+	if (areq_ctx->assoclen == 0) {
 		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 		areq_ctx->assoc.nents = 0;
 		areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 		goto chain_assoc_exit;
 	}
 
-	//iterate over the sgl to see how many entries are for associated data
-	//it is assumed that if we reach here , the sgl is already mapped
-	sg_index = current_sg->length;
-	//the first entry in the scatter list contains all the associated data
-	if (sg_index > size_of_assoc) {
-		mapped_nents++;
-	} else {
-		while (sg_index <= size_of_assoc) {
-			current_sg = sg_next(current_sg);
-			/* if have reached the end of the sgl, then this is
-			 * unexpected
-			 */
-			if (!current_sg) {
-				dev_err(dev, "reached end of sg list. unexpected\n");
-				return -EINVAL;
-			}
-			sg_index += current_sg->length;
-			mapped_nents++;
-		}
-	}
+	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+	if (mapped_nents < 0)
+		return mapped_nents;
+
 	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
 			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +716,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 			cc_dma_buf_type(areq_ctx->assoc_buff_type),
 			areq_ctx->assoc.nents);
 		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
-				req->assoclen, 0, is_last,
+				areq_ctx->assoclen, 0, is_last,
 				&areq_ctx->assoc.mlli_nents);
 		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 	}
@@ -850,39 +731,32 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
+	struct scatterlist *sg;
+	ssize_t offset;
 
 	areq_ctx->is_icv_fragmented = false;
-	if (req->src == req->dst) {
-		/*INPLACE*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-		/*NON-INPLACE and DECRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-			(*src_last_bytes - authsize);
+
+	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		sg = areq_ctx->src_sgl;
+		offset = *src_last_bytes - authsize;
 	} else {
-		/*NON-INPLACE and ENCRYPT*/
-		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
-		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
-			(*dst_last_bytes - authsize);
+		sg = areq_ctx->dst_sgl;
+		offset = *dst_last_bytes - authsize;
 	}
+
+	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 }
 
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
-				     struct aead_request *req,
-				     struct buffer_array *sg_data,
-				     u32 *src_last_bytes, u32 *dst_last_bytes,
-				     bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+				      struct aead_request *req,
+				      struct buffer_array *sg_data,
+				      u32 *src_last_bytes, u32 *dst_last_bytes,
+				      bool is_last_table)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 	unsigned int authsize = areq_ctx->req_authsize;
-	int rc = 0, icv_nents;
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct scatterlist *sg;
 
@@ -893,14 +767,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 
 		if (areq_ctx->is_icv_fragmented) {
 			/* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +811,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 				areq_ctx->dst_offset, is_last_table,
 				&areq_ctx->dst.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-						  areq_ctx->src.nents,
-						  authsize, *src_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
-
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->src.nents, authsize,
+				       *src_last_bytes);
 		/* Backup happens only when ICV is fragmented, ICV
 		 * verification is made by CPU compare in order to simplify
 		 * MAC verification upon request completion
 		 */
@@ -979,14 +843,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 				areq_ctx->src_offset, is_last_table,
 				&areq_ctx->src.mlli_nents);
 
-		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
-						  areq_ctx->dst.nents,
-						  authsize, *dst_last_bytes,
-						  &areq_ctx->is_icv_fragmented);
-		if (icv_nents < 0) {
-			rc = -ENOTSUPP;
-			goto prepare_data_mlli_exit;
-		}
+		areq_ctx->is_icv_fragmented =
+			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+				       *dst_last_bytes);
 
 		if (!areq_ctx->is_icv_fragmented) {
 			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +859,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 		}
 	}
-
-prepare_data_mlli_exit:
-	return rc;
 }
 
 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +875,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 offset = 0;
 	/* non-inplace mode */
-	unsigned int size_for_map = req->assoclen + req->cryptlen;
+	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	u32 sg_index = 0;
-	bool chained = false;
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	u32 size_to_skip = req->assoclen;
+	u32 size_to_skip = areq_ctx->assoclen;
+	struct scatterlist *sgl;
 
 	if (is_gcm4543)
 		size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +899,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-					    &src_last_bytes, &chained);
+					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
+		src_mapped_nents--;
 		offset -= areq_ctx->src_sgl->length;
-		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->src_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
+		sgl = sg_next(areq_ctx->src_sgl);
+		if (!sgl)
+			break;
+		areq_ctx->src_sgl = sgl;
 		sg_index += areq_ctx->src_sgl->length;
-		src_mapped_nents--;
 	}
 	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +922,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	areq_ctx->src_offset = offset;
 
 	if (req->src != req->dst) {
-		size_for_map = req->assoclen + req->cryptlen;
+		size_for_map = areq_ctx->assoclen + req->cryptlen;
 		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 				authsize : 0;
 		if (is_gcm4543)
@@ -1083,21 +937,19 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	}
 
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-					    &dst_last_bytes, &chained);
+					    &dst_last_bytes);
 	sg_index = areq_ctx->dst_sgl->length;
 	offset = size_to_skip;
 
 	//check where the data starts
 	while (sg_index <= size_to_skip) {
+		dst_mapped_nents--;
 		offset -= areq_ctx->dst_sgl->length;
-		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
-		//if have reached the end of the sgl, then this is unexpected
-		if (!areq_ctx->dst_sgl) {
-			dev_err(dev, "reached end of sg list. unexpected\n");
-			return -EINVAL;
-		}
+		sgl = sg_next(areq_ctx->dst_sgl);
+		if (!sgl)
+			break;
+		areq_ctx->dst_sgl = sgl;
 		sg_index += areq_ctx->dst_sgl->length;
-		dst_mapped_nents--;
 	}
 	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 		dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1110,9 +962,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	    dst_mapped_nents  > 1 ||
 	    do_chain) {
 		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
-		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
-					       &src_last_bytes,
-					       &dst_last_bytes, is_last_table);
+		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+					  &src_last_bytes, &dst_last_bytes,
+					  is_last_table);
 	} else {
 		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 		cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1234,7 +1086,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 
 		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-					  &sg_data, req->assoclen);
+					  &sg_data, areq_ctx->assoclen);
 		if (rc)
 			goto aead_map_failure;
 	}
@@ -1285,7 +1137,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
 	}
 
-	size_to_map = req->cryptlen + req->assoclen;
+	size_to_map = req->cryptlen + areq_ctx->assoclen;
 	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 		size_to_map += authsize;
 
@@ -1483,8 +1335,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	if (total_in_len < block_size) {
 		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
 			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-		areq_ctx->in_nents =
-			cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
 		sg_copy_to_buffer(src, areq_ctx->in_nents,
 				  &curr_buff[*curr_buff_cnt], nbytes);
 		*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4db5247..a726016bdbc1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_buffer_mgr.h
  * Buffer Manager
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d9c17078517b..5b58226ea24d 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
 	enum cc_hw_crypto_key key2_slot;
 };
 
+struct cc_cpp_key_info {
+	u8 slot;
+	enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+	CC_UNPROTECTED_KEY,		/* User key */
+	CC_HW_PROTECTED_KEY,		/* HW (FDE) key */
+	CC_POLICY_PROTECTED_KEY,	/* CPP key */
+	CC_INVALID_PROTECTED_KEY	/* Invalid key */
+};
+
 struct cc_cipher_ctx {
 	struct cc_drvdata *drvdata;
 	int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
 	int cipher_mode;
 	int flow_mode;
 	unsigned int flags;
-	bool hw_key;
+	enum cc_key_type key_type;
 	struct cc_user_key_info user;
-	struct cc_hw_key_info hw;
+	union {
+		struct cc_hw_key_info hw;
+		struct cc_cpp_key_info cpp;
+	};
 	struct crypto_shash *shash_tfm;
 };
 
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
 
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
 {
 	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 
-	return ctx_p->hw_key;
+	return ctx_p->key_type;
 }
 
 static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
 	u8	key3[DES_KEY_SIZE];
 };
 
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
 {
 	switch (slot_num) {
 	case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
 	return END_OF_KEYS;
 }
 
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+	return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+	if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+		return CC_HW_PROTECTED_KEY;
+	else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+		 slot_num <= CC_LAST_CPP_KEY_SLOT)
+		return CC_POLICY_PROTECTED_KEY;
+	else
+		return CC_INVALID_PROTECTED_KEY;
+}
+
 static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
 			     unsigned int keylen)
 {
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
 
 	/* STAT_PHASE_0: Init and sanity checks */
 
-	/* This check the size of the hardware key token */
+	/* This checks the size of the protected key token */
 	if (keylen != sizeof(hki)) {
-		dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+		dev_err(dev, "Unsupported protected key size %d.\n", keylen);
 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
-	if (ctx_p->flow_mode != S_DIN_to_AES) {
-		dev_err(dev, "HW key not supported for non-AES flows\n");
-		return -EINVAL;
-	}
-
 	memcpy(&hki, key, keylen);
 
 	/* The real key len for crypto op is the size of the HW key
@@ -286,31 +312,70 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
 		return -EINVAL;
 	}
 
-	ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
-	if (ctx_p->hw.key1_slot == END_OF_KEYS) {
-		dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
-		return -EINVAL;
-	}
+	ctx_p->keylen = keylen;
 
-	if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
-	    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
-	    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
-		if (hki.hw_key1 == hki.hw_key2) {
-			dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
-				hki.hw_key1, hki.hw_key2);
+	switch (cc_slot_to_key_type(hki.hw_key1)) {
+	case CC_HW_PROTECTED_KEY:
+		if (ctx_p->flow_mode == S_DIN_to_SM4) {
+			dev_err(dev, "Only AES HW protected keys are supported\n");
 			return -EINVAL;
 		}
-		ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
-		if (ctx_p->hw.key2_slot == END_OF_KEYS) {
-			dev_err(dev, "Unsupported hw key2 number (%d)\n",
-				hki.hw_key2);
+
+		ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+			dev_err(dev, "Unsupported hw key1 number (%d)\n",
+				hki.hw_key1);
 			return -EINVAL;
 		}
-	}
 
-	ctx_p->keylen = keylen;
-	ctx_p->hw_key = true;
-	dev_dbg(dev, "cc_is_hw_key ret 0");
+		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+			if (hki.hw_key1 == hki.hw_key2) {
+				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+					hki.hw_key1, hki.hw_key2);
+				return -EINVAL;
+			}
+
+			ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+				dev_err(dev, "Unsupported hw key2 number (%d)\n",
+					hki.hw_key2);
+				return -EINVAL;
+			}
+		}
+
+		ctx_p->key_type = CC_HW_PROTECTED_KEY;
+		dev_dbg(dev, "HW protected key  %d/%d set\n.",
+			ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+		break;
+
+	case CC_POLICY_PROTECTED_KEY:
+		if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+			dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+			return -EINVAL;
+		}
+
+		if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+		    ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+			dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+			return -EINVAL;
+		}
+
+		ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+		if (ctx_p->flow_mode == S_DIN_to_AES)
+			ctx_p->cpp.alg = CC_CPP_AES;
+		else /* Must be SM4, due to the sethkey registration */
+			ctx_p->cpp.alg = CC_CPP_SM4;
+		ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+		dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+			ctx_p->cpp.alg, ctx_p->cpp.slot);
+		break;
+
+	default:
+		dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -321,7 +386,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
 	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	u32 tmp[DES3_EDE_EXPKEY_WORDS];
 	struct cc_crypto_alg *cc_alg =
 			container_of(tfm->__crt_alg, struct cc_crypto_alg,
 				     skcipher_alg.base);
@@ -339,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 		return -EINVAL;
 	}
 
-	ctx_p->hw_key = false;
+	ctx_p->key_type = CC_UNPROTECTED_KEY;
 
 	/*
 	 * Verify DES weak keys
@@ -347,6 +411,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 	 * HW does the expansion on its own.
 	 */
 	if (ctx_p->flow_mode == S_DIN_to_DES) {
+		u32 tmp[DES3_EDE_EXPKEY_WORDS];
 		if (keylen == DES3_EDE_KEY_SIZE &&
 		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
 				      DES3_EDE_KEY_SIZE)) {
@@ -399,7 +464,77 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 	return 0;
 }
 
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
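+/*
+ * Translate an input (DIN-to-engine) flow mode into the matching
+ * engine-to-DOUT mode used for reading state back; unknown modes are
+ * passed through unchanged.
+ */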
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		return S_AES_to_DOUT;
+	case S_DIN_to_DES:
+		return S_DES_to_DOUT;
+	case S_DIN_to_SM4:
+		return S_SM4_to_DOUT;
+	default:
+		return ctx_p->flow_mode;
+	}
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+				 struct cipher_req_ctx *req_ctx,
+				 unsigned int ivsize, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = cc_out_setup_mode(ctx_p);
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
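+	/* Policy protected (CPP) flows get no IV read-back descriptor */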
+	if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+		return;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_ECB:
+		break;
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		/* Read next IV */
+		hw_desc_init(&desc[*seq_size]);
+		set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
+			set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+		} else {
+			set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+		}
+		set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+		(*seq_size)++;
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
+		/*  IV */
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+			     NS_BIT, 1);
+		set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
 				 struct cipher_req_ctx *req_ctx,
 				 unsigned int ivsize, unsigned int nbytes,
 				 struct cc_hw_desc desc[],
@@ -423,11 +558,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
 		du_size = cc_alg->data_unit;
 
 	switch (cipher_mode) {
+	case DRV_CIPHER_ECB:
+		break;
 	case DRV_CIPHER_CBC:
 	case DRV_CIPHER_CBC_CTS:
 	case DRV_CIPHER_CTR:
 	case DRV_CIPHER_OFB:
-		/* Load cipher state */
+		/* Load IV */
 		hw_desc_init(&desc[*seq_size]);
 		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
 			     NS_BIT);
@@ -441,57 +578,15 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
 			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
 		}
 		(*seq_size)++;
-		/*FALLTHROUGH*/
-	case DRV_CIPHER_ECB:
-		/* Load key */
-		hw_desc_init(&desc[*seq_size]);
-		set_cipher_mode(&desc[*seq_size], cipher_mode);
-		set_cipher_config0(&desc[*seq_size], direction);
-		if (flow_mode == S_DIN_to_AES) {
-			if (cc_is_hw_key(tfm)) {
-				set_hw_crypto_key(&desc[*seq_size],
-						  ctx_p->hw.key1_slot);
-			} else {
-				set_din_type(&desc[*seq_size], DMA_DLLI,
-					     key_dma_addr, ((key_len == 24) ?
-							    AES_MAX_KEY_SIZE :
-							    key_len), NS_BIT);
-			}
-			set_key_size_aes(&desc[*seq_size], key_len);
-		} else {
-			/*des*/
-			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
-				     key_len, NS_BIT);
-			set_key_size_des(&desc[*seq_size], key_len);
-		}
-		set_flow_mode(&desc[*seq_size], flow_mode);
-		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
-		(*seq_size)++;
 		break;
 	case DRV_CIPHER_XTS:
 	case DRV_CIPHER_ESSIV:
 	case DRV_CIPHER_BITLOCKER:
-		/* Load AES key */
-		hw_desc_init(&desc[*seq_size]);
-		set_cipher_mode(&desc[*seq_size], cipher_mode);
-		set_cipher_config0(&desc[*seq_size], direction);
-		if (cc_is_hw_key(tfm)) {
-			set_hw_crypto_key(&desc[*seq_size],
-					  ctx_p->hw.key1_slot);
-		} else {
-			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
-				     (key_len / 2), NS_BIT);
-		}
-		set_key_size_aes(&desc[*seq_size], (key_len / 2));
-		set_flow_mode(&desc[*seq_size], flow_mode);
-		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
-		(*seq_size)++;
-
 		/* load XEX key */
 		hw_desc_init(&desc[*seq_size]);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
 		set_cipher_config0(&desc[*seq_size], direction);
-		if (cc_is_hw_key(tfm)) {
+		if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
 			set_hw_crypto_key(&desc[*seq_size],
 					  ctx_p->hw.key2_slot);
 		} else {
@@ -505,7 +600,7 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
 		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
 		(*seq_size)++;
 
-		/* Set state */
+		/* Load IV */
 		hw_desc_init(&desc[*seq_size]);
 		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
 		set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -521,48 +616,113 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
 	}
 }
 
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
-				 struct cipher_req_ctx *req_ctx,
-				 struct scatterlist *dst,
-				 struct scatterlist *src, unsigned int nbytes,
-				 void *areq, struct cc_hw_desc desc[],
-				 unsigned int *seq_size)
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
 {
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	unsigned int flow_mode = ctx_p->flow_mode;
-
 	switch (ctx_p->flow_mode) {
 	case S_DIN_to_AES:
-		flow_mode = DIN_AES_DOUT;
-		break;
+		return DIN_AES_DOUT;
 	case S_DIN_to_DES:
-		flow_mode = DIN_DES_DOUT;
-		break;
+		return DIN_DES_DOUT;
 	case S_DIN_to_SM4:
-		flow_mode = DIN_SM4_DOUT;
-		break;
+		return DIN_SM4_DOUT;
 	default:
-		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
-		return;
+		return ctx_p->flow_mode;
 	}
-	/* Process */
-	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
-		dev_dbg(dev, " data params addr %pad length 0x%X\n",
-			&sg_dma_address(src), nbytes);
-		dev_dbg(dev, " data params addr %pad length 0x%X\n",
-			&sg_dma_address(dst), nbytes);
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+			      struct cipher_req_ctx *req_ctx,
+			      unsigned int nbytes, struct cc_hw_desc desc[],
+			      unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = ctx_p->flow_mode;
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+	unsigned int key_len = ctx_p->keylen;
+	unsigned int din_size;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+	case DRV_CIPHER_ECB:
+		/* Load key */
 		hw_desc_init(&desc[*seq_size]);
-		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
-			     nbytes, NS_BIT);
-		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
-			      nbytes, NS_BIT, (!areq ? 0 : 1));
-		if (areq)
-			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
 
+		if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+			/* We use the AES key size coding for all CPP algs */
+			set_key_size_aes(&desc[*seq_size], key_len);
+			set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+			flow_mode = cc_out_flow_mode(ctx_p);
+		} else {
+			if (flow_mode == S_DIN_to_AES) {
+				if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+					set_hw_crypto_key(&desc[*seq_size],
+							  ctx_p->hw.key1_slot);
+				} else {
+					/* CC_UNPROTECTED_KEY:
+					 * invalid keys are filtered out
+					 * in setkey()
+					 */
+					din_size = (key_len == 24) ?
+						AES_MAX_KEY_SIZE : key_len;
+
+					set_din_type(&desc[*seq_size], DMA_DLLI,
+						     key_dma_addr, din_size,
+						     NS_BIT);
+				}
+				set_key_size_aes(&desc[*seq_size], key_len);
+			} else {
+				/*des*/
+				set_din_type(&desc[*seq_size], DMA_DLLI,
+					     key_dma_addr, key_len, NS_BIT);
+				set_key_size_des(&desc[*seq_size], key_len);
+			}
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		}
 		set_flow_mode(&desc[*seq_size], flow_mode);
 		(*seq_size)++;
-	} else {
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
+		/* Load AES key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key1_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     (key_len / 2), NS_BIT);
+		}
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+			       struct cipher_req_ctx *req_ctx,
+			       struct scatterlist *dst, struct scatterlist *src,
+			       unsigned int nbytes, void *areq,
+			       struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 		/* bypass */
 		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
 			&req_ctx->mlli_params.mlli_dma_addr,
@@ -577,7 +737,38 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
 			      req_ctx->mlli_params.mlli_len);
 		set_flow_mode(&desc[*seq_size], BYPASS);
 		(*seq_size)++;
+	}
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+			       struct cipher_req_ctx *req_ctx,
+			       struct scatterlist *dst, struct scatterlist *src,
+			       unsigned int nbytes, struct cc_hw_desc desc[],
+			       unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int flow_mode = cc_out_flow_mode(ctx_p);
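+	/* CPP and ECB flows are not followed by a read-IV descriptor, so
+	 * their data descriptor is the last one and must signal completion.
+	 */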
+	bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+			  ctx_p->cipher_mode == DRV_CIPHER_ECB);
 
+	/* Process */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(src), nbytes);
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(dst), nbytes);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+			     nbytes, NS_BIT);
+		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+			      nbytes, NS_BIT, (!last_desc ? 0 : 1));
+		if (last_desc)
+			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	} else {
 		hw_desc_init(&desc[*seq_size]);
 		set_din_type(&desc[*seq_size], DMA_MLLI,
 			     ctx_p->drvdata->mlli_sram_addr,
@@ -589,7 +780,7 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
 			set_dout_mlli(&desc[*seq_size],
 				      ctx_p->drvdata->mlli_sram_addr,
 				      req_ctx->in_mlli_nents, NS_BIT,
-				      (!areq ? 0 : 1));
+				      (!last_desc ? 0 : 1));
 		} else {
 			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
 				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -600,9 +791,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
 				       (LLI_ENTRY_BYTE_SIZE *
 					req_ctx->in_mlli_nents)),
 				      req_ctx->out_mlli_nents, NS_BIT,
-				      (!areq ? 0 : 1));
+				      (!last_desc ? 0 : 1));
 		}
-		if (areq)
+		if (last_desc)
 			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
 
 		set_flow_mode(&desc[*seq_size], flow_mode);
@@ -610,38 +801,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
 	}
 }
 
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
-	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
-	    IS_ALIGNED((unsigned long)ctr, 8)) {
-
-		__be64 *high_be = (__be64 *)ctr;
-		__be64 *low_be = high_be + 1;
-		u64 orig_low = __be64_to_cpu(*low_be);
-		u64 new_low = orig_low + (u64)increment;
-
-		*low_be = __cpu_to_be64(new_low);
-
-		if (new_low < orig_low)
-			*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
-	} else {
-		u8 *pos = (ctr + AES_BLOCK_SIZE);
-		u8 val;
-		unsigned int size;
-
-		for (; increment; increment--)
-			for (size = AES_BLOCK_SIZE; size; size--) {
-				val = *--pos + 1;
-				*pos = val;
-				if (val)
-					break;
-			}
-	}
-}
-
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 {
 	struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -649,44 +808,15 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 	struct scatterlist *src = req->src;
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
-	unsigned int len;
 
-	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
-	switch (ctx_p->cipher_mode) {
-	case DRV_CIPHER_CBC:
-		/*
-		 * The crypto API expects us to set the req->iv to the last
-		 * ciphertext block. For encrypt, simply copy from the result.
-		 * For decrypt, we must copy from a saved buffer since this
-		 * could be an in-place decryption operation and the src is
-		 * lost by this point.
-		 */
-		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
-			memcpy(req->iv, req_ctx->backup_info, ivsize);
-			kzfree(req_ctx->backup_info);
-		} else if (!err) {
-			len = req->cryptlen - ivsize;
-			scatterwalk_map_and_copy(req->iv, req->dst, len,
-						 ivsize, 0);
-		}
-		break;
-
-	case DRV_CIPHER_CTR:
-		/* Compute the counter of the last block */
-		len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
-		cc_update_ctr((u8 *)req->iv, len);
-		break;
-
-	default:
-		break;
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+		memcpy(req->iv, req_ctx->iv, ivsize);
+		kzfree(req_ctx->iv);
 	}
 
-	kzfree(req_ctx->iv);
-
 	skcipher_request_complete(req, err);
 }
 
@@ -741,6 +871,13 @@ static int cc_cipher_process(struct skcipher_request *req,
 	cc_req.user_cb = (void *)cc_cipher_complete;
 	cc_req.user_arg = (void *)req;
 
+	/* Setup CPP operation details */
+	if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+		cc_req.cpp.is_cpp = true;
+		cc_req.cpp.alg = ctx_p->cpp.alg;
+		cc_req.cpp.slot = ctx_p->cpp.slot;
+	}
+
 	/* Setup request context */
 	req_ctx->gen_ctx.op_type = direction;
 
@@ -755,11 +892,16 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 	/* STAT_PHASE_2: Create sequence */
 
-	/* Setup processing */
-	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	/* Setup IV and XEX key used */
+	cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	/* Setup MLLI line, if needed */
+	cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+	/* Setup key */
+	cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
 	/* Data processing */
-	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
-			     &seq_len);
+	cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+	/* Read next IV */
+	cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
 
 	/* STAT_PHASE_3: Lock HW and push sequence */
 
@@ -774,7 +916,6 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 exit_process:
 	if (rc != -EINPROGRESS && rc != -EBUSY) {
-		kzfree(req_ctx->backup_info);
 		kzfree(req_ctx->iv);
 	}
 
@@ -792,31 +933,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
 
 static int cc_cipher_decrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
-	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
-	gfp_t flags = cc_gfp_flags(&req->base);
-	unsigned int len;
 
 	memset(req_ctx, 0, sizeof(*req_ctx));
 
-	if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
-	    (req->cryptlen >= ivsize)) {
-
-		/* Allocate and save the last IV sized bytes of the source,
-		 * which will be lost in case of in-place decryption.
-		 */
-		req_ctx->backup_info = kzalloc(ivsize, flags);
-		if (!req_ctx->backup_info)
-			return -ENOMEM;
-
-		len = req->cryptlen - ivsize;
-		scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
-					 ivsize, 0);
-	}
-
 	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 }
 
@@ -838,6 +958,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_630,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "xts512(paes)",
@@ -856,6 +977,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.data_unit = 512,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "xts4096(paes)",
@@ -874,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.data_unit = 4096,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "essiv(paes)",
@@ -891,6 +1014,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "essiv512(paes)",
@@ -909,6 +1033,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.data_unit = 512,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "essiv4096(paes)",
@@ -927,6 +1052,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.data_unit = 4096,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "bitlocker(paes)",
@@ -944,6 +1070,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "bitlocker512(paes)",
@@ -962,6 +1089,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.data_unit = 512,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "bitlocker4096(paes)",
@@ -980,6 +1108,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.data_unit = 4096,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "ecb(paes)",
@@ -997,6 +1126,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "cbc(paes)",
@@ -1014,6 +1144,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "ofb(paes)",
@@ -1031,6 +1162,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "cts(cbc(paes))",
@@ -1048,6 +1180,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "ctr(paes)",
@@ -1065,6 +1198,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.flow_mode = S_DIN_to_AES,
 		.min_hw_rev = CC_HW_REV_712,
 		.std_body = CC_STD_NIST,
+		.sec_func = true,
 	},
 	{
 		.name = "xts(aes)",
@@ -1429,6 +1563,42 @@ static const struct cc_alg_template skcipher_algs[] = {
 		.min_hw_rev = CC_HW_REV_713,
 		.std_body = CC_STD_OSCCA,
 	},
+	{
+		.name = "cbc(psm4)",
+		.driver_name = "cbc-psm4-ccree",
+		.blocksize = SM4_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = SM4_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_SM4,
+		.min_hw_rev = CC_HW_REV_713,
+		.std_body = CC_STD_OSCCA,
+		.sec_func = true,
+	},
+	{
+		.name = "ctr(psm4)",
+		.driver_name = "ctr-psm4-ccree",
+		.blocksize = SM4_BLOCK_SIZE,
+		.template_skcipher = {
+			.setkey = cc_cipher_sethkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = CC_HW_KEY_SIZE,
+			.max_keysize = CC_HW_KEY_SIZE,
+			.ivsize = SM4_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_SM4,
+		.min_hw_rev = CC_HW_REV_713,
+		.std_body = CC_STD_OSCCA,
+		.sec_func = true,
+	},
 };
 
 static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
@@ -1504,7 +1674,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
 		ARRAY_SIZE(skcipher_algs));
 	for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
 		if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
-		    !(drvdata->std_bodies & skcipher_algs[alg].std_body))
+		    !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+		    (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
 			continue;
 
 		dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..da3a38707fae 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_cipher.h
  * ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
 	u32 in_mlli_nents;
 	u32 out_nents;
 	u32 out_mlli_nents;
-	u8 *backup_info; /*store iv for generated IV flow*/
 	u8 *iv;
 	struct mlli_params mlli_params;
 };
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index c8dac273c563..ccf960a0d989 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef _CC_CRYPTO_CTX_H_
 #define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@
 
 #define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
 
+#define CC_CPP_NUM_SLOTS	8
+#define CC_CPP_NUM_ALGS		2
+
+enum cc_cpp_alg {
+	CC_CPP_SM4 = 1,
+	CC_CPP_AES = 0
+};
+
 enum drv_engine_type {
 	DRV_ENGINE_NULL = 0,
 	DRV_ENGINE_AES = 1,
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5fa05a7bcf36..566999738698 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
 
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
  */
 static struct dentry *cc_debugfs_dir;
 
-static struct debugfs_reg32 debug_regs[] = {
+static struct debugfs_reg32 ver_sig_regs[] = {
 	{ .name = "SIGNATURE" }, /* Must be 0th */
 	{ .name = "VERSION" }, /* Must be 1st */
+};
+
+static struct debugfs_reg32 pid_cid_regs[] = {
+	CC_DEBUG_REG(PERIPHERAL_ID_0),
+	CC_DEBUG_REG(PERIPHERAL_ID_1),
+	CC_DEBUG_REG(PERIPHERAL_ID_2),
+	CC_DEBUG_REG(PERIPHERAL_ID_3),
+	CC_DEBUG_REG(PERIPHERAL_ID_4),
+	CC_DEBUG_REG(COMPONENT_ID_0),
+	CC_DEBUG_REG(COMPONENT_ID_1),
+	CC_DEBUG_REG(COMPONENT_ID_2),
+	CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static struct debugfs_reg32 debug_regs[] = {
 	CC_DEBUG_REG(HOST_IRR),
 	CC_DEBUG_REG(HOST_POWER_DOWN_EN),
 	CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
 {
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct cc_debugfs_ctx *ctx;
-	struct debugfs_regset32 *regset;
-
-	debug_regs[0].offset = drvdata->sig_offset;
-	debug_regs[1].offset = drvdata->ver_offset;
+	struct debugfs_regset32 *regset, *verset;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
 	debugfs_create_regset32("regs", 0400, ctx->dir, regset);
 	debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
 
-	drvdata->debugfs = ctx;
+	verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+	/* Failing here is not important enough to fail the module load */
+	if (!verset)
+		goto out;
+
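+	/* Parts up to cc712 expose SIGNATURE/VERSION registers; newer
+	 * parts expose the standard Arm PIDR/CIDR ID registers instead.
+	 */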
+	if (drvdata->hw_rev <= CC_HW_REV_712) {
+		ver_sig_regs[0].offset = drvdata->sig_offset;
+		ver_sig_regs[1].offset = drvdata->ver_offset;
+		verset->regs = ver_sig_regs;
+		verset->nregs = ARRAY_SIZE(ver_sig_regs);
+	} else {
+		verset->regs = pid_cid_regs;
+		verset->nregs = ARRAY_SIZE(pid_cid_regs);
+	}
+	verset->base = drvdata->cc_base;
 
+	debugfs_create_regset32("version", 0400, ctx->dir, verset);
+
+out:
+	drvdata->debugfs = ctx;
 	return 0;
 }
 
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 01cbd9a95659..664ff402e0e9 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef __CC_DEBUGFS_H__
 #define __CC_DEBUGFS_H__
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 3bcc6c76e090..86ac7b443355 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -30,27 +30,47 @@
 bool cc_dump_desc;
 module_param_named(dump_desc, cc_dump_desc, bool, 0600);
 MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
-
 bool cc_dump_bytes;
 module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
 MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
 
+static bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
 struct cc_hw_data {
 	char *name;
 	enum cc_hw_rev rev;
 	u32 sig;
+	u32 cidr_0123;
+	u32 pidr_0124;
 	int std_bodies;
 };
 
+#define CC_NUM_IDRS 4
+
+/* Note: PIDR3 holds CMOD/Rev, so it is ignored for HW identification */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+	CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+	CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+	CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+	CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
+};
+
 /* Hardware revisions defs. */
 
 /* The 703 is an OSCCA-only variant of the 713 */
 static const struct cc_hw_data cc703_hw = {
-	.name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+	.name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
 };
 
 static const struct cc_hw_data cc713_hw = {
-	.name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+	.name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
 };
 
 static const struct cc_hw_data cc712_hw = {
@@ -78,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
 
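+/*
+ * Read four byte-wide ID registers and assemble them, in register order,
+ * into one little-endian 32 bit identification value.
+ */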
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+	int i;
+	union {
+		u8 regs[CC_NUM_IDRS];
+		__le32 val;
+	} idr;
+
+	for (i = 0; i < CC_NUM_IDRS; ++i)
+		idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+	return le32_to_cpu(idr.val);
+}
+
 void __dump_byte_array(const char *name, const u8 *buf, size_t len)
 {
 	char prefix[64];
@@ -114,12 +148,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 
 	drvdata->irq = irr;
 	/* Completion interrupt - most probable */
-	if (irr & CC_COMP_IRQ_MASK) {
-		/* Mask AXI completion interrupt - will be unmasked in
-		 * Deferred service handler
+	if (irr & drvdata->comp_mask) {
+		/* Mask all completion interrupts - will be unmasked in
+		 * deferred service handler
 		 */
-		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
-		irr &= ~CC_COMP_IRQ_MASK;
+		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+		irr &= ~drvdata->comp_mask;
 		complete_request(drvdata);
 	}
 #ifdef CONFIG_CRYPTO_FIPS
@@ -159,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
 	unsigned int val, cache_params;
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	/* Unmask all AXI interrupt sources AXI_CFG1 register */
-	val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
-	cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
-	dev_dbg(dev, "AXIM_CFG=0x%08X\n",
-		cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+	/* Unmask all AXI interrupt sources (AXI_CFG1 register).
+	 * The AXI interrupt config is obsolete starting with cc7x3.
+	 */
+	if (drvdata->hw_rev <= CC_HW_REV_712) {
+		val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+		cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+		dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+			cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+	}
 
 	/* Clear all pending interrupts */
 	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -171,7 +208,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
 	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
 
 	/* Unmask relevant interrupt cause */
-	val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+	val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
 
 	if (drvdata->hw_rev >= CC_HW_REV_712)
 		val |= CC_GPR0_IRQ_MASK;
@@ -201,7 +238,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	struct cc_drvdata *new_drvdata;
 	struct device *dev = &plat_dev->dev;
 	struct device_node *np = dev->of_node;
-	u32 signature_val;
+	u32 val, hw_rev_pidr, sig_cidr;
 	u64 dma_mask;
 	const struct cc_hw_data *hw_rev;
 	const struct of_device_id *dev_id;
@@ -231,6 +268,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
 	}
 
+	new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
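+	/* Base completion mask; CPP abort bits are added below when
+	 * security functions are enabled on cc7x3 parts.
+	 */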
+
 	platform_set_drvdata(plat_dev, new_drvdata);
 	new_drvdata->plat_dev = plat_dev;
 
@@ -311,22 +350,57 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		return rc;
 	}
 
+	new_drvdata->sec_disabled = cc_sec_disable;
+
 	if (hw_rev->rev <= CC_HW_REV_712) {
 		/* Verify correct mapping */
-		signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
-		if (signature_val != hw_rev->sig) {
+		val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+		if (val != hw_rev->sig) {
 			dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
-				signature_val, hw_rev->sig);
+				val, hw_rev->sig);
+			rc = -EINVAL;
+			goto post_clk_err;
+		}
+		sig_cidr = val;
+		hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
+	} else {
+		/* Verify correct mapping */
+		val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+		if (val != hw_rev->pidr_0124) {
+			dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+				val, hw_rev->pidr_0124);
+			rc = -EINVAL;
+			goto post_clk_err;
+		}
+		hw_rev_pidr = val;
+
+		val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+		if (val != hw_rev->cidr_0123) {
+			dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+				val, hw_rev->cidr_0123);
 			rc = -EINVAL;
 			goto post_clk_err;
 		}
-		dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+		sig_cidr = val;
+
+		/* Check security disable state */
+		val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+		val &= CC_SECURITY_DISABLED_MASK;
+		new_drvdata->sec_disabled |= !!val;
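+		/* The effective setting is the OR of the module parameter
+		 * and the security-disabled indication read from HW.
+		 */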
+
+		if (!new_drvdata->sec_disabled) {
+			new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+			if (new_drvdata->std_bodies & CC_STD_NIST)
+				new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+		}
 	}
 
+	if (new_drvdata->sec_disabled)
+		dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
+
 	/* Display HW versions */
-	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
-		 hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
-		 DRV_MODULE_VERSION);
+	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
+		 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
 
 	rc = init_cc_regs(new_drvdata, true);
 	if (rc) {
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 33dbf3e6d15d..b76181335c08 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_driver.h
  * ARM CryptoCell Linux Crypto Driver
@@ -65,10 +65,32 @@ enum cc_std_body {
 
 #define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
 
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
 #define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
 				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
 				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
 
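+/* One REE-op-aborted interrupt bit per CPP key slot, 8 slots per algorithm */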
+#define CC_CPP_AES_ABORT_MASK ( \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
 /* Register name mangling macro */
 #define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
 
@@ -81,7 +103,6 @@ enum cc_std_body {
 
 #define MAX_REQUEST_QUEUE_SIZE 4096
 #define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
 
 /* Definitions for HW descriptors DIN/DOUT fields */
 #define NS_BIT 1
@@ -90,6 +111,12 @@ enum cc_std_body {
  * field in the HW descriptor. The DMA engine +8 that value.
  */
 
+struct cc_cpp_req {
+	bool is_cpp;
+	enum cc_cpp_alg alg;
+	u8 slot;
+};
+
 #define CC_MAX_IVGEN_DMA_ADDRESSES	3
 struct cc_crypto_req {
 	void (*user_cb)(struct device *dev, void *req, int err);
@@ -104,6 +131,7 @@ struct cc_crypto_req {
 	/* The generated IV size required, 8/16 B allowed. */
 	unsigned int ivgen_size;
 	struct completion seq_compl; /* request completion */
+	struct cc_cpp_req cpp;
 };
 
 /**
@@ -136,6 +164,8 @@ struct cc_drvdata {
 	u32 sig_offset;
 	u32 ver_offset;
 	int std_bodies;
+	bool sec_disabled;
+	u32 comp_mask;
 };
 
 struct cc_crypto_alg {
@@ -162,12 +192,14 @@ struct cc_alg_template {
 	int auth_mode;
 	u32 min_hw_rev;
 	enum cc_std_body std_body;
+	bool sec_func;
 	unsigned int data_unit;
 	struct cc_drvdata *drvdata;
 };
 
 struct async_gen_req_ctx {
 	dma_addr_t iv_dma_addr;
+	u8 *iv;
 	enum drv_crypto_direction op_type;
 };
 
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d983e0..5ad3ffb7acaa 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/fips.h>
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
 
 	/* Kill tasklet */
 	tasklet_kill(&fips_h->tasklet);
-
-	kfree(fips_h);
 	drvdata->fips_handle = NULL;
 }
 
@@ -72,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
 		dev_err(dev, "TEE reported error!\n");
 }
 
+/*
+ * Check whether a CryptoCell TEE FIPS error has occurred and,
+ * if so, report it as a system error.
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+	struct device *dev = drvdata_to_dev(p_drvdata);
+
+	if (!cc_get_tee_fips_status(p_drvdata))
+		tee_fips_error(dev);
+}
+
 /* Deferred service handler, run as interrupt-fired tasklet */
 static void fips_dsr(unsigned long devarg)
 {
 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
-	struct device *dev = drvdata_to_dev(drvdata);
-	u32 irq, state, val;
+	u32 irq, val;
 
 	irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
 
 	if (irq) {
-		state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
-		if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
-			tee_fips_error(dev);
+		cc_tee_handle_fips_error(drvdata);
 	}
 
 	/* after verifying that there is nothing to do,
@@ -104,7 +110,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
 	if (p_drvdata->hw_rev < CC_HW_REV_712)
 		return 0;
 
-	fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+	fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
 	if (!fips_h)
 		return -ENOMEM;
 
@@ -113,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
 	dev_dbg(dev, "Initializing fips tasklet\n");
 	tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
 
-	if (!cc_get_tee_fips_status(p_drvdata))
-		tee_fips_error(dev);
+	cc_tee_handle_fips_error(p_drvdata);
 
 	return 0;
 }
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096a7a82..fc33eeb4d566 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef __CC_FIPS_H__
 #define __CC_FIPS_H__
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
 void cc_fips_fini(struct cc_drvdata *drvdata);
 void fips_handler(struct cc_drvdata *drvdata);
 void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
 
 #else  /* CONFIG_CRYPTO_FIPS */
 
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
 static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
 					  bool ok) {}
 static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
 
 #endif /* CONFIG_CRYPTO_FIPS */
 
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..a6abe4e3bb0e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -69,6 +69,7 @@ struct cc_hash_alg {
 struct hash_key_req_ctx {
 	u32 keylen;
 	dma_addr_t key_dma_addr;
+	u8 *key;
 };
 
 /* hash per-session context */
@@ -280,9 +281,13 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
 
 	dev_dbg(dev, "req=%pK\n", req);
 
-	cc_unmap_hash_request(dev, state, req->src, false);
-	cc_unmap_req(dev, state, ctx);
-	req->base.complete(&req->base, err);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_hash_request(dev, state, req->src, false);
+		cc_unmap_req(dev, state, ctx);
+	}
+
+	ahash_request_complete(req, err);
 }
 
 static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -295,10 +300,14 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
 
 	dev_dbg(dev, "req=%pK\n", req);
 
-	cc_unmap_hash_request(dev, state, req->src, false);
-	cc_unmap_result(dev, state, digestsize, req->result);
-	cc_unmap_req(dev, state, ctx);
-	req->base.complete(&req->base, err);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_hash_request(dev, state, req->src, false);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+
+	ahash_request_complete(req, err);
 }
 
 static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -311,10 +320,14 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
 
 	dev_dbg(dev, "req=%pK\n", req);
 
-	cc_unmap_hash_request(dev, state, req->src, false);
-	cc_unmap_result(dev, state, digestsize, req->result);
-	cc_unmap_req(dev, state, ctx);
-	req->base.complete(&req->base, err);
+	if (err != -EINPROGRESS) {
+		/* Not a BACKLOG notification */
+		cc_unmap_hash_request(dev, state, req->src, false);
+		cc_unmap_result(dev, state, digestsize, req->result);
+		cc_unmap_req(dev, state, ctx);
+	}
+
+	ahash_request_complete(req, err);
 }
 
 static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -730,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	ctx->key_params.keylen = keylen;
 	ctx->key_params.key_dma_addr = 0;
 	ctx->is_hmac = true;
+	ctx->key_params.key = NULL;
 
 	if (keylen) {
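+		/* The caller's key buffer is not guaranteed to be DMA-safe
+		 * (it may live on the stack), so duplicate it and map the
+		 * private copy instead.
+		 */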
+		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+		if (!ctx->key_params.key)
+			return -ENOMEM;
+
 		ctx->key_params.key_dma_addr =
-			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+			dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+				       DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
-				key, keylen);
+				ctx->key_params.key, keylen);
+			kzfree(ctx->key_params.key);
 			return -ENOMEM;
 		}
 		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -887,6 +907,9 @@ out:
 		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 	}
+
+	kzfree(ctx->key_params.key);
+
 	return rc;
 }
 
@@ -913,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 
 	ctx->key_params.keylen = keylen;
 
+	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+	if (!ctx->key_params.key)
+		return -ENOMEM;
+
 	ctx->key_params.key_dma_addr =
-		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 			key, keylen);
+		kzfree(ctx->key_params.key);
 		return -ENOMEM;
 	}
 	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -969,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 
+	kzfree(ctx->key_params.key);
+
 	return rc;
 }
 
@@ -1621,7 +1651,7 @@ static struct cc_hash_template driver_hash[] = {
 			.setkey = cc_hash_setkey,
 			.halg = {
 				.digestsize = SHA224_DIGEST_SIZE,
-				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
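+				/* SHA-224 runs on the SHA-256 engine and
+				 * exports a SHA-256 sized intermediate
+				 * state (SHA-384 likewise uses SHA-512's).
+				 */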
+				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
 			},
 		},
 		.hash_mode = DRV_HASH_SHA224,
@@ -1648,7 +1678,7 @@ static struct cc_hash_template driver_hash[] = {
 			.setkey = cc_hash_setkey,
 			.halg = {
 				.digestsize = SHA384_DIGEST_SIZE,
-				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
 			},
 		},
 		.hash_mode = DRV_HASH_SHA384,
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b0bbb6..0d6dc61484d7 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_hash.h
  * ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1c41ba..d0764147573f 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
 
 #ifndef __CC_HOST_H__
 #define __CC_HOST_H__
@@ -7,33 +7,102 @@
 // --------------------------------------
 // BLOCK: HOST_P
 // --------------------------------------
+
+
+/* IRR */
 #define CC_HOST_IRR_REG_OFFSET	0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE	0x1UL
 #define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT	0x2UL
 #define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT	0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT	0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT	0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT	0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT	0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE	0x1UL
 #define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT	0x8UL
 #define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT	0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT	0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE	0x1UL
 #define CC_HOST_IRR_GPR0_BIT_SHIFT	0xBUL
 #define CC_HOST_IRR_GPR0_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT	0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT	0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT	0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT	0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT	0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT	0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT	0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE	0x1UL
 #define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT	0x13UL
 #define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE	0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT	0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE	0x1UL
 #define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT	0x17UL
 #define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE	0x1UL
 #define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET	0xA10UL
 #define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT	0x0UL
 #define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE	0xCUL
-#define CC_HOST_IMR_REG_OFFSET	0xA04UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT	0x1UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE	0x1UL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET	0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE	0x1UL
 #define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT	0x2UL
 #define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT	0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT	0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT	0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT	0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT	0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE	0x1UL
 #define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT	0x8UL
 #define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT	0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT	0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE	0x1UL
 #define CC_HOST_IMR_GPR0_BIT_SHIFT	0xBUL
 #define CC_HOST_IMR_GPR0_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT	0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT	0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT	0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT	0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT	0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT	0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT	0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE	0x1UL
 #define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT	0x13UL
 #define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE	0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT	0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE	0x1UL
 #define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT	0x17UL
 #define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE	0x1UL
+
+/* ICR */
 #define CC_HOST_ICR_REG_OFFSET	0xA08UL
 #define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT	0x2UL
 #define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE	0x1UL
@@ -45,6 +114,9 @@
 #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE	0x1UL
 #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT	0x17UL
 #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE	0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET		0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT	0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE	0x1UL
 #define CC_HOST_SIGNATURE_712_REG_OFFSET	0xA24UL
 #define CC_HOST_SIGNATURE_630_REG_OFFSET	0xAC8UL
 #define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT	0x0UL
@@ -132,6 +204,49 @@
 #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT	0x0UL
 #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE	0x1UL
 // --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET	0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT	0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE	0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET	0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET	0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET	0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET	0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT	0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE	0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET	0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT	0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE	0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT	0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE	0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET	0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT	0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE	0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT	0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE	0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT	0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE	0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET	0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT	0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE	0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT	0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE	0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET	0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT	0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE	0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET	0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT	0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE	0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT	0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE	0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET	0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT	0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE	0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET	0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT	0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE	0x8UL
+// --------------------------------------
 // BLOCK: HOST_SRAM
 // --------------------------------------
 #define CC_SRAM_DATA_REG_OFFSET	0xF00UL
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 7a9b90db7db7..9f4db9956e91 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef __CC_HW_QUEUE_DEFS_H__
 #define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
 	GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
 
 #define WORD0_VALUE		CC_GENMASK(0, VALUE)
+#define	WORD0_CPP_CIPHER_MODE	CC_GENMASK(0, CPP_CIPHER_MODE)
 #define WORD1_DIN_CONST_VALUE	CC_GENMASK(1, DIN_CONST_VALUE)
 #define WORD1_DIN_DMA_MODE	CC_GENMASK(1, DIN_DMA_MODE)
 #define WORD1_DIN_SIZE		CC_GENMASK(1, DIN_SIZE)
 #define WORD1_NOT_LAST		CC_GENMASK(1, NOT_LAST)
 #define WORD1_NS_BIT		CC_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE	CC_GENMASK(1, LOCK_QUEUE)
 #define WORD2_VALUE		CC_GENMASK(2, VALUE)
 #define WORD3_DOUT_DMA_MODE	CC_GENMASK(3, DOUT_DMA_MODE)
 #define WORD3_DOUT_LAST_IND	CC_GENMASK(3, DOUT_LAST_IND)
@@ -176,6 +178,15 @@ enum cc_hw_crypto_key {
 	END_OF_KEYS = S32_MAX,
 };
 
+#define CC_NUM_HW_KEY_SLOTS	4
+#define CC_FIRST_HW_KEY_SLOT	0
+#define CC_LAST_HW_KEY_SLOT	(CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS	8
+#define CC_FIRST_CPP_KEY_SLOT	16
+#define CC_LAST_CPP_KEY_SLOT	(CC_FIRST_CPP_KEY_SLOT + \
+					CC_NUM_CPP_KEY_SLOTS - 1)
+
 enum cc_hw_aes_key_size {
 	AES_128_KEY = 0,
 	AES_192_KEY = 1,
@@ -189,6 +200,9 @@ enum cc_hash_cipher_pad {
 	HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
 };
 
+#define CC_CPP_DIN_ADDR	0xFF00FF00UL
+#define CC_CPP_DIN_SIZE	0xFF00FFUL
+
 /*****************************/
 /* Descriptor packing macros */
 /*****************************/
@@ -249,6 +263,25 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
 }
 
 /*
+ * Setup the special CPP descriptor
+ *
+ * @pdesc: pointer to HW descriptor struct
+ * @slot: CPP key slot number
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
+{
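+	/* No real DMA transfer here: the magic CC_CPP_DIN_ADDR/SIZE values
+	 * mark this descriptor as a CPP operation, the queue is locked and
+	 * the slot number selects the protected key.
+	 */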
+	pdesc->word[0] |= CC_CPP_DIN_ADDR;
+
+	pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
+	pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+
+	pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
+}
+
+/*
  * Set the DIN field of a HW descriptor to SRAM mode.
  * Note: No need to check SRAM alignment since host requests do not use SRAM and
  * adaptor will enforce alignment check.
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 769458323394..99dc69383e20 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <crypto/ctr.h>
 #include "cc_driver.h"
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
 	}
 
 	ivgen_ctx->pool = NULL_SRAM_ADDR;
-
-	/* release "this" context */
-	kfree(ivgen_ctx);
 }
 
 /*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
 	int rc;
 
 	/* Allocate "this" context */
-	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+	ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
 	if (!ivgen_ctx)
 		return -ENOMEM;
 
+	drvdata->ivgen_handle = ivgen_ctx;
+
 	/* Allocate pool's header for initial enc. key/IV */
 	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
 						  &ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
 		goto out;
 	}
 
-	drvdata->ivgen_handle = ivgen_ctx;
-
 	return cc_init_iv_sram(drvdata);
 
 out:
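This hunk and the cc_sram_mgr.c one below share a pattern: a kzalloc()/kfree() pair becomes a device-managed allocation, so the fini path no longer frees anything, and the handle is published before later failure points so cleanup code always sees a valid pointer. A minimal sketch of the pattern, with hypothetical names:

static int foo_mgr_init(struct device *dev, struct foo_drvdata *drvdata)
{
	struct foo_ctx *ctx;

	/* Freed automatically when dev is unbound; no kfree() in fini. */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Publish before any failure point below, so error paths that
	 * run the fini routine see a valid handle.
	 */
	drvdata->ctx = ctx;
	return 0;
}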
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
index b6ac16903dda..a9f5e8bba4f1 100644
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef __CC_IVGEN_H__
 #define __CC_IVGEN_H__
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..582bae450596 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef __CC_CRYS_KERNEL_H__
 #define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac9f1d3..f891ab813f41 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef _CC_LLI_DEFS_H_
 #define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
 #define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
 
 #define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
 #define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
 #define MAX_NUM_OF_BUFFERS_IN_MLLI 4
 #define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6ff7e75ad90e..2dad9c9543c6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -11,6 +11,7 @@
 #include "cc_ivgen.h"
 #include "cc_hash.h"
 #include "cc_pm.h"
+#include "cc_fips.h"
 
 #define POWER_DOWN_ENABLE 0x01
 #define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
 	int rc;
 
 	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
-	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	rc = cc_suspend_req_queue(drvdata);
 	if (rc) {
 		dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
 		return rc;
 	}
 	fini_cc_regs(drvdata);
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	cc_clk_off(drvdata);
 	return 0;
 }
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
-	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+	/* Enable the device source clock */
 	rc = cc_clk_on(drvdata);
 	if (rc) {
 		dev_err(dev, "failed getting clock back on. We're toast.\n");
 		return rc;
 	}
 
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
 	rc = init_cc_regs(drvdata, false);
 	if (rc) {
 		dev_err(dev, "init_cc_regs (%x)\n", rc);
 		return rc;
 	}
+	/* check if a TEE FIPS error occurred during power down */
+	cc_tee_handle_fips_error(drvdata);
 
 	rc = cc_resume_req_queue(drvdata);
 	if (rc) {
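Read together, the two cc_pm.c hunks make suspend and resume mirror images of each other; as far as the diff shows, the intended ordering is:

/*
 * suspend: quiesce request queue -> fini_cc_regs() ->
 *          assert HOST_POWER_DOWN_EN -> gate the clock
 * resume:  ungate the clock -> deassert HOST_POWER_DOWN_EN ->
 *          init_cc_regs() -> cc_tee_handle_fips_error() ->
 *          resume request queue
 */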
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 907a6db4d6c0..6190cdba5dad 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_pm.h
  */
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaae61c7..0bc6ccb0b899 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
+#include <linux/nospec.h>
 #include "cc_driver.h"
 #include "cc_buffer_mgr.h"
 #include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
 	bool notif;
 };
 
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
 static void comp_handler(unsigned long devarg);
 #ifdef COMP_IN_WQ
 static void comp_work_handler(struct work_struct *work);
 #endif
 
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+	return cc_cpp_int_masks[alg][slot];
+}
+
 void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 {
 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -336,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
 			       struct cc_bl_item *bli)
 {
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
 
 	spin_lock_bh(&mgr->bl_lock);
 	list_add_tail(&bli->list, &mgr->backlog);
 	++mgr->bl_len;
+	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
 	spin_unlock_bh(&mgr->bl_lock);
 	tasklet_schedule(&mgr->comptask);
 }
@@ -349,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 	struct cc_bl_item *bli;
 	struct cc_crypto_req *creq;
-	struct crypto_async_request *req;
+	void *req;
 	bool ivgen;
 	unsigned int total_len;
 	struct device *dev = drvdata_to_dev(drvdata);
@@ -359,17 +389,19 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
 
 	while (mgr->bl_len) {
 		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
 		spin_unlock(&mgr->bl_lock);
 
 		creq = &bli->creq;
-		req = (struct crypto_async_request *)creq->user_arg;
+		req = creq->user_arg;
 
 		/*
 		 * Notify the request we're moving out of the backlog
 		 * but only if we haven't done so already.
 		 */
 		if (!bli->notif) {
-			req->complete(req, -EINPROGRESS);
+			creq->user_cb(dev, req, -EINPROGRESS);
 			bli->notif = true;
 		}
 
@@ -579,6 +612,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
 						drvdata->request_mgr_handle;
 	unsigned int *tail = &request_mgr_handle->req_queue_tail;
 	unsigned int *head = &request_mgr_handle->req_queue_head;
+	int rc;
+	u32 mask;
 
 	while (request_mgr_handle->axi_completed) {
 		request_mgr_handle->axi_completed--;
@@ -596,8 +631,21 @@
 
 		cc_req = &request_mgr_handle->req_queue[*tail];
 
+		if (cc_req->cpp.is_cpp) {
+			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+				cc_req->cpp.slot, cc_req->cpp.alg);
+			mask = cc_cpp_int_mask(cc_req->cpp.alg,
+					       cc_req->cpp.slot);
+			rc = (drvdata->irq & mask ? -EPERM : 0);
+			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+				drvdata->irq, rc);
+		} else {
+			dev_dbg(dev, "None CPP request completion\n");
+			rc = 0;
+		}
+
 		if (cc_req->user_cb)
-			cc_req->user_cb(dev, cc_req->user_arg, 0);
+			cc_req->user_cb(dev, cc_req->user_arg, rc);
 		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
 		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
 		dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +667,50 @@ static void comp_handler(unsigned long devarg)
 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
 	struct cc_req_mgr_handle *request_mgr_handle =
 						drvdata->request_mgr_handle;
-
+	struct device *dev = drvdata_to_dev(drvdata);
 	u32 irq;
 
-	irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+	dev_dbg(dev, "Completion handler called!\n");
+	irq = (drvdata->irq & drvdata->comp_mask);
 
-	if (irq & CC_COMP_IRQ_MASK) {
-		/* To avoid the interrupt from firing as we unmask it,
-		 * we clear it now
-		 */
-		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+	/* To prevent the interrupt from firing as we unmask it,
+	 * we clear it now
+	 */
+	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
 
-		/* Avoid race with above clear: Test completion counter
-		 * once more
-		 */
-		request_mgr_handle->axi_completed +=
-				cc_axi_comp_count(drvdata);
-
-		while (request_mgr_handle->axi_completed) {
-			do {
-				proc_completions(drvdata);
-				/* At this point (after proc_completions()),
-				 * request_mgr_handle->axi_completed is 0.
-				 */
-				request_mgr_handle->axi_completed =
-						cc_axi_comp_count(drvdata);
-			} while (request_mgr_handle->axi_completed > 0);
+	/* Avoid race with above clear: Test completion counter once more */
 
-			cc_iowrite(drvdata, CC_REG(HOST_ICR),
-				   CC_COMP_IRQ_MASK);
+	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+
+	dev_dbg(dev, "AXI completion after updated: %d\n",
+		request_mgr_handle->axi_completed);
+
+	while (request_mgr_handle->axi_completed) {
+		do {
+			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+			irq = (drvdata->irq & drvdata->comp_mask);
+			proc_completions(drvdata);
 
+			/* At this point (after proc_completions()),
+			 * request_mgr_handle->axi_completed is 0.
+			 */
 			request_mgr_handle->axi_completed +=
-					cc_axi_comp_count(drvdata);
-		}
+						cc_axi_comp_count(drvdata);
+		} while (request_mgr_handle->axi_completed > 0);
+
+		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
 	}
+
 	/* after verifying that there is nothing to do,
 	 * unmask AXI completion interrupt
 	 */
 	cc_iowrite(drvdata, CC_REG(HOST_IMR),
-		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
 
 	cc_proc_backlog(drvdata);
+	dev_dbg(dev, "Comp. handler done.\n");
 }
 
 /*
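cc_cpp_int_mask() above is the stock Spectre-v1 hardening pattern: clamp an attacker-influenceable index under speculation before using it to address a table. A generic sketch of the same pattern:

#include <linux/nospec.h>

static u32 table_lookup(const u32 *table, size_t nelems, size_t idx)
{
	if (idx >= nelems)
		return 0;

	/*
	 * The CPU may speculate past the bounds check above;
	 * array_index_nospec() clamps idx to 0..nelems-1 even then.
	 */
	idx = array_index_nospec(idx, nelems);
	return table[idx];
}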
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97af085..f46cf766fe4d 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_request_mgr.h
  * Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f6dee9..62c885e6e791 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include "cc_driver.h"
 #include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
  */
 void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
 {
-	/* Free "this" context */
-	kfree(drvdata->sram_mgr_handle);
+	/* Nothing needed */
 }
 
 /**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
 	}
 
 	/* Allocate "this" context */
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 
 	if (!ctx)
 		return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649fb3323..1d14de9ee8c3 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #ifndef __CC_SRAM_MGR_H__
 #define __CC_SRAM_MGR_H__
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 8d8cf80b9294..8a76fce22943 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2130,7 +2130,6 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	 * ipad in hmacctx->ipad and opad in hmacctx->opad location
 	 */
 	shash->tfm = hmacctx->base_hash;
-	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
 	if (keylen > bs) {
 		err = crypto_shash_digest(shash, key, keylen,
 					  hmacctx->ipad);
@@ -3517,7 +3516,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
 		SHASH_DESC_ON_STACK(shash, base_hash);
 
 		shash->tfm = base_hash;
-		shash->flags = crypto_shash_get_flags(base_hash);
 		bs = crypto_shash_blocksize(base_hash);
 		align = KEYCTX_ALIGN_PAD(max_authsize);
 		o_ptr =  actx->h_iopad + param.result_size + align;
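The two chcr hunks above belong to a tree-wide cleanup visible throughout this patch: shash_desc lost its flags field, so callers only set the tfm. What remains is just the following (a sketch; key, keylen and out are assumed to be in scope):

SHASH_DESC_ON_STACK(shash, base_hash);
int err;

shash->tfm = base_hash;	/* no shash->flags to mirror any more */
err = crypto_shash_digest(shash, key, keylen, out);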
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index dad212cabe63..d656be0a142b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1976,6 +1976,29 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	return 0;
 }
 
+static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			    unsigned int len)
+{
+	struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+	struct hifn_device *dev = ctx->dev;
+	u32 flags;
+	int err;
+
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
+
+	dev->flags &= ~HIFN_FLAG_OLD_KEY;
+
+	memcpy(ctx->key, key, len);
+	ctx->keysize = len;
+
+	return 0;
+}
+
 static int hifn_handle_req(struct ablkcipher_request *req)
 {
 	struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -2240,7 +2263,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		.ablkcipher = {
 			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
 			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-			.setkey		=	hifn_setkey,
+			.setkey		=	hifn_des3_setkey,
 			.encrypt	=	hifn_encrypt_3des_cfb,
 			.decrypt	=	hifn_decrypt_3des_cfb,
 		},
@@ -2250,7 +2273,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		.ablkcipher = {
 			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
 			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-			.setkey		=	hifn_setkey,
+			.setkey		=	hifn_des3_setkey,
 			.encrypt	=	hifn_encrypt_3des_ofb,
 			.decrypt	=	hifn_decrypt_3des_ofb,
 		},
@@ -2261,7 +2284,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 			.ivsize		=	HIFN_IV_LENGTH,
 			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
 			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-			.setkey		=	hifn_setkey,
+			.setkey		=	hifn_des3_setkey,
 			.encrypt	=	hifn_encrypt_3des_cbc,
 			.decrypt	=	hifn_decrypt_3des_cbc,
 		},
@@ -2271,7 +2294,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
 		.ablkcipher = {
 			.min_keysize	=	HIFN_3DES_KEY_LENGTH,
 			.max_keysize	=	HIFN_3DES_KEY_LENGTH,
-			.setkey		=	hifn_setkey,
+			.setkey		=	hifn_des3_setkey,
 			.encrypt	=	hifn_encrypt_3des_ecb,
 			.decrypt	=	hifn_decrypt_3des_ecb,
 		},
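hifn_des3_setkey() defers the key check to __des3_verify_key(), which rejects (among other cases) keys with K1 == K2 or K2 == K3, since those degrade 3DES-EDE to single DES. The degeneracy test is roughly equivalent to this sketch:

#include <crypto/des.h>
#include <linux/string.h>

/* Degenerate 3DES-EDE keys collapse to single DES (sketch only). */
static bool des3_key_is_degenerate(const u8 *key)
{
	return !memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
	       !memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE,
		       DES_KEY_SIZE);
}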
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index adc0cd8ae97b..02768af0dccd 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -365,20 +365,16 @@ static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
 static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
 					    const u8 *key, unsigned int keylen)
 {
-	if (keylen != DES_KEY_SIZE * 3)
-		return -EINVAL;
-
-	return sec_alg_skcipher_setkey(tfm, key, keylen,
+	return unlikely(des3_verify_key(tfm, key)) ?:
+	       sec_alg_skcipher_setkey(tfm, key, keylen,
 				       SEC_C_3DES_ECB_192_3KEY);
 }
 
 static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
 					    const u8 *key, unsigned int keylen)
 {
-	if (keylen != DES3_EDE_KEY_SIZE)
-		return -EINVAL;
-
-	return sec_alg_skcipher_setkey(tfm, key, keylen,
+	return unlikely(des3_verify_key(tfm, key)) ?:
+	       sec_alg_skcipher_setkey(tfm, key, keylen,
 				       SEC_C_3DES_CBC_192_3KEY);
 }
 
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7ef30a98cb24..de4be10b172f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1039,13 +1039,12 @@ static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
 static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
 				   const u8 *key, unsigned int len)
 {
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
-	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+	int err;
 
-	if (len != DES3_EDE_KEY_SIZE) {
-		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
+	err = des3_verify_key(ctfm, key);
+	if (unlikely(err))
+		return err;
 
 	/* if context exits and key changed, need to invalidate it */
 	if (ctx->base.ctxr_dma) {
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 5c4659b04d70..9bbde2f26cac 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -758,14 +758,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 			return -EINVAL;
 		}
 		cipher_cfg |= keylen_cfg;
-	} else if (cipher_cfg & MOD_3DES) {
-		const u32 *K = (const u32 *)key;
-		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
-		{
-			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
-			return -EINVAL;
-		}
 	} else {
 		u32 tmp[DES_EXPKEY_WORDS];
 		if (des_ekey(tmp, key) == 0) {
@@ -859,6 +851,19 @@ out:
 	return ret;
 }
 
+static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			    unsigned int key_len)
+{
+	u32 flags = crypto_ablkcipher_get_flags(tfm);
+	int err;
+
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err))
+		crypto_ablkcipher_set_flags(tfm, flags);
+
+	return ablk_setkey(tfm, key, key_len);
+}
+
 static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 		unsigned int key_len)
 {
@@ -1175,6 +1180,43 @@ badkey:
 	return -EINVAL;
 }
 
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+	u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
+	struct crypto_authenc_keys keys;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.authkeylen > sizeof(ctx->authkey))
+		goto badkey;
+
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(tfm);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err))
+		goto badkey;
+
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+	ctx->authkey_len = keys.authkeylen;
+	ctx->enckey_len = keys.enckeylen;
+
+	memzero_explicit(&keys, sizeof(keys));
+	return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+	crypto_aead_set_flags(tfm, flags);
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+}
+
 static int aead_encrypt(struct aead_request *req)
 {
 	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
@@ -1220,6 +1262,7 @@ static struct ixp_alg ixp4xx_algos[] = {
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
 			.ivsize		= DES3_EDE_BLOCK_SIZE,
+			.setkey		= ablk_des3_setkey,
 			}
 		}
 	},
@@ -1232,6 +1275,7 @@ static struct ixp_alg ixp4xx_algos[] = {
 		.cra_u		= { .ablkcipher = {
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= ablk_des3_setkey,
 			}
 		}
 	},
@@ -1313,6 +1357,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
 		},
 		.ivsize		= DES3_EDE_BLOCK_SIZE,
 		.maxauthsize	= MD5_DIGEST_SIZE,
+		.setkey		= des3_aead_setkey,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1337,6 +1382,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
 		},
 		.ivsize		= DES3_EDE_BLOCK_SIZE,
 		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= des3_aead_setkey,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1443,7 +1489,7 @@ static int __init ixp_module_init(void)
 		/* authenc */
 		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
 				      CRYPTO_ALG_ASYNC;
-		cra->setkey = aead_setkey;
+		cra->setkey = cra->setkey ?: aead_setkey;
 		cra->setauthsize = aead_setauthsize;
 		cra->encrypt = aead_encrypt;
 		cra->decrypt = aead_decrypt;
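des3_aead_setkey() follows the standard authenc key-parsing flow: split the combined key blob with crypto_authenc_extractkeys(), validate both halves, then wipe the parsed copy. In outline (a sketch of the calls used above):

struct crypto_authenc_keys keys;
int err;

/* Splits the RTA-encoded blob into keys.authkey / keys.enckey. */
err = crypto_authenc_extractkeys(&keys, key, keylen);
if (err)
	return err;

/* ... check keys.authkeylen / keys.enckeylen, copy them out ... */

memzero_explicit(&keys, sizeof(keys));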
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index fb279b3a1ca1..2fd936b19c6d 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -299,13 +299,12 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
 static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
 				   const u8 *key, unsigned int len)
 {
-	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
-	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+	int err;
 
-	if (len != DES3_EDE_KEY_SIZE) {
-		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
+	err = des3_verify_key(cipher, key);
+	if (unlikely(err))
+		return err;
 
 	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
 
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 99ff54cc8a15..fd456dd703bf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -135,11 +135,10 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
 
 static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
 {
-	unsigned int index, padlen;
+	unsigned int padlen;
 
 	buf[0] = 0x80;
 	/* Pad out to 56 mod 64 */
-	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
 	padlen = mv_cesa_ahash_pad_len(creq);
 	memset(buf + 1, 0, padlen - 1);
 
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 5f4f845adbb8..a0806ba40c68 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -365,7 +365,6 @@ static int mtk_sha_finish_hmac(struct ahash_request *req)
 	SHASH_DESC_ON_STACK(shash, bctx->shash);
 
 	shash->tfm = bctx->shash;
-	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
 
 	return crypto_shash_init(shash) ?:
 	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
@@ -810,8 +809,6 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
 	SHASH_DESC_ON_STACK(shash, bctx->shash);
 
 	shash->tfm = bctx->shash;
-	shash->flags = crypto_shash_get_flags(bctx->shash) &
-		       CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	if (keylen > bs) {
 		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
deleted file mode 100644
index 519086730791..000000000000
--- a/drivers/crypto/mxc-scc.c
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
- *
- * The driver is based on information gathered from
- * drivers/mxc/security/mxc_scc.c which can be found in
- * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-
-/* Secure Memory (SCM) registers */
-#define SCC_SCM_RED_START		0x0000
-#define SCC_SCM_BLACK_START		0x0004
-#define SCC_SCM_LENGTH			0x0008
-#define SCC_SCM_CTRL			0x000C
-#define SCC_SCM_STATUS			0x0010
-#define SCC_SCM_ERROR_STATUS		0x0014
-#define SCC_SCM_INTR_CTRL		0x0018
-#define SCC_SCM_CFG			0x001C
-#define SCC_SCM_INIT_VECTOR_0		0x0020
-#define SCC_SCM_INIT_VECTOR_1		0x0024
-#define SCC_SCM_RED_MEMORY		0x0400
-#define SCC_SCM_BLACK_MEMORY		0x0800
-
-/* Security Monitor (SMN) Registers */
-#define SCC_SMN_STATUS			0x1000
-#define SCC_SMN_COMMAND		0x1004
-#define SCC_SMN_SEQ_START		0x1008
-#define SCC_SMN_SEQ_END		0x100C
-#define SCC_SMN_SEQ_CHECK		0x1010
-#define SCC_SMN_BIT_COUNT		0x1014
-#define SCC_SMN_BITBANK_INC_SIZE	0x1018
-#define SCC_SMN_BITBANK_DECREMENT	0x101C
-#define SCC_SMN_COMPARE_SIZE		0x1020
-#define SCC_SMN_PLAINTEXT_CHECK	0x1024
-#define SCC_SMN_CIPHERTEXT_CHECK	0x1028
-#define SCC_SMN_TIMER_IV		0x102C
-#define SCC_SMN_TIMER_CONTROL		0x1030
-#define SCC_SMN_DEBUG_DETECT_STAT	0x1034
-#define SCC_SMN_TIMER			0x1038
-
-#define SCC_SCM_CTRL_START_CIPHER	BIT(2)
-#define SCC_SCM_CTRL_CBC_MODE		BIT(1)
-#define SCC_SCM_CTRL_DECRYPT_MODE	BIT(0)
-
-#define SCC_SCM_STATUS_LEN_ERR		BIT(12)
-#define SCC_SCM_STATUS_SMN_UNBLOCKED	BIT(11)
-#define SCC_SCM_STATUS_CIPHERING_DONE	BIT(10)
-#define SCC_SCM_STATUS_ZEROIZING_DONE	BIT(9)
-#define SCC_SCM_STATUS_INTR_STATUS	BIT(8)
-#define SCC_SCM_STATUS_SEC_KEY		BIT(7)
-#define SCC_SCM_STATUS_INTERNAL_ERR	BIT(6)
-#define SCC_SCM_STATUS_BAD_SEC_KEY	BIT(5)
-#define SCC_SCM_STATUS_ZEROIZE_FAIL	BIT(4)
-#define SCC_SCM_STATUS_SMN_BLOCKED	BIT(3)
-#define SCC_SCM_STATUS_CIPHERING	BIT(2)
-#define SCC_SCM_STATUS_ZEROIZING	BIT(1)
-#define SCC_SCM_STATUS_BUSY		BIT(0)
-
-#define SCC_SMN_STATUS_STATE_MASK	0x0000001F
-#define SCC_SMN_STATE_START		0x0
-/* The SMN is zeroizing its RAM during reset */
-#define SCC_SMN_STATE_ZEROIZE_RAM	0x5
-/* SMN has passed internal checks */
-#define SCC_SMN_STATE_HEALTH_CHECK	0x6
-/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
-#define SCC_SMN_STATE_FAIL		0x9
-/* SCC is in secure state. SCM is using secret key. */
-#define SCC_SMN_STATE_SECURE		0xA
-/* SCC is not secure. SCM is using default key. */
-#define SCC_SMN_STATE_NON_SECURE	0xC
-
-#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM	BIT(2)
-#define SCC_SCM_INTR_CTRL_CLR_INTR	BIT(1)
-#define SCC_SCM_INTR_CTRL_MASK_INTR	BIT(0)
-
-/* Size, in blocks, of Red memory. */
-#define SCC_SCM_CFG_BLACK_SIZE_MASK	0x07fe0000
-#define SCC_SCM_CFG_BLACK_SIZE_SHIFT	17
-/* Size, in blocks, of Black memory. */
-#define SCC_SCM_CFG_RED_SIZE_MASK	0x0001ff80
-#define SCC_SCM_CFG_RED_SIZE_SHIFT	7
-/* Number of bytes per block. */
-#define SCC_SCM_CFG_BLOCK_SIZE_MASK	0x0000007f
-
-#define SCC_SMN_COMMAND_TAMPER_LOCK	BIT(4)
-#define SCC_SMN_COMMAND_CLR_INTR	BIT(3)
-#define SCC_SMN_COMMAND_CLR_BIT_BANK	BIT(2)
-#define SCC_SMN_COMMAND_EN_INTR	BIT(1)
-#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM  BIT(0)
-
-#define SCC_KEY_SLOTS			20
-#define SCC_MAX_KEY_SIZE		32
-#define SCC_KEY_SLOT_SIZE		32
-
-#define SCC_CRC_CCITT_START		0xFFFF
-
-/*
- * Offset into each RAM of the base of the area which is not
- * used for Stored Keys.
- */
-#define SCC_NON_RESERVED_OFFSET	(SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
-
-/* Fixed padding for appending to plaintext to fill out a block */
-static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
-
-enum mxc_scc_state {
-	SCC_STATE_OK,
-	SCC_STATE_UNIMPLEMENTED,
-	SCC_STATE_FAILED
-};
-
-struct mxc_scc {
-	struct device		*dev;
-	void __iomem		*base;
-	struct clk		*clk;
-	bool			hw_busy;
-	spinlock_t		lock;
-	struct crypto_queue	queue;
-	struct crypto_async_request *req;
-	int			block_size_bytes;
-	int			black_ram_size_blocks;
-	int			memory_size_bytes;
-	int			bytes_remaining;
-
-	void __iomem		*red_memory;
-	void __iomem		*black_memory;
-};
-
-struct mxc_scc_ctx {
-	struct mxc_scc		*scc;
-	struct scatterlist	*sg_src;
-	size_t			src_nents;
-	struct scatterlist	*sg_dst;
-	size_t			dst_nents;
-	unsigned int		offset;
-	unsigned int		size;
-	unsigned int		ctrl;
-};
-
-struct mxc_scc_crypto_tmpl {
-	struct mxc_scc *scc;
-	struct crypto_alg alg;
-};
-
-static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
-			    struct crypto_async_request *req)
-{
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mxc_scc *scc = ctx->scc;
-	size_t len;
-	void __iomem *from;
-
-	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
-		from = scc->red_memory;
-	else
-		from = scc->black_memory;
-
-	dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
-		ctx->dst_nents * 8);
-	len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
-				   from, ctx->size, ctx->offset);
-	if (!len) {
-		dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
-		return -EINVAL;
-	}
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "red memory@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       scc->red_memory, ctx->size, 1);
-	print_hex_dump(KERN_ERR,
-		       "black memory@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       scc->black_memory, ctx->size, 1);
-#endif
-
-	ctx->offset += len;
-
-	if (ctx->offset < ablkreq->nbytes)
-		return -EINPROGRESS;
-
-	return 0;
-}
-
-static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
-				       struct mxc_scc_ctx *ctx)
-{
-	struct mxc_scc *scc = ctx->scc;
-	int nents;
-
-	nents = sg_nents_for_len(req->src, req->nbytes);
-	if (nents < 0) {
-		dev_err(scc->dev, "Invalid number of src SC");
-		return nents;
-	}
-	ctx->src_nents = nents;
-
-	nents = sg_nents_for_len(req->dst, req->nbytes);
-	if (nents < 0) {
-		dev_err(scc->dev, "Invalid number of dst SC");
-		return nents;
-	}
-	ctx->dst_nents = nents;
-
-	ctx->size = 0;
-	ctx->offset = 0;
-
-	return 0;
-}
-
-static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
-					   struct mxc_scc_ctx *ctx,
-					   int result)
-{
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mxc_scc *scc = ctx->scc;
-
-	scc->req = NULL;
-	scc->bytes_remaining = scc->memory_size_bytes;
-
-	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
-		memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
-		       scc->block_size_bytes);
-
-	req->complete(req, result);
-	scc->hw_busy = false;
-
-	return 0;
-}
-
-static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
-			     struct ablkcipher_request *req)
-{
-	u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
-	size_t len = min_t(size_t, req->nbytes - ctx->offset,
-			   ctx->scc->bytes_remaining);
-	unsigned int padding_byte_count = 0;
-	struct mxc_scc *scc = ctx->scc;
-	void __iomem *to;
-
-	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
-		to = scc->black_memory;
-	else
-		to = scc->red_memory;
-
-	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
-		memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
-		       scc->block_size_bytes);
-
-	len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
-				 to, len, ctx->offset);
-	if (!len) {
-		dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
-		return -EINVAL;
-	}
-
-	ctx->size = len;
-
-#ifdef DEBUG
-	dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
-	print_hex_dump(KERN_ERR,
-		       "init vector0@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
-		       1);
-	print_hex_dump(KERN_ERR,
-		       "red memory@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       scc->red_memory, ctx->size, 1);
-	print_hex_dump(KERN_ERR,
-		       "black memory@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       scc->black_memory, ctx->size, 1);
-#endif
-
-	scc->bytes_remaining -= len;
-
-	padding_byte_count = len % scc->block_size_bytes;
-
-	if (padding_byte_count) {
-		memcpy(padding_buffer, scc_block_padding, padding_byte_count);
-		memcpy(to + len, padding_buffer, padding_byte_count);
-		ctx->size += padding_byte_count;
-	}
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "data to encrypt@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       to, ctx->size, 1);
-#endif
-
-	return 0;
-}
-
-static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
-				    struct crypto_async_request *req)
-{
-	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
-	struct mxc_scc *scc = ctx->scc;
-	int err;
-
-	dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
-		ablkreq->nbytes, ablkreq->src, ablkreq->dst);
-
-	writel(0, scc->base + SCC_SCM_ERROR_STATUS);
-
-	err = mxc_scc_put_data(ctx, ablkreq);
-	if (err) {
-		mxc_scc_ablkcipher_req_complete(req, ctx, err);
-		return;
-	}
-
-	dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
-		readl(scc->base + SCC_SCM_RED_START),
-		readl(scc->base + SCC_SCM_BLACK_START));
-
-	/* clear interrupt control registers */
-	writel(SCC_SCM_INTR_CTRL_CLR_INTR,
-	       scc->base + SCC_SCM_INTR_CTRL);
-
-	writel((ctx->size / ctx->scc->block_size_bytes) - 1,
-	       scc->base + SCC_SCM_LENGTH);
-
-	dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
-		ctx->size / ctx->scc->block_size_bytes,
-		(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
-		scc->red_memory);
-
-	writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
-}
-
-static irqreturn_t mxc_scc_int(int irq, void *priv)
-{
-	struct crypto_async_request *req;
-	struct mxc_scc_ctx *ctx;
-	struct mxc_scc *scc = priv;
-	int status;
-	int ret;
-
-	status = readl(scc->base + SCC_SCM_STATUS);
-
-	/* clear interrupt control registers */
-	writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
-
-	if (status & SCC_SCM_STATUS_BUSY)
-		return IRQ_NONE;
-
-	req = scc->req;
-	if (req) {
-		ctx = crypto_tfm_ctx(req->tfm);
-		ret = mxc_scc_get_data(ctx, req);
-		if (ret != -EINPROGRESS)
-			mxc_scc_ablkcipher_req_complete(req, ctx, ret);
-		else
-			mxc_scc_ablkcipher_next(ctx, req);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int mxc_scc_cra_init(struct crypto_tfm *tfm)
-{
-	struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct mxc_scc_crypto_tmpl *algt;
-
-	algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
-
-	ctx->scc = algt->scc;
-	return 0;
-}
-
-static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
-{
-	struct crypto_async_request *req, *backlog;
-
-	if (ctx->scc->hw_busy)
-		return;
-
-	spin_lock_bh(&ctx->scc->lock);
-	backlog = crypto_get_backlog(&ctx->scc->queue);
-	req = crypto_dequeue_request(&ctx->scc->queue);
-	ctx->scc->req = req;
-	ctx->scc->hw_busy = true;
-	spin_unlock_bh(&ctx->scc->lock);
-
-	if (!req)
-		return;
-
-	if (backlog)
-		backlog->complete(backlog, -EINPROGRESS);
-
-	mxc_scc_ablkcipher_next(ctx, req);
-}
-
-static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
-			     struct crypto_async_request *req)
-{
-	int ret;
-
-	spin_lock_bh(&ctx->scc->lock);
-	ret = crypto_enqueue_request(&ctx->scc->queue, req);
-	spin_unlock_bh(&ctx->scc->lock);
-
-	if (ret != -EINPROGRESS)
-		return ret;
-
-	mxc_scc_dequeue_req_unlocked(ctx);
-
-	return -EINPROGRESS;
-}
-
-static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
-			   struct ablkcipher_request *req)
-{
-	int err;
-
-	err = mxc_scc_ablkcipher_req_init(req, ctx);
-	if (err)
-		return err;
-
-	return mxc_scc_queue_req(ctx, &req->base);
-}
-
-static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
-{
-	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
-	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
-	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-
-	return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
-{
-	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
-	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
-	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
-	return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
-{
-	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
-	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
-	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
-
-	return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
-{
-	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
-	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
-	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
-	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
-	return mxc_scc_des3_op(ctx, req);
-}
-
-static void mxc_scc_hw_init(struct mxc_scc *scc)
-{
-	int offset;
-
-	offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
-
-	/* Fill the RED_START register */
-	writel(offset, scc->base + SCC_SCM_RED_START);
-
-	/* Fill the BLACK_START register */
-	writel(offset, scc->base + SCC_SCM_BLACK_START);
-
-	scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
-			  SCC_NON_RESERVED_OFFSET;
-
-	scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
-			    SCC_NON_RESERVED_OFFSET;
-
-	scc->bytes_remaining = scc->memory_size_bytes;
-}
-
-static int mxc_scc_get_config(struct mxc_scc *scc)
-{
-	int config;
-
-	config = readl(scc->base + SCC_SCM_CFG);
-
-	scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
-
-	scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
-
-	scc->memory_size_bytes = (scc->block_size_bytes *
-				  scc->black_ram_size_blocks) -
-				  SCC_NON_RESERVED_OFFSET;
-
-	return 0;
-}
-
-static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
-{
-	enum mxc_scc_state state;
-	int status;
-
-	status = readl(scc->base + SCC_SMN_STATUS) &
-		       SCC_SMN_STATUS_STATE_MASK;
-
-	/* If in Health Check, try to bringup to secure state */
-	if (status & SCC_SMN_STATE_HEALTH_CHECK) {
-		/*
-		 * Write a simple algorithm to the Algorithm Sequence
-		 * Checker (ASC)
-		 */
-		writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
-		writel(0x5555, scc->base + SCC_SMN_SEQ_END);
-		writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
-
-		status = readl(scc->base + SCC_SMN_STATUS) &
-			       SCC_SMN_STATUS_STATE_MASK;
-	}
-
-	switch (status) {
-	case SCC_SMN_STATE_NON_SECURE:
-	case SCC_SMN_STATE_SECURE:
-		state = SCC_STATE_OK;
-		break;
-	case SCC_SMN_STATE_FAIL:
-		state = SCC_STATE_FAILED;
-		break;
-	default:
-		state = SCC_STATE_UNIMPLEMENTED;
-		break;
-	}
-
-	return state;
-}
-
-static struct mxc_scc_crypto_tmpl scc_ecb_des = {
-	.alg = {
-		.cra_name = "ecb(des3_ede)",
-		.cra_driver_name = "ecb-des3-scc",
-		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
-		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
-		.cra_alignmask = 0,
-		.cra_type = &crypto_ablkcipher_type,
-		.cra_module = THIS_MODULE,
-		.cra_init = mxc_scc_cra_init,
-		.cra_u.ablkcipher = {
-			.min_keysize = DES3_EDE_KEY_SIZE,
-			.max_keysize = DES3_EDE_KEY_SIZE,
-			.encrypt = mxc_scc_ecb_des_encrypt,
-			.decrypt = mxc_scc_ecb_des_decrypt,
-		}
-	}
-};
-
-static struct mxc_scc_crypto_tmpl scc_cbc_des = {
-	.alg = {
-		.cra_name = "cbc(des3_ede)",
-		.cra_driver_name = "cbc-des3-scc",
-		.cra_priority = 300,
-		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
-		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
-		.cra_alignmask = 0,
-		.cra_type = &crypto_ablkcipher_type,
-		.cra_module = THIS_MODULE,
-		.cra_init = mxc_scc_cra_init,
-		.cra_u.ablkcipher = {
-			.min_keysize = DES3_EDE_KEY_SIZE,
-			.max_keysize = DES3_EDE_KEY_SIZE,
-			.encrypt = mxc_scc_cbc_des_encrypt,
-			.decrypt = mxc_scc_cbc_des_decrypt,
-		}
-	}
-};
-
-static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
-	&scc_ecb_des,
-	&scc_cbc_des,
-};
-
-static int mxc_scc_crypto_register(struct mxc_scc *scc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
-		scc_crypto_algs[i]->scc = scc;
-		err = crypto_register_alg(&scc_crypto_algs[i]->alg);
-		if (err)
-			goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	while (--i >= 0)
-		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-
-	return err;
-}
-
-static void mxc_scc_crypto_unregister(void)
-{
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
-		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-}
-
-static int mxc_scc_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct resource *res;
-	struct mxc_scc *scc;
-	enum mxc_scc_state state;
-	int irq;
-	int ret;
-	int i;
-
-	scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
-	if (!scc)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	scc->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(scc->base))
-		return PTR_ERR(scc->base);
-
-	scc->clk = devm_clk_get(&pdev->dev, "ipg");
-	if (IS_ERR(scc->clk)) {
-		dev_err(dev, "Could not get ipg clock\n");
-		return PTR_ERR(scc->clk);
-	}
-
-	ret = clk_prepare_enable(scc->clk);
-	if (ret)
-		return ret;
-
-	/* clear error status register */
-	writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
-
-	/* clear interrupt control registers */
-	writel(SCC_SCM_INTR_CTRL_CLR_INTR |
-	       SCC_SCM_INTR_CTRL_MASK_INTR,
-	       scc->base + SCC_SCM_INTR_CTRL);
-
-	writel(SCC_SMN_COMMAND_CLR_INTR |
-	       SCC_SMN_COMMAND_EN_INTR,
-	       scc->base + SCC_SMN_COMMAND);
-
-	scc->dev = dev;
-	platform_set_drvdata(pdev, scc);
-
-	ret = mxc_scc_get_config(scc);
-	if (ret)
-		goto err_out;
-
-	state = mxc_scc_get_state(scc);
-
-	if (state != SCC_STATE_OK) {
-		dev_err(dev, "SCC in unusable state %d\n", state);
-		ret = -EINVAL;
-		goto err_out;
-	}
-
-	mxc_scc_hw_init(scc);
-
-	spin_lock_init(&scc->lock);
-	/* FIXME: calculate queue from RAM slots */
-	crypto_init_queue(&scc->queue, 50);
-
-	for (i = 0; i < 2; i++) {
-		irq = platform_get_irq(pdev, i);
-		if (irq < 0) {
-			dev_err(dev, "failed to get irq resource: %d\n", irq);
-			ret = irq;
-			goto err_out;
-		}
-
-		ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
-						IRQF_ONESHOT, dev_name(dev), scc);
-		if (ret)
-			goto err_out;
-	}
-
-	ret = mxc_scc_crypto_register(scc);
-	if (ret) {
-		dev_err(dev, "could not register algorithms");
-		goto err_out;
-	}
-
-	dev_info(dev, "registered successfully.\n");
-
-	return 0;
-
-err_out:
-	clk_disable_unprepare(scc->clk);
-
-	return ret;
-}
-
-static int mxc_scc_remove(struct platform_device *pdev)
-{
-	struct mxc_scc *scc = platform_get_drvdata(pdev);
-
-	mxc_scc_crypto_unregister();
-
-	clk_disable_unprepare(scc->clk);
-
-	return 0;
-}
-
-static const struct of_device_id mxc_scc_dt_ids[] = {
-	{ .compatible = "fsl,imx25-scc", .data = NULL, },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
-
-static struct platform_driver mxc_scc_driver = {
-	.probe	= mxc_scc_probe,
-	.remove	= mxc_scc_remove,
-	.driver	= {
-		.name		= "mxc-scc",
-		.of_match_table	= mxc_scc_dt_ids,
-	},
-};
-
-module_platform_driver(mxc_scc_driver);
-MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a2105cf33abb..b4429891e368 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -471,7 +471,7 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
-	return -EINPROGRESS;
+	return ret;
 }
 
 static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
@@ -700,11 +700,7 @@ static int dcp_chan_thread_sha(void *data)
 
 	struct crypto_async_request *backlog;
 	struct crypto_async_request *arq;
-
-	struct dcp_sha_req_ctx *rctx;
-
-	struct ahash_request *req;
-	int ret, fini;
+	int ret;
 
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -725,11 +721,7 @@ static int dcp_chan_thread_sha(void *data)
 			backlog->complete(backlog, -EINPROGRESS);
 
 		if (arq) {
-			req = ahash_request_cast(arq);
-			rctx = ahash_request_ctx(req);
-
 			ret = dcp_sha_req_to_buf(arq);
-			fini = rctx->fini;
 			arq->complete(arq, ret);
 		}
 	}
@@ -797,7 +789,7 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
 
-	return -EINPROGRESS;
+	return ret;
 }
 
 static int dcp_sha_update(struct ahash_request *req)
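The two mxs-dcp return-value hunks matter for backlogged requests: crypto_enqueue_request() returns -EBUSY when it backlogs a MAY_BACKLOG request and -EINPROGRESS when it queues normally, so hard-coding -EINPROGRESS hid the -EBUSY case from callers. The corrected shape looks like this sketch (the queue field name is assumed; only sdcp->thread appears in the hunk context):

ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);

wake_up_process(sdcp->thread[actx->chan]);

/* -EINPROGRESS (queued) or -EBUSY (backlogged); let the caller see it. */
return ret;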
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 9450c41211b2..0d5d3d8eb680 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -469,8 +469,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
 		return err;
 
 	shash->tfm = child_shash;
-	shash->flags = crypto_ahash_get_flags(tfm) &
-		CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	bs = crypto_shash_blocksize(child_shash);
 	ds = crypto_shash_digestsize(child_shash);
@@ -788,13 +786,18 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
 	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+	u32 flags;
+	int err;
+
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
 
 	ctx->enc_type = n2alg->enc_type;
 
-	if (keylen != (3 * DES_KEY_SIZE)) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
 	ctx->key_len = keylen;
 	memcpy(ctx->key.des3, key, keylen);
 	return 0;
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 66869976cfa2..57932848361b 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -296,7 +296,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
 	struct nx842_workmem *workmem;
 	struct nx842_scatterlist slin, slout;
 	struct nx_csbcpb *csbcpb;
-	int ret = 0, max_sync_size;
+	int ret = 0;
 	unsigned long inbuf, outbuf;
 	struct vio_pfo_op op = {
 		.done = NULL,
@@ -319,7 +319,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
 		rcu_read_unlock();
 		return -ENODEV;
 	}
-	max_sync_size = local_devdata->max_sync_size;
 	dev = local_devdata->dev;
 
 	/* Init scatterlist */
@@ -427,7 +426,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
 	struct nx842_workmem *workmem;
 	struct nx842_scatterlist slin, slout;
 	struct nx_csbcpb *csbcpb;
-	int ret = 0, max_sync_size;
+	int ret = 0;
 	unsigned long inbuf, outbuf;
 	struct vio_pfo_op op = {
 		.done = NULL,
@@ -451,7 +450,6 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
 		rcu_read_unlock();
 		return -ENODEV;
 	}
-	max_sync_size = local_devdata->max_sync_size;
 	dev = local_devdata->dev;
 
 	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..f06565df2a12 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -353,7 +353,7 @@ static int decompress(struct nx842_crypto_ctx *ctx,
 	unsigned int adj_slen = slen;
 	u8 *src = p->in, *dst = p->out;
 	u16 padding = be16_to_cpu(g->padding);
-	int ret, spadding = 0, dpadding = 0;
+	int ret, spadding = 0;
 	ktime_t timeout;
 
 	if (!slen || !required_len)
@@ -413,7 +413,6 @@ usesw:
 		spadding = 0;
 		dst = p->out;
 		dlen = p->oremain;
-		dpadding = 0;
 		if (dlen < required_len) { /* have ignore bytes */
 			dst = ctx->dbounce;
 			dlen = BOUNCE_BUFFER_SIZE;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index ad3358e74f5c..8f5820b78a83 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -105,8 +105,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 	if (rc)
 		goto out;
 	atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -134,8 +133,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 	if (rc)
 		goto out;
 	atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -279,8 +277,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
 			goto out;
 		}
 
-		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 		if (rc)
 			goto out;
 
@@ -361,8 +358,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
 		goto out;
 	}
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 	if (rc)
 		goto out;
 
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index a6764af83c6d..e06f0431dee5 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -162,8 +162,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}
 
-		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 		if (rc)
 			goto out;
 
@@ -243,8 +242,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 		goto out;
 	}
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 	if (rc)
 		goto out;
 
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 92956bc6e45e..0293b17903d0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -166,8 +166,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}
 
-		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 		if (rc)
 			goto out;
 
@@ -249,8 +248,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		goto out;
 	}
 
-	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 	if (rc)
 		goto out;
 
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 1ba2633e90d6..3d82d18ff810 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -656,9 +656,6 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 
-	if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
-		return -EINVAL;
-
 	pr_debug("enter, keylen: %d\n", keylen);
 
 	/* Do we need to test against weak key? */
@@ -678,6 +675,28 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	return 0;
 }
 
+static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			   unsigned int keylen)
+{
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 flags;
+	int err;
+
+	pr_debug("enter, keylen: %d\n", keylen);
+
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+}
+
 static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
 {
 	return omap_des_crypt(req, FLAGS_ENCRYPT);
@@ -788,7 +807,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
 	.cra_u.ablkcipher = {
 		.min_keysize	= 3*DES_KEY_SIZE,
 		.max_keysize	= 3*DES_KEY_SIZE,
-		.setkey		= omap_des_setkey,
+		.setkey		= omap_des3_setkey,
 		.encrypt	= omap_des_ecb_encrypt,
 		.decrypt	= omap_des_ecb_decrypt,
 	}
@@ -811,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
 		.min_keysize	= 3*DES_KEY_SIZE,
 		.max_keysize	= 3*DES_KEY_SIZE,
 		.ivsize		= DES_BLOCK_SIZE,
-		.setkey		= omap_des_setkey,
+		.setkey		= omap_des3_setkey,
 		.encrypt	= omap_des_cbc_encrypt,
 		.decrypt	= omap_des_cbc_decrypt,
 	}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 0641185bd82f..51b20abac464 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1055,7 +1055,6 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
 	SHASH_DESC_ON_STACK(shash, bctx->shash);
 
 	shash->tfm = bctx->shash;
-	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
 
 	return crypto_shash_init(shash) ?:
 	       crypto_shash_update(shash, bctx->opad, bs) ?:
@@ -1226,7 +1225,6 @@ static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
 	SHASH_DESC_ON_STACK(shash, tfm);
 
 	shash->tfm = tfm;
-	shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	return crypto_shash_digest(shash, data, len, out);
 }
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 21e5cae0a1e0..e641481a3cd9 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -39,7 +39,6 @@ static int padlock_sha_init(struct shash_desc *desc)
 	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
 	dctx->fallback.tfm = ctx->fallback;
-	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 	return crypto_shash_init(&dctx->fallback);
 }
 
@@ -48,7 +47,6 @@ static int padlock_sha_update(struct shash_desc *desc,
 {
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 
-	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 	return crypto_shash_update(&dctx->fallback, data, length);
 }
 
@@ -65,7 +63,6 @@ static int padlock_sha_import(struct shash_desc *desc, const void *in)
 	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
 	dctx->fallback.tfm = ctx->fallback;
-	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 	return crypto_shash_import(&dctx->fallback, in);
 }
 
@@ -91,7 +88,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	unsigned int leftover;
 	int err;
 
-	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 	err = crypto_shash_export(&dctx->fallback, &state);
 	if (err)
 		goto out;
@@ -153,7 +149,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	unsigned int leftover;
 	int err;
 
-	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 	err = crypto_shash_export(&dctx->fallback, &state);
 	if (err)
 		goto out;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 1b3acdeffede..05b89e703903 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -753,11 +753,6 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 tmp[DES_EXPKEY_WORDS];
 
-	if (len > DES3_EDE_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
-
 	if (unlikely(!des_ekey(tmp, key)) &&
 	    (crypto_ablkcipher_get_flags(cipher) &
 	     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
@@ -772,6 +767,30 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 }
 
 /*
+ * Set the 3DES key for a block cipher transform. This also rejects keys that
+ * degenerate into single DES when the transform has requested weak-key checks.
+ */
+static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+			     unsigned int len)
+{
+	struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 flags;
+	int err;
+
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
+
+	memcpy(ctx->key, key, len);
+	ctx->key_len = len;
+
+	return 0;
+}
+
+/*
  * Set the key for an AES block cipher. Some key lengths are not supported in
  * hardware so this must also check whether a fallback is needed.
  */
@@ -1196,7 +1215,7 @@ static const struct dev_pm_ops spacc_pm_ops = {
 
 static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
 {
-	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+	return dev ? dev_get_drvdata(dev) : NULL;
 }
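(No behavior change here: platform_get_drvdata() is itself a thin wrapper over the generic device field —

static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

— so calling dev_get_drvdata() directly just drops the needless detour through to_platform_device().)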
 
 static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
@@ -1353,7 +1372,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
 			.cra_type = &crypto_ablkcipher_type,
 			.cra_module = THIS_MODULE,
 			.cra_ablkcipher = {
-				.setkey = spacc_des_setkey,
+				.setkey = spacc_des3_setkey,
 				.encrypt = spacc_ablk_encrypt,
 				.decrypt = spacc_ablk_decrypt,
 				.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1380,7 +1399,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
 			.cra_type = &crypto_ablkcipher_type,
 			.cra_module = THIS_MODULE,
 			.cra_ablkcipher = {
-				.setkey = spacc_des_setkey,
+				.setkey = spacc_des3_setkey,
 				.encrypt = spacc_ablk_encrypt,
 				.decrypt = spacc_ablk_decrypt,
 				.min_keysize = DES3_EDE_KEY_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 975c75198f56..c8d401646902 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -164,7 +164,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	memset(ctx->ipad, 0, block_size);
 	memset(ctx->opad, 0, block_size);
 	shash->tfm = ctx->hash_tfm;
-	shash->flags = 0x0;
 
 	if (auth_keylen > block_size) {
 		int ret = crypto_shash_digest(shash, auth_key,
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index c9f324730d71..692a7aaee749 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1300,8 +1300,6 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
 static struct akcipher_alg rsa = {
 	.encrypt = qat_rsa_enc,
 	.decrypt = qat_rsa_dec,
-	.sign = qat_rsa_dec,
-	.verify = qat_rsa_enc,
 	.set_pub_key = qat_rsa_setpubkey,
 	.set_priv_key = qat_rsa_setprivkey,
 	.max_size = qat_rsa_max_size,
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 154b6baa124e..8d3493855a70 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -198,6 +198,25 @@ weakkey:
 	return -EINVAL;
 }
 
+static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+			   unsigned int keylen)
+{
+	struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+	u32 flags;
+	int err;
+
+	flags = crypto_ablkcipher_get_flags(ablk);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(ablk, flags);
+		return err;
+	}
+
+	ctx->enc_keylen = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	return 0;
+}
+
 static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
 {
 	struct crypto_tfm *tfm =
@@ -363,7 +382,8 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
 	alg->cra_ablkcipher.ivsize = def->ivsize;
 	alg->cra_ablkcipher.min_keysize = def->min_keysize;
 	alg->cra_ablkcipher.max_keysize = def->max_keysize;
-	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+	alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
+				     qce_des3_setkey : qce_ablkcipher_setkey;
 	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
 	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
 
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 02dac6ae7e53..313759521a0f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -46,24 +46,36 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
 	return 0;
 }
 
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
-			  const u8 *key, unsigned int keylen)
+static int rk_des_setkey(struct crypto_ablkcipher *cipher,
+			 const u8 *key, unsigned int keylen)
 {
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 tmp[DES_EXPKEY_WORDS];
 
-	if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	if (!des_ekey(tmp, key) &&
+	    (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
 
-	if (keylen == DES_KEY_SIZE) {
-		if (!des_ekey(tmp, key) &&
-		    (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-			return -EINVAL;
-		}
+	ctx->keylen = keylen;
+	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+	return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
+			  const u8 *key, unsigned int keylen)
+{
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	u32 flags;
+	int err;
+
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
 	}
 
 	ctx->keylen = keylen;
@@ -250,9 +262,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
 	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
 		dev->sg_src->offset + dev->sg_src->length - ivsize;
 
-	/* store the iv that need to be updated in chain mode */
-	if (ctx->mode & RK_CRYPTO_DEC)
+	/* Store the IV that needs to be updated in chain mode, and
+	 * update the IV buffer to contain the next IV for decryption mode.
+	 */
+	if (ctx->mode & RK_CRYPTO_DEC) {
 		memcpy(ctx->iv, src_last_blk, ivsize);
+		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
+				   ivsize, dev->total - ivsize);
+	}
 
 	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
 	if (!err)
@@ -288,13 +305,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
 	struct ablkcipher_request *req =
 		ablkcipher_request_cast(dev->async_req);
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
 
-	if (ivsize == DES_BLOCK_SIZE)
-		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
-			      ivsize);
-	else if (ivsize == AES_BLOCK_SIZE)
-		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+	/* Update the IV buffer to contain the next IV for encryption mode. */
+	if (!(ctx->mode & RK_CRYPTO_DEC)) {
+		if (dev->aligned) {
+			memcpy(req->info, sg_virt(dev->sg_dst) +
+				dev->sg_dst->length - ivsize, ivsize);
+		} else {
+			memcpy(req->info, dev->addr_vir +
+				dev->count - ivsize, ivsize);
+		}
+	}
 }
 
 static void rk_update_iv(struct rk_crypto_info *dev)
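(Background for the two IV hunks in this file: on completion of a CBC request, req->info must hold the IV for the next chained request. For encryption that is the last ciphertext block written to the destination; for decryption it is the last ciphertext block of the source, which has to be captured up front because an in-place operation overwrites it. Schematically, with flat buffers as placeholders:

/* decrypt: capture before the operation (src may equal dst) */
memcpy(next_iv, src + total - ivsize, ivsize);
/* encrypt: copy after the operation completes */
memcpy(next_iv, dst + total - ivsize, ivsize);
)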
@@ -457,7 +480,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
 		.cra_u.ablkcipher	= {
 			.min_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
-			.setkey		= rk_tdes_setkey,
+			.setkey		= rk_des_setkey,
 			.encrypt	= rk_des_ecb_encrypt,
 			.decrypt	= rk_des_ecb_decrypt,
 		}
@@ -483,7 +506,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
 			.min_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
 			.ivsize		= DES_BLOCK_SIZE,
-			.setkey		= rk_tdes_setkey,
+			.setkey		= rk_des_setkey,
 			.encrypt	= rk_des_cbc_encrypt,
 			.decrypt	= rk_des_cbc_decrypt,
 		}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1afdcb81d8ed..9ef25230c199 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1534,7 +1534,6 @@ static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
 	SHASH_DESC_ON_STACK(shash, tfm);
 
 	shash->tfm = tfm;
-	shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	return crypto_shash_digest(shash, data, len, out);
 }
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8c32a3059b4a..fd11162a915e 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -354,7 +354,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 {
 	u8 state;
 
-	if (!IS_ENABLED(DEBUG))
+	if (!__is_defined(DEBUG))
 		return;
 
 	state = SAHARA_STATUS_GET_STATE(status);
@@ -406,7 +406,7 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
 {
 	int i;
 
-	if (!IS_ENABLED(DEBUG))
+	if (!__is_defined(DEBUG))
 		return;
 
 	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
@@ -427,7 +427,7 @@ static void sahara_dump_links(struct sahara_dev *dev)
 {
 	int i;
 
-	if (!IS_ENABLED(DEBUG))
+	if (!__is_defined(DEBUG))
 		return;
 
 	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
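(IS_ENABLED() is meant for CONFIG_* Kconfig symbols — it also probes a trailing _MODULE variant — whereas DEBUG is a plain macro injected via -DDEBUG, which the compiler treats as -DDEBUG=1. __is_defined() tests exactly that, via the placeholder trick in include/linux/kconfig.h:

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

With DEBUG defined to 1 the paste yields "0," and the second-argument selector returns 1; with DEBUG undefined the paste yields junk and the selector returns 0.)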
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 63aa78c0b12b..4491e2197d9f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
 	depends on ARCH_STM32
 	select CRYPTO_HASH
 	select CRYPTO_ENGINE
+	select CRYPTO_DES
 	help
           This enables support for the CRYP (AES/DES/TDES) hw accelerator which
 	  can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 23b0b7bd64c7..cddcc97875b2 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -137,7 +137,6 @@ struct stm32_cryp {
 
 	struct crypto_engine    *engine;
 
-	struct mutex            lock; /* protects req / areq */
 	struct ablkcipher_request *req;
 	struct aead_request     *areq;
 
@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
 	}
 }
 
+static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
+{
+	struct ablkcipher_request *req = cryp->req;
+	u32 *tmp = req->info;
+
+	if (!tmp)
+		return;
+
+	*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+	*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+
+	if (is_aes(cryp)) {
+		*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+		*tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+	}
+}
+
 static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
 {
 	unsigned int i;
@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
 		/* Phase 4 : output tag */
 		err = stm32_cryp_read_auth_tag(cryp);
 
+	if (!err && !(is_gcm(cryp) || is_ccm(cryp)))
+		stm32_cryp_get_iv(cryp);
+
 	if (cryp->sgs_copied) {
 		void *buf_in, *buf_out;
 		int pages, len;
@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
 	pm_runtime_mark_last_busy(cryp->dev);
 	pm_runtime_put_autosuspend(cryp->dev);
 
-	if (is_gcm(cryp) || is_ccm(cryp)) {
+	if (is_gcm(cryp) || is_ccm(cryp))
 		crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
-		cryp->areq = NULL;
-	} else {
+	else
 		crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
 						   err);
-		cryp->req = NULL;
-	}
 
 	memset(cryp->ctx->key, 0, cryp->ctx->keylen);
-
-	mutex_unlock(&cryp->lock);
 }
 
 static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -753,19 +767,35 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 				 unsigned int keylen)
 {
+	u32 tmp[DES_EXPKEY_WORDS];
+
 	if (keylen != DES_KEY_SIZE)
 		return -EINVAL;
-	else
-		return stm32_cryp_setkey(tfm, key, keylen);
+
+	if ((crypto_ablkcipher_get_flags(tfm) &
+	     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
+	    unlikely(!des_ekey(tmp, key))) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return stm32_cryp_setkey(tfm, key, keylen);
 }
 
 static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 				  unsigned int keylen)
 {
-	if (keylen != (3 * DES_KEY_SIZE))
-		return -EINVAL;
-	else
-		return stm32_cryp_setkey(tfm, key, keylen);
+	u32 flags;
+	int err;
+
+	flags = crypto_ablkcipher_get_flags(tfm);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(tfm, flags);
+		return err;
+	}
+
+	return stm32_cryp_setkey(tfm, key, keylen);
 }
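(The single-DES handling above — here, in the rk3288 hunk earlier and in the talitos hunk below — relies on des_ekey() returning 0 when key expansion hits one of the known weak or semi-weak DES keys; rejection only happens when the user opted in. From the caller's side the contract is roughly, with tfm as a placeholder handle:

crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_ablkcipher_setkey(tfm, key, DES_KEY_SIZE);
if (err == -EINVAL &&
    (crypto_ablkcipher_get_flags(tfm) & CRYPTO_TFM_RES_WEAK_KEY))
	pr_info("key rejected as weak\n");
)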
 
 static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -917,8 +947,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 	if (!cryp)
 		return -ENODEV;
 
-	mutex_lock(&cryp->lock);
-
 	rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
 	rctx->mode &= FLG_MODE_MASK;
 
@@ -930,6 +958,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 
 	if (req) {
 		cryp->req = req;
+		cryp->areq = NULL;
 		cryp->total_in = req->nbytes;
 		cryp->total_out = cryp->total_in;
 	} else {
@@ -955,6 +984,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 		 *          <---------- total_out ----------------->
 		 */
 		cryp->areq = areq;
+		cryp->req = NULL;
 		cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
 		cryp->total_in = areq->assoclen + areq->cryptlen;
 		if (is_encrypt(cryp))
@@ -976,19 +1006,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 	if (cryp->in_sg_len < 0) {
 		dev_err(cryp->dev, "Cannot get in_sg_len\n");
 		ret = cryp->in_sg_len;
-		goto out;
+		return ret;
 	}
 
 	cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
 	if (cryp->out_sg_len < 0) {
 		dev_err(cryp->dev, "Cannot get out_sg_len\n");
 		ret = cryp->out_sg_len;
-		goto out;
+		return ret;
 	}
 
 	ret = stm32_cryp_copy_sgs(cryp);
 	if (ret)
-		goto out;
+		return ret;
 
 	scatterwalk_start(&cryp->in_walk, cryp->in_sg);
 	scatterwalk_start(&cryp->out_walk, cryp->out_sg);
@@ -1000,10 +1030,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
 	}
 
 	ret = stm32_cryp_hw_init(cryp);
-out:
-	if (ret)
-		mutex_unlock(&cryp->lock);
-
 	return ret;
 }
 
@@ -1943,8 +1969,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
 
 	cryp->dev = dev;
 
-	mutex_init(&cryp->lock);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	cryp->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(cryp->regs))
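(Dropping the mutex is safe because both request types are funneled through one crypto_engine, whose worker dispatches a single request at a time, so the prepare/finish callbacks never overlap; all that remains is to NULL the inactive member of the req/areq pair, which the prepare hunks above now do. The serialized lifecycle, sketched:

/* submission, possibly from many contexts: */
err = crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
/* engine worker: prepare_request + do_one_request, one at a time */
/* completion, back on the worker: */
crypto_finalize_ablkcipher_request(cryp->engine, req, err);
)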
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4a6cc8a3045d..bfc49e67124b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -181,8 +181,6 @@ struct stm32_hash_dev {
 	u32			dma_mode;
 	u32			dma_maxburst;
 
-	spinlock_t		lock; /* lock to protect queue */
-
 	struct ahash_request	*req;
 	struct crypto_engine	*engine;
 
@@ -977,7 +975,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
 
 	pm_runtime_get_sync(hdev->dev);
 
-	while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+	while (stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)
 		cpu_relax();
 
 	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
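(The export path must not snapshot the HASH_CSR context registers while the core is still processing a block; DATA_INPUT_READY only says the input FIFO can accept more data, so waiting on it could capture a half-updated state, while the BUSY flag is the actual idle indicator. A bounded variant of the same wait — io_base and the timeout values are assumptions — could use the iopoll helper:

u32 sr;

err = readl_poll_timeout(hdev->io_base + HASH_SR, sr,
			 !(sr & HASH_SR_BUSY), 10, 10000);
)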
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 54fd714d53ca..b060a0810934 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -41,11 +41,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
 	if (!areq->cryptlen)
 		return 0;
 
-	if (!areq->iv) {
-		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
-		return -EINVAL;
-	}
-
 	if (!areq->src || !areq->dst) {
 		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
 		return -EINVAL;
@@ -134,6 +129,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
 	struct scatterlist *out_sg = areq->dst;
 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
 	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct sun4i_ss_alg_template *algt;
 	u32 mode = ctx->mode;
 	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
 	u32 rx_cnt = SS_RX_DEFAULT;
@@ -153,20 +150,20 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
 	unsigned int obo = 0;	/* offset in bufo*/
 	unsigned int obl = 0;	/* length of data in bufo */
 	unsigned long flags;
+	bool need_fallback = false;
 
 	if (!areq->cryptlen)
 		return 0;
 
-	if (!areq->iv) {
-		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
-		return -EINVAL;
-	}
-
 	if (!areq->src || !areq->dst) {
 		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
 		return -EINVAL;
 	}
 
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
+		need_fallback = true;
+
 	/*
 	 * if we have only SGs with size multiple of 4,
 	 * we can use the SS optimized function
@@ -182,9 +179,24 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
 		out_sg = sg_next(out_sg);
 	}
 
-	if (no_chunk == 1)
+	if (no_chunk == 1 && !need_fallback)
 		return sun4i_ss_opti_poll(areq);
 
+	if (need_fallback) {
+		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+		skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+		skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+					      NULL);
+		skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+					   areq->cryptlen, areq->iv);
+		if (ctx->mode & SS_DECRYPTION)
+			err = crypto_skcipher_decrypt(subreq);
+		else
+			err = crypto_skcipher_encrypt(subreq);
+		skcipher_request_zero(subreq);
+		return err;
+	}
+
 	spin_lock_irqsave(&ss->slock, flags);
 
 	for (i = 0; i < op->keylen; i += 4)
@@ -458,6 +470,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
 {
 	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
 	struct sun4i_ss_alg_template *algt;
+	const char *name = crypto_tfm_alg_name(tfm);
 
 	memset(op, 0, sizeof(struct sun4i_tfm_ctx));
 
@@ -468,9 +481,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
 	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
 				    sizeof(struct sun4i_cipher_req_ctx));
 
+	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(op->fallback_tfm)) {
+		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+			name, PTR_ERR(op->fallback_tfm));
+		return PTR_ERR(op->fallback_tfm);
+	}
+
 	return 0;
 }
 
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+
+	crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
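(crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK) asks for another implementation of the same cra_name — normally the generic software one — while the mask excludes any candidate that itself sets CRYPTO_ALG_NEED_FALLBACK, which would recurse. Type/mask matching means roughly:

/* candidate accepted iff ((cra_flags ^ type) & mask) == 0, so here
 * type = 0, mask = CRYPTO_ALG_NEED_FALLBACK => flag must be clear */
fallback = crypto_alloc_sync_skcipher("cbc(des3_ede)", 0,
				      CRYPTO_ALG_NEED_FALLBACK);

The setkey hunks below then mirror every key into the fallback so it is ready whenever a request has to be bounced to software.)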
 /* check and set the AES key, prepare the mode to be used */
 int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			unsigned int keylen)
@@ -495,7 +521,11 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	}
 	op->keylen = keylen;
 	memcpy(op->key, key, keylen);
-	return 0;
+
+	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
 }
 
 /* check and set the DES key, prepare the mode to be used */
@@ -525,7 +555,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 
 	op->keylen = keylen;
 	memcpy(op->key, key, keylen);
-	return 0;
+
+	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
 }
 
 /* check and set the 3DES key, prepare the mode to be used */
@@ -533,14 +567,18 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			 unsigned int keylen)
 {
 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
-	struct sun4i_ss_ctx *ss = op->ss;
+	int err;
+
+	err = des3_verify_key(tfm, key);
+	if (unlikely(err))
+		return err;
 
-	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
-		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
-		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
 	op->keylen = keylen;
 	memcpy(op->key, key, keylen);
-	return 0;
+
+	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
 }
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 89adf9e0fed2..05b3d3c32f6d 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -92,11 +92,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 			.cra_driver_name = "cbc-aes-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
 			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
 			.cra_init = sun4i_ss_cipher_init,
+			.cra_exit = sun4i_ss_cipher_exit,
 		}
 	}
 },
@@ -107,17 +108,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 		.decrypt        = sun4i_ss_ecb_aes_decrypt,
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
 		.base = {
 			.cra_name = "ecb(aes)",
 			.cra_driver_name = "ecb-aes-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
 			.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
 			.cra_init = sun4i_ss_cipher_init,
+			.cra_exit = sun4i_ss_cipher_exit,
 		}
 	}
 },
@@ -134,11 +135,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 			.cra_driver_name = "cbc-des-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
 			.cra_init = sun4i_ss_cipher_init,
+			.cra_exit = sun4i_ss_cipher_exit,
 		}
 	}
 },
@@ -154,11 +156,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 			.cra_driver_name = "ecb-des-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
 			.cra_init = sun4i_ss_cipher_init,
+			.cra_exit = sun4i_ss_cipher_exit,
 		}
 	}
 },
@@ -175,11 +178,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 			.cra_driver_name = "cbc-des3-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
 			.cra_init = sun4i_ss_cipher_init,
+			.cra_exit = sun4i_ss_cipher_exit,
 		}
 	}
 },
@@ -190,16 +194,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 		.decrypt        = sun4i_ss_ecb_des3_decrypt,
 		.min_keysize    = DES3_EDE_KEY_SIZE,
 		.max_keysize    = DES3_EDE_KEY_SIZE,
-		.ivsize         = DES3_EDE_BLOCK_SIZE,
 		.base = {
 			.cra_name = "ecb(des3_ede)",
 			.cra_driver_name = "ecb-des3-sun4i-ss",
 			.cra_priority = 300,
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
 			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
 			.cra_module = THIS_MODULE,
 			.cra_alignmask = 3,
 			.cra_init = sun4i_ss_cipher_init,
+			.cra_exit = sun4i_ss_cipher_exit,
 		}
 	}
 },
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index a4b5ff2b72f8..f6936bb3b7be 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
 		}
 	} else {
 		/* Since we have the flag final, we can go up to modulo 4 */
-		end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+		if (areq->nbytes < 4)
+			end = 0;
+		else
+			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
 	}
 
 	/* TODO if SGlen % 4 and !op->len then DMA */
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index f3ac90692ac6..8c4ec9e93565 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -161,6 +161,7 @@ struct sun4i_tfm_ctx {
 	u32 keylen;
 	u32 keymode;
 	struct sun4i_ss_ctx *ss;
+	struct crypto_sync_skcipher *fallback_tfm;
 };
 
 struct sun4i_cipher_req_ctx {
@@ -203,6 +204,7 @@ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
 int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);
 
 int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm);
 int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			unsigned int keylen);
 int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index de78b54bcfb1..1d429fc073d1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -913,6 +913,54 @@ badkey:
 	return -EINVAL;
 }
 
+static int aead_des3_setkey(struct crypto_aead *authenc,
+			    const u8 *key, unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	struct device *dev = ctx->dev;
+	struct crypto_authenc_keys keys;
+	u32 flags;
+	int err;
+
+	err = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (unlikely(err))
+		goto badkey;
+
+	err = -EINVAL;
+	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
+		goto badkey;
+
+	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+		goto badkey;
+
+	flags = crypto_aead_get_flags(authenc);
+	err = __des3_verify_key(&flags, keys.enckey);
+	if (unlikely(err)) {
+		crypto_aead_set_flags(authenc, flags);
+		goto out;
+	}
+
+	if (ctx->keylen)
+		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+	memcpy(ctx->key, keys.authkey, keys.authkeylen);
+	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
+
+	ctx->keylen = keys.authkeylen + keys.enckeylen;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;
+	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+				      DMA_TO_DEVICE);
+
+out:
+	memzero_explicit(&keys, sizeof(keys));
+	return err;
+
+badkey:
+	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	goto out;
+}
+
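(crypto_authenc_extractkeys() parses the rtattr-framed authenc key blob — a CRYPTO_AUTHENC_KEYA_PARAM attribute carrying enckeylen, followed by the auth key and then the enc key — and fills in the split view declared in include/crypto/authenc.h:

struct crypto_authenc_keys {
	const u8 *authkey;
	const u8 *enckey;

	unsigned int authkeylen;
	unsigned int enckeylen;
};

so the function above can length-check and verify the 3DES half without duplicating the parser.)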
 /*
  * talitos_edesc - s/w-extended descriptor
  * @src_nents: number of segments in input scatterlist
@@ -1527,12 +1575,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 {
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 	struct device *dev = ctx->dev;
-	u32 tmp[DES_EXPKEY_WORDS];
 
-	if (keylen > TALITOS_MAX_KEY_SIZE) {
-		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-		return -EINVAL;
-	}
+	if (ctx->keylen)
+		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+	memcpy(&ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+				 const u8 *key, unsigned int keylen)
+{
+	u32 tmp[DES_EXPKEY_WORDS];
 
 	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
 		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
@@ -1541,15 +1599,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 		return -EINVAL;
 	}
 
-	if (ctx->keylen)
-		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+	return ablkcipher_setkey(cipher, key, keylen);
+}
 
-	memcpy(&ctx->key, key, keylen);
-	ctx->keylen = keylen;
+static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+				  const u8 *key, unsigned int keylen)
+{
+	u32 flags;
+	int err;
 
-	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
+	}
 
-	return 0;
+	return ablkcipher_setkey(cipher, key, keylen);
 }
 
 static void common_nonsnoop_unmap(struct device *dev,
@@ -2313,6 +2379,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2336,6 +2403,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA1_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
 				     DESC_HDR_SEL0_DEU |
@@ -2399,6 +2467,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2422,6 +2491,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA224_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
 				     DESC_HDR_SEL0_DEU |
@@ -2485,6 +2555,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2508,6 +2579,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA256_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
 				     DESC_HDR_SEL0_DEU |
@@ -2550,6 +2622,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA384_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2592,6 +2665,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = SHA512_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2654,6 +2728,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2676,6 +2751,7 @@ static struct talitos_alg_template driver_algs[] = {
 			},
 			.ivsize = DES3_EDE_BLOCK_SIZE,
 			.maxauthsize = MD5_DIGEST_SIZE,
+			.setkey = aead_des3_setkey,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
 				     DESC_HDR_SEL0_DEU |
@@ -2748,6 +2824,7 @@ static struct talitos_alg_template driver_algs[] = {
 				.min_keysize = DES_KEY_SIZE,
 				.max_keysize = DES_KEY_SIZE,
 				.ivsize = DES_BLOCK_SIZE,
+				.setkey = ablkcipher_des_setkey,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2764,6 +2841,7 @@ static struct talitos_alg_template driver_algs[] = {
 				.min_keysize = DES_KEY_SIZE,
 				.max_keysize = DES_KEY_SIZE,
 				.ivsize = DES_BLOCK_SIZE,
+				.setkey = ablkcipher_des_setkey,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2781,6 +2859,7 @@ static struct talitos_alg_template driver_algs[] = {
 				.min_keysize = DES3_EDE_KEY_SIZE,
 				.max_keysize = DES3_EDE_KEY_SIZE,
 				.ivsize = DES3_EDE_BLOCK_SIZE,
+				.setkey = ablkcipher_des3_setkey,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2798,6 +2877,7 @@ static struct talitos_alg_template driver_algs[] = {
 				.min_keysize = DES3_EDE_KEY_SIZE,
 				.max_keysize = DES3_EDE_KEY_SIZE,
 				.ivsize = DES3_EDE_BLOCK_SIZE,
+				.setkey = ablkcipher_des3_setkey,
 			}
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3144,7 +3224,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		alg->cra_init = talitos_cra_init;
 		alg->cra_exit = talitos_cra_exit;
 		alg->cra_type = &crypto_ablkcipher_type;
-		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
+		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
+					     ablkcipher_setkey;
 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
 		break;
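(The a ?: b form is the GNU "Elvis" extension: it evaluates to a when a is non-zero, without evaluating a twice. Long-hand equivalent:

if (!alg->cra_ablkcipher.setkey)
	alg->cra_ablkcipher.setkey = ablkcipher_setkey;

so the per-template .setkey overrides added to driver_algs[] above win, and every other template keeps the generic handler.)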
@@ -3152,7 +3233,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		alg = &t_alg->algt.alg.aead.base;
 		alg->cra_exit = talitos_cra_exit;
 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
-		t_alg->algt.alg.aead.setkey = aead_setkey;
+		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
+					      aead_setkey;
 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index b497ae3dde07..dc8fff29c4b3 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -3,11 +3,7 @@
 # * Author: shujuan.chen@stericsson.com for ST-Ericsson.
 # * License terms: GNU General Public License (GPL) version 2  */
 
-ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_cryp_core.o := -DDEBUG
-CFLAGS_cryp.o := -DDEBUG
-CFLAGS_cryp_irq.o := -DDEBUG
-endif
+ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG
 
 obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
 ux500_cryp-objs :=  cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 3235611928f2..7a93cba0877f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1019,37 +1019,16 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 				  const u8 *key, unsigned int keylen)
 {
 	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-	u32 *flags = &cipher->base.crt_flags;
-	const u32 *K = (const u32 *)key;
-	u32 tmp[DES3_EDE_EXPKEY_WORDS];
-	int i, ret;
+	u32 flags;
+	int err;
 
 	pr_debug(DEV_DBG_NAME " [%s]", __func__);
-	if (keylen != DES3_EDE_KEY_SIZE) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
-				__func__);
-		return -EINVAL;
-	}
 
-	/* Checking key interdependency for weak key detection. */
-	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
-				!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-			(*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
-			 __func__);
-		return -EINVAL;
-	}
-	for (i = 0; i < 3; i++) {
-		ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
-		if (unlikely(ret == 0) &&
-		    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
-			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-			pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
-				 __func__);
-			return -EINVAL;
-		}
+	flags = crypto_ablkcipher_get_flags(cipher);
+	err = __des3_verify_key(&flags, key);
+	if (unlikely(err)) {
+		crypto_ablkcipher_set_flags(cipher, flags);
+		return err;
 	}
 
 	memcpy(ctx->key, key, keylen);
@@ -1219,57 +1198,6 @@ static struct cryp_algo_template cryp_algs[] = {
 	{
 		.algomode = CRYP_ALGO_DES_ECB,
 		.crypto = {
-			.cra_name = "des",
-			.cra_driver_name = "des-ux500",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-						CRYPTO_ALG_ASYNC,
-			.cra_blocksize = DES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct cryp_ctx),
-			.cra_alignmask = 3,
-			.cra_type = &crypto_ablkcipher_type,
-			.cra_init = cryp_cra_init,
-			.cra_module = THIS_MODULE,
-			.cra_u = {
-				.ablkcipher = {
-					.min_keysize = DES_KEY_SIZE,
-					.max_keysize = DES_KEY_SIZE,
-					.setkey = des_ablkcipher_setkey,
-					.encrypt = cryp_blk_encrypt,
-					.decrypt = cryp_blk_decrypt
-				}
-			}
-		}
-
-	},
-	{
-		.algomode = CRYP_ALGO_TDES_ECB,
-		.crypto = {
-			.cra_name = "des3_ede",
-			.cra_driver_name = "des3_ede-ux500",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-						CRYPTO_ALG_ASYNC,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct cryp_ctx),
-			.cra_alignmask = 3,
-			.cra_type = &crypto_ablkcipher_type,
-			.cra_init = cryp_cra_init,
-			.cra_module = THIS_MODULE,
-			.cra_u = {
-				.ablkcipher = {
-					.min_keysize = DES3_EDE_KEY_SIZE,
-					.max_keysize = DES3_EDE_KEY_SIZE,
-					.setkey = des_ablkcipher_setkey,
-					.encrypt = cryp_blk_encrypt,
-					.decrypt = cryp_blk_decrypt
-				}
-			}
-		}
-	},
-	{
-		.algomode = CRYP_ALGO_DES_ECB,
-		.crypto = {
 			.cra_name = "ecb(des)",
 			.cra_driver_name = "ecb-des-ux500",
 			.cra_priority = 300,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d7316f7a3a69..603a62081994 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -23,9 +23,10 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 
 #include "aesp8-ppc.h"
 
@@ -78,20 +79,21 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_disable();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 
-	ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
-	return ret;
+	ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+	return ret ? -EINVAL : 0;
 }
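(Why "+=" had to go: the key-schedule routines return 0 or a negative value, and summing lets two failures masquerade as a different errno. A minimal demonstration of the failure mode, purely illustrative:

static int bad_accumulate(void)
{
	int ret = -1;	/* first set_*_key helper fails */

	ret += -1;	/* second fails too */
	return ret;	/* -2 == -ENOENT: misleading errno */
}

OR-ing the results and collapsing any nonzero value to -EINVAL keeps the reported error meaningful.)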
 
 static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (in_interrupt()) {
+	if (!crypto_simd_usable()) {
 		crypto_cipher_encrypt_one(ctx->fallback, dst, src);
 	} else {
 		preempt_disable();
@@ -108,7 +110,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (in_interrupt()) {
+	if (!crypto_simd_usable()) {
 		crypto_cipher_decrypt_one(ctx->fallback, dst, src);
 	} else {
 		preempt_disable();
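(The in_interrupt() checks replaced throughout the vmx glue — and in ghash.c below — were a proxy for "may the vector unit be used here?". crypto_simd_usable() asks the architecture directly through may_use_simd() and, when the extra crypto self-tests are configured, also honors a per-CPU override so the no-SIMD fallback paths can actually be exercised. Rough shape, under that assumption:

#define crypto_simd_usable() \
	(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
)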
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c5c5ff82b52e..a1a9a6f0d42c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -23,9 +23,10 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
 
@@ -81,13 +82,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_disable();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 
-	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-	return ret;
+	ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+	return ret ? -EINVAL : 0;
 }
 
 static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
@@ -99,7 +101,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 	struct p8_aes_cbc_ctx *ctx =
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
 
-	if (in_interrupt()) {
+	if (!crypto_simd_usable()) {
 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
 		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
@@ -138,7 +140,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 	struct p8_aes_cbc_ctx *ctx =
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
 
-	if (in_interrupt()) {
+	if (!crypto_simd_usable()) {
 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
 		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 8a2fe092cb8e..192a53512f5e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -23,9 +23,10 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
 
@@ -83,8 +84,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_enable();
 	preempt_enable();
 
-	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-	return ret;
+	ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+	return ret ? -EINVAL : 0;
 }
 
 static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
@@ -118,7 +120,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 	struct p8_aes_ctr_ctx *ctx =
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
 
-	if (in_interrupt()) {
+	if (!crypto_simd_usable()) {
 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
 		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index ecd64e5cc5bb..00d412d811ae 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -23,9 +23,10 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <crypto/skcipher.h>
@@ -86,14 +87,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
 	pagefault_disable();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
-	ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
-	ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+	ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+	ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
 	disable_kernel_vsx();
 	pagefault_enable();
 	preempt_enable();
 
-	ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
-	return ret;
+	ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+	return ret ? -EINVAL : 0;
 }
 
 static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
@@ -108,7 +110,7 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 	struct p8_aes_xts_ctx *ctx =
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
 
-	if (in_interrupt()) {
+	if (!crypto_simd_usable()) {
 		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
 		skcipher_request_set_sync_tfm(req, ctx->fallback);
 		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index d6a9f63d65ba..de78282b8f44 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
 	stvx_u		$out1,$x10,$out
 	stvx_u		$out2,$x20,$out
 	addi		$out,$out,0x30
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done
 
 .align	5
 Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
 	stvx_u		$out0,$x00,$out
 	stvx_u		$out1,$x10,$out
 	addi		$out,$out,0x20
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done
 
 .align	5
 Lctr32_enc8x_one:
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index dd8b8716467a..b5a6883bb09e 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -23,16 +23,15 @@
 #include <linux/err.h>
 #include <linux/crypto.h>
 #include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
 #include <asm/switch_to.h>
 #include <crypto/aes.h>
 #include <crypto/ghash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/b128ops.h>
 
-#define IN_INTERRUPT in_interrupt()
-
 void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
 void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
 void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -102,7 +101,6 @@ static int p8_ghash_init(struct shash_desc *desc)
 	dctx->bytes = 0;
 	memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
 	dctx->fallback_desc.tfm = ctx->fallback;
-	dctx->fallback_desc.flags = desc->flags;
 	return crypto_shash_init(&dctx->fallback_desc);
 }
 
@@ -131,7 +129,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
 	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	if (IN_INTERRUPT) {
+	if (!crypto_simd_usable()) {
 		return crypto_shash_update(&dctx->fallback_desc, src,
 					   srclen);
 	} else {
@@ -182,7 +180,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
 	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	if (IN_INTERRUPT) {
+	if (!crypto_simd_usable()) {
 		return crypto_shash_final(&dctx->fallback_desc, out);
 	} else {
 		if (dctx->bytes) {
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 31a98dc6f849..a9f519830615 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -41,7 +41,7 @@ static struct crypto_alg *algs[] = {
 	NULL,
 };
 
-int __init p8_init(void)
+static int __init p8_init(void)
 {
 	int ret = 0;
 	struct crypto_alg **alg_it;
@@ -67,7 +67,7 @@ int __init p8_init(void)
 	return ret;
 }
 
-void __exit p8_exit(void)
+static void __exit p8_exit(void)
 {
 	struct crypto_alg **alg_it;
 
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 2e2dff478833..ecf6e659c0da 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -80,7 +80,6 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
 	SHASH_DESC_ON_STACK(shash, rxe->tfm);
 
 	shash->tfm = rxe->tfm;
-	shash->flags = 0;
 	*(u32 *)shash_desc_ctx(shash) = crc;
 	err = crypto_shash_update(shash, next, len);
 	if (unlikely(err)) {
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index dd6565798778..9faed1c92b52 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,6 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 	int err;
 
 	desc->tfm = essiv->hash_tfm;
-	desc->flags = 0;
 
 	err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
 	shash_desc_zero(desc);
@@ -606,7 +605,6 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
 	int i, r;
 
 	desc->tfm = lmk->hash_tfm;
-	desc->flags = 0;
 
 	r = crypto_shash_init(desc);
 	if (r)
@@ -768,7 +766,6 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
 	/* calculate crc32 for every 32bit part and xor it */
 	desc->tfm = tcw->crc32_tfm;
-	desc->flags = 0;
 	for (i = 0; i < 4; i++) {
 		r = crypto_shash_init(desc);
 		if (r)
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7c678f50aaa3..95ae4bf34203 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,6 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
 	unsigned j, size;
 
 	desc->tfm = ic->journal_mac;
-	desc->flags = 0;
 
 	r = crypto_shash_init(desc);
 	if (unlikely(r)) {
@@ -1276,7 +1275,6 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
 	unsigned digest_size;
 
 	req->tfm = ic->internal_hash;
-	req->flags = 0;
 
 	r = crypto_shash_init(req);
 	if (unlikely(r < 0)) {
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 7ccdc62c6052..ff61dd8748de 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -222,7 +222,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 		goto out_free;
 	}
 	state->sha1->tfm = shash;
-	state->sha1->flags = 0;
 
 	digestsize = crypto_shash_digestsize(shash);
 	if (digestsize < MPPE_MAX_KEY_LEN)
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index 67b0c05afbdb..a324bc4b7938 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -65,7 +65,6 @@ int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
 	hdr[ETH_ALEN * 2 + 3] = 0;
 
 	desc->tfm = tfm_michael;
-	desc->flags = 0;
 
 	err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
 	if (err)
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index b7828fb252f2..b681073ae8ba 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -449,7 +449,6 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
 		SHASH_DESC_ON_STACK(desc, tfm);
 
 		desc->tfm = tfm;
-		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 		ret = crypto_shash_digest(desc, fw->image, image_size,
 					  hash_data);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 06ebea0be118..122d4c0af363 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -219,7 +219,6 @@ michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
 	}
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 55da8c9dfe50..a084e1501f9d 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -507,7 +507,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
 	int err;
 
 	desc->tfm = tfm_michael;
-	desc->flags = 0;
 
 	if (crypto_shash_setkey(tfm_michael, key, 8))
 		return -1;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 829fa4bd253c..d67bb57994c4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -503,7 +503,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
 	int err;
 
 	desc->tfm = tfm_michael;
-	desc->flags = 0;
 
 	if (crypto_shash_setkey(tfm_michael, key, 8))
 		return -1;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 4e680d753941..ca7d7e8aecc0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -252,7 +252,6 @@ static int chap_server_compute_md5(
 	}
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	ret = crypto_shash_init(desc);
 	if (ret < 0) {
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 7416bdbd8576..b7980c856898 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -678,7 +678,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
 	}
 
 	shash->tfm = tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	memset(hmac, 0, sizeof(hmac));
 	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 0dc6f08020ac..b1a696a73f7c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -959,7 +959,6 @@ cifs_alloc_hash(const char *name,
 	}
 
 	(*sdesc)->shash.tfm = *shash;
-	(*sdesc)->shash.flags = 0x0;
 	return 0;
 }
 
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 322ce9686bdb..2cb4956f8511 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -402,7 +402,6 @@ static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
 	{
 		SHASH_DESC_ON_STACK(desc, tfm);
 		desc->tfm = tfm;
-		desc->flags = 0;
 
 		return crypto_shash_digest(desc, key, keysize, salt);
 	}
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f664da55234e..491cf5baa8c2 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -68,7 +68,6 @@ static int ecryptfs_hash_digest(struct crypto_shash *tfm,
 	int err;
 
 	desc->tfm = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 	err = crypto_shash_digest(desc, src, len, dst);
 	shash_desc_zero(desc);
 	return err;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index e74fe84d0886..90fbac5d485b 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -769,7 +769,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
 	}
 
 	s->hash_desc->tfm = s->hash_tfm;
-	s->hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	rc = crypto_shash_digest(s->hash_desc,
 				 (u8 *)s->auth_tok->token.password.session_key_encryption_key,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 82ffdacdc7fa..0833b5fc0668 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2024,7 +2024,6 @@ static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));
 
 	desc.shash.tfm = sbi->s_chksum_driver;
-	desc.shash.flags = 0;
 	*(u32 *)desc.ctx = crc;
 
 	BUG_ON(crypto_shash_update(&desc.shash, address, length));
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 87f75ebd2fd6..21b0ab6bd15a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1422,7 +1422,6 @@ static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
 
 	desc.shash.tfm = sbi->s_chksum_driver;
-	desc.shash.flags = 0;
 	*(u32 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, address, length);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 5188f9f70c78..8c8563441208 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -126,7 +126,6 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
 		SHASH_DESC_ON_STACK(desc, tfm);
 
 		desc->tfm = tfm;
-		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 		status = crypto_shash_digest(desc, clname->data, clname->len,
 					     cksum.data);
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index 5bf5fd08879e..b758004085c4 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -33,7 +33,6 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
 	int err;
 
 	shash->tfm = c->hash_tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash);
 	if (err < 0)
@@ -56,7 +55,6 @@ static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash,
 	int err;
 
 	shash->tfm = c->hmac_tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_digest(shash, hash, c->hash_len, hmac);
 	if (err < 0)
@@ -88,7 +86,6 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
 		return -ENOMEM;
 
 	hash_desc->tfm = c->hash_tfm;
-	hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 	ubifs_shash_copy_state(c, inhash, hash_desc);
 
 	err = crypto_shash_final(hash_desc, hash);
@@ -123,7 +120,6 @@ static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c,
 		return ERR_PTR(-ENOMEM);
 
 	desc->tfm = tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_init(desc);
 	if (err) {
@@ -364,7 +360,6 @@ static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node,
 	ubifs_assert(c, ofs_hmac + hmac_len < len);
 
 	shash->tfm = c->hmac_tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_init(shash);
 	if (err)
@@ -483,7 +478,6 @@ int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac)
 		return 0;
 
 	shash->tfm = c->hmac_tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	err = crypto_shash_init(shash);
 	if (err)
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 0a0e65c07c6d..5c8a81a019a4 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -576,7 +576,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h
 	SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
 
 	hash_desc->tfm = c->hash_tfm;
-	hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ubifs_shash_copy_state(c, log_hash, hash_desc);
 	return crypto_shash_final(hash_desc, hash);
@@ -587,7 +586,6 @@ static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
 	SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
 
 	hmac_desc->tfm = c->hmac_tfm;
-	hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
 }
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 852eaa9cd4db..0fdb542c70cd 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -28,10 +28,10 @@ struct crypto_aes_ctx {
 	u32 key_length;
 };
 
-extern const u32 crypto_ft_tab[4][256];
-extern const u32 crypto_fl_tab[4][256];
-extern const u32 crypto_it_tab[4][256];
-extern const u32 crypto_il_tab[4][256];
+extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_il_tab[4][256] ____cacheline_aligned;
 
 int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		unsigned int key_len);
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 2d690494568c..8884046659a0 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -19,14 +19,20 @@
  *
  * @base:	Common attributes for async crypto requests
  * @src:	Source data
- * @dst:	Destination data
+ *		For verify op this is signature + digest; in that case the
+ *		total size of @src is @src_len + @dst_len.
+ * @dst:	Destination data (should be NULL for verify op)
  * @src_len:	Size of the input buffer
- * @dst_len:	Size of the output buffer. It needs to be at least
- *		as big as the expected result depending	on the operation
+ *		For verify op this is the size of the signature part of @src,
+ *		the part that is processed by the cipher.
+ * @dst_len:	Size of the @dst buffer (for all ops except verify).
+ *		It needs to be at least as big as the expected result,
+ *		depending on the operation.
  *		After operation it will be updated with the actual size of the
  *		result.
  *		In case of error where the dst sgl size was insufficient,
  *		it will be updated to the size required for the operation.
+ *		For verify op this is the size of the digest part in @src.
  * @__ctx:	Start of private context data
  */
 struct akcipher_request {
@@ -55,10 +61,9 @@ struct crypto_akcipher {
  *		algorithm. In case of error, where the dst_len was insufficient,
  *		the req->dst_len will be updated to the size required for the
  *		operation
- * @verify:	Function performs a sign operation as defined by public key
- *		algorithm. In case of error, where the dst_len was insufficient,
- *		the req->dst_len will be updated to the size required for the
- *		operation
+ * @verify:	Function performs a complete verify operation as defined by
+ *		the public key algorithm, returning the verification status.
+ *		It takes the digest value as an input parameter.
  * @encrypt:	Function performs an encrypt operation as defined by public key
  *		algorithm. In case of error, where the dst_len was insufficient,
  *		the req->dst_len will be updated to the size required for the
@@ -69,10 +74,10 @@ struct crypto_akcipher {
  *		operation
  * @set_pub_key: Function invokes the algorithm specific set public key
  *		function, which knows how to decode and interpret
- *		the BER encoded public key
+ *		the BER encoded public key and parameters
  * @set_priv_key: Function invokes the algorithm specific set private key
  *		function, which knows how to decode and interpret
- *		the BER encoded private key
+ *		the BER encoded private key and parameters
  * @max_size:	Function returns dest buffer size required for a given key.
  * @init:	Initialize the cryptographic transformation object.
  *		This function is used to initialize the cryptographic
@@ -238,9 +243,10 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
  *
  * @req:	public key request
  * @src:	ptr to input scatter list
- * @dst:	ptr to output scatter list
+ * @dst:	ptr to output scatter list or NULL for verify op
  * @src_len:	size of the src input scatter list to be processed
- * @dst_len:	size of the dst output scatter list
+ * @dst_len:	size of the dst output scatter list, or the size of the
+ *		signature portion in @src for verify op
  */
 static inline void akcipher_request_set_crypt(struct akcipher_request *req,
 					      struct scatterlist *src,
@@ -343,14 +349,18 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
 }
 
 /**
- * crypto_akcipher_verify() - Invoke public key verify operation
+ * crypto_akcipher_verify() - Invoke public key signature verification
  *
- * Function invokes the specific public key verify operation for a given
- * public key algorithm
+ * Function invokes the specific public key signature verification operation
+ * for a given public key algorithm.
  *
  * @req:	asymmetric key request
  *
- * Return: zero on success; error code in case of error
+ * Note: req->dst should be NULL and req->src should point to an SG of size
+ * (req->src_len + req->dst_len), containing the signature (of req->src_len
+ * bytes) followed by the digest (of req->dst_len bytes).
+ *
+ * Return: zero on verification success; error code in case of error.
  */
 static inline int crypto_akcipher_verify(struct akcipher_request *req)
 {
@@ -369,11 +379,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
  * crypto_akcipher_set_pub_key() - Invoke set public key operation
  *
  * Function invokes the algorithm specific set key function, which knows
- * how to decode and interpret the encoded key
+ * how to decode and interpret the encoded key and parameters
  *
  * @tfm:	tfm handle
- * @key:	BER encoded public key
- * @keylen:	length of the key
+ * @key:	BER encoded public key, algo OID, paramlen, BER encoded
+ *		parameters
+ * @keylen:	length of the key (not including other data)
  *
  * Return: zero on success; error code in case of error
  */
@@ -390,11 +401,12 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm,
  * crypto_akcipher_set_priv_key() - Invoke set private key operation
  *
  * Function invokes the algorithm specific set key function, which knows
- * how to decode and interpret the encoded key
+ * how to decode and interpret the encoded key and parameters
  *
  * @tfm:	tfm handle
- * @key:	BER encoded private key
- * @keylen:	length of the key
+ * @key:	BER encoded private key, algo OID, paramlen, BER encoded
+ *		parameters
+ * @keylen:	length of the key (not including other data)
  *
  * Return: zero on success; error code in case of error
  */
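
As a hedged illustration of the calling convention documented above: the source scatterlist carries the signature immediately followed by the digest, the destination is NULL, and the two lengths split @src. Names below are hypothetical; request allocation and completion handling are elided:

    #include <crypto/akcipher.h>
    #include <linux/scatterlist.h>

    static int demo_verify(struct akcipher_request *req,
                           u8 *sig, unsigned int sig_len,
                           u8 *digest, unsigned int digest_len)
    {
            struct scatterlist src_sg[2];

            sg_init_table(src_sg, 2);
            sg_set_buf(&src_sg[0], sig, sig_len);
            sg_set_buf(&src_sg[1], digest, digest_len);
            akcipher_request_set_crypt(req, src_sg, NULL, sig_len, digest_len);
            return crypto_akcipher_verify(req);     /* 0 == valid (sync tfm) */
    }
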
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1e64f354c2b8..23169f4d87e6 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -18,27 +18,11 @@
 #include <crypto/hash.h>
 #include <crypto/skcipher.h>
 
-struct cryptd_ablkcipher {
-	struct crypto_ablkcipher base;
-};
-
-static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
-	struct crypto_ablkcipher *tfm)
-{
-	return (struct cryptd_ablkcipher *)tfm;
-}
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
-						  u32 type, u32 mask);
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
-
 struct cryptd_skcipher {
 	struct crypto_skcipher base;
 };
 
+/* alg_name should be the algorithm to be cryptd-ed */
 struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
 					      u32 type, u32 mask);
 struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
diff --git a/include/crypto/des.h b/include/crypto/des.h
index d4094d58ac54..72c7c8e5a5a7 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -6,6 +6,11 @@
 #ifndef __CRYPTO_DES_H
 #define __CRYPTO_DES_H
 
+#include <crypto/skcipher.h>
+#include <linux/compiler.h>
+#include <linux/fips.h>
+#include <linux/string.h>
+
 #define DES_KEY_SIZE		8
 #define DES_EXPKEY_WORDS	32
 #define DES_BLOCK_SIZE		8
@@ -14,6 +19,44 @@
 #define DES3_EDE_EXPKEY_WORDS	(3 * DES_EXPKEY_WORDS)
 #define DES3_EDE_BLOCK_SIZE	DES_BLOCK_SIZE
 
+static inline int __des3_verify_key(u32 *flags, const u8 *key)
+{
+	int err = -EINVAL;
+	u32 K[6];
+
+	memcpy(K, key, DES3_EDE_KEY_SIZE);
+
+	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+		     (fips_enabled ||
+		      (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
+		goto bad;
+
+	if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
+		goto bad;
+
+	err = 0;
+
+out:
+	memzero_explicit(K, DES3_EDE_KEY_SIZE);
+
+	return err;
+
+bad:
+	*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+	goto out;
+}
+
+static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key)
+{
+	u32 flags;
+	int err;
+
+	flags = crypto_skcipher_get_flags(tfm);
+	err = __des3_verify_key(&flags, key);
+	crypto_skcipher_set_flags(tfm, flags);
+	return err;
+}
 
 extern unsigned long des_ekey(u32 *pe, const u8 *k);
 
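A hedged sketch of how a 3DES skcipher setkey might use the new helper; the ctx type and function names are hypothetical, and the crypto core is assumed to have already checked keylen against the algorithm's keysize bounds:

    struct demo_des3_ctx {
            u8 key[DES3_EDE_KEY_SIZE];
    };

    static int demo_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                unsigned int keylen)
    {
            struct demo_des3_ctx *ctx = crypto_skcipher_ctx(tfm);
            int err;

            err = des3_verify_key(tfm, key);  /* rejects K1==K2 / K2==K3 per policy */
            if (err)
                    return err;

            memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
            return 0;
    }
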
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 3b31c1b349ae..d21bea2c4382 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -146,8 +146,6 @@ struct ahash_alg {
 
 struct shash_desc {
 	struct crypto_shash *tfm;
-	u32 flags;
-
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -819,6 +817,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
  * cipher handle must point to a keyed message digest cipher in order for this
  * function to succeed.
  *
+ * Context: Any context.
  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
  */
 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -835,6 +834,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
  * crypto_shash_update and crypto_shash_final. The parameters have the same
  * meaning as discussed for those separate three functions.
  *
+ * Context: Any context.
  * Return: 0 if the message digest creation was successful; < 0 if an error
  *	   occurred
  */
@@ -850,6 +850,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
  * caller-allocated output buffer out which must have sufficient size (e.g. by
  * calling crypto_shash_descsize).
  *
+ * Context: Any context.
  * Return: 0 if the export creation was successful; < 0 if an error occurred
  */
 static inline int crypto_shash_export(struct shash_desc *desc, void *out)
@@ -866,6 +867,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
  * the input buffer. That buffer should have been generated with the
  * crypto_ahash_export function.
  *
+ * Context: Any context.
  * Return: 0 if the import was successful; < 0 if an error occurred
  */
 static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
@@ -886,6 +888,7 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
  * operational state handle. Any potentially existing state created by
  * previous operations is discarded.
  *
+ * Context: Any context.
  * Return: 0 if the message digest initialization was successful; < 0 if an
  *	   error occurred
  */
@@ -907,6 +910,7 @@ static inline int crypto_shash_init(struct shash_desc *desc)
  *
  * Updates the message digest state of the operational state handle.
  *
+ * Context: Any context.
  * Return: 0 if the message digest update was successful; < 0 if an error
  *	   occurred
  */
@@ -923,6 +927,7 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
  * into the output buffer. The caller must ensure that the output buffer is
  * large enough by using crypto_shash_digestsize.
  *
+ * Context: Any context.
  * Return: 0 if the message digest creation was successful; < 0 if an error
  *	   occurred
  */
@@ -939,6 +944,7 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
  * crypto_shash_update and crypto_shash_final. The parameters have the same
  * meaning as discussed for those separate functions.
  *
+ * Context: Any context.
  * Return: 0 if the message digest creation was successful; < 0 if an error
  *	   occurred
  */
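
For reference, a sketch of the incremental interface these annotations cover; per the new "Context: Any context." notes, the synchronous shash calls themselves do not sleep. Buffer names are placeholders:

    static int demo_hash_two(struct crypto_shash *tfm,
                             const u8 *a, unsigned int alen,
                             const u8 *b, unsigned int blen, u8 *out)
    {
            SHASH_DESC_ON_STACK(desc, tfm);
            int err;

            desc->tfm = tfm;
            err = crypto_shash_init(desc) ?:
                  crypto_shash_update(desc, a, alen) ?:
                  crypto_shash_update(desc, b, blen) ?:
                  crypto_shash_final(desc, out);
            shash_desc_zero(desc);
            return err;
    }
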
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index f18344518e32..d2316242a988 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,11 @@
 #ifndef _CRYPTO_INTERNAL_SIMD_H
 #define _CRYPTO_INTERNAL_SIMD_H
 
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+/* skcipher support */
+
 struct simd_skcipher_alg;
 struct skcipher_alg;
 
@@ -22,4 +27,43 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
 void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
 			       struct simd_skcipher_alg **simd_algs);
 
+/* AEAD support */
+
+struct simd_aead_alg;
+struct aead_alg;
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+					      const char *drvname,
+					      const char *basename);
+struct simd_aead_alg *simd_aead_create(const char *algname,
+				       const char *basename);
+void simd_aead_free(struct simd_aead_alg *alg);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+			       struct simd_aead_alg **simd_algs);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+			   struct simd_aead_alg **simd_algs);
+
+/*
+ * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or
+ *			  access the SIMD register file?
+ *
+ * This delegates to may_use_simd(), except that this also returns false if SIMD
+ * in crypto code has been temporarily disabled on this CPU by the crypto
+ * self-tests, in order to test the no-SIMD fallback code.  This override is
+ * currently limited to configurations where the extra self-tests are enabled,
+ * because it might be a bit too invasive to be part of the regular self-tests.
+ *
+ * This is a macro so that <asm/simd.h>, which some architectures don't have,
+ * doesn't have to be included directly here.
+ */
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
+#define crypto_simd_usable() \
+	(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
+#else
+#define crypto_simd_usable() may_use_simd()
+#endif
+
 #endif /* _CRYPTO_INTERNAL_SIMD_H */
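
A hedged sketch of the guard pattern this macro enables in SIMD glue code; the demo_* functions are hypothetical, and the begin/end calls shown are the x86 pair (other architectures use kernel_neon_begin()/kernel_neon_end()):

    #include <asm/fpu/api.h>
    #include <asm/simd.h>
    #include <crypto/internal/simd.h>

    void demo_transform_generic(u8 *dst, const u8 *src, unsigned int len);
    void demo_transform_simd(u8 *dst, const u8 *src, unsigned int len);

    static void demo_transform(u8 *dst, const u8 *src, unsigned int len)
    {
            if (!crypto_simd_usable()) {
                    demo_transform_generic(dst, src, len);  /* scalar fallback */
                    return;
            }

            kernel_fpu_begin();
            demo_transform_simd(dst, src, len);
            kernel_fpu_end();
    }
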
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index ad2aa743dd99..5cefddb1991f 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -47,16 +47,7 @@ int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
 int crypto_morus1280_glue_encrypt(struct aead_request *req);
 int crypto_morus1280_glue_decrypt(struct aead_request *req);
 
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int keylen);
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
-				      unsigned int authsize);
-int cryptd_morus1280_glue_encrypt(struct aead_request *req);
-int cryptd_morus1280_glue_decrypt(struct aead_request *req);
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \
 	static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
 		.init = crypto_morus1280_##id##_init, \
 		.ad = crypto_morus1280_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
 	{ \
 	} \
 	\
-	static struct aead_alg crypto_morus1280_##id##_algs[] = {\
-		{ \
-			.setkey = crypto_morus1280_glue_setkey, \
-			.setauthsize = crypto_morus1280_glue_setauthsize, \
-			.encrypt = crypto_morus1280_glue_encrypt, \
-			.decrypt = crypto_morus1280_glue_decrypt, \
-			.init = crypto_morus1280_##id##_init_tfm, \
-			.exit = crypto_morus1280_##id##_exit_tfm, \
-			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS1280_BLOCK_SIZE, \
-			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_INTERNAL, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct morus1280_ctx), \
-				.cra_alignmask = 0, \
-				\
-				.cra_name = "__morus1280", \
-				.cra_driver_name = "__"driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
-		}, { \
-			.setkey = cryptd_morus1280_glue_setkey, \
-			.setauthsize = cryptd_morus1280_glue_setauthsize, \
-			.encrypt = cryptd_morus1280_glue_encrypt, \
-			.decrypt = cryptd_morus1280_glue_decrypt, \
-			.init = cryptd_morus1280_glue_init_tfm, \
-			.exit = cryptd_morus1280_glue_exit_tfm, \
+	static struct aead_alg crypto_morus1280_##id##_alg = { \
+		.setkey = crypto_morus1280_glue_setkey, \
+		.setauthsize = crypto_morus1280_glue_setauthsize, \
+		.encrypt = crypto_morus1280_glue_encrypt, \
+		.decrypt = crypto_morus1280_glue_decrypt, \
+		.init = crypto_morus1280_##id##_init_tfm, \
+		.exit = crypto_morus1280_##id##_exit_tfm, \
+		\
+		.ivsize = MORUS_NONCE_SIZE, \
+		.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+		.chunksize = MORUS1280_BLOCK_SIZE, \
+		\
+		.base = { \
+			.cra_flags = CRYPTO_ALG_INTERNAL, \
+			.cra_blocksize = 1, \
+			.cra_ctxsize = sizeof(struct morus1280_ctx), \
+			.cra_alignmask = 0, \
+			.cra_priority = priority, \
 			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS1280_BLOCK_SIZE, \
+			.cra_name = "__morus1280", \
+			.cra_driver_name = "__"driver_name, \
 			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_ASYNC, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct crypto_aead *), \
-				.cra_alignmask = 0, \
-				\
-				.cra_priority = priority, \
-				\
-				.cra_name = "morus1280", \
-				.cra_driver_name = driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
+			.cra_module = THIS_MODULE, \
 		} \
 	}
 
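With the open-coded cryptd wrappers gone, the per-ISA glue module is expected to register the single internal algorithm through the shared SIMD AEAD helpers declared above. A hedged sketch of the registration half; the alg symbol assumes MORUS1280_DECLARE_ALG(avx2, ...) was used:

    static struct simd_aead_alg *demo_simd_alg;

    static int __init demo_init(void)
    {
            return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
                                              &demo_simd_alg);
    }

    static void __exit demo_exit(void)
    {
            simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &demo_simd_alg);
    }
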
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index df8e1103ff94..0ee6266cb26c 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -47,16 +47,7 @@ int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
 int crypto_morus640_glue_encrypt(struct aead_request *req);
 int crypto_morus640_glue_decrypt(struct aead_request *req);
 
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
-				unsigned int keylen);
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
-				     unsigned int authsize);
-int cryptd_morus640_glue_encrypt(struct aead_request *req);
-int cryptd_morus640_glue_decrypt(struct aead_request *req);
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS640_DECLARE_ALG(id, driver_name, priority) \
 	static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
 		.init = crypto_morus640_##id##_init, \
 		.ad = crypto_morus640_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
 	{ \
 	} \
 	\
-	static struct aead_alg crypto_morus640_##id##_algs[] = {\
-		{ \
-			.setkey = crypto_morus640_glue_setkey, \
-			.setauthsize = crypto_morus640_glue_setauthsize, \
-			.encrypt = crypto_morus640_glue_encrypt, \
-			.decrypt = crypto_morus640_glue_decrypt, \
-			.init = crypto_morus640_##id##_init_tfm, \
-			.exit = crypto_morus640_##id##_exit_tfm, \
-			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS640_BLOCK_SIZE, \
-			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_INTERNAL, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct morus640_ctx), \
-				.cra_alignmask = 0, \
-				\
-				.cra_name = "__morus640", \
-				.cra_driver_name = "__"driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
-		}, { \
-			.setkey = cryptd_morus640_glue_setkey, \
-			.setauthsize = cryptd_morus640_glue_setauthsize, \
-			.encrypt = cryptd_morus640_glue_encrypt, \
-			.decrypt = cryptd_morus640_glue_decrypt, \
-			.init = cryptd_morus640_glue_init_tfm, \
-			.exit = cryptd_morus640_glue_exit_tfm, \
+	static struct aead_alg crypto_morus640_##id##_alg = {\
+		.setkey = crypto_morus640_glue_setkey, \
+		.setauthsize = crypto_morus640_glue_setauthsize, \
+		.encrypt = crypto_morus640_glue_encrypt, \
+		.decrypt = crypto_morus640_glue_decrypt, \
+		.init = crypto_morus640_##id##_init_tfm, \
+		.exit = crypto_morus640_##id##_exit_tfm, \
+		\
+		.ivsize = MORUS_NONCE_SIZE, \
+		.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+		.chunksize = MORUS640_BLOCK_SIZE, \
+		\
+		.base = { \
+			.cra_flags = CRYPTO_ALG_INTERNAL, \
+			.cra_blocksize = 1, \
+			.cra_ctxsize = sizeof(struct morus640_ctx), \
+			.cra_alignmask = 0, \
+			.cra_priority = priority, \
 			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS640_BLOCK_SIZE, \
+			.cra_name = "__morus640", \
+			.cra_driver_name = "__"driver_name, \
 			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_ASYNC, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct crypto_aead *), \
-				.cra_alignmask = 0, \
-				\
-				.cra_priority = priority, \
-				\
-				.cra_name = "morus640", \
-				.cra_driver_name = driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
+			.cra_module = THIS_MODULE, \
 		} \
 	}
 
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index be626eac9113..712fe1214b5f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
 #define _LINUX_PUBLIC_KEY_H
 
 #include <linux/keyctl.h>
+#include <linux/oid_registry.h>
 
 /*
  * Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -25,6 +26,9 @@
 struct public_key {
 	void *key;
 	u32 keylen;
+	enum OID algo;
+	void *params;
+	u32 paramlen;
 	bool key_is_private;
 	const char *id_type;
 	const char *pkey_algo;
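
A hedged sketch of how a certificate parser might fill the three new fields; the helper name is illustrative and the OID shown is only an example:

    #include <crypto/public_key.h>
    #include <linux/slab.h>

    static int demo_store_params(struct public_key *pub, enum OID algo,
                                 const void *params, u32 paramlen)
    {
            pub->algo = algo;       /* e.g. OID_gost2012PKey256 */
            pub->params = kmemdup(params, paramlen, GFP_KERNEL);
            if (!pub->params)
                    return -ENOMEM;
            pub->paramlen = paramlen;
            return 0;
    }
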
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index 856e32af8657..cae1b4a01971 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,7 +23,10 @@ struct streebog_uint512 {
 };
 
 struct streebog_state {
-	u8 buffer[STREEBOG_BLOCK_SIZE];
+	union {
+		u8 buffer[STREEBOG_BLOCK_SIZE];
+		struct streebog_uint512 m;
+	};
 	struct streebog_uint512 hash;
 	struct streebog_uint512 h;
 	struct streebog_uint512 N;
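
The union lets the 64-byte input buffer be handed to the 512-bit compression step without an intermediate copy: bytes land in .buffer and the compressor reads the same storage as .m. A hedged fragment, with demo_g standing in for the real round function:

    void demo_g(struct streebog_uint512 *h, const struct streebog_uint512 *N,
                const struct streebog_uint512 *m);  /* hypothetical */

    static void demo_stage(struct streebog_state *st, const u8 *block)
    {
            memcpy(st->buffer, block, STREEBOG_BLOCK_SIZE);
            demo_g(&st->h, &st->N, &st->m);
    }
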
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0f919d5fe84f..c2ffff5f9ae2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1606,7 +1606,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
 		JBD_MAX_CHECKSUM_SIZE);
 
 	desc.shash.tfm = journal->j_chksum_driver;
-	desc.shash.flags = 0;
 	*(u32 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, address, length);
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index d2fa9ca42e9a..7f30446348c4 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -93,6 +93,24 @@ enum OID {
 	OID_authorityKeyIdentifier,	/* 2.5.29.35 */
 	OID_extKeyUsage,		/* 2.5.29.37 */
 
+	/* EC-RDSA */
+	OID_gostCPSignA,		/* 1.2.643.2.2.35.1 */
+	OID_gostCPSignB,		/* 1.2.643.2.2.35.2 */
+	OID_gostCPSignC,		/* 1.2.643.2.2.35.3 */
+	OID_gost2012PKey256,		/* 1.2.643.7.1.1.1.1 */
+	OID_gost2012PKey512,		/* 1.2.643.7.1.1.1.2 */
+	OID_gost2012Digest256,		/* 1.2.643.7.1.1.2.2 */
+	OID_gost2012Digest512,		/* 1.2.643.7.1.1.2.3 */
+	OID_gost2012Signature256,	/* 1.2.643.7.1.1.3.2 */
+	OID_gost2012Signature512,	/* 1.2.643.7.1.1.3.3 */
+	OID_gostTC26Sign256A,		/* 1.2.643.7.1.2.1.1.1 */
+	OID_gostTC26Sign256B,		/* 1.2.643.7.1.2.1.1.2 */
+	OID_gostTC26Sign256C,		/* 1.2.643.7.1.2.1.1.3 */
+	OID_gostTC26Sign256D,		/* 1.2.643.7.1.2.1.1.4 */
+	OID_gostTC26Sign512A,		/* 1.2.643.7.1.2.1.2.1 */
+	OID_gostTC26Sign512B,		/* 1.2.643.7.1.2.1.2.2 */
+	OID_gostTC26Sign512C,		/* 1.2.643.7.1.2.1.2.3 */
+
 	OID__NR
 };
 
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 827c601841c4..6f89fc8d4b8e 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -5,8 +5,7 @@
  *
  * Author: Brijesh Singh <brijesh.singh@amd.com>
  *
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf
+ * SEV API spec is available at https://developer.amd.com/sev
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index ac8c60bcc83b..43521d500c2b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -6,8 +6,7 @@
  *
  * Author: Brijesh Singh <brijesh.singh@amd.com>
  *
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ * SEV API specification is available at: https://developer.amd.com/sev/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -30,7 +29,8 @@ enum {
 	SEV_PDH_GEN,
 	SEV_PDH_CERT_EXPORT,
 	SEV_PEK_CERT_IMPORT,
-	SEV_GET_ID,
+	SEV_GET_ID,	/* This command is deprecated, use SEV_GET_ID2 */
+	SEV_GET_ID2,
 
 	SEV_MAX,
 };
@@ -125,7 +125,7 @@ struct sev_user_data_pdh_cert_export {
 } __packed;
 
 /**
- * struct sev_user_data_get_id - GET_ID command parameters
+ * struct sev_user_data_get_id - GET_ID command parameters (deprecated)
  *
  * @socket1: Buffer to pass unique ID of first socket
  * @socket2: Buffer to pass unique ID of second socket
@@ -136,6 +136,16 @@ struct sev_user_data_get_id {
 } __packed;
 
 /**
+ * struct sev_user_data_get_id2 - GET_ID2 command parameters
+ * @address: Buffer to store the unique ID
+ * @length: length of the unique ID (in: buffer size; out: ID length)
+ */
+struct sev_user_data_get_id2 {
+	__u64 address;				/* In */
+	__u32 length;				/* In/Out */
+} __packed;
+
+/**
  * struct sev_issue_cmd - SEV ioctl parameters
  *
  * @cmd: SEV commands to execute
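
A hedged userspace sketch of the probe-then-fetch flow that the In/Out @length suggests; it assumes the /dev/sev node and the SEV_ISSUE_CMD ioctl, and that a too-small first call fails while reporting the required length:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/psp-sev.h>

    static void *fetch_sev_id(uint32_t *len)
    {
            struct sev_user_data_get_id2 id2 = {};
            struct sev_issue_cmd cmd = { .cmd = SEV_GET_ID2 };
            void *buf = NULL;
            int fd = open("/dev/sev", O_RDWR);

            *len = 0;
            if (fd < 0)
                    return NULL;
            cmd.data = (uint64_t)(uintptr_t)&id2;
            ioctl(fd, SEV_ISSUE_CMD, &cmd);         /* probe: kernel fills id2.length */
            buf = calloc(1, id2.length);
            id2.address = (uint64_t)(uintptr_t)buf;
            if (buf && !ioctl(fd, SEV_ISSUE_CMD, &cmd))
                    *len = id2.length;
            close(fd);
            return buf;
    }
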
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index f1d0e00a3971..f7fb8f6a688f 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -688,7 +688,6 @@ static int kexec_calculate_store_digests(struct kimage *image)
 		goto out_free_desc;
 
 	desc->tfm   = tfm;
-	desc->flags = 0;
 
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 4d0d47c1ffbd..e89ebfdbb0fc 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -69,7 +69,6 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 
 	rcu_read_lock();
 	desc.shash.tfm = rcu_dereference(crct10dif_tfm);
-	desc.shash.flags = 0;
 	*(__u16 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, buffer, len);
diff --git a/lib/digsig.c b/lib/digsig.c
index 6ba6fcd92dd1..3b0a579bdcdf 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -240,7 +240,6 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
 		goto err;
 
 	desc->tfm = shash;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	crypto_shash_init(desc);
 	crypto_shash_update(desc, data, datalen);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index f0a2934605bf..4e9829c4d64c 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -47,7 +47,6 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	int err;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 	*ctx = crc;
 
 	err = crypto_shash_update(shash, address, length);
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 78bec8df8525..aaa39409eeb7 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -161,7 +161,6 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
 	}
 
 	shash->tfm = tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ret = crypto_shash_digest(shash, plaintext, psize, output);
 
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 621146d04c03..e68c715f8d37 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -183,7 +183,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
 	}
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	/* Swap key and message from LSB to MSB */
 	swap_buf(k, tmp, 16);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 39d72e58b8e5..31569f4809f6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -760,7 +760,6 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
 		SHASH_DESC_ON_STACK(desc, tfm);
 
 		desc->tfm = tfm;
-		desc->flags = 0;
 		crypto_shash_digest(desc, (u8 *)auth,
 				    end - (unsigned char *)auth, digest);
 		shash_desc_zero(desc);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d05c57664e36..72e74503f9fc 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1684,7 +1684,6 @@ static struct sctp_cookie_param *sctp_pack_cookie(
 
 		/* Sign the message.  */
 		desc->tfm = sctp_sk(ep->base.sk)->hmac;
-		desc->flags = 0;
 
 		err = crypto_shash_setkey(desc->tfm, ep->secret_key,
 					  sizeof(ep->secret_key)) ?:
@@ -1755,7 +1754,6 @@ struct sctp_association *sctp_unpack_cookie(
 		int err;
 
 		desc->tfm = sctp_sk(ep->base.sk)->hmac;
-		desc->flags = 0;
 
 		err = crypto_shash_setkey(desc->tfm, ep->secret_key,
 					  sizeof(ep->secret_key)) ?:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 4f43383971ba..6f2d30d7b766 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -977,7 +977,6 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
 	}
 
 	desc->tfm = hmac;
-	desc->flags = 0;
 
 	/* Compute intermediate Kseq from session key */
 	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
@@ -1045,7 +1044,6 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
 	}
 
 	desc->tfm = hmac;
-	desc->flags = 0;
 
 	/* Compute intermediate Kcrypt from session key */
 	for (i = 0; i < kctx->gk5e->keylength; i++)
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 56cc85c5bc06..6e5d6d240215 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -438,7 +438,6 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
 	}
 
 	desc->tfm = hmac;
-	desc->flags = 0;
 
 	err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum);
 	kzfree(desc);
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 35f06563207d..11eaa5956f00 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -501,7 +501,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
 	}
 
 	desc->tfm = tfm_michael;
-	desc->flags = 0;
 
 	if (crypto_shash_setkey(tfm_michael, key, 8))
 		return -1;
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index af03d98c7552..baba63bc66b1 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -43,7 +43,6 @@ char *aa_calc_hash(void *data, size_t len)
 		goto fail;
 
 	desc->tfm = apparmor_tfm;
-	desc->flags = 0;
 
 	error = crypto_shash_init(desc);
 	if (error)
@@ -81,7 +80,6 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
 		goto fail;
 
 	desc->tfm = apparmor_tfm;
-	desc->flags = 0;
 
 	error = crypto_shash_init(desc);
 	if (error)
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index d775e03fbbcc..99080871eb9f 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -104,9 +104,16 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 
 	memset(&pks, 0, sizeof(pks));
 
-	pks.pkey_algo = "rsa";
 	pks.hash_algo = hash_algo_name[hdr->hash_algo];
-	pks.encoding = "pkcs1";
+	if (hdr->hash_algo == HASH_ALGO_STREEBOG_256 ||
+	    hdr->hash_algo == HASH_ALGO_STREEBOG_512) {
+		/* EC-RDSA and Streebog should go together. */
+		pks.pkey_algo = "ecrdsa";
+		pks.encoding = "raw";
+	} else {
+		pks.pkey_algo = "rsa";
+		pks.encoding = "pkcs1";
+	}
 	pks.digest = (u8 *)data;
 	pks.digest_size = datalen;
 	pks.s = hdr->sig;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index c37d08118af5..e11564eb645b 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -124,7 +124,6 @@ out:
 		return ERR_PTR(-ENOMEM);
 
 	desc->tfm = *tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	rc = crypto_shash_init(desc);
 	if (rc) {
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 16a4f45863b1..a32878e10ebc 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -333,7 +333,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
 	SHASH_DESC_ON_STACK(shash, tfm);
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	hash->length = crypto_shash_digestsize(tfm);
 
@@ -469,7 +468,6 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
 	int rc, i;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	hash->length = crypto_shash_digestsize(tfm);
 
@@ -591,7 +589,6 @@ static int calc_buffer_shash_tfm(const void *buf, loff_t size,
 	int rc;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	hash->length = crypto_shash_digestsize(tfm);
 
@@ -664,7 +661,6 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
 	SHASH_DESC_ON_STACK(shash, tfm);
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	rc = crypto_shash_init(shash);
 	if (rc != 0)
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 711e89d8c415..23f95dec771b 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -112,7 +112,6 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
 	if (!sdesc)
 		goto out_free_tfm;
 	sdesc->shash.tfm = tfm;
-	sdesc->shash.flags = 0x0;
 
 	*sdesc_ret = sdesc;
 
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 347108f660a1..1b1456b21a93 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -333,7 +333,6 @@ static int calc_hash(struct crypto_shash *tfm, u8 *digest,
 	int err;
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	err = crypto_shash_digest(desc, buf, buflen, digest);
 	shash_desc_zero(desc);
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index efdbf17f3915..a75b2f0f1230 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -55,7 +55,6 @@ static struct sdesc *init_sdesc(struct crypto_shash *alg)
 	if (!sdesc)
 		return ERR_PTR(-ENOMEM);
 	sdesc->shash.tfm = alg;
-	sdesc->shash.flags = 0x0;
 	return sdesc;
 }