author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 09:09:55 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-05 09:09:55 -0800
commit    63bdf4284c38a48af21745ceb148a087b190cd21
tree      ffbf9e69ed457e776db0317903ccb0addbd1b276
parent    6456300356433873309a1cae6aa05e77d6b59153
parent    0918f18c7179e8cdf718d01531a81b28130b4217
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Add helper for simple skcipher modes.
   - Add helper to register multiple templates.
   - Set CRYPTO_TFM_NEED_KEY when setkey fails.
   - Require neither or both of export/import in shash.
   - AEAD decryption test vectors are now generated from encryption
     ones.
   - New option CONFIG_CRYPTO_MANAGER_EXTRA_TESTS that includes random
     fuzzing.

  Algorithms:
   - Conversions to skcipher and helper for many templates.
   - Add more test vectors for nhpoly1305 and adiantum.

  Drivers:
   - Add crypto4xx prng support.
   - Add xcbc/cmac/ecb support in caam.
   - Add AES support for Exynos5433 in s5p.
   - Remove sha384/sha512 from artpec7 as hardware cannot do partial
     hash"

[ There is a merge of the Freescale SoC tree in order to pull in changes
  required by patches to the caam/qi2 driver. ]
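
[ Illustration, not part of this merge: the simple-skcipher-mode helper sits
  behind the same request API that every in-kernel skcipher user goes
  through. A minimal synchronous sketch, assuming a kmalloc'd buffer whose
  length is a multiple of the block size and a 128-bit key;
  demo_cbc_aes_encrypt() is a made-up name: ]

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>

    static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
                                    const u8 *key, u8 *iv)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, 16);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            /* encrypt in place and wait synchronously for completion */
            sg_init_one(&sg, buf, len);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                               CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }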

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (174 commits)
  crypto: s5p - add AES support for Exynos5433
  dt-bindings: crypto: document Exynos5433 SlimSSS
  crypto: crypto4xx - add missing of_node_put after of_device_is_available
  crypto: cavium/zip - fix collision with generic cra_driver_name
  crypto: af_alg - use struct_size() in sock_kfree_s()
  crypto: caam - remove redundant likely/unlikely annotation
  crypto: s5p - update iv after AES-CBC op end
  crypto: x86/poly1305 - Clear key material from stack in SSE2 variant
  crypto: caam - generate hash keys in-place
  crypto: caam - fix DMA mapping xcbc key twice
  crypto: caam - fix hash context DMA unmap size
  hwrng: bcm2835 - fix probe as platform device
  crypto: s5p-sss - Use AES_BLOCK_SIZE define instead of number
  crypto: stm32 - drop pointless static qualifier in stm32_hash_remove()
  crypto: chelsio - Fixed Traffic Stall
  crypto: marvell - Remove set but not used variable 'ivsize'
  crypto: ccp - Update driver messages to remove some confusion
  crypto: adiantum - add 1536 and 4096-byte test vectors
  crypto: nhpoly1305 - add a test vector with len % 16 != 0
  crypto: arm/aes-ce - update IV after partial final CTR block
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 87
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r-- drivers/crypto/amcc/crypto4xx_reg_def.h | 1
-rw-r--r-- drivers/crypto/amcc/crypto4xx_trng.c | 4
-rw-r--r-- drivers/crypto/amcc/crypto4xx_trng.h | 4
-rw-r--r-- drivers/crypto/atmel-tdes.c | 2
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 326
-rw-r--r-- drivers/crypto/bcm/Makefile | 2
-rw-r--r-- drivers/crypto/bcm/cipher.c | 10
-rw-r--r-- drivers/crypto/bcm/cipher.h | 4
-rw-r--r-- drivers/crypto/bcm/util.c | 40
-rw-r--r-- drivers/crypto/bcm/util.h | 6
-rw-r--r-- drivers/crypto/caam/Kconfig | 1
-rw-r--r-- drivers/crypto/caam/caamalg.c | 238
-rw-r--r-- drivers/crypto/caam/caamalg_desc.c | 18
-rw-r--r-- drivers/crypto/caam/caamalg_qi.c | 29
-rw-r--r-- drivers/crypto/caam/caamalg_qi2.c | 85
-rw-r--r-- drivers/crypto/caam/caamalg_qi2.h | 2
-rw-r--r-- drivers/crypto/caam/caamhash.c | 429
-rw-r--r-- drivers/crypto/caam/caamhash_desc.c | 68
-rw-r--r-- drivers/crypto/caam/caamhash_desc.h | 8
-rw-r--r-- drivers/crypto/caam/compat.h | 1
-rw-r--r-- drivers/crypto/caam/ctrl.c | 25
-rw-r--r-- drivers/crypto/caam/error.c | 6
-rw-r--r-- drivers/crypto/caam/intern.h | 1
-rw-r--r-- drivers/crypto/caam/key_gen.c | 30
-rw-r--r-- drivers/crypto/caam/qi.c | 4
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_debugfs.c | 27
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_debugfs.h | 5
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 4
-rw-r--r-- drivers/crypto/cavium/zip/zip_main.c | 58
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-des3.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-debugfs.c | 36
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 2
-rw-r--r-- drivers/crypto/ccp/psp-dev.c | 37
-rw-r--r-- drivers/crypto/ccp/psp-dev.h | 2
-rw-r--r-- drivers/crypto/ccp/sp-dev.c | 2
-rw-r--r-- drivers/crypto/ccp/sp-dev.h | 2
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 6
-rw-r--r-- drivers/crypto/ccp/sp-platform.c | 2
-rw-r--r-- drivers/crypto/ccree/cc_buffer_mgr.c | 87
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 10
-rw-r--r-- drivers/crypto/ccree/cc_debugfs.c | 22
-rw-r--r-- drivers/crypto/ccree/cc_debugfs.h | 8
-rw-r--r-- drivers/crypto/ccree/cc_driver.c | 13
-rw-r--r-- drivers/crypto/ccree/cc_driver.h | 2
-rw-r--r-- drivers/crypto/chelsio/Makefile | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 12
-rw-r--r-- drivers/crypto/chelsio/chcr_core.h | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_ipsec.c | 42
-rw-r--r-- drivers/crypto/chelsio/chtls/Makefile | 3
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_io.c | 12
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_main.c | 1
-rw-r--r-- drivers/crypto/hifn_795x.c | 3
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 2
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 4
-rw-r--r-- drivers/crypto/marvell/cipher.c | 4
-rw-r--r-- drivers/crypto/n2_core.c | 2
-rw-r--r-- drivers/crypto/omap-des.c | 2
-rw-r--r-- drivers/crypto/picoxcell_crypto.c | 3
-rw-r--r-- drivers/crypto/qat/qat_c3xxx/Makefile | 2
-rw-r--r-- drivers/crypto/qat/qat_c3xxx/adf_drv.c | 5
-rw-r--r-- drivers/crypto/qat/qat_c3xxxvf/Makefile | 2
-rw-r--r-- drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 5
-rw-r--r-- drivers/crypto/qat/qat_c62x/Makefile | 2
-rw-r--r-- drivers/crypto/qat/qat_c62x/adf_drv.c | 5
-rw-r--r-- drivers/crypto/qat/qat_c62xvf/Makefile | 2
-rw-r--r-- drivers/crypto/qat/qat_c62xvf/adf_drv.c | 5
-rw-r--r-- drivers/crypto/qat/qat_common/adf_cfg.c | 7
-rw-r--r-- drivers/crypto/qat/qat_common/adf_transport.c | 7
-rw-r--r-- drivers/crypto/qat/qat_common/adf_transport_debug.c | 15
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/Makefile | 2
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 5
-rw-r--r-- drivers/crypto/qat/qat_dh895xccvf/Makefile | 2
-rw-r--r-- drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 5
-rw-r--r-- drivers/crypto/qce/ablkcipher.c | 4
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.c | 2
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto.h | 4
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 41
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 2
-rw-r--r-- drivers/crypto/s5p-sss.c | 64
-rw-r--r-- drivers/crypto/stm32/stm32-hash.c | 2
-rw-r--r-- drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 2
-rw-r--r-- drivers/crypto/talitos.c | 2
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 26
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_algs.c | 2
88 files changed, 1153 insertions(+), 929 deletions(-)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index acf79889d903..06574a884715 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,9 +40,11 @@
 #include <crypto/ctr.h>
 #include <crypto/gcm.h>
 #include <crypto/sha.h>
+#include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
 #include <crypto/internal/skcipher.h>
 #include "crypto4xx_reg_def.h"
 #include "crypto4xx_core.h"
@@ -1035,6 +1037,10 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
 			rc = crypto_register_ahash(&alg->alg.u.hash);
 			break;
 
+		case CRYPTO_ALG_TYPE_RNG:
+			rc = crypto_register_rng(&alg->alg.u.rng);
+			break;
+
 		default:
 			rc = crypto_register_skcipher(&alg->alg.u.cipher);
 			break;
@@ -1064,6 +1070,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
 			crypto_unregister_aead(&alg->alg.u.aead);
 			break;
 
+		case CRYPTO_ALG_TYPE_RNG:
+			crypto_unregister_rng(&alg->alg.u.rng);
+			break;
+
 		default:
 			crypto_unregister_skcipher(&alg->alg.u.cipher);
 		}
@@ -1122,6 +1132,69 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
 		PPC4XX_TMO_ERR_INT);
 }
 
+static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
+				 u8 *data, unsigned int max)
+{
+	unsigned int i, curr = 0;
+	u32 val[2];
+
+	do {
+		/* trigger PRN generation */
+		writel(PPC4XX_PRNG_CTRL_AUTO_EN,
+		       dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
+		for (i = 0; i < 1024; i++) {
+			/* usually 19 iterations are enough */
+			if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
+			     CRYPTO4XX_PRNG_STAT_BUSY))
+				continue;
+
+			val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
+			val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
+			break;
+		}
+		if (i == 1024)
+			return -ETIMEDOUT;
+
+		if ((max - curr) >= 8) {
+			memcpy(data, &val, 8);
+			data += 8;
+			curr += 8;
+		} else {
+			/* copy only remaining bytes */
+			memcpy(data, &val, max - curr);
+			break;
+		}
+	} while (curr < max);
+
+	return curr;
+}
+
+static int crypto4xx_prng_generate(struct crypto_rng *tfm,
+				   const u8 *src, unsigned int slen,
+				   u8 *dstn, unsigned int dlen)
+{
+	struct rng_alg *alg = crypto_rng_alg(tfm);
+	struct crypto4xx_alg *amcc_alg;
+	struct crypto4xx_device *dev;
+	int ret;
+
+	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
+	dev = amcc_alg->dev;
+
+	mutex_lock(&dev->core_dev->rng_lock);
+	ret = ppc4xx_prng_data_read(dev, dstn, dlen);
+	mutex_unlock(&dev->core_dev->rng_lock);
+	return ret;
+}
+
+
+static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+			unsigned int slen)
+{
+	return 0;
+}
+
 /**
  * Supported Crypto Algorithms
  */
@@ -1291,6 +1364,18 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 			.cra_module	= THIS_MODULE,
 		},
 	} },
+	{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
+		.base = {
+			.cra_name		= "stdrng",
+			.cra_driver_name        = "crypto4xx_rng",
+			.cra_priority		= 300,
+			.cra_ctxsize		= 0,
+			.cra_module		= THIS_MODULE,
+		},
+		.generate               = crypto4xx_prng_generate,
+		.seed                   = crypto4xx_prng_seed,
+		.seedsize               = 0,
+	} },
 };
 
 /**
@@ -1360,6 +1445,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
 	core_dev->dev->core_dev = core_dev;
 	core_dev->dev->is_revb = is_revb;
 	core_dev->device = dev;
+	mutex_init(&core_dev->rng_lock);
 	spin_lock_init(&core_dev->lock);
 	INIT_LIST_HEAD(&core_dev->dev->alg_list);
 	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1439,6 +1525,7 @@ static int crypto4xx_remove(struct platform_device *ofdev)
 	tasklet_kill(&core_dev->tasklet);
 	/* Un-register with Linux CryptoAPI */
 	crypto4xx_unregister_alg(core_dev->dev);
+	mutex_destroy(&core_dev->rng_lock);
 	/* Free all allocated memory */
 	crypto4xx_stop_all(core_dev);
 
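
[ Illustration, not part of this commit: the PRNG added above registers as
  "stdrng", so a consumer reaches it through the generic rng API; its
  cra_priority of 300 makes it the preferred stdrng where this hardware is
  present. A hedged sketch, with demo_read_stdrng() a made-up name: ]

    #include <crypto/rng.h>

    static int demo_read_stdrng(u8 *out, unsigned int len)
    {
            struct crypto_rng *rng;
            int err;

            rng = crypto_alloc_rng("stdrng", 0, 0);
            if (IS_ERR(rng))
                    return PTR_ERR(rng);

            /* crypto4xx_prng_seed() is a no-op, so seeding cannot fail here */
            err = crypto_rng_reset(rng, NULL, 0);
            if (!err)
                    err = crypto_rng_get_bytes(rng, out, len);

            crypto_free_rng(rng);
            return err;
    }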
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index e2ca56722f07..18df695ca6b1 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -23,8 +23,10 @@
 #define __CRYPTO4XX_CORE_H__
 
 #include <linux/ratelimit.h>
+#include <linux/mutex.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
 #include <crypto/internal/skcipher.h>
 #include "crypto4xx_reg_def.h"
 #include "crypto4xx_sa.h"
@@ -119,6 +121,7 @@ struct crypto4xx_core_device {
 	u32 irq;
 	struct tasklet_struct tasklet;
 	spinlock_t lock;
+	struct mutex rng_lock;
 };
 
 struct crypto4xx_ctx {
@@ -143,6 +146,7 @@ struct crypto4xx_alg_common {
 		struct skcipher_alg cipher;
 		struct ahash_alg hash;
 		struct aead_alg aead;
+		struct rng_alg rng;
 	} u;
 };
 
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 472331787e04..80c67490bbf6 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -100,6 +100,7 @@
 #define CRYPTO4XX_ENDIAN_CFG			0x000600d8
 
 #define CRYPTO4XX_PRNG_STAT			0x00070000
+#define CRYPTO4XX_PRNG_STAT_BUSY		0x1
 #define CRYPTO4XX_PRNG_CTRL			0x00070004
 #define CRYPTO4XX_PRNG_SEED_L			0x00070008
 #define CRYPTO4XX_PRNG_SEED_H			0x0007000c
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 5e63742b0d22..53ab1f140a26 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
 
 	/* Find the TRNG device node and map it */
 	trng = of_find_matching_node(NULL, ppc4xx_trng_match);
-	if (!trng || !of_device_is_available(trng))
+	if (!trng || !of_device_is_available(trng)) {
+		of_node_put(trng);
 		return;
+	}
 
 	dev->trng_base = of_iomap(trng, 0);
 	of_node_put(trng);
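
[ Context: of_find_matching_node() returns the node with an elevated
  refcount, so every exit path must drop it - which the fix above adds for
  the not-available case. of_node_put(NULL) is a safe no-op, keeping the
  error path simple. The pattern, with a hypothetical demo_match table: ]

        struct device_node *np;
        void __iomem *base;

        np = of_find_matching_node(NULL, demo_match);   /* takes a reference */
        if (!np || !of_device_is_available(np)) {
                of_node_put(np);                        /* NULL-safe */
                return;
        }
        base = of_iomap(np, 0);
        of_node_put(np);                /* mapping outlives the reference */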
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 931d22531f51..7bbda51b7337 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
 void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
 #else
 static inline void ppc4xx_trng_probe(
-	struct crypto4xx_device *dev __maybe_unused) { }
+	struct crypto4xx_core_device *dev __maybe_unused) { }
 static inline void ppc4xx_trng_remove(
-	struct crypto4xx_device *dev __maybe_unused) { }
+	struct crypto4xx_core_device *dev __maybe_unused) { }
 #endif
 
 #endif
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 438e1ffb2ec0..65bf1a299562 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -785,7 +785,7 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	}
 
 	err = des_ekey(tmp, key);
-	if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
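
[ Context: CRYPTO_TFM_REQ_WEAK_KEY was renamed to
  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS across the tree this cycle; the semantics
  are unchanged. des_ekey() returns 0 when the key is one of the known weak
  DES keys, so the full setkey pattern - as the caam patch further down in
  this diff also uses it - looks like: ]

        u32 tmp[DES_EXPKEY_WORDS];

        if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(tfm) &
            CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
                return -EINVAL;
        }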
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index f3442c2bdbdc..57e5dca3253f 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -135,8 +135,6 @@
 #define regk_crypto_ext         0x00000001
 #define regk_crypto_hmac_sha1   0x00000007
 #define regk_crypto_hmac_sha256 0x00000009
-#define regk_crypto_hmac_sha384 0x0000000b
-#define regk_crypto_hmac_sha512 0x0000000d
 #define regk_crypto_init        0x00000000
 #define regk_crypto_key_128     0x00000000
 #define regk_crypto_key_192     0x00000001
@@ -144,8 +142,6 @@
 #define regk_crypto_null        0x00000000
 #define regk_crypto_sha1        0x00000006
 #define regk_crypto_sha256      0x00000008
-#define regk_crypto_sha384      0x0000000a
-#define regk_crypto_sha512      0x0000000c
 
 /* DMA descriptor structures */
 struct pdma_descr_ctrl  {
@@ -190,8 +186,6 @@ struct pdma_stat_descr {
 /* Hash modes (including HMAC variants) */
 #define ARTPEC6_CRYPTO_HASH_SHA1	1
 #define ARTPEC6_CRYPTO_HASH_SHA256	2
-#define ARTPEC6_CRYPTO_HASH_SHA384	3
-#define ARTPEC6_CRYPTO_HASH_SHA512	4
 
 /* Crypto modes */
 #define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
@@ -284,6 +278,7 @@ enum artpec6_crypto_hash_flags {
 
 struct artpec6_crypto_req_common {
 	struct list_head list;
+	struct list_head complete_in_progress;
 	struct artpec6_crypto_dma_descriptors *dma;
 	struct crypto_async_request *req;
 	void (*complete)(struct crypto_async_request *req);
@@ -291,11 +286,11 @@ struct artpec6_crypto_req_common {
 };
 
 struct artpec6_hash_request_context {
-	char partial_buffer[SHA512_BLOCK_SIZE];
-	char partial_buffer_out[SHA512_BLOCK_SIZE];
-	char key_buffer[SHA512_BLOCK_SIZE];
-	char pad_buffer[SHA512_BLOCK_SIZE + 32];
-	unsigned char digeststate[SHA512_DIGEST_SIZE];
+	char partial_buffer[SHA256_BLOCK_SIZE];
+	char partial_buffer_out[SHA256_BLOCK_SIZE];
+	char key_buffer[SHA256_BLOCK_SIZE];
+	char pad_buffer[SHA256_BLOCK_SIZE + 32];
+	unsigned char digeststate[SHA256_DIGEST_SIZE];
 	size_t partial_bytes;
 	u64 digcnt;
 	u32 key_md;
@@ -305,8 +300,8 @@ struct artpec6_hash_request_context {
 };
 
 struct artpec6_hash_export_state {
-	char partial_buffer[SHA512_BLOCK_SIZE];
-	unsigned char digeststate[SHA512_DIGEST_SIZE];
+	char partial_buffer[SHA256_BLOCK_SIZE];
+	unsigned char digeststate[SHA256_DIGEST_SIZE];
 	size_t partial_bytes;
 	u64 digcnt;
 	int oper;
@@ -314,7 +309,7 @@ struct artpec6_hash_export_state {
 };
 
 struct artpec6_hashalg_context {
-	char hmac_key[SHA512_BLOCK_SIZE];
+	char hmac_key[SHA256_BLOCK_SIZE];
 	size_t hmac_key_length;
 	struct crypto_shash *child_hash;
 };
@@ -670,8 +665,8 @@ artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
 	 * to be written.
 	 */
 	return artpec6_crypto_dma_map_single(common,
-				dma->stat + dma->in_cnt - 1,
-				sizeof(dma->stat[0]),
+				dma->stat,
+				sizeof(dma->stat[0]) * dma->in_cnt,
 				DMA_BIDIRECTIONAL,
 				&dma->stat_dma_addr);
 }
@@ -1315,8 +1310,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
 	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
 	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
-	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
-		SHA512_DIGEST_SIZE : digestsize;
+	size_t contextsize = digestsize;
 	size_t blocksize = crypto_tfm_alg_blocksize(
 		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
 	struct artpec6_crypto_req_common *common = &req_ctx->common;
@@ -1456,7 +1450,6 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
 
 	/* Finalize */
 	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
-		bool needtrim = contextsize != digestsize;
 		size_t hash_pad_len;
 		u64 digest_bits;
 		u32 oper;
@@ -1502,19 +1495,10 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
 		/* Descriptor for the final result */
 		error = artpec6_crypto_setup_in_descr(common, areq->result,
 						      digestsize,
-						      !needtrim);
+						      true);
 		if (error)
 			return error;
 
-		if (needtrim) {
-			/* Discard the extra context bytes for SHA-384 */
-			error = artpec6_crypto_setup_in_descr(common,
-					req_ctx->partial_buffer,
-					digestsize - contextsize, true);
-			if (error)
-				return error;
-		}
-
 	} else { /* This is not the final operation for this request */
 		if (!run_hw)
 			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
@@ -1923,7 +1907,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
 	/* For the decryption, cryptlen includes the tag. */
 	input_length = areq->cryptlen;
 	if (req_ctx->decrypt)
-		input_length -= AES_BLOCK_SIZE;
+		input_length -= crypto_aead_authsize(cipher);
 
 	/* Prepare the context buffer */
 	req_ctx->hw_ctx.aad_length_bits =
@@ -1988,7 +1972,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
 		size_t output_len = areq->cryptlen;
 
 		if (req_ctx->decrypt)
-			output_len -= AES_BLOCK_SIZE;
+			output_len -= crypto_aead_authsize(cipher);
 
 		artpec6_crypto_walk_init(&walk, areq->dst);
 
@@ -2017,19 +2001,32 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
 		 * the output ciphertext. For decryption it is put in a context
 		 * buffer for later compare against the input tag.
 		 */
-		count = AES_BLOCK_SIZE;
 
 		if (req_ctx->decrypt) {
 			ret = artpec6_crypto_setup_in_descr(common,
-				req_ctx->decryption_tag, count, false);
+				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
 			if (ret)
 				return ret;
 
 		} else {
+			/* For encryption the requested tag size may be smaller
+			 * than the hardware's generated tag.
+			 */
+			size_t authsize = crypto_aead_authsize(cipher);
+
 			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
-								count);
+								authsize);
 			if (ret)
 				return ret;
+
+			if (authsize < AES_BLOCK_SIZE) {
+				count = AES_BLOCK_SIZE - authsize;
+				ret = artpec6_crypto_setup_in_descr(common,
+					ac->pad_buffer,
+					count, false);
+				if (ret)
+					return ret;
+			}
 		}
 
 	}
@@ -2045,7 +2042,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
 	return artpec6_crypto_dma_map_descs(common);
 }
 
-static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
+	    struct list_head *completions)
 {
 	struct artpec6_crypto_req_common *req;
 
@@ -2056,7 +2054,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
 		list_move_tail(&req->list, &ac->pending);
 		artpec6_crypto_start_dma(req);
 
-		req->req->complete(req->req, -EINPROGRESS);
+		list_add_tail(&req->complete_in_progress, completions);
 	}
 
 	/*
@@ -2086,6 +2084,11 @@ static void artpec6_crypto_task(unsigned long data)
 	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
 	struct artpec6_crypto_req_common *req;
 	struct artpec6_crypto_req_common *n;
+	struct list_head complete_done;
+	struct list_head complete_in_progress;
+
+	INIT_LIST_HEAD(&complete_done);
+	INIT_LIST_HEAD(&complete_in_progress);
 
 	if (list_empty(&ac->pending)) {
 		pr_debug("Spurious IRQ\n");
@@ -2097,9 +2100,12 @@ static void artpec6_crypto_task(unsigned long data)
 	list_for_each_entry_safe(req, n, &ac->pending, list) {
 		struct artpec6_crypto_dma_descriptors *dma = req->dma;
 		u32 stat;
+		dma_addr_t stataddr;
 
-		dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
-					sizeof(dma->stat[0]),
+		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
+		dma_sync_single_for_cpu(artpec6_crypto_dev,
+					stataddr,
+					4,
 					DMA_BIDIRECTIONAL);
 
 		stat = req->dma->stat[req->dma->in_cnt-1];
@@ -2119,19 +2125,30 @@ static void artpec6_crypto_task(unsigned long data)
 
 		pr_debug("Completing request %p\n", req);
 
-		list_del(&req->list);
+		list_move_tail(&req->list, &complete_done);
 
+		ac->pending_count--;
+	}
+
+	artpec6_crypto_process_queue(ac, &complete_in_progress);
+
+	spin_unlock_bh(&ac->queue_lock);
+
+	/* Perform the completion callbacks without holding the queue lock
+	 * to allow new request submissions from the callbacks.
+	 */
+	list_for_each_entry_safe(req, n, &complete_done, list) {
 		artpec6_crypto_dma_unmap_all(req);
 		artpec6_crypto_copy_bounce_buffers(req);
-
-		ac->pending_count--;
 		artpec6_crypto_common_destroy(req);
+
 		req->complete(req->req);
 	}
 
-	artpec6_crypto_process_queue(ac);
-
-	spin_unlock_bh(&ac->queue_lock);
+	list_for_each_entry_safe(req, n, &complete_in_progress,
+				 complete_in_progress) {
+		req->req->complete(req->req, -EINPROGRESS);
+	}
 }
 
 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
@@ -2170,27 +2187,29 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
 	/* Verify GCM hashtag. */
 	struct aead_request *areq = container_of(req,
 		struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
 
 	if (req_ctx->decrypt) {
 		u8 input_tag[AES_BLOCK_SIZE];
+		unsigned int authsize = crypto_aead_authsize(aead);
 
 		sg_pcopy_to_buffer(areq->src,
 				   sg_nents(areq->src),
 				   input_tag,
-				   AES_BLOCK_SIZE,
+				   authsize,
 				   areq->assoclen + areq->cryptlen -
-				   AES_BLOCK_SIZE);
+				   authsize);
 
-		if (memcmp(req_ctx->decryption_tag,
-			   input_tag,
-			   AES_BLOCK_SIZE)) {
+		if (crypto_memneq(req_ctx->decryption_tag,
+				  input_tag,
+				  authsize)) {
 			pr_debug("***EBADMSG:\n");
 			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
-					     input_tag, AES_BLOCK_SIZE, true);
+					     input_tag, authsize, true);
 			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
 					     req_ctx->decryption_tag,
-					     AES_BLOCK_SIZE, true);
+					     authsize, true);
 
 			result = -EBADMSG;
 		}
@@ -2266,13 +2285,6 @@ artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
 	case ARTPEC6_CRYPTO_HASH_SHA256:
 		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
 		break;
-	case ARTPEC6_CRYPTO_HASH_SHA384:
-		oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
-		break;
-	case ARTPEC6_CRYPTO_HASH_SHA512:
-		oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
-		break;
-
 	default:
 		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
 		return -EINVAL;
@@ -2368,53 +2380,11 @@ static int artpec6_crypto_sha256_digest(struct ahash_request *req)
 	return artpec6_crypto_prepare_submit_hash(req);
 }
 
-static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
-{
-	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
-}
-
-static int __maybe_unused
-artpec6_crypto_sha384_digest(struct ahash_request *req)
-{
-	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
-	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
-	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
-	return artpec6_crypto_prepare_submit_hash(req);
-}
-
-static int artpec6_crypto_sha512_init(struct ahash_request *req)
-{
-	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
-}
-
-static int artpec6_crypto_sha512_digest(struct ahash_request *req)
-{
-	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
-	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
-	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
-	return artpec6_crypto_prepare_submit_hash(req);
-}
-
 static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
 {
 	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
 }
 
-static int __maybe_unused
-artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
-{
-	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
-}
-
-static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
-{
-	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
-}
-
 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
 {
 	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
@@ -2425,27 +2395,6 @@ static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
 	return artpec6_crypto_prepare_submit_hash(req);
 }
 
-static int __maybe_unused
-artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
-{
-	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
-	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
-	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
-	return artpec6_crypto_prepare_submit_hash(req);
-}
-
-static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
-{
-	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
-
-	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
-	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
-
-	return artpec6_crypto_prepare_submit_hash(req);
-}
-
 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
 				    const char *base_hash_name)
 {
@@ -2480,17 +2429,6 @@ static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
 	return artpec6_crypto_ahash_init_common(tfm, "sha256");
 }
 
-static int __maybe_unused
-artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
-{
-	return artpec6_crypto_ahash_init_common(tfm, "sha384");
-}
-
-static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
-{
-	return artpec6_crypto_ahash_init_common(tfm, "sha512");
-}
-
 static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
 {
 	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
@@ -2761,103 +2699,6 @@ static struct ahash_alg hash_algos[] = {
 	},
 };
 
-static struct ahash_alg artpec7_hash_algos[] = {
-	/* SHA-384 */
-	{
-		.init = artpec6_crypto_sha384_init,
-		.update = artpec6_crypto_hash_update,
-		.final = artpec6_crypto_hash_final,
-		.digest = artpec6_crypto_sha384_digest,
-		.import = artpec6_crypto_hash_import,
-		.export = artpec6_crypto_hash_export,
-		.halg.digestsize = SHA384_DIGEST_SIZE,
-		.halg.statesize = sizeof(struct artpec6_hash_export_state),
-		.halg.base = {
-			.cra_name = "sha384",
-			.cra_driver_name = "artpec-sha384",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = SHA384_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
-			.cra_alignmask = 3,
-			.cra_module = THIS_MODULE,
-			.cra_init = artpec6_crypto_ahash_init,
-			.cra_exit = artpec6_crypto_ahash_exit,
-		}
-	},
-	/* HMAC SHA-384 */
-	{
-		.init = artpec6_crypto_hmac_sha384_init,
-		.update = artpec6_crypto_hash_update,
-		.final = artpec6_crypto_hash_final,
-		.digest = artpec6_crypto_hmac_sha384_digest,
-		.import = artpec6_crypto_hash_import,
-		.export = artpec6_crypto_hash_export,
-		.setkey = artpec6_crypto_hash_set_key,
-		.halg.digestsize = SHA384_DIGEST_SIZE,
-		.halg.statesize = sizeof(struct artpec6_hash_export_state),
-		.halg.base = {
-			.cra_name = "hmac(sha384)",
-			.cra_driver_name = "artpec-hmac-sha384",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = SHA384_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
-			.cra_alignmask = 3,
-			.cra_module = THIS_MODULE,
-			.cra_init = artpec6_crypto_ahash_init_hmac_sha384,
-			.cra_exit = artpec6_crypto_ahash_exit,
-		}
-	},
-	/* SHA-512 */
-	{
-		.init = artpec6_crypto_sha512_init,
-		.update = artpec6_crypto_hash_update,
-		.final = artpec6_crypto_hash_final,
-		.digest = artpec6_crypto_sha512_digest,
-		.import = artpec6_crypto_hash_import,
-		.export = artpec6_crypto_hash_export,
-		.halg.digestsize = SHA512_DIGEST_SIZE,
-		.halg.statesize = sizeof(struct artpec6_hash_export_state),
-		.halg.base = {
-			.cra_name = "sha512",
-			.cra_driver_name = "artpec-sha512",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = SHA512_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
-			.cra_alignmask = 3,
-			.cra_module = THIS_MODULE,
-			.cra_init = artpec6_crypto_ahash_init,
-			.cra_exit = artpec6_crypto_ahash_exit,
-		}
-	},
-	/* HMAC SHA-512 */
-	{
-		.init = artpec6_crypto_hmac_sha512_init,
-		.update = artpec6_crypto_hash_update,
-		.final = artpec6_crypto_hash_final,
-		.digest = artpec6_crypto_hmac_sha512_digest,
-		.import = artpec6_crypto_hash_import,
-		.export = artpec6_crypto_hash_export,
-		.setkey = artpec6_crypto_hash_set_key,
-		.halg.digestsize = SHA512_DIGEST_SIZE,
-		.halg.statesize = sizeof(struct artpec6_hash_export_state),
-		.halg.base = {
-			.cra_name = "hmac(sha512)",
-			.cra_driver_name = "artpec-hmac-sha512",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = SHA512_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
-			.cra_alignmask = 3,
-			.cra_module = THIS_MODULE,
-			.cra_init = artpec6_crypto_ahash_init_hmac_sha512,
-			.cra_exit = artpec6_crypto_ahash_exit,
-		}
-	},
-};
-
 /* Crypto */
 static struct skcipher_alg crypto_algos[] = {
 	/* AES - ECB */
@@ -2984,12 +2825,6 @@ static void artpec6_crypto_init_debugfs(void)
 {
 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
 
-	if (!dbgfs_root || IS_ERR(dbgfs_root)) {
-		dbgfs_root = NULL;
-		pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
-		return;
-	}
-
 #ifdef CONFIG_FAULT_INJECTION
 	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
 				  &artpec6_crypto_fail_status_read);
@@ -3001,9 +2836,6 @@ static void artpec6_crypto_init_debugfs(void)
 
 static void artpec6_crypto_free_debugfs(void)
 {
-	if (!dbgfs_root)
-		return;
-
 	debugfs_remove_recursive(dbgfs_root);
 	dbgfs_root = NULL;
 }
@@ -3104,19 +2936,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
 		goto disable_hw;
 	}
 
-	if (variant != ARTPEC6_CRYPTO) {
-		err = crypto_register_ahashes(artpec7_hash_algos,
-					      ARRAY_SIZE(artpec7_hash_algos));
-		if (err) {
-			dev_err(dev, "Failed to register ahashes\n");
-			goto unregister_ahashes;
-		}
-	}
-
 	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
 	if (err) {
 		dev_err(dev, "Failed to register ciphers\n");
-		goto unregister_a7_ahashes;
+		goto unregister_ahashes;
 	}
 
 	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
@@ -3129,10 +2952,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
 
 unregister_algs:
 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
-unregister_a7_ahashes:
-	if (variant != ARTPEC6_CRYPTO)
-		crypto_unregister_ahashes(artpec7_hash_algos,
-					  ARRAY_SIZE(artpec7_hash_algos));
 unregister_ahashes:
 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
 disable_hw:
@@ -3148,9 +2967,6 @@ static int artpec6_crypto_remove(struct platform_device *pdev)
 	int irq = platform_get_irq(pdev, 0);
 
 	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
-	if (ac->variant != ARTPEC6_CRYPTO)
-		crypto_unregister_ahashes(artpec7_hash_algos,
-					  ARRAY_SIZE(artpec7_hash_algos));
 	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
 	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
 
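
[ Illustration of the locking rework above: completed and newly started
  requests are collected on local lists while queue_lock is held, and the
  callbacks run only after the lock is dropped, because a callback may
  immediately submit a new request and re-enter the driver. Simplified,
  with a hypothetical req_is_finished() predicate: ]

        LIST_HEAD(done);

        spin_lock_bh(&ac->queue_lock);
        list_for_each_entry_safe(req, n, &ac->pending, list)
                if (req_is_finished(req))
                        list_move_tail(&req->list, &done);
        spin_unlock_bh(&ac->queue_lock);

        /* lock-free: these callbacks may call back into the driver */
        list_for_each_entry_safe(req, n, &done, list)
                req->complete(req->req);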
diff --git a/drivers/crypto/bcm/Makefile b/drivers/crypto/bcm/Makefile
index 13cb80eb2665..7469e19afe85 100644
--- a/drivers/crypto/bcm/Makefile
+++ b/drivers/crypto/bcm/Makefile
@@ -11,5 +11,3 @@
 obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o
 
 bcm_crypto_spu-objs :=  util.o spu.o spu2.o cipher.o
-
-ccflags-y += -I. -DBCMDRIVER
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 5567cbda2798..28f592f7e1b7 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -717,7 +717,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	 */
 	unsigned int new_data_len;
 
-	unsigned int chunk_start = 0;
+	unsigned int __maybe_unused chunk_start = 0;
 	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
 	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
 	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
@@ -1675,8 +1675,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct brcm_message *mssg = msg;
 	struct iproc_reqctx_s *rctx;
-	struct iproc_ctx_s *ctx;
-	struct crypto_async_request *areq;
 	int err = 0;
 
 	rctx = mssg->ctx;
@@ -1686,8 +1684,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
 		err = -EFAULT;
 		goto cb_finish;
 	}
-	areq = rctx->parent;
-	ctx = rctx->ctx;
 
 	/* process the SPU status */
 	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
@@ -1822,7 +1818,7 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	if (keylen == DES_KEY_SIZE) {
 		if (des_ekey(tmp, key) == 0) {
 			if (crypto_ablkcipher_get_flags(cipher) &
-			    CRYPTO_TFM_REQ_WEAK_KEY) {
+			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
 				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
 
 				crypto_ablkcipher_set_flags(cipher, flags);
@@ -2876,7 +2872,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 
 			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
-				    CRYPTO_TFM_REQ_WEAK_KEY) {
+				    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
 					crypto_aead_set_flags(cipher, flags);
 					return -EINVAL;
 				}
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 763c425c41ca..f6da49758954 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -23,6 +23,7 @@
 #include <crypto/aes.h>
 #include <crypto/internal/hash.h>
 #include <crypto/aead.h>
+#include <crypto/arc4.h>
 #include <crypto/gcm.h>
 #include <crypto/sha.h>
 #include <crypto/sha3.h>
@@ -34,9 +35,6 @@
 /* Driver supports up to MAX_SPUS SPU blocks */
 #define MAX_SPUS 16
 
-#define ARC4_MIN_KEY_SIZE   1
-#define ARC4_MAX_KEY_SIZE   256
-#define ARC4_BLOCK_SIZE     1
 #define ARC4_STATE_SIZE     4
 
 #define CCM_AES_IV_SIZE    16
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index a912c6ad3e85..d8cda5fb75ad 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -201,46 +201,6 @@ struct sdesc {
 	char ctx[];
 };
 
-/* do a synchronous decrypt operation */
-int do_decrypt(char *alg_name,
-	       void *key_ptr, unsigned int key_len,
-	       void *iv_ptr, void *src_ptr, void *dst_ptr,
-	       unsigned int block_len)
-{
-	struct scatterlist sg_in[1], sg_out[1];
-	struct crypto_blkcipher *tfm =
-	    crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
-	struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
-	int ret = 0;
-	void *iv;
-	int ivsize;
-
-	flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);
-
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);
-
-	sg_init_table(sg_in, 1);
-	sg_set_buf(sg_in, src_ptr, block_len);
-
-	sg_init_table(sg_out, 1);
-	sg_set_buf(sg_out, dst_ptr, block_len);
-
-	iv = crypto_blkcipher_crt(tfm)->iv;
-	ivsize = crypto_blkcipher_ivsize(tfm);
-	memcpy(iv, iv_ptr, ivsize);
-
-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
-	crypto_free_blkcipher(tfm);
-
-	if (ret < 0)
-		pr_err("aes_decrypt failed %d\n", ret);
-
-	return ret;
-}
-
 /**
  * do_shash() - Do a synchronous hash operation in software
  * @name:       The name of the hash algorithm
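
[ Context: the removed do_decrypt() helper, deleted here together with its
  declaration in util.h, was built on the old blkcipher interface. A modern
  equivalent would go through the sync skcipher API; a hedged sketch, with
  demo_decrypt() a made-up name: ]

        static int demo_decrypt(const char *alg_name, const u8 *key,
                                unsigned int key_len, const u8 *iv,
                                struct scatterlist *src,
                                struct scatterlist *dst, unsigned int len)
        {
                struct crypto_sync_skcipher *tfm;
                int ret;

                tfm = crypto_alloc_sync_skcipher(alg_name, 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                ret = crypto_sync_skcipher_setkey(tfm, key, key_len);
                if (!ret) {
                        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

                        skcipher_request_set_sync_tfm(req, tfm);
                        skcipher_request_set_callback(req, 0, NULL, NULL);
                        skcipher_request_set_crypt(req, src, dst, len,
                                                   (u8 *)iv);
                        ret = crypto_skcipher_decrypt(req);
                        skcipher_request_zero(req);
                }

                crypto_free_sync_skcipher(tfm);
                return ret;
        }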
diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h
index 712e029795f8..15c60356518a 100644
--- a/drivers/crypto/bcm/util.h
+++ b/drivers/crypto/bcm/util.h
@@ -95,12 +95,6 @@ u32 spu_msg_sg_add(struct scatterlist **to_sg,
 
 void add_to_ctr(u8 *ctr_pos, unsigned int increment);
 
-/* do a synchronous decrypt operation */
-int do_decrypt(char *alg_name,
-	       void *key_ptr, unsigned int key_len,
-	       void *iv_ptr, void *src_ptr, void *dst_ptr,
-	       unsigned int block_len);
-
 /* produce a message digest from data of length n bytes */
 int do_shash(unsigned char *name, unsigned char *result,
 	     const u8 *data1, unsigned int data1_len,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index c4b1cade55c1..577c9844b322 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -91,6 +91,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
 	help
 	  Selecting this will offload crypto for users of the
 	  scatterlist crypto API (such as the linux native IPSec
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 80ae69f906fb..9eac5099098e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -766,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	return 0;
 }
 
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	u32 tmp[DES3_EDE_EXPKEY_WORDS];
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+
+	if (keylen == DES3_EDE_KEY_SIZE &&
+	    __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
+		return -EINVAL;
+	}
+
+	if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
+	    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+		crypto_skcipher_set_flags(skcipher,
+					  CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return skcipher_setkey(skcipher, key, keylen);
+}
+
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
@@ -802,6 +823,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  * aead_edesc - s/w-extended aead descriptor
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
@@ -810,6 +833,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 struct aead_edesc {
 	int src_nents;
 	int dst_nents;
+	int mapped_src_nents;
+	int mapped_dst_nents;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
@@ -820,6 +845,8 @@ struct aead_edesc {
  * skcipher_edesc - s/w-extended skcipher descriptor
  * @src_nents: number of segments in input s/w scatterlist
  * @dst_nents: number of segments in output s/w scatterlist
+ * @mapped_src_nents: number of segments in input h/w link table
+ * @mapped_dst_nents: number of segments in output h/w link table
  * @iv_dma: dma address of iv for checking continuity and link table
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
@@ -830,6 +857,8 @@ struct aead_edesc {
 struct skcipher_edesc {
 	int src_nents;
 	int dst_nents;
+	int mapped_src_nents;
+	int mapped_dst_nents;
 	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
@@ -846,7 +875,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	if (dst != src) {
 		if (src_nents)
 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+		if (dst_nents)
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 	} else {
 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 	}
@@ -961,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	 * The crypto API expects us to set the IV (req->iv) to the last
 	 * ciphertext block. This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
-				 ivsize, 0);
+	if (ivsize)
+		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+					 ivsize, ivsize, 0);
 
 	kfree(edesc);
 
@@ -1023,11 +1054,12 @@ static void init_aead_job(struct aead_request *req,
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	if (all_contig) {
-		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
+		src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) :
+						    0;
 		in_options = 0;
 	} else {
 		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->src_nents;
+		sec4_sg_index += edesc->mapped_src_nents;
 		in_options = LDST_SGF;
 	}
 
@@ -1038,8 +1070,11 @@ static void init_aead_job(struct aead_request *req,
 	out_options = in_options;
 
 	if (unlikely(req->src != req->dst)) {
-		if (edesc->dst_nents == 1) {
+		if (!edesc->mapped_dst_nents) {
+			dst_dma = 0;
+		} else if (edesc->mapped_dst_nents == 1) {
 			dst_dma = sg_dma_address(req->dst);
+			out_options = 0;
 		} else {
 			dst_dma = edesc->sec4_sg_dma +
 				  sec4_sg_index *
@@ -1183,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc = edesc->hw_desc;
 	u32 *sh_desc;
-	u32 out_options = 0;
-	dma_addr_t dst_dma, ptr;
-	int len;
+	u32 in_options = 0, out_options = 0;
+	dma_addr_t src_dma, dst_dma, ptr;
+	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1203,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req,
 	len = desc_len(sh_desc);
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
-			  LDST_SGF);
+	if (ivsize || edesc->mapped_src_nents > 1) {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
+		in_options = LDST_SGF;
+	} else {
+		src_dma = sg_dma_address(req->src);
+	}
+
+	append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
 
 	if (likely(req->src == req->dst)) {
-		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
-		out_options = LDST_SGF;
+		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+		out_options = in_options;
+	} else if (edesc->mapped_dst_nents == 1) {
+		dst_dma = sg_dma_address(req->dst);
 	} else {
-		if (edesc->dst_nents == 1) {
-			dst_dma = sg_dma_address(req->dst);
-		} else {
-			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
-				  sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
+		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+			  sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
 	}
+
 	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
 }
 
@@ -1289,12 +1330,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_src_nents = 0;
 		}
 
-		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
-					      DMA_FROM_DEVICE);
-		if (unlikely(!mapped_dst_nents)) {
-			dev_err(jrdev, "unable to map destination\n");
-			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
-			return ERR_PTR(-ENOMEM);
+		/* Cover also the case of null (zero length) output data */
+		if (dst_nents) {
+			mapped_dst_nents = dma_map_sg(jrdev, req->dst,
+						      dst_nents,
+						      DMA_FROM_DEVICE);
+			if (unlikely(!mapped_dst_nents)) {
+				dev_err(jrdev, "unable to map destination\n");
+				dma_unmap_sg(jrdev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_dst_nents = 0;
 		}
 	}
 
@@ -1313,6 +1361,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
+	edesc->mapped_src_nents = mapped_src_nents;
+	edesc->mapped_dst_nents = mapped_dst_nents;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
 			 desc_bytes;
 	*all_contig_ptr = !(mapped_src_nents > 1);
@@ -1586,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct skcipher_edesc *edesc;
-	dma_addr_t iv_dma;
+	dma_addr_t iv_dma = 0;
 	u8 *iv;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1621,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 			dev_err(jrdev, "unable to map source\n");
 			return ERR_PTR(-ENOMEM);
 		}
-
 		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
 					      DMA_FROM_DEVICE);
 		if (unlikely(!mapped_dst_nents)) {
@@ -1631,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		}
 	}
 
-	sec4_sg_ents = 1 + mapped_src_nents;
+	if (!ivsize && mapped_src_nents == 1)
+		sec4_sg_ents = 0; // no need for an input hw s/g table
+	else
+		sec4_sg_ents = mapped_src_nents + !!ivsize;
 	dst_sg_idx = sec4_sg_ents;
 	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
@@ -1650,39 +1702,48 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
+	edesc->mapped_src_nents = mapped_src_nents;
+	edesc->mapped_dst_nents = mapped_dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
 						  desc_bytes);
 
 	/* Make sure IV is located in a DMAable area */
-	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-	memcpy(iv, req->iv, ivsize);
+	if (ivsize) {
+		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, iv_dma)) {
+			dev_err(jrdev, "unable to map IV\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
 
-	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
-		kfree(edesc);
-		return ERR_PTR(-ENOMEM);
+		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
-
-	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+	if (dst_sg_idx)
+		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+				   !!ivsize, 0);
 
 	if (mapped_dst_nents > 1) {
 		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
 				   edesc->sec4_sg + dst_sg_idx, 0);
 	}
 
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
-		kfree(edesc);
-		return ERR_PTR(-ENOMEM);
+	if (sec4_sg_bytes) {
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, iv_dma, ivsize, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
 	edesc->iv_dma = iv_dma;
@@ -1749,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	 * The crypto API expects us to set the IV (req->iv) to the last
 	 * ciphertext block.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
-				 ivsize, 0);
+	if (ivsize)
+		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+					 ivsize, ivsize, 0);
 
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, false);
@@ -1796,7 +1858,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_driver_name = "cbc-3des-caam",
 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
 			},
-			.setkey = skcipher_setkey,
+			.setkey = des_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
 			.decrypt = skcipher_decrypt,
 			.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1812,7 +1874,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_driver_name = "cbc-des-caam",
 				.cra_blocksize = DES_BLOCK_SIZE,
 			},
-			.setkey = skcipher_setkey,
+			.setkey = des_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
 			.decrypt = skcipher_decrypt,
 			.min_keysize = DES_KEY_SIZE,
@@ -1878,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = {
 		},
 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
 	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(des)",
+				.cra_driver_name = "ecb-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = des_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(aes)",
+				.cra_driver_name = "ecb-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(des3_ede)",
+				.cra_driver_name = "ecb-des3-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(arc4)",
+				.cra_driver_name = "ecb-arc4-caam",
+				.cra_blocksize = ARC4_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = ARC4_MIN_KEY_SIZE,
+			.max_keysize = ARC4_MAX_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
+	},
 };
 
 static struct caam_aead_alg driver_aeads[] = {
@@ -3337,6 +3459,7 @@ static int __init caam_algapi_init(void)
 	struct caam_drv_private *priv;
 	int i = 0, err = 0;
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+	u32 arc4_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
 
@@ -3381,6 +3504,8 @@ static int __init caam_algapi_init(void)
 			   CHA_ID_LS_DES_SHIFT;
 		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
 		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+		arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+			    CHA_ID_LS_ARC4_SHIFT;
 		ccha_inst = 0;
 		ptha_inst = 0;
 	} else {
@@ -3397,6 +3522,7 @@ static int __init caam_algapi_init(void)
 		md_inst = mdha & CHA_VER_NUM_MASK;
 		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
 		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+		arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
 	}
 
 	/* If MD is present, limit digest size based on LP256 */
@@ -3417,6 +3543,10 @@ static int __init caam_algapi_init(void)
 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
 				continue;
 
+		/* Skip ARC4 algorithms if not supported by device */
+		if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+			continue;
+
 		/*
 		 * Check support for AES modes not available
 		 * on LP devices.
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7db1640d3577..1e1a376edc2f 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,7 +2,7 @@
 /*
  * Shared descriptors for aead, skcipher algorithms
  *
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
  */
 
 #include "compat.h"
@@ -1396,9 +1396,11 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
 
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* Load iv */
-	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+	/* Load IV, if there is one */
+	if (ivsize)
+		append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				LDST_OFFSET_SHIFT));
 
 	/* Load counter into CONTEXT1 reg */
 	if (is_rfc3686)
@@ -1462,9 +1464,11 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
 
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* load IV */
-	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+	/* Load IV, if there is one */
+	if (ivsize)
+		append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				LDST_OFFSET_SHIFT));
 
 	/* Load counter into CONTEXT1 reg */
 	if (is_rfc3686)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c0d55310aade..a15ce9213310 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -782,7 +782,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
 
 			cpu = smp_processor_id();
 			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+			if (!IS_ERR_OR_NULL(drv_ctx))
 				drv_ctx->op_type = type;
 
 			ctx->drv_ctx[type] = drv_ctx;
@@ -802,7 +802,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	if (dst != src) {
 		if (src_nents)
 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+		if (dst_nents)
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 	} else {
 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 	}
@@ -892,7 +893,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	struct caam_drv_ctx *drv_ctx;
 
 	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
-	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+	if (IS_ERR_OR_NULL(drv_ctx))
 		return (struct aead_edesc *)drv_ctx;
 
 	/* allocate space for base edesc and hw desc commands, link tables */
@@ -955,13 +956,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_src_nents = 0;
 		}
 
-		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
-					      DMA_FROM_DEVICE);
-		if (unlikely(!mapped_dst_nents)) {
-			dev_err(qidev, "unable to map destination\n");
-			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
-			qi_cache_free(edesc);
-			return ERR_PTR(-ENOMEM);
+		if (dst_nents) {
+			mapped_dst_nents = dma_map_sg(qidev, req->dst,
+						      dst_nents,
+						      DMA_FROM_DEVICE);
+			if (unlikely(!mapped_dst_nents)) {
+				dev_err(qidev, "unable to map destination\n");
+				dma_unmap_sg(qidev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				qi_cache_free(edesc);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_dst_nents = 0;
 		}
 	}
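Both hunks above apply the same rule: a scatterlist is unmapped only if it was mapped, and an AEAD destination can legitimately have zero entries (for example, a decrypt whose output payload is empty once the tag is trimmed). A condensed in-kernel sketch of keeping the two sides symmetric; map_dst() and unmap_dst() are assumed helpers for illustration, not functions from this patch.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_dst(struct device *dev, struct scatterlist *dst,
		   int dst_nents, int *mapped)
{
	if (!dst_nents) {
		*mapped = 0;		/* nothing to map */
		return 0;
	}
	*mapped = dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	return *mapped ? 0 : -ENOMEM;	/* caller unwinds the src mapping */
}

static void unmap_dst(struct device *dev, struct scatterlist *dst,
		      int dst_nents)
{
	if (dst_nents)			/* mirror the map-side guard exactly */
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
}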
 
@@ -1184,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	struct caam_drv_ctx *drv_ctx;
 
 	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
-	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+	if (IS_ERR_OR_NULL(drv_ctx))
 		return (struct skcipher_edesc *)drv_ctx;
 
 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 425d5d974613..c2c1abc68f81 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -25,13 +25,6 @@
 #define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
 				 SHA512_DIGEST_SIZE * 2)
 
-#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
-#endif
-
 /*
  * This is a cache of buffers, from which users of the CAAM QI driver
  * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
@@ -151,7 +144,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	if (dst != src) {
 		if (src_nents)
 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+		if (dst_nents)
+			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 	} else {
 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 	}
@@ -392,13 +386,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_src_nents = 0;
 		}
 
-		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
-					      DMA_FROM_DEVICE);
-		if (unlikely(!mapped_dst_nents)) {
-			dev_err(dev, "unable to map destination\n");
-			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
-			qi_cache_free(edesc);
-			return ERR_PTR(-ENOMEM);
+		if (dst_nents) {
+			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+						      DMA_FROM_DEVICE);
+			if (unlikely(!mapped_dst_nents)) {
+				dev_err(dev, "unable to map destination\n");
+				dma_unmap_sg(dev, req->src, src_nents,
+					     DMA_TO_DEVICE);
+				qi_cache_free(edesc);
+				return ERR_PTR(-ENOMEM);
+			}
+		} else {
+			mapped_dst_nents = 0;
 		}
 	} else {
 		src_nents = sg_nents_for_len(req->src, req->assoclen +
@@ -4503,7 +4502,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
 		nctx->cb = dpaa2_caam_fqdan_cb;
 
 		/* Register notification callbacks */
-		err = dpaa2_io_service_register(NULL, nctx);
+		ppriv->dpio = dpaa2_io_service_select(cpu);
+		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
 		if (unlikely(err)) {
 			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
 			nctx->cb = NULL;
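From here on every data-path call names an explicit portal: dpaa2_io_service_select(cpu) picks the DPIO affine to the given CPU (or a fallback), and the object is cached per CPU so that register, pull, rearm, enqueue, and deregister all hit the same portal instead of passing NULL for "any". Note the register/deregister calls also gained a struct device argument in this kernel. A condensed sketch of the call pattern, with error handling omitted:

/* setup (per CPU) */
ppriv->dpio = dpaa2_io_service_select(cpu);
dpaa2_io_service_register(ppriv->dpio, &ppriv->nctx, dev);

/* data path */
dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, ppriv->store);
dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);

/* teardown */
dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);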
@@ -4536,7 +4536,7 @@ err:
 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
 		if (!ppriv->nctx.cb)
 			break;
-		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
 	}
 
 	for_each_online_cpu(cpu) {
@@ -4556,7 +4556,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
 
 	for_each_online_cpu(cpu) {
 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
-		dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
+					    priv->dev);
 		dpaa2_io_store_destroy(ppriv->store);
 
 		if (++i == priv->num_pairs)
@@ -4654,7 +4655,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
 
 	/* Retry while portal is busy */
 	do {
-		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
 					       ppriv->store);
 	} while (err == -EBUSY);
 
@@ -4722,7 +4723,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
 
 	if (cleaned < budget) {
 		napi_complete_done(napi, cleaned);
-		err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
 		if (unlikely(err))
 			dev_err(priv->dev, "Notification rearm failed: %d\n",
 				err);
@@ -4863,21 +4864,31 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
 
 	i = 0;
 	for_each_online_cpu(cpu) {
-		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
-			priv->rx_queue_attr[i].fqid,
-			priv->tx_queue_attr[i].fqid);
+		u8 j;
+
+		j = i % priv->num_pairs;
 
 		ppriv = per_cpu_ptr(priv->ppriv, cpu);
-		ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
-		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
-		ppriv->prio = i;
+		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
+
+		/*
+		 * Allow all cores to enqueue, while only some of them
+		 * will take part in dequeuing.
+		 */
+		if (++i > priv->num_pairs)
+			continue;
+
+		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
+		ppriv->prio = j;
+
+		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
+			priv->rx_queue_attr[j].fqid,
+			priv->tx_queue_attr[j].fqid);
 
 		ppriv->net_dev.dev = *dev;
 		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
 		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
 			       DPAA2_CAAM_NAPI_WEIGHT);
-		if (++i == priv->num_pairs)
-			break;
 	}
 
 	return 0;
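The rewritten loop decouples the two roles a CPU can play: every online CPU gets a request FQ (its index wrapped modulo num_pairs), but only the first num_pairs CPUs also get a response FQ and a NAPI instance for dequeuing. A runnable toy of the assignment; eight CPUs and three queue pairs are arbitrary example values.

#include <stdio.h>

int main(void)
{
	int num_cpus = 8, num_pairs = 3;	/* arbitrary example values */
	int i = 0;

	for (int cpu = 0; cpu < num_cpus; cpu++) {
		int j = i % num_pairs;

		printf("cpu%d: tx fq %d", cpu, j);
		if (++i > num_pairs) {		/* extra CPUs only enqueue */
			printf("\n");
			continue;
		}
		printf(", rx fq %d + napi\n", j);
	}
	return 0;
}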
@@ -5229,7 +5240,8 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
 {
 	struct dpaa2_fd fd;
 	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
-	int err = 0, i, id;
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int err = 0, i;
 
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -5259,23 +5271,18 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
 	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
 	dpaa2_fd_set_flc(&fd, req->flc_dma);
 
-	/*
-	 * There is no guarantee that preemption is disabled here,
-	 * thus take action.
-	 */
-	preempt_disable();
-	id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
+	ppriv = this_cpu_ptr(priv->ppriv);
 	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
-		err = dpaa2_io_service_enqueue_fq(NULL,
-						  priv->tx_queue_attr[id].fqid,
+		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
 						  &fd);
 		if (err != -EBUSY)
 			break;
+
+		cpu_relax();
 	}
-	preempt_enable();
 
 	if (unlikely(err)) {
-		dev_err(dev, "Error enqueuing frame: %d\n", err);
+		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
 		goto err_out;
 	}
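Dropping preempt_disable() is safe precisely because of the setup change above: every CPU now owns a valid req_fqid, so if the task migrates after this_cpu_ptr() it merely enqueues on another CPU's perfectly usable frame queue. A busy portal is retried with cpu_relax(), and the error print is ratelimited because this runs on the hot path. A condensed sketch of the loop shape, with frame setup omitted and "retries" standing in for the doubled queue count:

ppriv = this_cpu_ptr(priv->ppriv);	/* no preemption fence needed */
for (i = 0; i < retries; i++) {
	err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd);
	if (err != -EBUSY)
		break;
	cpu_relax();			/* portal busy: back off briefly */
}
if (unlikely(err))
	dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);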
 
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 9823bdefd029..20890780fb82 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -76,6 +76,7 @@ struct dpaa2_caam_priv {
  * @nctx: notification context of response FQ
  * @store: where dequeued frames are stored
  * @priv: backpointer to dpaa2_caam_priv
+ * @dpio: portal used for data path operations
  */
 struct dpaa2_caam_priv_per_cpu {
 	struct napi_struct napi;
@@ -86,6 +87,7 @@ struct dpaa2_caam_priv_per_cpu {
 	struct dpaa2_io_notification_ctx nctx;
 	struct dpaa2_io_store *store;
 	struct dpaa2_caam_priv *priv;
+	struct dpaa2_io *dpio;
 };
 
 /*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index bb1a2cdf1951..d7483e4d0ce2 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for ahash functions of crypto API
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  *
  * Based on caamalg.c crypto API driver.
  *
@@ -98,13 +98,14 @@ struct caam_hash_ctx {
 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
 	dma_addr_t sh_desc_update_first_dma;
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
+	dma_addr_t key_dma;
 	enum dma_data_direction dir;
 	struct device *jrdev;
-	u8 key[CAAM_MAX_HASH_KEY_SIZE];
 	int ctx_len;
 	struct alginfo adata;
 };
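Moving key between the shared descriptors and the DMA-address members means the single mapping in caam_hash_cra_init() further below, sized with offsetof(struct caam_hash_ctx, key), covers exactly the four descriptors, while the key gets its own bidirectional mapping (key_dma) so the XCBC K1 derivative can be written back in place. A runnable toy of bounding a mapping with offsetof(); the struct is a hypothetical mirror, not the real layout.

#include <stdio.h>
#include <stddef.h>

/* Everything before 'key' is covered by one mapping; 'key' is mapped
 * separately; the dma_addr_t-style fields are never device-visible. */
struct hash_ctx {
	unsigned char sh_desc_update[64];
	unsigned char sh_desc_fin[64];
	unsigned char key[128];
	unsigned long sh_desc_update_dma;
};

int main(void)
{
	printf("descriptor mapping covers %zu bytes\n",
	       offsetof(struct hash_ctx, key));
	return 0;
}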
@@ -113,6 +114,7 @@ struct caam_hash_ctx {
 struct caam_hash_state {
 	dma_addr_t buf_dma;
 	dma_addr_t ctx_dma;
+	int ctx_dma_len;
 	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	int buflen_0;
 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -158,6 +160,11 @@ static inline int *alt_buflen(struct caam_hash_state *state)
 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
 }
 
+static inline bool is_cmac_aes(u32 algtype)
+{
+	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
+}
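is_cmac_aes(), like is_xcbc_aes() added to caamhash_desc.h further below, classifies an algorithm by masking out everything except the algorithm-selector and AAI mode bits, so any state or type bits mixed into algtype cannot confuse the test. A runnable toy with invented constants in place of the real OP_ALG_* values:

#include <stdio.h>

#define ALGSEL_MASK	0xff0000u	/* hypothetical bit layout */
#define AAI_MASK	0x00ff00u
#define ALGSEL_AES	0x100000u
#define AAI_CMAC	0x002000u

static int is_cmac_aes(unsigned int algtype)
{
	return (algtype & (ALGSEL_MASK | AAI_MASK)) ==
	       (ALGSEL_AES | AAI_CMAC);
}

int main(void)
{
	/* extra low bits (state/type) must not affect the result */
	printf("%d %d\n", is_cmac_aes(ALGSEL_AES | AAI_CMAC | 0x3),
	       is_cmac_aes(ALGSEL_AES));
	return 0;
}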
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -165,6 +172,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
 				      struct caam_hash_state *state,
 				      int ctx_len)
 {
+	state->ctx_dma_len = ctx_len;
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
 					ctx_len, DMA_FROM_DEVICE);
 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
@@ -178,18 +186,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
 	return 0;
 }
 
-/* Map req->result, and append seq_out_ptr command that points to it */
-static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
-						u8 *result, int digestsize)
-{
-	dma_addr_t dst_dma;
-
-	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
-	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
-
-	return dst_dma;
-}
-
 /* Map current buffer in state (if length > 0) and put it in link table */
 static inline int buf_map_to_sec4_sg(struct device *jrdev,
 				     struct sec4_sg_entry *sec4_sg,
@@ -218,6 +214,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
 				     struct caam_hash_state *state, int ctx_len,
 				     struct sec4_sg_entry *sec4_sg, u32 flag)
 {
+	state->ctx_dma_len = ctx_len;
 	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
 	if (dma_mapping_error(jrdev, state->ctx_dma)) {
 		dev_err(jrdev, "unable to map ctx\n");
@@ -292,14 +289,119 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	return 0;
 }
 
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
+	/* key is loaded from memory for UPDATE and FINALIZE states */
+	ctx->adata.key_dma = ctx->key_dma;
+
+	/* shared descriptor for ahash_update */
+	desc = ctx->sh_desc_update;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+			    ctx->ctx_len, ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* shared descriptor for ahash_{final,finup} */
+	desc = ctx->sh_desc_fin;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+			    digestsize, ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* key is immediate data for INIT and INITFINAL states */
+	ctx->adata.key_virt = ctx->key;
+
+	/* shared descriptor for first invocation of ahash_update */
+	desc = ctx->sh_desc_update_first;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			    ctx->ctx_len, ctx->key_dma);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* shared descriptor for ahash_digest */
+	desc = ctx->sh_desc_digest;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+			    digestsize, ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+	return 0;
+}
+
+static int acmac_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
+	/* shared descriptor for ahash_update */
+	desc = ctx->sh_desc_update;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
+			    ctx->ctx_len, ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for ahash_{final,finup} */
+	desc = ctx->sh_desc_fin;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
+			    digestsize, ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for first invocation of ahash_update */
+	desc = ctx->sh_desc_update_first;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			    ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	/* shared descriptor for ahash_digest */
+	desc = ctx->sh_desc_digest;
+	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
+			    digestsize, ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
+			     desc_bytes(desc), 1);
+
+	return 0;
+}
+
 /* Digest the key if it is larger than the block size */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-			   u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+			   u32 digestsize)
 {
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
 	struct split_key_result result;
-	dma_addr_t src_dma, dst_dma;
+	dma_addr_t key_dma;
 	int ret;
 
 	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
@@ -310,18 +412,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 
 	init_job_desc(desc, 0);
 
-	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
-				 DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, src_dma)) {
-		dev_err(jrdev, "unable to map key input memory\n");
-		kfree(desc);
-		return -ENOMEM;
-	}
-	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
-				 DMA_FROM_DEVICE);
-	if (dma_mapping_error(jrdev, dst_dma)) {
-		dev_err(jrdev, "unable to map key output memory\n");
-		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(jrdev, key_dma)) {
+		dev_err(jrdev, "unable to map key memory\n");
 		kfree(desc);
 		return -ENOMEM;
 	}
@@ -329,16 +422,16 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 	/* Job descriptor to perform unkeyed hash on key_in */
 	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
 			 OP_ALG_AS_INITFINAL);
-	append_seq_in_ptr(desc, src_dma, *keylen, 0);
+	append_seq_in_ptr(desc, key_dma, *keylen, 0);
 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
 			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
-	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+	append_seq_out_ptr(desc, key_dma, digestsize, 0);
 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
@@ -354,12 +447,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR,
 			       "digested key@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
-			       digestsize, 1);
+			       DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
 #endif
 	}
-	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
-	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);
 
 	*keylen = digestsize;
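Digesting the key in place halves the mapping bookkeeping: the buffer is both job input and output, so one DMA_BIDIRECTIONAL mapping replaces the old TO_DEVICE/FROM_DEVICE pair and one error path disappears; the same pattern is applied to gen_split_key() in key_gen.c further below. A condensed in-kernel sketch; run_inplace() is an assumed helper for illustration, not from this patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int run_inplace(struct device *dev, void *buf, size_t len)
{
	/* one buffer, one mapping: the job reads and writes 'dma' */
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ...submit the job against 'dma' and wait for completion... */
	dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);
	return 0;
}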
 
@@ -383,13 +474,10 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 #endif
 
 	if (keylen > blocksize) {
-		hashed_key = kmalloc_array(digestsize,
-					   sizeof(*hashed_key),
-					   GFP_KERNEL | GFP_DMA);
+		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
 		if (!hashed_key)
 			return -ENOMEM;
-		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
-				      digestsize);
+		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
 		if (ret)
 			goto bad_free_key;
 		key = hashed_key;
@@ -424,9 +512,39 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 	return -EINVAL;
 }
 
+static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
+			unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *jrdev = ctx->jrdev;
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+	ctx->adata.keylen = keylen;
+
+	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
+
+	return axcbc_set_sh_desc(ahash);
+}
+
+static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+			unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+	/* key is immediate data for all cmac shared descriptors */
+	ctx->adata.key_virt = key;
+	ctx->adata.keylen = keylen;
+
+	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	return acmac_set_sh_desc(ahash);
+}
+
 /*
  * ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: physical mapped address of req->result
  * @sec4_sg_dma: physical mapped address of h/w link table
  * @src_nents: number of segments in input scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -434,7 +552,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
  * @sec4_sg: h/w link table
  */
 struct ahash_edesc {
-	dma_addr_t dst_dma;
 	dma_addr_t sec4_sg_dma;
 	int src_nents;
 	int sec4_sg_bytes;
@@ -450,8 +567,6 @@ static inline void ahash_unmap(struct device *dev,
 
 	if (edesc->src_nents)
 		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
-	if (edesc->dst_dma)
-		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
 
 	if (edesc->sec4_sg_bytes)
 		dma_unmap_single(dev, edesc->sec4_sg_dma,
@@ -468,12 +583,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
 			struct ahash_edesc *edesc,
 			struct ahash_request *req, int dst_len, u32 flag)
 {
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	if (state->ctx_dma) {
-		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
 		state->ctx_dma = 0;
 	}
 	ahash_unmap(dev, edesc, req, dst_len);
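Recording ctx_dma_len at map time fixes a real unmap-size bug: the context buffer is sometimes mapped as ctx_len and sometimes as digestsize, so unmapping with a recomputed ctx->ctx_len could disagree with what was actually mapped. A sketch of the rule, assuming the caam_hash_state fields shown earlier; the helper names are illustrative, mirroring map_seq_out_ptr_ctx() and ahash_unmap_ctx() above.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_ctx(struct device *dev, struct caam_hash_state *state,
		   int ctx_len, enum dma_data_direction dir)
{
	state->ctx_dma_len = ctx_len;	/* remember the exact mapped length */
	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, dir);
	return dma_mapping_error(dev, state->ctx_dma) ? -ENOMEM : 0;
}

static void unmap_ctx(struct device *dev, struct caam_hash_state *state,
		      enum dma_data_direction dir)
{
	/* always the recorded length, never a recomputed one */
	dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, dir);
	state->ctx_dma = 0;
}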
@@ -486,9 +599,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	struct ahash_edesc *edesc;
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
 #ifdef DEBUG
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
@@ -497,17 +610,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap(jrdev, edesc, req, digestsize);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	kfree(edesc);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 		       ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-			       digestsize, 1);
 #endif
 
 	req->base.complete(&req->base, err);
@@ -555,9 +665,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	struct ahash_edesc *edesc;
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 	int digestsize = crypto_ahash_digestsize(ahash);
+	struct caam_hash_state *state = ahash_request_ctx(req);
 #ifdef DEBUG
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-	struct caam_hash_state *state = ahash_request_ctx(req);
 
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
@@ -566,17 +676,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
+	memcpy(req->result, state->caam_ctx, digestsize);
 	kfree(edesc);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
 		       ctx->ctx_len, 1);
-	if (req->result)
-		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
-			       digestsize, 1);
 #endif
 
 	req->base.complete(&req->base, err);
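Every completion path now unmaps first and then copies the digest out of state->caam_ctx. The likely motivation (the changelog is not quoted here) is that req->result is caller-owned memory with no DMA guarantees, whereas the driver-owned context buffer is always safely mappable; it also lets map_seq_out_ptr_result() and edesc->dst_dma go away entirely. The resulting pattern, as seen in both handlers above:

ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
memcpy(req->result, state->caam_ctx, digestsize);	/* copy after unmap */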
@@ -688,6 +795,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 	u8 *buf = current_buf(state);
 	int *buflen = current_buflen(state);
 	u8 *next_buf = alt_buf(state);
+	int blocksize = crypto_ahash_blocksize(ahash);
 	int *next_buflen = alt_buflen(state), last_buflen;
 	int in_len = *buflen + req->nbytes, to_hash;
 	u32 *desc;
@@ -696,9 +804,20 @@ static int ahash_update_ctx(struct ahash_request *req)
 	int ret = 0;
 
 	last_buflen = *next_buflen;
-	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	*next_buflen = in_len & (blocksize - 1);
 	to_hash = in_len - *next_buflen;
 
+	/*
+	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
+	 * keep the last block in the internal buffer
+	 */
+	if ((is_xcbc_aes(ctx->adata.algtype) ||
+	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+	     (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - (*next_buflen));
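For XCBC and CMAC the final block is special (it is masked with a derived subkey before the last encryption), so an update must never consume an input that is an exact multiple of the block size completely; one full block is always held back for finalization. A runnable toy of the arithmetic, with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	int blocksize = 16, buflen = 0, nbytes = 64;	/* hypothetical */
	int in_len = buflen + nbytes;
	int next_buflen = in_len & (blocksize - 1);	/* partial tail */
	int to_hash = in_len - next_buflen;

	/* exact multiple: hold one full block back for finalization */
	if (to_hash >= blocksize && next_buflen == 0) {
		next_buflen = blocksize;
		to_hash -= blocksize;
	}
	printf("hash now: %d bytes, buffered: %d bytes\n",
	       to_hash, next_buflen);
	return 0;
}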
@@ -801,7 +920,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 #endif
 
 	return ret;
- unmap_ctx:
+unmap_ctx:
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
 	kfree(edesc);
 	return ret;
@@ -837,7 +956,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 
 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
-				 edesc->sec4_sg, DMA_TO_DEVICE);
+				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -857,14 +976,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 
 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
 			  LDST_SGF);
-
-	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
-						digestsize);
-	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
-		dev_err(jrdev, "unable to map dst\n");
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
+	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -877,7 +989,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 
 	return -EINPROGRESS;
  unmap_ctx:
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
 	kfree(edesc);
 	return ret;
 }
@@ -931,7 +1043,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	edesc->src_nents = src_nents;
 
 	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
-				 edesc->sec4_sg, DMA_TO_DEVICE);
+				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
 	if (ret)
 		goto unmap_ctx;
 
@@ -945,13 +1057,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
-						digestsize);
-	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
-		dev_err(jrdev, "unable to map dst\n");
-		ret = -ENOMEM;
-		goto unmap_ctx;
-	}
+	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -964,7 +1070,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
 	return -EINPROGRESS;
  unmap_ctx:
-	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
 	kfree(edesc);
 	return ret;
 }
@@ -1023,10 +1129,8 @@ static int ahash_digest(struct ahash_request *req)
 
 	desc = edesc->hw_desc;
 
-	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
-						digestsize);
-	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
-		dev_err(jrdev, "unable to map dst\n");
+	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+	if (ret) {
 		ahash_unmap(jrdev, edesc, req, digestsize);
 		kfree(edesc);
 		return -ENOMEM;
@@ -1041,7 +1145,7 @@ static int ahash_digest(struct ahash_request *req)
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		ahash_unmap(jrdev, edesc, req, digestsize);
+		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
 		kfree(edesc);
 	}
 
@@ -1083,12 +1187,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
 	}
 
-	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
-						digestsize);
-	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
-		dev_err(jrdev, "unable to map dst\n");
+	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+	if (ret)
 		goto unmap;
-	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1099,7 +1200,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		ahash_unmap(jrdev, edesc, req, digestsize);
+		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
 		kfree(edesc);
 	}
 
@@ -1122,6 +1223,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = current_buf(state);
 	int *buflen = current_buflen(state);
+	int blocksize = crypto_ahash_blocksize(ahash);
 	u8 *next_buf = alt_buf(state);
 	int *next_buflen = alt_buflen(state);
 	int in_len = *buflen + req->nbytes, to_hash;
@@ -1130,9 +1232,20 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	*next_buflen = in_len & (blocksize - 1);
 	to_hash = in_len - *next_buflen;
 
+	/*
+	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
+	 * keep the last block in the internal buffer
+	 */
+	if ((is_xcbc_aes(ctx->adata.algtype) ||
+	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+	     (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - *next_buflen);
@@ -1298,12 +1411,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 		goto unmap;
 	}
 
-	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
-						digestsize);
-	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
-		dev_err(jrdev, "unable to map dst\n");
+	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
+	if (ret)
 		goto unmap;
-	}
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1314,7 +1424,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		ahash_unmap(jrdev, edesc, req, digestsize);
+		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
 		kfree(edesc);
 	}
 
@@ -1338,15 +1448,26 @@ static int ahash_update_first(struct ahash_request *req)
 	u8 *next_buf = alt_buf(state);
 	int *next_buflen = alt_buflen(state);
 	int to_hash;
+	int blocksize = crypto_ahash_blocksize(ahash);
 	u32 *desc;
 	int src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
-	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
-				      1);
+	*next_buflen = req->nbytes & (blocksize - 1);
 	to_hash = req->nbytes - *next_buflen;
 
+	/*
+	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
+	 * keep the last block in the internal buffer
+	 */
+	if ((is_xcbc_aes(ctx->adata.algtype) ||
+	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
+	     (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - *next_buflen);
@@ -1446,6 +1567,7 @@ static int ahash_init(struct ahash_request *req)
 	state->final = ahash_final_no_ctx;
 
 	state->ctx_dma = 0;
+	state->ctx_dma_len = 0;
 	state->current_buf = 0;
 	state->buf_dma = 0;
 	state->buflen_0 = 0;
@@ -1654,6 +1776,44 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_MD5,
+	}, {
+		.hmac_name = "xcbc(aes)",
+		.hmac_driver_name = "xcbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = axcbc_setkey,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		 },
+		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
+	}, {
+		.hmac_name = "cmac(aes)",
+		.hmac_driver_name = "cmac-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = acmac_setkey,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		 },
+		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
 	},
 };
 
@@ -1695,14 +1855,45 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 	}
 
 	priv = dev_get_drvdata(ctx->jrdev->parent);
-	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (is_xcbc_aes(caam_hash->alg_type)) {
+		ctx->dir = DMA_TO_DEVICE;
+		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+		ctx->ctx_len = 48;
+
+		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
+						    ARRAY_SIZE(ctx->key),
+						    DMA_BIDIRECTIONAL,
+						    DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+			dev_err(ctx->jrdev, "unable to map key\n");
+			caam_jr_free(ctx->jrdev);
+			return -ENOMEM;
+		}
+	} else if (is_cmac_aes(caam_hash->alg_type)) {
+		ctx->dir = DMA_TO_DEVICE;
+		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+		ctx->ctx_len = 32;
+	} else {
+		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+		ctx->ctx_len = runninglen[(ctx->adata.algtype &
+					   OP_ALG_ALGSEL_SUBMASK) >>
+					  OP_ALG_ALGSEL_SHIFT];
+	}
 
 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
-					offsetof(struct caam_hash_ctx,
-						 sh_desc_update_dma),
+					offsetof(struct caam_hash_ctx, key),
 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+
+		if (is_xcbc_aes(caam_hash->alg_type))
+			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+					       ARRAY_SIZE(ctx->key),
+					       DMA_BIDIRECTIONAL,
+					       DMA_ATTR_SKIP_CPU_SYNC);
+
 		caam_jr_free(ctx->jrdev);
 		return -ENOMEM;
 	}
@@ -1716,16 +1907,14 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
 						      sh_desc_digest);
 
-	/* copy descriptor header template value */
-	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-
-	ctx->ctx_len = runninglen[(ctx->adata.algtype &
-				   OP_ALG_ALGSEL_SUBMASK) >>
-				  OP_ALG_ALGSEL_SHIFT];
-
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct caam_hash_state));
-	return ahash_set_sh_desc(ahash);
+
+	/*
+	 * For keyed hash algorithms, shared descriptors
+	 * will be created later in the setkey() callback
+	 */
+	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
 }
 
 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1733,9 +1922,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
-			       offsetof(struct caam_hash_ctx,
-					sh_desc_update_dma),
+			       offsetof(struct caam_hash_ctx, key),
 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (is_xcbc_aes(ctx->adata.algtype))
+		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
+				       DMA_ATTR_SKIP_CPU_SYNC);
 	caam_jr_free(ctx->jrdev);
 }
 
@@ -1866,14 +2058,16 @@ static int __init caam_algapi_hash_init(void)
 		struct caam_hash_template *alg = driver_hash + i;
 
 		/* If MD size is not supported by device, skip registration */
-		if (alg->template_ahash.halg.digestsize > md_limit)
+		if (is_mdha(alg->alg_type) &&
+		    alg->template_ahash.halg.digestsize > md_limit)
 			continue;
 
 		/* register hmac version */
 		t_alg = caam_hash_alloc(alg, true);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n", alg->driver_name);
+			pr_warn("%s alg allocation failed\n",
+				alg->hmac_driver_name);
 			continue;
 		}
 
@@ -1886,6 +2080,9 @@ static int __init caam_algapi_hash_init(void)
 		} else
 			list_add_tail(&t_alg->entry, &hash_list);
 
+		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
+			continue;
+
 		/* register unkeyed version */
 		t_alg = caam_hash_alloc(alg, false);
 		if (IS_ERR(t_alg)) {
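An unkeyed xcbc(aes) or cmac(aes) instance would be meaningless, so registration of the unkeyed variant is skipped whenever the selector is AES; likewise, the digest-size limit is now applied only to MDHA-backed hashes, since the AES-based MACs share the same template table. A runnable toy of the loop shape; the strstr() test merely stands in for the real OP_ALG_ALGSEL comparison.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *algs[] = { "sha256", "md5", "xcbc(aes)", "cmac(aes)" };

	for (unsigned int i = 0; i < sizeof(algs) / sizeof(algs[0]); i++) {
		printf("register keyed %s\n", algs[i]);
		if (strstr(algs[i], "aes"))	/* stand-in for ALGSEL test */
			continue;		/* no unkeyed AES-MAC variant */
		printf("register unkeyed %s\n", algs[i]);
	}
	return 0;
}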
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
index a12f7959a2c3..71d018343ee4 100644
--- a/drivers/crypto/caam/caamhash_desc.c
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -2,7 +2,7 @@
 /*
  * Shared descriptors for ahash algorithms
  *
- * Copyright 2017 NXP
+ * Copyright 2017-2019 NXP
  */
 
 #include "compat.h"
@@ -75,6 +75,72 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
 }
 EXPORT_SYMBOL(cnstr_shdsc_ahash);
 
+/**
+ * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based
+ *                       hash algorithms
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @key_dma: I/O Virtual Address of the key
+ */
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+			 int digestsize, int ctx_len, dma_addr_t key_dma)
+{
+	u32 *skip_key_load;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+	/* Skip loading of key, context if already shared */
+	/* Skip loading of key and context if already shared */
+
+	if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) {
+		append_key_as_imm(desc, adata->key_virt, adata->keylen,
+				  adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	} else { /* UPDATE, FINALIZE */
+		if (is_xcbc_aes(adata->algtype))
+			/* Load K1 */
+			append_key(desc, adata->key_dma, adata->keylen,
+				   CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC);
+		else /* CMAC */
+			append_key_as_imm(desc, adata->key_virt, adata->keylen,
+					  adata->keylen, CLASS_1 |
+					  KEY_DEST_CLASS_REG);
+		/* Restore context */
+		append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT);
+	}
+
+	set_jump_tgt_here(desc, skip_key_load);
+
+	/* Class 1 operation */
+	append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT);
+
+	/*
+	 * Load from buf and/or src and write to req->result or state->context
+	 * Calculate remaining bytes to read
+	 */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Read remaining bytes */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
+			     FIFOLD_TYPE_MSG | FIFOLDST_VLF);
+
+	/*
+	 * Save context:
+	 * - xcbc: partial hash, keys K2 and K3
+	 * - cmac: partial hash, constant L = E(K,0)
+	 */
+	append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+	if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT)
+		/* Save K1 */
+		append_fifo_store(desc, key_dma, adata->keylen,
+				  LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
+}
+EXPORT_SYMBOL(cnstr_shdsc_sk_hash);
+
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
 MODULE_AUTHOR("NXP Semiconductors");
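The descriptor opens with the usual shared-descriptor trick: append_jump() emits a conditional jump with an unresolved target, and once the key/context load commands are in place set_jump_tgt_here() patches the offset, so a job that already shares the descriptor state skips the reload. A runnable toy of that two-pass fixup; the 32-bit command encoding is invented.

#include <stdio.h>

int main(void)
{
	unsigned int desc[16], n = 0, jump_idx;

	jump_idx = n;
	desc[n++] = 0xA0000000u;	/* jump-if-shared, offset unknown yet */
	desc[n++] = 0xB0000001u;	/* key load */
	desc[n++] = 0xB0000002u;	/* context restore */
	desc[jump_idx] |= n - jump_idx;	/* set_jump_tgt_here(): patch offset */

	printf("jump skips %u command words\n", desc[jump_idx] & 0xffu);
	return 0;
}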
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
index 631fc1ac312c..6947ee1f200c 100644
--- a/drivers/crypto/caam/caamhash_desc.h
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -15,7 +15,15 @@
 #define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
 #define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
 
+static inline bool is_xcbc_aes(u32 algtype)
+{
+	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
+}
+
 void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
 		       int digestsize, int ctx_len, bool import_ctx, int era);
 
+void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state,
+			 int digestsize, int ctx_len, dma_addr_t key_dma);
 #endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 87d9efe4c7aa..8639b2df0371 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -43,6 +43,7 @@
 #include <crypto/akcipher.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
+#include <crypto/arc4.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 16bbc72f041a..858bdc9ab4a3 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -18,12 +18,8 @@
 #include "desc_constr.h"
 #include "ctrl.h"
 
-bool caam_little_end;
-EXPORT_SYMBOL(caam_little_end);
 bool caam_dpaa2;
 EXPORT_SYMBOL(caam_dpaa2);
-bool caam_imx;
-EXPORT_SYMBOL(caam_imx);
 
 #ifdef CONFIG_CAAM_QI
 #include "qi.h"
@@ -863,27 +859,18 @@ static int caam_probe(struct platform_device *pdev)
 	/* Internal covering keys (useful in non-secure mode only) */
 	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
 	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
-	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
-						S_IRUSR |
-						S_IRGRP | S_IROTH,
-						ctrlpriv->ctl,
-						&ctrlpriv->ctl_kek_wrap);
+	debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+			    &ctrlpriv->ctl_kek_wrap);
 
 	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
 	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
-	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
-						 S_IRUSR |
-						 S_IRGRP | S_IROTH,
-						 ctrlpriv->ctl,
-						 &ctrlpriv->ctl_tkek_wrap);
+	debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+			    &ctrlpriv->ctl_tkek_wrap);
 
 	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
 	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
-	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
-						 S_IRUSR |
-						 S_IRGRP | S_IROTH,
-						 ctrlpriv->ctl,
-						 &ctrlpriv->ctl_tdsk_wrap);
+	debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
+			    &ctrlpriv->ctl_tdsk_wrap);
 #endif
 	return 0;
 
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e8d690f2827..21a70fd32f5d 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
 #endif /* DEBUG */
 EXPORT_SYMBOL(caam_dump_sg);
 
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+
 static const struct {
 	u8 value;
 	const char *error_text;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index babc78abd155..5869ad58d497 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -106,7 +106,6 @@ struct caam_drv_private {
 	struct dentry *dfs_root;
 	struct dentry *ctl; /* controller dir */
 	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
-	struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
 #endif
 };
 
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 312b5f042f31..8d0713fae6ac 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -48,7 +48,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 {
 	u32 *desc;
 	struct split_key_result result;
-	dma_addr_t dma_addr_in, dma_addr_out;
+	dma_addr_t dma_addr;
 	int ret = -ENOMEM;
 
 	adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
@@ -71,22 +71,17 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 		return ret;
 	}
 
-	dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
-				     DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, dma_addr_in)) {
-		dev_err(jrdev, "unable to map key input memory\n");
-		goto out_free;
-	}
+	memcpy(key_out, key_in, keylen);
 
-	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
-				      DMA_FROM_DEVICE);
-	if (dma_mapping_error(jrdev, dma_addr_out)) {
-		dev_err(jrdev, "unable to map key output memory\n");
-		goto out_unmap_in;
+	dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad,
+				  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(jrdev, dma_addr)) {
+		dev_err(jrdev, "unable to map key memory\n");
+		goto out_free;
 	}
 
 	init_job_desc(desc, 0);
-	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+	append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
 
 	/* Sets MDHA up into an HMAC-INIT */
 	append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
@@ -104,12 +99,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 	 * FIFO_STORE with the explicit split-key content store
 	 * (0x26 output type)
 	 */
-	append_fifo_store(desc, dma_addr_out, adata->keylen,
+	append_fifo_store(desc, dma_addr, adata->keylen,
 			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
 	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
@@ -129,10 +122,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
 #endif
 	}
 
-	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
-			 DMA_FROM_DEVICE);
-out_unmap_in:
-	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+	dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL);
 out_free:
 	kfree(desc);
 	return ret;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b84e6c8b1e13..7cb8b1755e57 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
 	/* Create a new req FQ in parked state */
 	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
 				    drv_ctx->context_a, 0);
-	if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+	if (IS_ERR_OR_NULL(new_fq)) {
 		dev_err(qidev, "FQ allocation for shdesc update failed\n");
 		return PTR_ERR(new_fq);
 	}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
 	/* Attach request FQ */
 	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
 					     QMAN_INITFQ_FLAG_SCHED);
-	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+	if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
 		dev_err(qidev, "create_caam_req_fq failed\n");
 		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
 		kfree(drv_ctx);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
index 0196b992280f..848ec93d4333 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -55,31 +55,14 @@ void nitrox_debugfs_exit(struct nitrox_device *ndev)
 	ndev->debugfs_dir = NULL;
 }
 
-int nitrox_debugfs_init(struct nitrox_device *ndev)
+void nitrox_debugfs_init(struct nitrox_device *ndev)
 {
-	struct dentry *dir, *f;
+	struct dentry *dir;
 
 	dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!dir)
-		return -ENOMEM;
 
 	ndev->debugfs_dir = dir;
-	f = debugfs_create_file("firmware", 0400, dir, ndev,
-				&firmware_fops);
-	if (!f)
-		goto err;
-	f = debugfs_create_file("device", 0400, dir, ndev,
-				&device_fops);
-	if (!f)
-		goto err;
-	f = debugfs_create_file("stats", 0400, dir, ndev,
-				&stats_fops);
-	if (!f)
-		goto err;
-
-	return 0;
-
-err:
-	nitrox_debugfs_exit(ndev);
-	return -ENODEV;
+	debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+	debugfs_create_file("device", 0400, dir, ndev, &device_fops);
+	debugfs_create_file("stats", 0400, dir, ndev, &stats_fops);
 }
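This conversion (and the ccp and zip ones below) relies on the debugfs design rule that callers should never need to check return values: debugfs_create_dir() and debugfs_create_file() accept error-pointer parents and degrade gracefully, so the init function can return void and all the unwind labels disappear. A minimal in-kernel sketch of the idiom; the names and fops are placeholders.

#include <linux/debugfs.h>
#include <linux/fs.h>

static const struct file_operations example_fops;	/* placeholder fops */

/* no return value, no NULL/IS_ERR checks: debugfs tolerates failures */
static void example_debugfs_init(void *data)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	debugfs_create_file("stats", 0400, dir, data, &example_fops);
}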
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
index a8d85ffa619c..f177b79bbab0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -5,12 +5,11 @@
 #include "nitrox_dev.h"
 
 #ifdef CONFIG_DEBUG_FS
-int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_init(struct nitrox_device *ndev);
 void nitrox_debugfs_exit(struct nitrox_device *ndev);
 #else
-static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+static inline void nitrox_debugfs_init(struct nitrox_device *ndev)
 {
-	return 0;
 }
 
 static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 014e9863c20e..faa78f651238 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -404,9 +404,7 @@ static int nitrox_probe(struct pci_dev *pdev,
 	if (err)
 		goto pf_hw_fail;
 
-	err = nitrox_debugfs_init(ndev);
-	if (err)
-		goto pf_hw_fail;
+	nitrox_debugfs_init(ndev);
 
 	/* clear the statistics */
 	atomic64_set(&ndev->stats.posted, 0);
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index be055b9547f6..a8447a3cf366 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -351,6 +351,7 @@ static struct pci_driver zip_driver = {
 
 static struct crypto_alg zip_comp_deflate = {
 	.cra_name		= "deflate",
+	.cra_driver_name	= "deflate-cavium",
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
 	.cra_priority           = 300,
@@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = {
 
 static struct crypto_alg zip_comp_lzs = {
 	.cra_name		= "lzs",
+	.cra_driver_name	= "lzs-cavium",
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize		= sizeof(struct zip_kernel_ctx),
 	.cra_priority           = 300,
@@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = {
 	.decompress		= zip_scomp_decompress,
 	.base			= {
 		.cra_name		= "deflate",
-		.cra_driver_name	= "deflate-scomp",
+		.cra_driver_name	= "deflate-scomp-cavium",
 		.cra_module		= THIS_MODULE,
 		.cra_priority           = 300,
 	}
@@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = {
 	.decompress		= zip_scomp_decompress,
 	.base			= {
 		.cra_name		= "lzs",
-		.cra_driver_name	= "lzs-scomp",
+		.cra_driver_name	= "lzs-scomp-cavium",
 		.cra_module		= THIS_MODULE,
 		.cra_priority           = 300,
 	}
@@ -618,41 +620,23 @@ static const struct file_operations zip_regs_fops = {
 /* Root directory for thunderx_zip debugfs entry */
 static struct dentry *zip_debugfs_root;
 
-static int __init zip_debugfs_init(void)
+static void __init zip_debugfs_init(void)
 {
-	struct dentry *zip_stats, *zip_clear, *zip_regs;
-
 	if (!debugfs_initialized())
-		return -ENODEV;
+		return;
 
 	zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
-	if (!zip_debugfs_root)
-		return -ENOMEM;
 
 	/* Creating files for entries inside thunderx_zip directory */
-	zip_stats = debugfs_create_file("zip_stats", 0444,
-					zip_debugfs_root,
-					NULL, &zip_stats_fops);
-	if (!zip_stats)
-		goto failed_to_create;
-
-	zip_clear = debugfs_create_file("zip_clear", 0444,
-					zip_debugfs_root,
-					NULL, &zip_clear_fops);
-	if (!zip_clear)
-		goto failed_to_create;
-
-	zip_regs = debugfs_create_file("zip_regs", 0444,
-				       zip_debugfs_root,
-				       NULL, &zip_regs_fops);
-	if (!zip_regs)
-		goto failed_to_create;
+	debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
+			    &zip_stats_fops);
 
-	return 0;
+	debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
+			    &zip_clear_fops);
+
+	debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
+			    &zip_regs_fops);
 
-failed_to_create:
-	debugfs_remove_recursive(zip_debugfs_root);
-	return -ENOENT;
 }
 
 static void __exit zip_debugfs_exit(void)
@@ -661,13 +645,8 @@ static void __exit zip_debugfs_exit(void)
 }
 
 #else
-static int __init zip_debugfs_init(void)
-{
-	return 0;
-}
-
+static void __init zip_debugfs_init(void) { }
 static void __exit zip_debugfs_exit(void) { }
-
 #endif
 /* debugfs - end */
 
@@ -691,17 +670,10 @@ static int __init zip_init_module(void)
 	}
 
 	/* comp-decomp statistics are handled with debugfs interface */
-	ret = zip_debugfs_init();
-	if (ret < 0) {
-		zip_err("ZIP: debugfs initialization failed\n");
-		goto err_crypto_unregister;
-	}
+	zip_debugfs_init();
 
 	return ret;
 
-err_crypto_unregister:
-	zip_unregister_compression_device();
-
 err_pci_unregister:
 	pci_unregister_driver(&zip_driver);
 	return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 9108015e56cc..f6e252c1d6fb 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -1,7 +1,7 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
  *
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  *
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ae87b741f9d5..c2ff551d215b 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -57,7 +57,7 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-		     (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		     (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
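The rename to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS says what the flag actually does; the check itself rejects keys that collapse 3DES into single DES (K1 == K2 or K2 == K3), comparing each 64-bit key as two XORed 32-bit words. A runnable toy of that comparison with arbitrary test words:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* K1 = K[0..1], K2 = K[2..3], K3 = K[4..5]; arbitrary test data */
	uint32_t K[6] = { 1, 2, 1, 2, 5, 6 };		/* here K1 == K2 */
	int k1_eq_k2 = !((K[0] ^ K[2]) | (K[1] ^ K[3]));
	int k2_eq_k3 = !((K[2] ^ K[4]) | (K[3] ^ K[5]));

	printf("weak 3DES key: %d\n", k1_eq_k2 || k2_eq_k3);
	return 0;
}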
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 2ca64bb57d2e..10a61cd54fce 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
  *
- * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  * Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 1a734bd2070a..4bd26af7098d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -286,10 +286,7 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
 {
 	struct ccp_cmd_queue *cmd_q;
 	char name[MAX_NAME_LEN + 1];
-	struct dentry *debugfs_info;
-	struct dentry *debugfs_stats;
 	struct dentry *debugfs_q_instance;
-	struct dentry *debugfs_q_stats;
 	int i;
 
 	if (!debugfs_initialized())
@@ -299,24 +296,14 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
 	if (!ccp_debugfs_dir)
 		ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
 	mutex_unlock(&ccp_debugfs_lock);
-	if (!ccp_debugfs_dir)
-		return;
 
 	ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
-	if (!ccp->debugfs_instance)
-		goto err;
 
-	debugfs_info = debugfs_create_file("info", 0400,
-					   ccp->debugfs_instance, ccp,
-					   &ccp_debugfs_info_ops);
-	if (!debugfs_info)
-		goto err;
+	debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp,
+			    &ccp_debugfs_info_ops);
 
-	debugfs_stats = debugfs_create_file("stats", 0600,
-					    ccp->debugfs_instance, ccp,
-					    &ccp_debugfs_stats_ops);
-	if (!debugfs_stats)
-		goto err;
+	debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp,
+			    &ccp_debugfs_stats_ops);
 
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		cmd_q = &ccp->cmd_q[i];
@@ -325,21 +312,12 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
 
 		debugfs_q_instance =
 			debugfs_create_dir(name, ccp->debugfs_instance);
-		if (!debugfs_q_instance)
-			goto err;
-
-		debugfs_q_stats =
-			debugfs_create_file("stats", 0600,
-					    debugfs_q_instance, cmd_q,
-					    &ccp_debugfs_queue_ops);
-		if (!debugfs_q_stats)
-			goto err;
+
+		debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q,
+				    &ccp_debugfs_queue_ops);
 	}
 
 	return;
-
-err:
-	debugfs_remove_recursive(ccp->debugfs_instance);
 }
 
 void ccp5_debugfs_destroy(void)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 0ea43cdeb05f..267a367bd076 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) driver
  *
- * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  * Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b16be8a11d92..fadf859a14b8 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -1,7 +1,7 @@
 /*
  * AMD Platform Security Processor (PSP) interface
  *
- * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2018 Advanced Micro Devices, Inc.
  *
  * Author: Brijesh Singh <brijesh.singh@amd.com>
  *
@@ -437,6 +437,7 @@ static int sev_get_api_version(void)
 	psp_master->api_major = status->api_major;
 	psp_master->api_minor = status->api_minor;
 	psp_master->build = status->build;
+	psp_master->sev_state = status->state;
 
 	return 0;
 }
@@ -857,15 +858,15 @@ static int sev_misc_init(struct psp_device *psp)
 	return 0;
 }
 
-static int sev_init(struct psp_device *psp)
+static int psp_check_sev_support(struct psp_device *psp)
 {
 	/* Check if device supports SEV feature */
 	if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) {
-		dev_dbg(psp->dev, "device does not support SEV\n");
-		return 1;
+		dev_dbg(psp->dev, "psp does not support SEV\n");
+		return -ENODEV;
 	}
 
-	return sev_misc_init(psp);
+	return 0;
 }
 
 int psp_dev_init(struct sp_device *sp)
@@ -890,6 +891,10 @@ int psp_dev_init(struct sp_device *sp)
 
 	psp->io_regs = sp->io_map;
 
+	ret = psp_check_sev_support(psp);
+	if (ret)
+		goto e_disable;
+
 	/* Disable and clear interrupts until ready */
 	iowrite32(0, psp->io_regs + psp->vdata->inten_reg);
 	iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg);
@@ -901,7 +906,7 @@ int psp_dev_init(struct sp_device *sp)
 		goto e_err;
 	}
 
-	ret = sev_init(psp);
+	ret = sev_misc_init(psp);
 	if (ret)
 		goto e_irq;
 
@@ -923,6 +928,11 @@ e_err:
 	dev_notice(dev, "psp initialization failed\n");
 
 	return ret;
+
+e_disable:
+	sp->psp_data = NULL;
+
+	return ret;
 }
 
 void psp_dev_destroy(struct sp_device *sp)
@@ -964,6 +974,21 @@ void psp_pci_init(void)
 	if (sev_get_api_version())
 		goto err;
 
+	/*
+	 * If the platform is not in the UNINIT state, a firmware upgrade
+	 * and/or the platform INIT command will fail; these commands
+	 * require the UNINIT state.
+	 *
+	 * In a normal boot the firmware should never be outside the
+	 * UNINIT state. But a kexec boot may skip the typical shutdown
+	 * sequence and leave the firmware in the INIT or WORKING state.
+	 */
+
+	if (psp_master->sev_state != SEV_STATE_UNINIT) {
+		sev_platform_shutdown(NULL);
+		psp_master->sev_state = SEV_STATE_UNINIT;
+	}
+
 	if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
 	    sev_update_firmware(psp_master->dev) == 0)
 		sev_get_api_version();
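The new psp_pci_init() logic encodes the invariant spelled out in the comment: SEV platform INIT and firmware download are only accepted in the UNINIT state, so a kexec'd kernel must shut the platform down first. Condensed into a sketch, using the driver-internal psp_device state added by this series and the sev_platform_shutdown() helper called above:

	/*
	 * Sketch: force the SEV firmware back to UNINIT before issuing
	 * commands that require that state (platform INIT, DLFW).
	 */
	static void sev_force_uninit(struct psp_device *psp)
	{
		if (psp->sev_state != SEV_STATE_UNINIT) {
			sev_platform_shutdown(NULL); /* fw error code not needed */
			psp->sev_state = SEV_STATE_UNINIT;
		}
	}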
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 8b53a9674ecb..f5afeccf42a1 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -1,7 +1,7 @@
 /*
  * AMD Platform Security Processor (PSP) interface driver
  *
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
  *
  * Author: Brijesh Singh <brijesh.singh@amd.com>
  *
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index e0459002eb71..b2879767fc98 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -1,7 +1,7 @@
 /*
  * AMD Secure Processor driver
  *
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  * Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 14398cad1625..5b0790025db3 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -1,7 +1,7 @@
 /*
  * AMD Secure Processor driver
  *
- * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ * Copyright (C) 2017-2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  * Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 7da93e9bebed..41bce0a3f4bb 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -1,7 +1,7 @@
 /*
  * AMD Secure Processor device driver
  *
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  * Author: Gary R Hook <gary.hook@amd.com>
@@ -226,8 +226,6 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ret)
 		goto e_err;
 
-	dev_notice(dev, "enabled\n");
-
 	return 0;
 
 e_err:
@@ -246,8 +244,6 @@ static void sp_pci_remove(struct pci_dev *pdev)
 	sp_destroy(sp);
 
 	sp_free_irqs(sp);
-
-	dev_notice(dev, "disabled\n");
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index b75dc7db2d4a..d24228efbaaa 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -1,7 +1,7 @@
 /*
  * AMD Secure Processor device driver
  *
- * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2014,2018 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  *
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index dd948e1df9e5..0ee1c52da0a4 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -156,8 +156,11 @@ static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 
 	/* Verify there is no memory overflow */
 	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
-	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
+	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+		dev_err(dev, "Too many mlli entries. current %d max %d\n",
+			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 		return -ENOMEM;
+	}
 
 	/* handle buffer longer than 64 kbytes */
 	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
@@ -511,10 +514,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	/* Map the src SGL */
 	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
+	if (rc)
 		goto cipher_exit;
-	}
 	if (mapped_nents > 1)
 		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
@@ -528,12 +529,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		}
 	} else {
 		/* Map the dst sg */
-		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
-			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
-			rc = -ENOMEM;
+		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto cipher_exit;
-		}
 		if (mapped_nents > 1)
 			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 
@@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 				 hw_iv_size, DMA_BIDIRECTIONAL);
 	}
 
-	/*In case a pool was set, a table was
-	 *allocated and should be released
-	 */
-	if (areq_ctx->mlli_params.curr_pool) {
+	/* Release pool */
+	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
+	    (areq_ctx->mlli_params.mlli_virt_addr)) {
 		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 			&areq_ctx->mlli_params.mlli_dma_addr,
 			areq_ctx->mlli_params.mlli_virt_addr);
@@ -1078,10 +1078,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 			       &areq_ctx->dst.nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 			       &dst_mapped_nents);
-		if (rc) {
-			rc = -ENOMEM;
+		if (rc)
 			goto chain_data_exit;
-		}
 	}
 
 	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
@@ -1235,11 +1233,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		}
 		areq_ctx->ccm_iv0_dma_addr = dma_addr;
 
-		if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-					 &sg_data, req->assoclen)) {
-			rc = -ENOMEM;
+		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
+					  &sg_data, req->assoclen);
+		if (rc)
 			goto aead_map_failure;
-		}
 	}
 
 	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
@@ -1299,10 +1296,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),
 		       &dummy, &mapped_nents);
-	if (rc) {
-		rc = -ENOMEM;
+	if (rc)
 		goto aead_map_failure;
-	}
 
 	if (areq_ctx->is_single_pass) {
 		/*
@@ -1386,6 +1381,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
@@ -1405,18 +1401,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 	/* TODO: copy data in case the buffer is big enough for the operation */
 	/* map the previous buffer */
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 	}
 
 	if (src && nbytes > 0 && do_update) {
-		if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
-			      &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
-			      &dummy, &mapped_nents)) {
+		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			       &dummy, &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (src && mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			memcpy(areq_ctx->buff_sg, src,
@@ -1435,7 +1431,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 		/* add the src data to the sg_data */
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
 				0, true, &areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	/* change the buffer index for the unmap function */
@@ -1451,7 +1448,7 @@ unmap_curr_buff:
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
-	return -ENOMEM;
+	return rc;
 }
 
 int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
@@ -1470,6 +1467,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	struct buffer_array sg_data;
 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 	unsigned int swap_index = 0;
+	int rc = 0;
 	u32 dummy = 0;
 	u32 mapped_nents = 0;
 
@@ -1514,21 +1512,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 	}
 
 	if (*curr_buff_cnt) {
-		if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
-				    &sg_data)) {
-			return -ENOMEM;
-		}
+		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+				     &sg_data);
+		if (rc)
+			return rc;
 		/* change the buffer index for next operation */
 		swap_index = 1;
 	}
 
 	if (update_data_len > *curr_buff_cnt) {
-		if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
-			      DMA_TO_DEVICE, &areq_ctx->in_nents,
-			      LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
-			      &mapped_nents)) {
+		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+			       DMA_TO_DEVICE, &areq_ctx->in_nents,
+			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+			       &mapped_nents);
+		if (rc)
 			goto unmap_curr_buff;
-		}
 		if (mapped_nents == 1 &&
 		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
 			/* only one entry in the SG and no previous data */
@@ -1548,7 +1546,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
 		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
 				(update_data_len - *curr_buff_cnt), 0, true,
 				&areq_ctx->mlli_nents);
-		if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
 			goto fail_unmap_din;
 	}
 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1562,7 +1561,7 @@ unmap_curr_buff:
 	if (*curr_buff_cnt)
 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
 
-	return -ENOMEM;
+	return rc;
 }
 
 void cc_unmap_hash_request(struct device *dev, void *ctx,
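Most of the cc_buffer_mgr.c churn is one pattern: the helpers already return meaningful error codes, so flattening every failure to -ENOMEM loses information. A self-contained sketch, with hypothetical helper names:

	#include <linux/device.h>

	/* Hypothetical helpers standing in for cc_map_sg() and friends */
	static int example_map_src(struct device *dev);
	static int example_map_dst(struct device *dev);
	static void example_unmap_src(struct device *dev);

	static int example_map_request(struct device *dev)
	{
		int rc;

		rc = example_map_src(dev);
		if (rc)
			return rc;		/* was: return -ENOMEM */

		rc = example_map_dst(dev);
		if (rc)
			goto unmap_src;

		return 0;

	unmap_src:
		example_unmap_src(dev);
		return rc;			/* preserves the real cause */
	}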
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index cc92b031fad1..d9c17078517b 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -80,6 +80,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
 		default:
 			break;
 		}
+		break;
 	case S_DIN_to_DES:
 		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
 			return 0;
@@ -352,7 +353,8 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
 			dev_dbg(dev, "weak 3DES key");
 			return -EINVAL;
 		} else if (!des_ekey(tmp, key) &&
-		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			   (crypto_tfm_get_flags(tfm) &
+			    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 			dev_dbg(dev, "weak DES key");
 			return -EINVAL;
@@ -652,6 +654,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
 	unsigned int len;
 
+	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+
 	switch (ctx_p->cipher_mode) {
 	case DRV_CIPHER_CBC:
 		/*
@@ -681,7 +685,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 		break;
 	}
 
-	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 	kzfree(req_ctx->iv);
 
 	skcipher_request_complete(req, err);
@@ -799,7 +802,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
 
 	memset(req_ctx, 0, sizeof(*req_ctx));
 
-	if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+	if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
+	    (req->cryptlen >= ivsize)) {
 
 		/* Allocate and save the last IV sized bytes of the source,
 		 * which will be lost in case of in-place decryption.
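The guard added to cc_cipher_decrypt() protects the CBC chaining rule the comment describes: the next IV is the last ciphertext block, which an in-place decrypt destroys, and reading it is only valid when the request holds at least one full block. A reduced sketch of that save step, assuming scatterwalk_map_and_copy() from <crypto/scatterwalk.h>:

	#include <crypto/scatterwalk.h>
	#include <crypto/skcipher.h>

	/* Sketch: copy out the last ciphertext block to use as the
	 * next IV, but only when there is at least one full block. */
	static void save_next_cbc_iv(struct skcipher_request *req,
				     u8 *iv_buf, unsigned int ivsize)
	{
		if (req->cryptlen < ivsize)
			return;

		scatterwalk_map_and_copy(iv_buf, req->src,
					 req->cryptlen - ivsize, ivsize, 0);
	}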
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5ca184e42483..5fa05a7bcf36 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -39,11 +39,9 @@ static struct debugfs_reg32 debug_regs[] = {
 	CC_DEBUG_REG(AXIM_MON_COMP),
 };
 
-int __init cc_debugfs_global_init(void)
+void __init cc_debugfs_global_init(void)
 {
 	cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
-
-	return !cc_debugfs_dir;
 }
 
 void __exit cc_debugfs_global_fini(void)
@@ -56,7 +54,6 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
 	struct device *dev = drvdata_to_dev(drvdata);
 	struct cc_debugfs_ctx *ctx;
 	struct debugfs_regset32 *regset;
-	struct dentry *file;
 
 	debug_regs[0].offset = drvdata->sig_offset;
 	debug_regs[1].offset = drvdata->ver_offset;
@@ -74,22 +71,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
 	regset->base = drvdata->cc_base;
 
 	ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
-	if (!ctx->dir)
-		return -ENFILE;
-
-	file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
-	if (!file) {
-		debugfs_remove(ctx->dir);
-		return -ENFILE;
-	}
 
-	file = debugfs_create_bool("coherent", 0400, ctx->dir,
-				   &drvdata->coherent);
-
-	if (!file) {
-		debugfs_remove_recursive(ctx->dir);
-		return -ENFILE;
-	}
+	debugfs_create_regset32("regs", 0400, ctx->dir, regset);
+	debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
 
 	drvdata->debugfs = ctx;
 
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 5b5320eca7d2..01cbd9a95659 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -5,7 +5,7 @@
 #define __CC_DEBUGFS_H__
 
 #ifdef CONFIG_DEBUG_FS
-int cc_debugfs_global_init(void);
+void cc_debugfs_global_init(void);
 void cc_debugfs_global_fini(void);
 
 int cc_debugfs_init(struct cc_drvdata *drvdata);
@@ -13,11 +13,7 @@ void cc_debugfs_fini(struct cc_drvdata *drvdata);
 
 #else
 
-static inline int cc_debugfs_global_init(void)
-{
-	return 0;
-}
-
+static inline void cc_debugfs_global_init(void) {}
 static inline void cc_debugfs_global_fini(void) {}
 
 static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index b0125ad65825..3bcc6c76e090 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -103,10 +103,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
 	/* read the interrupt status */
 	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 	dev_dbg(dev, "Got IRR=0x%08X\n", irr);
-	if (irr == 0) { /* Probably shared interrupt line */
-		dev_err(dev, "Got interrupt with empty IRR\n");
+
+	if (irr == 0) /* Probably shared interrupt line */
 		return IRQ_NONE;
-	}
+
 	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
 
 	/* clear interrupt - must be before processing events */
@@ -539,13 +539,8 @@ static struct platform_driver ccree_driver = {
 
 static int __init ccree_init(void)
 {
-	int ret;
-
 	cc_hash_global_init();
-
-	ret = cc_debugfs_global_init();
-	if (ret)
-		return ret;
+	cc_debugfs_global_init();
 
 	return platform_driver_register(&ccree_driver);
 }
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 5be7fd431b05..33dbf3e6d15d 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -111,13 +111,11 @@ struct cc_crypto_req {
  * @cc_base:	virt address of the CC registers
  * @irq:	device IRQ number
  * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
- * @fw_ver:	SeP loaded firmware version
  */
 struct cc_drvdata {
 	void __iomem *cc_base;
 	int irq;
 	u32 irq_mask;
-	u32 fw_ver;
 	struct completion hw_queue_avail; /* wait for HW queue availability */
 	struct platform_device *plat_dev;
 	cc_sram_addr_t mlli_sram_addr;
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index 639e5718dff4..b7bd980a27d8 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
 
 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
 chcr-objs :=  chcr_core.o chcr_algo.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index bcef76508dfa..8d8cf80b9294 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1368,7 +1368,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 static int chcr_device_init(struct chcr_context *ctx)
 {
 	struct uld_ctx *u_ctx = NULL;
-	struct adapter *adap;
 	unsigned int id;
 	int txq_perchan, txq_idx, ntxq;
 	int err = 0, rxq_perchan, rxq_idx;
@@ -1382,7 +1381,6 @@ static int chcr_device_init(struct chcr_context *ctx)
 			goto out;
 		}
 		ctx->dev = &u_ctx->dev;
-		adap = padap(ctx->dev);
 		ntxq = u_ctx->lldi.ntxq;
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
@@ -2762,7 +2760,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
 	return 0;
 }
 
-static void generate_b0(struct aead_request *req, u8 *ivptr,
+static int generate_b0(struct aead_request *req, u8 *ivptr,
 			unsigned short op_type)
 {
 	unsigned int l, lp, m;
@@ -2787,6 +2785,8 @@ static void generate_b0(struct aead_request *req, u8 *ivptr,
 	rc = set_msg_len(b0 + 16 - l,
 			 (op_type == CHCR_DECRYPT_OP) ?
 			 req->cryptlen - m : req->cryptlen, l);
+
+	return rc;
 }
 
 static inline int crypto_ccm_check_iv(const u8 *iv)
@@ -2821,7 +2821,7 @@ static int ccm_format_packet(struct aead_request *req,
 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
 				htons(assoclen);
 
-	generate_b0(req, ivptr, op_type);
+	rc = generate_b0(req, ivptr, op_type);
 	/* zero the ctr value */
 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
 	return rc;
@@ -3676,9 +3676,9 @@ static int chcr_aead_op(struct aead_request *req,
 	/* Form a WR from req */
 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
 
-	if (IS_ERR(skb) || !skb) {
+	if (IS_ERR_OR_NULL(skb)) {
 		chcr_dec_wrcount(cdev);
-		return PTR_ERR(skb);
+		return PTR_ERR_OR_ZERO(skb);
 	}
 
 	skb->dev = u_ctx->lldi.ports[0];
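create_wr_fn() may hand back a valid skb, an ERR_PTR(), or NULL, and the old code called PTR_ERR() even for NULL. IS_ERR_OR_NULL() with PTR_ERR_OR_ZERO() handles all three cases without misinterpreting NULL; a minimal illustration with a hypothetical caller:

	#include <linux/err.h>
	#include <linux/skbuff.h>

	/* Sketch: accept a callee with the mixed pointer/ERR_PTR/NULL
	 * convention; returns 0 for NULL, the errno for an ERR_PTR. */
	static int example_check(struct sk_buff *skb)
	{
		if (IS_ERR_OR_NULL(skb))
			return PTR_ERR_OR_ZERO(skb);

		/* ... hand the skb to the hardware queue ... */
		return 0;
	}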
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 1159dee964ed..ad874d548aa5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -183,7 +183,7 @@ struct chcr_ipsec_aadiv {
 struct ipsec_sa_entry {
 	int hmac_ctrl;
 	u16 esn;
-	u16 imm;
+	u16 resv;
 	unsigned int enckey_len;
 	unsigned int kctx_len;
 	unsigned int authsize;
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 2fb48cce4462..2f60049361ef 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -303,6 +303,9 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
 			return false;
 	}
+	/* Inline single pdu */
+	if (skb_shinfo(skb)->gso_size)
+		return false;
 	return true;
 }
 
@@ -333,7 +336,8 @@ static inline int is_eth_imm(const struct sk_buff *skb,
 }
 
 static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
-					     struct ipsec_sa_entry *sa_entry)
+					     struct ipsec_sa_entry *sa_entry,
+					     bool *immediate)
 {
 	unsigned int kctx_len;
 	unsigned int flits;
@@ -351,8 +355,10 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
 	 * TX Packet header plus the skb data in the Work Request.
 	 */
 
-	if (hdrlen)
+	if (hdrlen) {
+		*immediate = true;
 		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+	}
 
 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
 
@@ -415,12 +421,12 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
 	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
 	memcpy(aadiv->iv, iv, 8);
 
-	if (sa_entry->imm) {
+	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
 		sc_imm = (struct ulptx_idata *)(pos +
 			  (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
 					sizeof(__be64)) << 3));
-		sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
-		sc_imm->len = cpu_to_be32(sa_entry->imm);
+		sc_imm->cmd_more = FILL_CMD_MORE(0);
+		sc_imm->len = cpu_to_be32(skb->len);
 	}
 	pos += len;
 	return pos;
@@ -528,15 +534,18 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 	struct adapter *adap = pi->adapter;
 	unsigned int ivsize = GCM_ESP_IV_SIZE;
 	struct chcr_ipsec_wr *wr;
+	bool immediate = false;
 	u16 immdatalen = 0;
 	unsigned int flits;
 	u32 ivinoffset;
 	u32 aadstart;
 	u32 aadstop;
 	u32 ciphstart;
+	u16 sc_more = 0;
 	u32 ivdrop = 0;
 	u32 esnlen = 0;
 	u32 wr_mid;
+	u16 ndesc;
 	int qidx = skb_get_queue_mapping(skb);
 	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
 	unsigned int kctx_len = sa_entry->kctx_len;
@@ -544,22 +553,24 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 
 	atomic_inc(&adap->chcr_stats.ipsec_cnt);
 
-	flits = calc_tx_sec_flits(skb, sa_entry);
+	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+	ndesc = DIV_ROUND_UP(flits, 2);
 	if (sa_entry->esn)
 		ivdrop = 1;
 
-	if (is_eth_imm(skb, sa_entry)) {
+	if (immediate)
 		immdatalen = skb->len;
-		sa_entry->imm = immdatalen;
-	}
 
-	if (sa_entry->esn)
+	if (sa_entry->esn) {
 		esnlen = sizeof(struct chcr_ipsec_aadiv);
+		if (!skb_is_nonlinear(skb))
+			sc_more = 1;
+	}
 
 	/* WR Header */
 	wr = (struct chcr_ipsec_wr *)pos;
 	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
-	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
 
 	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
 		netif_tx_stop_queue(q->txq);
@@ -571,10 +582,10 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 
 	/* ULPTX */
 	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
-	wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2)  - 1);
+	wr->req.ulptx.len = htonl(ndesc - 1);
 
 	/* Sub-command */
-	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
+	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
 	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 					 sizeof(wr->req.key_ctx) +
 					 kctx_len +
@@ -697,7 +708,7 @@ out_free:       dev_kfree_skb_any(skb);
 
 	cxgb4_reclaim_completed_tx(adap, &q->q, true);
 
-	flits = calc_tx_sec_flits(skb, sa_entry);
+	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
 	ndesc = flits_to_desc(flits);
 	credits = txq_avail(&q->q) - ndesc;
 
@@ -710,9 +721,6 @@ out_free:       dev_kfree_skb_any(skb);
 		return NETDEV_TX_BUSY;
 	}
 
-	if (is_eth_imm(skb, sa_entry))
-		immediate = true;
-
 	if (!immediate &&
 	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
 		q->mapping_err++;
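calc_tx_sec_flits() now reports through *immediate whether the whole packet can be inlined in the work request, and chcr_crypto_wreq() converts the flit count to 16-byte units exactly once (ndesc = DIV_ROUND_UP(flits, 2)) instead of repeating the division. The unit arithmetic, sketched on its own:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/*
	 * Sketch: a flit is one __be64 (8 bytes); the LEN16 fields in
	 * the work request count 16-byte units, i.e. two flits each.
	 */
	static u16 example_len16(unsigned int payload_bytes)
	{
		unsigned int flits = DIV_ROUND_UP(payload_bytes, sizeof(__be64));

		return DIV_ROUND_UP(flits, 2);
	}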
diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/crypto/chelsio/chtls/Makefile
index df1379570a8e..b958f1b8ec39 100644
--- a/drivers/crypto/chelsio/chtls/Makefile
+++ b/drivers/crypto/chelsio/chtls/Makefile
@@ -1,4 +1,5 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 -Idrivers/crypto/chelsio/
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+	     -I $(srctree)/drivers/crypto/chelsio
 
 obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o
 chtls-objs := chtls_main.o chtls_cm.o chtls_io.o chtls_hw.o
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 18f553fcc167..1285a1bceda7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
 			   struct sock *sk, long *timeo_p)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-	int sndbuf, err = 0;
+	int err = 0;
 	long current_timeo;
 	long vm_wait = 0;
 	bool noblock;
 
 	current_timeo = *timeo_p;
 	noblock = (*timeo_p ? false : true);
-	sndbuf = cdev->max_host_sndbuf;
 	if (csk_mem_free(cdev, sk)) {
 		current_timeo = (prandom_u32() % (HZ / 5)) + 2;
 		vm_wait = (prandom_u32() % (HZ / 5)) + 2;
@@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			    int nonblock, int flags, int *addr_len)
 {
 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
-	struct net_device *dev = csk->egress_dev;
 	struct chtls_hws *hws = &csk->tlshws;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct adapter *adap;
 	unsigned long avail;
 	int buffers_freed;
 	int copied = 0;
-	int request;
 	int target;
 	long timeo;
 
-	adap = netdev2adap(dev);
 	buffers_freed = 0;
 
 	timeo = sock_rcvtimeo(sk, nonblock);
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
-	request = len;
 
 	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
 		chtls_cleanup_rbuf(sk, copied);
@@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct chtls_sock *csk;
-	struct chtls_hws *hws;
 	unsigned long avail;    /* amount of available data in current skb */
 	int buffers_freed;
 	int copied = 0;
-	int request;
 	long timeo;
 	int target;             /* Read at least this many bytes */
 
@@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
 	lock_sock(sk);
 	csk = rcu_dereference_sk_user_data(sk);
-	hws = &csk->tlshws;
 
 	if (is_tls_rx(csk))
 		return chtls_pt_recvmsg(sk, msg, len, nonblock,
@@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
 	timeo = sock_rcvtimeo(sk, nonblock);
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
-	request = len;
 
 	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
 		chtls_cleanup_rbuf(sk, copied);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 563f8fe7686a..dd2daf2a54e0 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -30,7 +30,6 @@
  */
 static LIST_HEAD(cdev_list);
 static DEFINE_MUTEX(cdev_mutex);
-static DEFINE_MUTEX(cdev_list_lock);
 
 static DEFINE_MUTEX(notify_mutex);
 static RAW_NOTIFIER_HEAD(listen_notify_list);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a5a36fe7bf2c..dad212cabe63 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1961,7 +1961,8 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 		u32 tmp[DES_EXPKEY_WORDS];
 		int ret = des_ekey(tmp, key);
 
-		if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		if (unlikely(ret == 0) &&
+		    (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 			return -EINVAL;
 		}
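This is the first of many identical conversions in this pull: CRYPTO_TFM_REQ_WEAK_KEY was renamed to CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, making it explicit that the flag is a caller request to reject weak DES keys, answered by CRYPTO_TFM_RES_WEAK_KEY. The shared shape of these setkey checks, as a sketch:

	#include <crypto/des.h>
	#include <linux/crypto.h>

	/* Sketch of the common DES weak-key check: des_ekey() returns
	 * 0 for a weak key, and the key is rejected only when the user
	 * asked for weak keys to be forbidden. */
	static int example_des_check(struct crypto_tfm *tfm, const u8 *key)
	{
		u32 tmp[DES_EXPKEY_WORDS];

		if (!des_ekey(tmp, key) &&
		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
			crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
			return -EINVAL;
		}

		return 0;
	}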
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d531c14020dc..7ef30a98cb24 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -940,7 +940,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
 	}
 
 	ret = des_ekey(tmp, key);
-	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 1b0d156bb9be..5c4659b04d70 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -847,7 +847,7 @@ static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 		goto out;
 
 	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
-		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
 			ret = -EINVAL;
 		} else {
 			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
@@ -1125,7 +1125,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
 		goto out;
 
 	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
-		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
 			ret = -EINVAL;
 			goto out;
 		} else {
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0ae84ec9e21c..fb279b3a1ca1 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -286,7 +286,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
 	}
 
 	ret = des_ekey(tmp, key);
-	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
@@ -322,7 +322,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
 	struct mv_cesa_skcipher_dma_iter iter;
 	bool skip_ctx = false;
 	int ret;
-	unsigned int ivsize;
 
 	basereq->chain.first = NULL;
 	basereq->chain.last = NULL;
@@ -381,7 +380,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
 	} while (mv_cesa_skcipher_req_iter_next_op(&iter));
 
 	/* Add output data for IV */
-	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
 	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
 				    CESA_SA_DATA_SRAM_OFFSET,
 				    CESA_TDMA_SRC_IN_SRAM, flags);
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 55f34cfc43ff..9450c41211b2 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -772,7 +772,7 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	}
 
 	err = des_ekey(tmp, key);
-	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 6369019219d4..1ba2633e90d6 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -662,7 +662,7 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	pr_debug("enter, keylen: %d\n", keylen);
 
 	/* Do we need to test against weak key? */
-	if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) {
+	if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
 		u32 tmp[DES_EXPKEY_WORDS];
 		int ret = des_ekey(tmp, key);
 
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 17068b55fea5..1b3acdeffede 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -759,7 +759,8 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	}
 
 	if (unlikely(!des_ekey(tmp, key)) &&
-	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	    (crypto_ablkcipher_get_flags(cipher) &
+	     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 		return -EINVAL;
 	}
diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile
index 8f5fd4838a96..822b5de58ec6 100644
--- a/drivers/crypto/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/qat/qat_c3xxx/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
 qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 763c2166ee0e..d937cc7248a5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 PCI_FUNC(pdev->devfn));
 
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
-		ret = -EINVAL;
-		goto out_err;
-	}
 
 	/* Create device configuration table */
 	ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile
index 16d178e2eaa2..8f56d27c7479 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
 qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 613c7d5644ce..1dc5ac859f7b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 PCI_FUNC(pdev->devfn));
 
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
-		ret = -EINVAL;
-		goto out_err;
-	}
 
 	/* Create device configuration table */
 	ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile
index bd75ace59b76..6dcd404578fc 100644
--- a/drivers/crypto/qat/qat_c62x/Makefile
+++ b/drivers/crypto/qat/qat_c62x/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
 qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 9cb832963357..2bc06c89d2fe 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 PCI_FUNC(pdev->devfn));
 
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
-		ret = -EINVAL;
-		goto out_err;
-	}
 
 	/* Create device configuration table */
 	ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile
index ecd708c213b2..1e5d51de778f 100644
--- a/drivers/crypto/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/qat/qat_c62xvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
 qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 278452b8ef81..a68358b31292 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 PCI_FUNC(pdev->devfn));
 
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
-		ret = -EINVAL;
-		goto out_err;
-	}
 
 	/* Create device configuration table */
 	ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index d0879790561f..5c7fdb0fc53d 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -141,13 +141,6 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
 						  accel_dev->debugfs_dir,
 						  dev_cfg_data,
 						  &qat_dev_cfg_fops);
-	if (!dev_cfg_data->debug) {
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to create qat cfg debugfs entry.\n");
-		kfree(dev_cfg_data);
-		accel_dev->cfg = NULL;
-		return -EFAULT;
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 57d2622728a5..2136cbe4bf6c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -486,12 +486,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
 	/* accel_dev->debugfs_dir should always be non-NULL here */
 	etr_data->debug = debugfs_create_dir("transport",
 					     accel_dev->debugfs_dir);
-	if (!etr_data->debug) {
-		dev_err(&GET_DEV(accel_dev),
-			"Unable to create transport debugfs entry\n");
-		ret = -ENOENT;
-		goto err_bank_debug;
-	}
 
 	for (i = 0; i < num_banks; i++) {
 		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
@@ -504,7 +498,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
 
 err_bank_all:
 	debugfs_remove(etr_data->debug);
-err_bank_debug:
 	kfree(etr_data->banks);
 err_bank:
 	kfree(etr_data);
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 52340b9bb387..e794e9d97b2c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -163,11 +163,6 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
 	ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
 						ring->bank->bank_debug_dir,
 						ring, &adf_ring_debug_fops);
-	if (!ring_debug->debug) {
-		pr_err("QAT: Failed to create ring debug entry.\n");
-		kfree(ring_debug);
-		return -EFAULT;
-	}
 	ring->ring_debug = ring_debug;
 	return 0;
 }
@@ -271,19 +266,9 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
 
 	snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
 	bank->bank_debug_dir = debugfs_create_dir(name, parent);
-	if (!bank->bank_debug_dir) {
-		pr_err("QAT: Failed to create bank debug dir.\n");
-		return -EFAULT;
-	}
-
 	bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
 						   bank->bank_debug_dir, bank,
 						   &adf_bank_debug_fops);
-	if (!bank->bank_debug_cfg) {
-		pr_err("QAT: Failed to create bank debug entry.\n");
-		debugfs_remove(bank->bank_debug_dir);
-		return -EFAULT;
-	}
 	return 0;
 }
 
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
index 180a00ed7f89..0fc06b1e1632 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
 qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 3a9708ef4ce2..b11bf8c0e683 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 PCI_FUNC(pdev->devfn));
 
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
-		ret = -EINVAL;
-		goto out_err;
-	}
 
 	/* Create device configuration table */
 	ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
index 5c3ccf8267eb..9ce906af6034 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -1,3 +1,3 @@
-ccflags-y := -I$(src)/../qat_common
+ccflags-y := -I $(srctree)/$(src)/../qat_common
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
 qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 3da0f951cb59..1b762eefc6c1 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 PCI_FUNC(pdev->devfn));
 
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
-	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
-		ret = -EINVAL;
-		goto out_err;
-	}
 
 	/* Create device configuration table */
 	ret = adf_cfg_dev_add(accel_dev);
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 25c13e26d012..154b6baa124e 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -180,8 +180,8 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
 		u32 tmp[DES_EXPKEY_WORDS];
 
 		ret = des_ekey(tmp, key);
-		if (!ret && crypto_ablkcipher_get_flags(ablk) &
-		    CRYPTO_TFM_REQ_WEAK_KEY)
+		if (!ret && (crypto_ablkcipher_get_flags(ablk) &
+			     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))
 			goto weakkey;
 	}
 
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index c9d622abd90c..0ce4a65b95f5 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev,
 		count = (dev->left_bytes > PAGE_SIZE) ?
 			PAGE_SIZE : dev->left_bytes;
 
-		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
+		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
 					dev->addr_vir, count,
 					dev->total - dev->left_bytes)) {
 			dev_err(dev->dev, "[%s:%d] pcopy err\n",
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index d5fb4013fb42..54ee5b3ed9db 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -207,7 +207,8 @@ struct rk_crypto_info {
 	void				*addr_vir;
 	int				aligned;
 	int				align_size;
-	size_t				nents;
+	size_t				src_nents;
+	size_t				dst_nents;
 	unsigned int			total;
 	unsigned int			count;
 	dma_addr_t			addr_in;
@@ -244,6 +245,7 @@ struct rk_cipher_ctx {
 	struct rk_crypto_info		*dev;
 	unsigned int			keylen;
 	u32				mode;
+	u8				iv[AES_BLOCK_SIZE];
 };
 
 enum alg_type {
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 639c15c5364b..02dac6ae7e53 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -60,7 +60,7 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
 
 	if (keylen == DES_KEY_SIZE) {
 		if (!des_ekey(tmp, key) &&
-		    (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		    (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
 			return -EINVAL;
 		}
@@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev)
 static int rk_set_data_start(struct rk_crypto_info *dev)
 {
 	int err;
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
+		dev->sg_src->offset + dev->sg_src->length - ivsize;
+
+	/* store the iv that need to be updated in chain mode */
+	if (ctx->mode & RK_CRYPTO_DEC)
+		memcpy(ctx->iv, src_last_blk, ivsize);
 
 	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
 	if (!err)
@@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
 	dev->total = req->nbytes;
 	dev->sg_src = req->src;
 	dev->first = req->src;
-	dev->nents = sg_nents(req->src);
+	dev->src_nents = sg_nents(req->src);
 	dev->sg_dst = req->dst;
+	dev->dst_nents = sg_nents(req->dst);
 	dev->aligned = 1;
 
 	spin_lock_irqsave(&dev->lock, flags);
@@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
 		memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
 }
 
+static void rk_update_iv(struct rk_crypto_info *dev)
+{
+	struct ablkcipher_request *req =
+		ablkcipher_request_cast(dev->async_req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
+	u8 *new_iv = NULL;
+
+	if (ctx->mode & RK_CRYPTO_DEC) {
+		new_iv = ctx->iv;
+	} else {
+		new_iv = page_address(sg_page(dev->sg_dst)) +
+			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
+	}
+
+	if (ivsize == DES_BLOCK_SIZE)
+		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
+	else if (ivsize == AES_BLOCK_SIZE)
+		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
+}
+
 /* return:
  *	true	an error occurred
  *	false	no error, continue
@@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
 
 	dev->unload_data(dev);
 	if (!dev->aligned) {
-		if (!sg_pcopy_from_buffer(req->dst, dev->nents,
+		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
 					  dev->addr_vir, dev->count,
 					  dev->total - dev->left_bytes -
 					  dev->count)) {
@@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
 		}
 	}
 	if (dev->left_bytes) {
+		rk_update_iv(dev);
 		if (dev->aligned) {
 			if (sg_is_last(dev->sg_src)) {
 				dev_err(dev->dev, "[%s:%d] Lack of data\n",
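rk_update_iv() is the heart of this fix: when a request spans several hardware passes, CBC chaining needs the last ciphertext block of the previous pass as the next IV. For encryption that block sits at the tail of the destination; for decryption it must be saved from the source beforehand, since the in-place operation overwrites it (hence the copy in rk_set_data_start()). The direction rule, distilled:

	#include <linux/types.h>

	/* Sketch: pick where the next CBC IV comes from.
	 * Encrypt: tail of the ciphertext just written (dst).
	 * Decrypt: tail of the ciphertext just consumed, saved from
	 *          src before the in-place operation destroyed it. */
	static const u8 *next_cbc_iv(bool decrypt, const u8 *saved_src_tail,
				     const u8 *dst_tail)
	{
		return decrypt ? saved_src_tail : dst_tail;
	}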
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 821a506b9e17..c336ae75e361 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
 	dev->sg_dst = NULL;
 	dev->sg_src = req->src;
 	dev->first = req->src;
-	dev->nents = sg_nents(req->src);
+	dev->src_nents = sg_nents(req->src);
 	rctx = ahash_request_ctx(req);
 	rctx->mode = 0;
 
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 0064be0e3941..f4e625cf53ca 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -232,6 +232,7 @@
  * struct samsung_aes_variant - platform specific SSS driver data
  * @aes_offset: AES register offset from SSS module's base.
  * @hash_offset: HASH register offset from SSS module's base.
+ * @clk_names: names of clocks needed to run SSS IP
  *
  * Specifies platform specific configuration of SSS module.
  * Note: A structure for driver specific platform data is used for future
@@ -240,6 +241,7 @@
 struct samsung_aes_variant {
 	unsigned int			aes_offset;
 	unsigned int			hash_offset;
+	const char			*clk_names[];
 };
 
 struct s5p_aes_reqctx {
@@ -296,6 +298,7 @@ struct s5p_aes_ctx {
 struct s5p_aes_dev {
 	struct device			*dev;
 	struct clk			*clk;
+	struct clk			*pclk;
 	void __iomem			*ioaddr;
 	void __iomem			*aes_ioaddr;
 	int				irq_fc;
@@ -384,11 +387,19 @@ struct s5p_hash_ctx {
 static const struct samsung_aes_variant s5p_aes_data = {
 	.aes_offset	= 0x4000,
 	.hash_offset	= 0x6000,
+	.clk_names	= { "secss", },
 };
 
 static const struct samsung_aes_variant exynos_aes_data = {
 	.aes_offset	= 0x200,
 	.hash_offset	= 0x400,
+	.clk_names	= { "secss", },
+};
+
+static const struct samsung_aes_variant exynos5433_slim_aes_data = {
+	.aes_offset	= 0x400,
+	.hash_offset	= 0x800,
+	.clk_names	= { "pclk", "aclk", },
 };
 
 static const struct of_device_id s5p_sss_dt_match[] = {
@@ -400,6 +411,10 @@ static const struct of_device_id s5p_sss_dt_match[] = {
 		.compatible = "samsung,exynos4210-secss",
 		.data = &exynos_aes_data,
 	},
+	{
+		.compatible = "samsung,exynos5433-slim-sss",
+		.data = &exynos5433_slim_aes_data,
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
@@ -463,6 +478,9 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
 
 static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
+	struct ablkcipher_request *req = dev->req;
+	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+
 	if (dev->sg_dst_cpy) {
 		dev_dbg(dev->dev,
 			"Copying %d bytes of output data back to original place\n",
@@ -472,6 +490,11 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
 	}
 	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
 	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+	if (reqctx->mode & FLAGS_AES_CBC)
+		memcpy_fromio(req->info,
+			      dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0),
+			      AES_BLOCK_SIZE);
+	else if (reqctx->mode & FLAGS_AES_CTR)
+		memcpy_fromio(req->info,
+			      dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0),
+			      AES_BLOCK_SIZE);
 }
 
 /* Calls the completion. Cannot be called with dev->lock hold. */
@@ -1819,10 +1842,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
 	void __iomem *keystart;
 
 	if (iv)
-		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
+			    AES_BLOCK_SIZE);
 
 	if (ctr)
-		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
+			    AES_BLOCK_SIZE);
 
 	if (keylen == AES_KEYSIZE_256)
 		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -2208,18 +2233,39 @@ static int s5p_aes_probe(struct platform_device *pdev)
 			return PTR_ERR(pdata->ioaddr);
 	}
 
-	pdata->clk = devm_clk_get(dev, "secss");
+	pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
 	if (IS_ERR(pdata->clk)) {
-		dev_err(dev, "failed to find secss clock source\n");
+		dev_err(dev, "failed to find secss clock %s\n",
+			variant->clk_names[0]);
 		return -ENOENT;
 	}
 
 	err = clk_prepare_enable(pdata->clk);
 	if (err < 0) {
-		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
+		dev_err(dev, "Enabling clock %s failed, err %d\n",
+			variant->clk_names[0], err);
 		return err;
 	}
 
+	if (variant->clk_names[1]) {
+		pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
+		if (IS_ERR(pdata->pclk)) {
+			dev_err(dev, "failed to find clock %s\n",
+				variant->clk_names[1]);
+			err = -ENOENT;
+			goto err_clk;
+		}
+
+		err = clk_prepare_enable(pdata->pclk);
+		if (err < 0) {
+			dev_err(dev, "Enabling clock %s failed, err %d\n",
+				variant->clk_names[1], err);
+			goto err_clk;
+		}
+	} else {
+		pdata->pclk = NULL;
+	}
+
 	spin_lock_init(&pdata->lock);
 	spin_lock_init(&pdata->hash_lock);
 
@@ -2295,8 +2341,11 @@ err_algs:
 	tasklet_kill(&pdata->tasklet);
 
 err_irq:
-	clk_disable_unprepare(pdata->clk);
+	if (pdata->pclk)
+		clk_disable_unprepare(pdata->pclk);
 
+err_clk:
+	clk_disable_unprepare(pdata->clk);
 	s5p_dev = NULL;
 
 	return err;
@@ -2323,6 +2372,9 @@ static int s5p_aes_remove(struct platform_device *pdev)
 		pdata->use_hash = false;
 	}
 
+	if (pdata->pclk)
+		clk_disable_unprepare(pdata->pclk);
+
 	clk_disable_unprepare(pdata->clk);
 	s5p_dev = NULL;
 
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 590d7352837e..4a6cc8a3045d 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1564,7 +1564,7 @@ err_engine:
 
 static int stm32_hash_remove(struct platform_device *pdev)
 {
-	static struct stm32_hash_dev *hdev;
+	struct stm32_hash_dev *hdev;
 	int ret;
 
 	hdev = platform_get_drvdata(pdev);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 5cf64746731a..54fd714d53ca 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -517,7 +517,7 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	flags = crypto_skcipher_get_flags(tfm);
 
 	ret = des_ekey(tmp, key);
-	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
 		dev_dbg(ss->dev, "Weak key %u\n", keylen);
 		return -EINVAL;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index f8e2c5c3f4eb..de78b54bcfb1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1535,7 +1535,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 	}
 
 	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
-		     CRYPTO_TFM_REQ_WEAK_KEY) &&
+		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
 	    !des_ekey(tmp, key)) {
 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
 		return -EINVAL;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index a92a66b1ff46..3235611928f2 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -595,6 +595,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
 	}
 
 	cookie = dmaengine_submit(desc);
+	if (dma_submit_error(cookie)) {
+		dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n",
+			__func__);
+		return cookie;
+	}
+
 	dma_async_issue_pending(channel);
 
 	return 0;
@@ -994,10 +1000,11 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 	}
 
 	ret = des_ekey(tmp, key);
-	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+	if (unlikely(ret == 0) &&
+	    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
-				__func__);
+		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+			 __func__);
 		return -EINVAL;
 	}
 
@@ -1028,18 +1035,19 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
 	/* Checking key interdependency for weak key detection. */
 	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 				!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
-			(*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			(*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
-				__func__);
+		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+			 __func__);
 		return -EINVAL;
 	}
 	for (i = 0; i < 3; i++) {
 		ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
-		if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		if (unlikely(ret == 0) &&
+		    (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
 			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
-			pr_debug(DEV_DBG_NAME " [%s]: "
-					"CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+			pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
+				 __func__);
 			return -EINVAL;
 		}
 	}
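dmaengine_submit() returns a dma_cookie_t that is negative on failure, and issuing pending work after a failed submit would silently drop the transfer; dma_submit_error() is the canonical check, as the hunk above now applies. The submit path in isolation:

	#include <linux/dmaengine.h>

	/* Sketch: validate the cookie before kicking the channel. */
	static int example_dma_submit(struct dma_chan *chan,
				      struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie = dmaengine_submit(desc);

		if (dma_submit_error(cookie))
			return cookie;	/* negative errno-style value */

		dma_async_issue_pending(chan);
		return 0;
	}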
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 2c573d1aaa64..0704833ece92 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -406,7 +406,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
 	} else {
 		req_data->header.session_id =
 			cpu_to_le64(ctx->dec_sess_info.session_id);
-	    req_data->header.opcode =
+		req_data->header.opcode =
 			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
 	}
 	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);