author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-07 19:44:40 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-07 19:44:40 -0700
commit    639b4ac691c6f6e48921dc576379c176f82f3250
tree      6cf521ae7d46ca8dfa139ca67dd32545de8d2a75 /drivers/crypto
parent    9d2cd01b15d0782adb81e40094b67904d77b03df
parent    5208ed2ca16526cdbec25abe594a3cc3aea210f4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6 into next
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 3.16:

   - Added test vectors for SHA/AES-CCM/DES-CBC/3DES-CBC.
   - Fixed a number of error-path memory leaks in tcrypt.
   - Fixed error-path memory leak in caam.
   - Removed unnecessary global mutex from mxs-dcp.
   - Added ahash walk interface that can actually be asynchronous.
   - Cleaned up caam error reporting.
   - Allow crypto_user get operation to be used by non-root users.
   - Add support for SSS module on Exynos.
   - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6: (60 commits)
  crypto: testmgr - add aead cbc des, des3_ede tests
  crypto: testmgr - Fix DMA-API warning
  crypto: cesa - Don't use tfm->__crt_alg->cra_type directly
  crypto: sahara - Don't use tfm->__crt_alg->cra_name directly
  crypto: padlock - Don't use tfm->__crt_alg->cra_name directly
  crypto: n2 - Don't use tfm->__crt_alg->cra_name directly
  crypto: dcp - Don't use tfm->__crt_alg->cra_name directly
  crypto: cesa - Don't use tfm->__crt_alg->cra_name directly
  crypto: ccp - Don't use tfm->__crt_alg->cra_name directly
  crypto: geode - Don't use tfm->__crt_alg->cra_name directly
  crypto: geode - Weed out printk() from probe()
  crypto: geode - Consistently use AES_KEYSIZE_128
  crypto: geode - Kill AES_IV_LENGTH
  crypto: geode - Kill AES_MIN_BLOCK_SIZE
  crypto: mxs-dcp - Remove global mutex
  crypto: hash - Add real ahash walk interface
  hwrng: n2-drv - Introduce the use of the managed version of kzalloc
  crypto: caam - reinitialize keys_fit_inline for decrypt and givencrypt
  crypto: s5p-sss - fix multiplatform build
  hwrng: timeriomem - remove unnecessary OOM messages
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                   |   6
-rw-r--r--  drivers/crypto/atmel-aes.c               |   8
-rw-r--r--  drivers/crypto/bfin_crc.c                | 103
-rw-r--r--  drivers/crypto/bfin_crc.h                | 125
-rw-r--r--  drivers/crypto/caam/caamalg.c            |  31
-rw-r--r--  drivers/crypto/caam/caamhash.c           |  32
-rw-r--r--  drivers/crypto/caam/caamrng.c            |   7
-rw-r--r--  drivers/crypto/caam/error.c              | 393
-rw-r--r--  drivers/crypto/caam/error.h              |   2
-rw-r--r--  drivers/crypto/caam/key_gen.c            |   7
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c  |   4
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c             |   7
-rw-r--r--  drivers/crypto/geode-aes.c               |  28
-rw-r--r--  drivers/crypto/geode-aes.h               |   6
-rw-r--r--  drivers/crypto/mv_cesa.c                 |   6
-rw-r--r--  drivers/crypto/mxs-dcp.c                 |  52
-rw-r--r--  drivers/crypto/n2_core.c                 |   4
-rw-r--r--  drivers/crypto/nx/nx-842.c               |   4
-rw-r--r--  drivers/crypto/omap-des.c                |  33
-rw-r--r--  drivers/crypto/padlock-sha.c             |   2
-rw-r--r--  drivers/crypto/s5p-sss.c                 | 148
-rw-r--r--  drivers/crypto/sahara.c                  |   2
22 files changed, 582 insertions, 428 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 03ccdb0ccf9e..f066fa23cc05 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -301,14 +301,14 @@ config CRYPTO_DEV_SAHARA
 	  found in some Freescale i.MX chips.
 
 config CRYPTO_DEV_S5P
-	tristate "Support for Samsung S5PV210 crypto accelerator"
-	depends on ARCH_S5PV210
+	tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
+	depends on ARCH_S5PV210 || ARCH_EXYNOS
 	select CRYPTO_AES
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
 	help
 	  This option allows you to have support for S5P crypto acceleration.
-	  Select this to offload Samsung S5PV210 or S5PC110 from AES
+	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
 config CRYPTO_DEV_NX
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index d7c9e317423c..a083474991ab 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -716,6 +716,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 			return -EINVAL;
 		}
 		ctx->block_size = CFB32_BLOCK_SIZE;
+	} else if (mode & AES_FLAGS_CFB64) {
+		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
+			pr_err("request size is not exact amount of CFB64 blocks\n");
+			return -EINVAL;
+		}
+		ctx->block_size = CFB64_BLOCK_SIZE;
 	} else {
 		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
 			pr_err("request size is not exact amount of AES blocks\n");
@@ -1069,7 +1075,7 @@ static struct crypto_alg aes_algs[] = {
 	.cra_driver_name	= "atmel-cfb8-aes",
 	.cra_priority		= 100,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= CFB64_BLOCK_SIZE,
+	.cra_blocksize		= CFB8_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
 	.cra_alignmask		= 0x0,
 	.cra_type		= &crypto_ablkcipher_type,
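
The new CFB64 branch mirrors the existing CFB32 handling: the request length must be a whole number of 8-byte CFB64 blocks, otherwise the driver rejects it with -EINVAL. A minimal standalone sketch of the same alignment check, assuming a CFB64 block size of 8 bytes (IS_ALIGNED() requires a power-of-two alignment):

#include <linux/kernel.h>
#include <linux/errno.h>

#define MY_CFB64_BLOCK_SIZE	8	/* illustrative; the driver has its own define */

static int my_check_cfb64_len(unsigned int nbytes)
{
	/* IS_ALIGNED(x, a) is true when x is a multiple of the power-of-two a */
	if (!IS_ALIGNED(nbytes, MY_CFB64_BLOCK_SIZE))
		return -EINVAL;

	return 0;
}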
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index c9ff298e6d26..b099e33cb073 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -29,10 +29,11 @@
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 
-#include <asm/blackfin.h>
-#include <asm/bfin_crc.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
+#include <asm/io.h>
+
+#include "bfin_crc.h"
 
 #define CRC_CCRYPTO_QUEUE_LENGTH	5
 
@@ -54,12 +55,13 @@ struct bfin_crypto_crc {
 	int			irq;
 	int			dma_ch;
 	u32			poly;
-	volatile struct crc_register *regs;
+	struct crc_register	*regs;
 
 	struct ahash_request	*req; /* current request in operation */
 	struct dma_desc_array	*sg_cpu; /* virt addr of sg dma descriptors */
 	dma_addr_t		sg_dma; /* phy addr of sg dma descriptors */
 	u8			*sg_mid_buf;
+	dma_addr_t		sg_mid_dma; /* phy addr of sg mid buffer */
 
 	struct tasklet_struct	done_task;
 	struct crypto_queue	queue; /* waiting requests */
@@ -132,13 +134,13 @@ static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nent
 
 static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
 {
-	crc->regs->datacntrld = 0;
-	crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
-	crc->regs->curresult = key;
+	writel(0, &crc->regs->datacntrld);
+	writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
+	writel(key, &crc->regs->curresult);
 
 	/* setup CRC interrupts */
-	crc->regs->status = CMPERRI | DCNTEXPI;
-	crc->regs->intrenset = CMPERRI | DCNTEXPI;
+	writel(CMPERRI | DCNTEXPI, &crc->regs->status);
+	writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
 
 	return 0;
 }
@@ -194,7 +196,6 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 	dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
 
 	for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
-		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
 		dma_addr = sg_dma_address(sg);
 		/* deduce extra bytes in last sg */
 		if (sg_is_last(sg))
@@ -207,12 +208,29 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 			   bytes in current sg buffer. Move addr of current
 			   sg and deduce the length of current sg.
 			 */
-			memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count,
-				(void *)dma_addr,
+			memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
+				sg_virt(sg),
 				CHKSUM_DIGEST_SIZE - mid_dma_count);
 			dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
 			dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
+
+			dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
+				DMAEN | PSIZE_32 | WDSIZE_32;
+
+			/* setup new dma descriptor for next middle dma */
+			crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
+			crc->sg_cpu[i].cfg = dma_config;
+			crc->sg_cpu[i].x_count = 1;
+			crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
+			dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
+				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+				i, crc->sg_cpu[i].start_addr,
+				crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
+				crc->sg_cpu[i].x_modify);
+			i++;
 		}
+
+		dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
 		/* chop current sg dma len to multiple of 32 bits */
 		mid_dma_count = dma_count % 4;
 		dma_count &= ~0x3;
@@ -243,24 +261,9 @@ static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
 
 		if (mid_dma_count) {
 			/* copy extra bytes to next middle dma buffer */
-			dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
-				DMAEN | PSIZE_32 | WDSIZE_32;
 			memcpy(crc->sg_mid_buf + (i << 2),
-				(void *)(dma_addr + (dma_count << 2)),
+				(u8*)sg_virt(sg) + (dma_count << 2),
 				mid_dma_count);
-			/* setup new dma descriptor for next middle dma */
-			crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
-					crc->sg_mid_buf + (i << 2),
-					CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
-			crc->sg_cpu[i].cfg = dma_config;
-			crc->sg_cpu[i].x_count = 1;
-			crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
-			dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
-				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
-				i, crc->sg_cpu[i].start_addr,
-				crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
-				crc->sg_cpu[i].x_modify);
-			i++;
 		}
 	}
 
@@ -303,6 +306,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
 	int nsg, i, j;
 	unsigned int nextlen;
 	unsigned long flags;
+	u32 reg;
 
 	spin_lock_irqsave(&crc->lock, flags);
 	if (req)
@@ -402,13 +406,14 @@ finish_update:
 		ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
 
 	/* set CRC data count before start DMA */
-	crc->regs->datacnt = ctx->sg_buflen >> 2;
+	writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
 
 	/* setup and enable CRC DMA */
 	bfin_crypto_crc_config_dma(crc);
 
 	/* finally kick off CRC operation */
-	crc->regs->control |= BLKEN;
+	reg = readl(&crc->regs->control);
+	writel(reg | BLKEN, &crc->regs->control);
 
 	return -EINPROGRESS;
 }
@@ -529,14 +534,17 @@ static void bfin_crypto_crc_done_task(unsigned long data)
 static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
 {
 	struct bfin_crypto_crc *crc = dev_id;
+	u32 reg;
 
-	if (crc->regs->status & DCNTEXP) {
-		crc->regs->status = DCNTEXP;
+	if (readl(&crc->regs->status) & DCNTEXP) {
+		writel(DCNTEXP, &crc->regs->status);
 
 		/* prepare results */
-		put_unaligned_le32(crc->regs->result, crc->req->result);
+		put_unaligned_le32(readl(&crc->regs->result),
+			crc->req->result);
 
-		crc->regs->control &= ~BLKEN;
+		reg = readl(&crc->regs->control);
+		writel(reg & ~BLKEN, &crc->regs->control);
 		crc->busy = 0;
 
 		if (crc->req->base.complete)
@@ -560,7 +568,7 @@ static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t st
 	struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
 	int i = 100000;
 
-	while ((crc->regs->control & BLKEN) && --i)
+	while ((readl(&crc->regs->control) & BLKEN) && --i)
 		cpu_relax();
 
 	if (i == 0)
@@ -647,29 +655,32 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
 	 * 1 last + 1 next dma descriptors
 	 */
 	crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
+	crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
+			* ((CRC_MAX_DMA_DESC + 1) << 1);
 
-	crc->regs->control = 0;
-	crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
+	writel(0, &crc->regs->control);
+	crc->poly = (u32)pdev->dev.platform_data;
+	writel(crc->poly, &crc->regs->poly);
 
-	while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
+	while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
 		cpu_relax();
 
 	if (timeout == 0)
 		dev_info(&pdev->dev, "init crc poly timeout\n");
 
+	platform_set_drvdata(pdev, crc);
+
 	spin_lock(&crc_list.lock);
 	list_add(&crc->list, &crc_list.dev_list);
 	spin_unlock(&crc_list.lock);
 
-	platform_set_drvdata(pdev, crc);
-
-	ret = crypto_register_ahash(&algs);
-	if (ret) {
-		spin_lock(&crc_list.lock);
-		list_del(&crc->list);
-		spin_unlock(&crc_list.lock);
-		dev_err(&pdev->dev, "Cann't register crypto ahash device\n");
-		goto out_error_dma;
+	if (list_is_singular(&crc_list.dev_list)) {
+		ret = crypto_register_ahash(&algs);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Can't register crypto ahash device\n");
+			goto out_error_dma;
+		}
 	}
 
 	dev_info(&pdev->dev, "initialized\n");
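
The conversion above replaces dereferences of a volatile struct crc_register pointer with readl()/writel() accessors, the portable way to touch memory-mapped registers. A minimal sketch of the read-modify-write pattern now used for the control register, assuming an ioremap()'d register address and the BLKEN bit from bfin_crc.h:

#include <linux/io.h>
#include <linux/types.h>

#define BLKEN	0x00000001	/* block enable, as defined in bfin_crc.h */

static void my_crc_block_enable(void __iomem *control)
{
	u32 reg;

	reg = readl(control);		/* read current control value */
	writel(reg | BLKEN, control);	/* set BLKEN, leave other bits untouched */
}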
diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h
new file mode 100644
index 000000000000..75cef4dc85a1
--- /dev/null
+++ b/drivers/crypto/bfin_crc.h
@@ -0,0 +1,125 @@
+/*
+ * bfin_crc.h - interface to Blackfin CRC controllers
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_CRC_H__
+#define __BFIN_CRC_H__
+
+/* Function driver which use hardware crc must initialize the structure */
+struct crc_info {
+	/* Input data address */
+	unsigned char *in_addr;
+	/* Output data address */
+	unsigned char *out_addr;
+	/* Input or output bytes */
+	unsigned long datasize;
+	union {
+	/* CRC to compare with that of input buffer */
+	unsigned long crc_compare;
+	/* Value to compare with input data */
+	unsigned long val_verify;
+	/* Value to fill */
+	unsigned long val_fill;
+	};
+	/* Value to program the 32b CRC Polynomial */
+	unsigned long crc_poly;
+	union {
+	/* CRC calculated from the input data */
+	unsigned long crc_result;
+	/* First failed position to verify input data */
+	unsigned long pos_verify;
+	};
+	/* CRC mirror flags */
+	unsigned int bitmirr:1;
+	unsigned int bytmirr:1;
+	unsigned int w16swp:1;
+	unsigned int fdsel:1;
+	unsigned int rsltmirr:1;
+	unsigned int polymirr:1;
+	unsigned int cmpmirr:1;
+};
+
+/* Userspace interface */
+#define CRC_IOC_MAGIC		'C'
+#define CRC_IOC_CALC_CRC	_IOWR('C', 0x01, unsigned int)
+#define CRC_IOC_MEMCPY_CRC	_IOWR('C', 0x02, unsigned int)
+#define CRC_IOC_VERIFY_VAL	_IOWR('C', 0x03, unsigned int)
+#define CRC_IOC_FILL_VAL	_IOWR('C', 0x04, unsigned int)
+
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+
+struct crc_register {
+	u32 control;
+	u32 datacnt;
+	u32 datacntrld;
+	u32 __pad_1[2];
+	u32 compare;
+	u32 fillval;
+	u32 datafifo;
+	u32 intren;
+	u32 intrenset;
+	u32 intrenclr;
+	u32 poly;
+	u32 __pad_2[4];
+	u32 status;
+	u32 datacntcap;
+	u32 __pad_3;
+	u32 result;
+	u32 curresult;
+	u32 __pad_4[3];
+	u32 revid;
+};
+
+/* CRC_STATUS Masks */
+#define CMPERR			0x00000002	/* Compare error */
+#define DCNTEXP			0x00000010	/* datacnt register expired */
+#define IBR			0x00010000	/* Input buffer ready */
+#define OBR			0x00020000	/* Output buffer ready */
+#define IRR			0x00040000	/* Immediate result ready */
+#define LUTDONE			0x00080000	/* Look-up table generation done */
+#define FSTAT			0x00700000	/* FIFO status */
+#define MAX_FIFO		4		/* Max fifo size */
+
+/* CRC_CONTROL Masks */
+#define BLKEN			0x00000001	/* Block enable */
+#define OPMODE			0x000000F0	/* Operation mode */
+#define OPMODE_OFFSET		4		/* Operation mode mask offset*/
+#define MODE_DMACPY_CRC		1		/* MTM CRC compute and compare */
+#define MODE_DATA_FILL		2		/* MTM data fill */
+#define MODE_CALC_CRC		3		/* MSM CRC compute and compare */
+#define MODE_DATA_VERIFY	4		/* MSM data verify */
+#define AUTOCLRZ		0x00000100	/* Auto clear to zero */
+#define AUTOCLRF		0x00000200	/* Auto clear to one */
+#define OBRSTALL		0x00001000	/* Stall on output buffer ready */
+#define IRRSTALL		0x00002000	/* Stall on immediate result ready */
+#define BITMIRR			0x00010000	/* Mirror bits within each byte of 32-bit input data */
+#define BITMIRR_OFFSET		16		/* Mirror bits offset */
+#define BYTMIRR			0x00020000	/* Mirror bytes of 32-bit input data */
+#define BYTMIRR_OFFSET		17		/* Mirror bytes offset */
+#define W16SWP			0x00040000	/* Mirror upper and lower 16-bit word of 32-bit input data */
+#define W16SWP_OFFSET		18		/* Mirror 16-bit word offset */
+#define FDSEL			0x00080000	/* FIFO is written after input data is mirrored */
+#define FDSEL_OFFSET		19		/* Mirror FIFO offset */
+#define RSLTMIRR		0x00100000	/* CRC result registers are mirrored. */
+#define RSLTMIRR_OFFSET		20		/* Mirror CRC result offset. */
+#define POLYMIRR		0x00200000	/* CRC poly register is mirrored. */
+#define POLYMIRR_OFFSET		21		/* Mirror CRC poly offset. */
+#define CMPMIRR			0x00400000	/* CRC compare register is mirrored. */
+#define CMPMIRR_OFFSET		22		/* Mirror CRC compare offset. */
+
+/* CRC_INTREN Masks */
+#define CMPERRI 		0x02		/* CRC_ERROR_INTR */
+#define DCNTEXPI 		0x10		/* CRC_STATUS_INTR */
+
+#endif
+
+#endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 5f891254db73..c09ce1f040d3 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -303,6 +303,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
@@ -472,6 +473,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen <=
 	    CAAM_DESC_BYTES_MAX)
@@ -527,6 +529,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen <=
 	    CAAM_DESC_BYTES_MAX)
@@ -918,11 +921,8 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct aead_edesc *)((char *)desc -
 		 offsetof(struct aead_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
@@ -969,11 +969,8 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       req->cryptlen - ctx->authsize, 1);
 #endif
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	aead_unmap(jrdev, edesc, req);
 
@@ -1018,11 +1015,8 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	edesc = (struct ablkcipher_edesc *)((char *)desc -
 		 offsetof(struct ablkcipher_edesc, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
@@ -1053,11 +1047,8 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ablkcipher_edesc *)((char *)desc -
 		 offsetof(struct ablkcipher_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 0378328f47a7..0d9284ef96a8 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -545,7 +545,8 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto map_err;
 	}
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
@@ -559,6 +560,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 				 DMA_TO_DEVICE);
 	}
 
+map_err:
 	kfree(hashed_key);
 	return ret;
 badkey:
@@ -631,11 +633,8 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap(jrdev, edesc, req, digestsize);
 	kfree(edesc);
@@ -669,11 +668,8 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
 	kfree(edesc);
@@ -707,11 +703,8 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
 	kfree(edesc);
@@ -745,11 +738,8 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 
 	edesc = (struct ahash_edesc *)((char *)desc -
 		 offsetof(struct ahash_edesc, hw_desc));
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
 	kfree(edesc);
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 3529b54048c9..8c07d3153f12 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -103,11 +103,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
 	bd = (struct buf_data *)((char *)desc -
 	      offsetof(struct buf_data, hw_desc));
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(jrdev, err);
 
 	atomic_set(&bd->empty, BUF_NOT_EMPTY);
 	complete(&bd->filled);
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 0eabd81e1a90..6531054a44c8 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -11,247 +11,208 @@
 #include "jr.h"
 #include "error.h"
 
-#define SPRINTFCAT(str, format, param, max_alloc)		\
-{								\
-	char *tmp;						\
-								\
-	tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC);	\
-	if (likely(tmp)) {					\
-		sprintf(tmp, format, param);			\
-		strcat(str, tmp);				\
-		kfree(tmp);					\
-	} else {						\
-		strcat(str, "kmalloc failure in SPRINTFCAT");	\
-	}							\
-}
-
-static void report_jump_idx(u32 status, char *outstr)
+static const struct {
+	u8 value;
+	const char *error_text;
+} desc_error_list[] = {
+	{ 0x00, "No error." },
+	{ 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
+	{ 0x02, "SGT Null Entry Error." },
+	{ 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
+	{ 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
+	{ 0x05, "Reserved." },
+	{ 0x06, "Invalid KEY Command" },
+	{ 0x07, "Invalid LOAD Command" },
+	{ 0x08, "Invalid STORE Command" },
+	{ 0x09, "Invalid OPERATION Command" },
+	{ 0x0A, "Invalid FIFO LOAD Command" },
+	{ 0x0B, "Invalid FIFO STORE Command" },
+	{ 0x0C, "Invalid MOVE/MOVE_LEN Command" },
+	{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
+	{ 0x0E, "Invalid MATH Command" },
+	{ 0x0F, "Invalid SIGNATURE Command" },
+	{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
+	{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+	{ 0x12, "Shared Descriptor Header Error" },
+	{ 0x13, "Header Error. Invalid length or parity, or certain other problems." },
+	{ 0x14, "Burster Error. Burster has gotten to an illegal state" },
+	{ 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
+	{ 0x16, "DMA Error" },
+	{ 0x17, "Reserved." },
+	{ 0x1A, "Job failed due to JR reset" },
+	{ 0x1B, "Job failed due to Fail Mode" },
+	{ 0x1C, "DECO Watchdog timer timeout error" },
+	{ 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
+	{ 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
+	{ 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
+	{ 0x20, "DECO has completed a reset initiated via the DRR register" },
+	{ 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
+	{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
+	{ 0x23, "Read Input Frame error" },
+	{ 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
+	{ 0x80, "DNR (do not run) error" },
+	{ 0x81, "undefined protocol command" },
+	{ 0x82, "invalid setting in PDB" },
+	{ 0x83, "Anti-replay LATE error" },
+	{ 0x84, "Anti-replay REPLAY error" },
+	{ 0x85, "Sequence number overflow" },
+	{ 0x86, "Sigver invalid signature" },
+	{ 0x87, "DSA Sign Illegal test descriptor" },
+	{ 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
+	{ 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
+	{ 0xC1, "Blob Command error: Undefined mode" },
+	{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+	{ 0xC4, "Blob Command error: Black Blob key or input size error" },
+	{ 0xC5, "Blob Command error: Invalid key destination" },
+	{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
+	{ 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
+	{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+};
+
+static const char * const cha_id_list[] = {
+	"",
+	"AES",
+	"DES",
+	"ARC4",
+	"MDHA",
+	"RNG",
+	"SNOW f8",
+	"Kasumi f8/9",
+	"PKHA",
+	"CRCA",
+	"SNOW f9",
+	"ZUCE",
+	"ZUCA",
+};
+
+static const char * const err_id_list[] = {
+	"No error.",
+	"Mode error.",
+	"Data size error.",
+	"Key size error.",
+	"PKHA A memory size error.",
+	"PKHA B memory size error.",
+	"Data arrived out of sequence error.",
+	"PKHA divide-by-zero error.",
+	"PKHA modulus even error.",
+	"DES key parity error.",
+	"ICV check failed.",
+	"Hardware error.",
+	"Unsupported CCM AAD size.",
+	"Class 1 CHA is not reset",
+	"Invalid CHA combination was selected",
+	"Invalid CHA selected.",
+};
+
+static const char * const rng_err_id_list[] = {
+	"",
+	"",
+	"",
+	"Instantiate",
+	"Not instantiated",
+	"Test instantiate",
+	"Prediction resistance",
+	"Prediction resistance and test request",
+	"Uninstantiate",
+	"Secure key generation",
+};
+
+static void report_ccb_status(struct device *jrdev, const u32 status,
+			      const char *error)
 {
+	u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+		    JRSTA_CCBERR_CHAID_SHIFT;
+	u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
 	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
 		  JRSTA_DECOERR_INDEX_SHIFT;
+	char *idx_str;
+	const char *cha_str = "unidentified cha_id value 0x";
+	char cha_err_code[3] = { 0 };
+	const char *err_str = "unidentified err_id value 0x";
+	char err_err_code[3] = { 0 };
 
 	if (status & JRSTA_DECOERR_JUMP)
-		strcat(outstr, "jump tgt desc idx ");
+		idx_str = "jump tgt desc idx";
 	else
-		strcat(outstr, "desc idx ");
-
-	SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
-}
-
-static void report_ccb_status(u32 status, char *outstr)
-{
-	static const char * const cha_id_list[] = {
-		"",
-		"AES",
-		"DES",
-		"ARC4",
-		"MDHA",
-		"RNG",
-		"SNOW f8",
-		"Kasumi f8/9",
-		"PKHA",
-		"CRCA",
-		"SNOW f9",
-		"ZUCE",
-		"ZUCA",
-	};
-	static const char * const err_id_list[] = {
-		"No error.",
-		"Mode error.",
-		"Data size error.",
-		"Key size error.",
-		"PKHA A memory size error.",
-		"PKHA B memory size error.",
-		"Data arrived out of sequence error.",
-		"PKHA divide-by-zero error.",
-		"PKHA modulus even error.",
-		"DES key parity error.",
-		"ICV check failed.",
-		"Hardware error.",
-		"Unsupported CCM AAD size.",
-		"Class 1 CHA is not reset",
-		"Invalid CHA combination was selected",
-		"Invalid CHA selected.",
-	};
-	static const char * const rng_err_id_list[] = {
-		"",
-		"",
-		"",
-		"Instantiate",
-		"Not instantiated",
-		"Test instantiate",
-		"Prediction resistance",
-		"Prediction resistance and test request",
-		"Uninstantiate",
-		"Secure key generation",
-	};
-	u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
-		    JRSTA_CCBERR_CHAID_SHIFT;
-	u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+		idx_str = "desc idx";
 
-	report_jump_idx(status, outstr);
-
-	if (cha_id < ARRAY_SIZE(cha_id_list)) {
-		SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
-			   strlen(cha_id_list[cha_id]));
-	} else {
-		SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
-			   cha_id, sizeof("ff"));
-	}
+	if (cha_id < ARRAY_SIZE(cha_id_list))
+		cha_str = cha_id_list[cha_id];
+	else
+		snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
 
 	if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
 	    err_id < ARRAY_SIZE(rng_err_id_list) &&
 	    strlen(rng_err_id_list[err_id])) {
 		/* RNG-only error */
-		SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id],
-			   strlen(rng_err_id_list[err_id]));
-	} else if (err_id < ARRAY_SIZE(err_id_list)) {
-		SPRINTFCAT(outstr, "%s", err_id_list[err_id],
-			   strlen(err_id_list[err_id]));
-	} else {
-		SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
-			   err_id, sizeof("ff"));
-	}
+		err_str = rng_err_id_list[err_id];
+	} else if (err_id < ARRAY_SIZE(err_id_list))
+		err_str = err_id_list[err_id];
+	else
+		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+	dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+		status, error, idx_str, idx,
+		cha_str, cha_err_code,
+		err_str, err_err_code);
 }
 
-static void report_jump_status(u32 status, char *outstr)
+static void report_jump_status(struct device *jrdev, const u32 status,
+			       const char *error)
 {
-	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
 }
 
-static void report_deco_status(u32 status, char *outstr)
+static void report_deco_status(struct device *jrdev, const u32 status,
+			       const char *error)
 {
-	static const struct {
-		u8 value;
-		char *error_text;
-	} desc_error_list[] = {
-		{ 0x00, "No error." },
-		{ 0x01, "SGT Length Error. The descriptor is trying to read "
-			"more data than is contained in the SGT table." },
-		{ 0x02, "SGT Null Entry Error." },
-		{ 0x03, "Job Ring Control Error. There is a bad value in the "
-			"Job Ring Control register." },
-		{ 0x04, "Invalid Descriptor Command. The Descriptor Command "
-			"field is invalid." },
-		{ 0x05, "Reserved." },
-		{ 0x06, "Invalid KEY Command" },
-		{ 0x07, "Invalid LOAD Command" },
-		{ 0x08, "Invalid STORE Command" },
-		{ 0x09, "Invalid OPERATION Command" },
-		{ 0x0A, "Invalid FIFO LOAD Command" },
-		{ 0x0B, "Invalid FIFO STORE Command" },
-		{ 0x0C, "Invalid MOVE/MOVE_LEN Command" },
-		{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
-			"invalid because the target is not a Job Header "
-			"Command, or the jump is from a Trusted Descriptor to "
-			"a Job Descriptor, or because the target Descriptor "
-			"contains a Shared Descriptor." },
-		{ 0x0E, "Invalid MATH Command" },
-		{ 0x0F, "Invalid SIGNATURE Command" },
-		{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
-			"Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
-			"LOAD, or SEQ FIFO STORE decremented the input or "
-			"output sequence length below 0. This error may result "
-			"if a built-in PROTOCOL Command has encountered a "
-			"malformed PDU." },
-		{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
-		{ 0x12, "Shared Descriptor Header Error" },
-		{ 0x13, "Header Error. Invalid length or parity, or certain "
-			"other problems." },
-		{ 0x14, "Burster Error. Burster has gotten to an illegal "
-			"state" },
-		{ 0x15, "Context Register Length Error. The descriptor is "
-			"trying to read or write past the end of the Context "
-			"Register. A SEQ LOAD or SEQ STORE with the VLF bit "
-			"set was executed with too large a length in the "
-			"variable length register (VSOL for SEQ STORE or VSIL "
-			"for SEQ LOAD)." },
-		{ 0x16, "DMA Error" },
-		{ 0x17, "Reserved." },
-		{ 0x1A, "Job failed due to JR reset" },
-		{ 0x1B, "Job failed due to Fail Mode" },
-		{ 0x1C, "DECO Watchdog timer timeout error" },
-		{ 0x1D, "DECO tried to copy a key from another DECO but the "
-			"other DECO's Key Registers were locked" },
-		{ 0x1E, "DECO attempted to copy data from a DECO that had an "
-			"unmasked Descriptor error" },
-		{ 0x1F, "LIODN error. DECO was trying to share from itself or "
-			"from another DECO but the two Non-SEQ LIODN values "
-			"didn't match or the 'shared from' DECO's Descriptor "
-			"required that the SEQ LIODNs be the same and they "
-			"aren't." },
-		{ 0x20, "DECO has completed a reset initiated via the DRR "
-			"register" },
-		{ 0x21, "Nonce error. When using EKT (CCM) key encryption "
-			"option in the FIFO STORE Command, the Nonce counter "
-			"reached its maximum value and this encryption mode "
-			"can no longer be used." },
-		{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
-			"(input frame; block ciphers) and IPsec decap (output "
-			"frame, when doing the next header byte update) and "
-			"DCRC (output frame)." },
-		{ 0x23, "Read Input Frame error" },
-		{ 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
-		{ 0x80, "DNR (do not run) error" },
-		{ 0x81, "undefined protocol command" },
-		{ 0x82, "invalid setting in PDB" },
-		{ 0x83, "Anti-replay LATE error" },
-		{ 0x84, "Anti-replay REPLAY error" },
-		{ 0x85, "Sequence number overflow" },
-		{ 0x86, "Sigver invalid signature" },
-		{ 0x87, "DSA Sign Illegal test descriptor" },
-		{ 0x88, "Protocol Format Error - A protocol has seen an error "
-			"in the format of data received. When running RSA, "
-			"this means that formatting with random padding was "
-			"used, and did not follow the form: 0x00, 0x02, 8-to-N "
-			"bytes of non-zero pad, 0x00, F data." },
-		{ 0x89, "Protocol Size Error - A protocol has seen an error in "
-			"size. When running RSA, pdb size N < (size of F) when "
-			"no formatting is used; or pdb size N < (F + 11) when "
-			"formatting is used." },
-		{ 0xC1, "Blob Command error: Undefined mode" },
-		{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
-		{ 0xC4, "Blob Command error: Black Blob key or input size "
-			"error" },
-		{ 0xC5, "Blob Command error: Invalid key destination" },
-		{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
-		{ 0xF0, "IPsec TTL or hop limit field either came in as 0, "
-			"or was decremented to 0" },
-		{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
-	};
-	u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
+	u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
+	u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+		  JRSTA_DECOERR_INDEX_SHIFT;
+	char *idx_str;
+	const char *err_str = "unidentified error value 0x";
+	char err_err_code[3] = { 0 };
 	int i;
 
-	report_jump_idx(status, outstr);
+	if (status & JRSTA_DECOERR_JUMP)
+		idx_str = "jump tgt desc idx";
+	else
+		idx_str = "desc idx";
 
 	for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
-		if (desc_error_list[i].value == desc_error)
+		if (desc_error_list[i].value == err_id)
 			break;
 
-	if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
-		SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
-			   strlen(desc_error_list[i].error_text));
-	} else {
-		SPRINTFCAT(outstr, "unidentified error value 0x%02x",
-			   desc_error, sizeof("ff"));
-	}
+	if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
+		err_str = desc_error_list[i].error_text;
+	else
+		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+	dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
+		status, error, idx_str, idx, err_str, err_err_code);
 }
 
-static void report_jr_status(u32 status, char *outstr)
+static void report_jr_status(struct device *jrdev, const u32 status,
+			     const char *error)
 {
-	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
 }
 
-static void report_cond_code_status(u32 status, char *outstr)
+static void report_cond_code_status(struct device *jrdev, const u32 status,
+				    const char *error)
 {
-	SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+	dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+		status, error, __func__);
 }
 
-char *caam_jr_strstatus(char *outstr, u32 status)
+void caam_jr_strstatus(struct device *jrdev, u32 status)
 {
 	static const struct stat_src {
-		void (*report_ssed)(u32 status, char *outstr);
-		char *error;
+		void (*report_ssed)(struct device *jrdev, const u32 status,
+				    const char *error);
+		const char *error;
 	} status_src[] = {
 		{ NULL, "No error" },
 		{ NULL, NULL },
@@ -263,12 +224,16 @@ char *caam_jr_strstatus(char *outstr, u32 status)
 		{ report_cond_code_status, "Condition Code" },
 	};
 	u32 ssrc = status >> JRSTA_SSRC_SHIFT;
-
-	sprintf(outstr, "%s: ", status_src[ssrc].error);
-
-	if (status_src[ssrc].report_ssed)
-		status_src[ssrc].report_ssed(status, outstr);
-
-	return outstr;
+	const char *error = status_src[ssrc].error;
+
+	/*
+	 * If there is no further error handling function, just
+	 * print the error code, error string and exit. Otherwise
+	 * call the handler function.
+	 */
+	if (!status_src[ssrc].report_ssed)
+		dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
+	else
+		status_src[ssrc].report_ssed(jrdev, status, error);
 }
 EXPORT_SYMBOL(caam_jr_strstatus);
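
The rewrite drops the SPRINTFCAT() macro, which built the message by repeated kmalloc/sprintf/strcat into a caller-supplied buffer, in favour of static lookup tables and a single dev_err() per error; callers now only pass the job-ring device and the raw status word. A rough, user-space C sketch of the same lookup-with-fallback idea (the table contents here are illustrative, not the full CAAM lists):

#include <stdio.h>
#include <stddef.h>

struct err_entry {
	unsigned char value;
	const char *text;
};

static const struct err_entry err_list[] = {
	{ 0x00, "No error." },
	{ 0x16, "DMA Error" },
};

static void report_error(unsigned char code)
{
	char fallback[40];
	const char *str = NULL;
	size_t i;

	for (i = 0; i < sizeof(err_list) / sizeof(err_list[0]); i++) {
		if (err_list[i].value == code) {
			str = err_list[i].text;
			break;
		}
	}

	if (!str) {
		/* unknown code: fall back to printing the raw value */
		snprintf(fallback, sizeof(fallback),
			 "unidentified error value 0x%02x", code);
		str = fallback;
	}

	fprintf(stderr, "%s\n", str);
}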
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 02c7baa1748e..b6350b0d9153 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,5 +7,5 @@
 #ifndef CAAM_ERROR_H
 #define CAAM_ERROR_H
 #define CAAM_ERROR_STR_MAX 302
-extern char *caam_jr_strstatus(char *outstr, u32 status);
+void caam_jr_strstatus(struct device *jrdev, u32 status);
 #endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index ea2e406610eb..871703c49d2c 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -19,11 +19,8 @@ void split_key_done(struct device *dev, u32 *desc, u32 err,
 	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	if (err) {
-		char tmp[CAAM_ERROR_STR_MAX];
-
-		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-	}
+	if (err)
+		caam_jr_strstatus(dev, err);
 
 	res->err = err;
 
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0237ab58f242..0cc5594b7de3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -191,12 +191,12 @@ static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
 	ctx->complete = ccp_aes_xts_complete;
 	ctx->u.aes.key_len = 0;
 
-	fallback_tfm = crypto_alloc_ablkcipher(tfm->__crt_alg->cra_name, 0,
+	fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
 					       CRYPTO_ALG_ASYNC |
 					       CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(fallback_tfm)) {
 		pr_warn("could not load fallback driver %s\n",
-			tfm->__crt_alg->cra_name);
+			crypto_tfm_alg_name(tfm));
 		return PTR_ERR(fallback_tfm);
 	}
 	ctx->u.aes.tfm_ablkcipher = fallback_tfm;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 93319f9db753..0d746236df5e 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -48,12 +48,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
 	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
 		msix_entry[v].entry = v;
 
-	while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
-		v = ret;
-	if (ret)
+	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
+	if (ret < 0)
 		return ret;
 
-	ccp_pci->msix_count = v;
+	ccp_pci->msix_count = ret;
 	for (v = 0; v < ccp_pci->msix_count; v++) {
 		/* Set the interrupt names and request the irqs */
 		snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
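
pci_enable_msix() could return a positive value meaning "retry with this smaller vector count", which is what the old while loop handled; pci_enable_msix_range() takes an acceptable [minvec, maxvec] range and returns either the number of vectors actually allocated or a negative errno. A minimal sketch of the new calling convention (the wrapper is hypothetical):

#include <linux/pci.h>

static int my_request_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int nvec)
{
	int ret;

	/* ask for up to nvec vectors, accept as few as one */
	ret = pci_enable_msix_range(pdev, entries, 1, nvec);
	if (ret < 0)
		return ret;	/* negative errno: not even one vector available */

	return ret;		/* number of vectors actually enabled */
}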
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 0c9ff4971724..fe538e5287a5 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -226,7 +226,7 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	op->dst = (void *) out;
 	op->mode = AES_MODE_ECB;
 	op->flags = 0;
-	op->len = AES_MIN_BLOCK_SIZE;
+	op->len = AES_BLOCK_SIZE;
 	op->dir = AES_DIR_ENCRYPT;
 
 	geode_aes_crypt(op);
@@ -247,7 +247,7 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	op->dst = (void *) out;
 	op->mode = AES_MODE_ECB;
 	op->flags = 0;
-	op->len = AES_MIN_BLOCK_SIZE;
+	op->len = AES_BLOCK_SIZE;
 	op->dir = AES_DIR_DECRYPT;
 
 	geode_aes_crypt(op);
@@ -255,7 +255,7 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 static int fallback_init_cip(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
 	op->fallback.cip = crypto_alloc_cipher(name, 0,
@@ -286,7 +286,7 @@ static struct crypto_alg geode_alg = {
 							CRYPTO_ALG_NEED_FALLBACK,
 	.cra_init			=	fallback_init_cip,
 	.cra_exit			=	fallback_exit_cip,
-	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_module			=	THIS_MODULE,
 	.cra_u				=	{
@@ -320,7 +320,7 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_DECRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -352,7 +352,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_CBC;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_ENCRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -365,7 +365,7 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
 
 static int fallback_init_blk(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
 
 	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
@@ -396,7 +396,7 @@ static struct crypto_alg geode_cbc_alg = {
 						CRYPTO_ALG_NEED_FALLBACK,
 	.cra_init			=	fallback_init_blk,
 	.cra_exit			=	fallback_exit_blk,
-	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_alignmask		=	15,
 	.cra_type			=	&crypto_blkcipher_type,
@@ -408,7 +408,7 @@ static struct crypto_alg geode_cbc_alg = {
 			.setkey			=	geode_setkey_blk,
 			.encrypt		=	geode_cbc_encrypt,
 			.decrypt		=	geode_cbc_decrypt,
-			.ivsize			=	AES_IV_LENGTH,
+			.ivsize			=	AES_BLOCK_SIZE,
 		}
 	}
 };
@@ -432,7 +432,7 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_DECRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -462,7 +462,7 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
 		op->src = walk.src.virt.addr,
 		op->dst = walk.dst.virt.addr;
 		op->mode = AES_MODE_ECB;
-		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
 		op->dir = AES_DIR_ENCRYPT;
 
 		ret = geode_aes_crypt(op);
@@ -482,7 +482,7 @@ static struct crypto_alg geode_ecb_alg = {
 						CRYPTO_ALG_NEED_FALLBACK,
 	.cra_init			=	fallback_init_blk,
 	.cra_exit			=	fallback_exit_blk,
-	.cra_blocksize		=	AES_MIN_BLOCK_SIZE,
+	.cra_blocksize		=	AES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct geode_aes_op),
 	.cra_alignmask		=	15,
 	.cra_type			=	&crypto_blkcipher_type,
@@ -547,7 +547,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if (ret)
 		goto eecb;
 
-	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
+	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
 	return 0;
 
  eecb:
@@ -565,7 +565,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
  eenable:
 	pci_disable_device(dev);
 
-	printk(KERN_ERR "geode-aes:  GEODE AES initialization failed.\n");
+	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
 	return ret;
 }
 
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index f1855b50da48..f442ca972e3c 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -10,10 +10,6 @@
 #define _GEODE_AES_H_
 
 /* driver logic flags */
-#define AES_IV_LENGTH  16
-#define AES_KEY_LENGTH 16
-#define AES_MIN_BLOCK_SIZE 16
-
 #define AES_MODE_ECB 0
 #define AES_MODE_CBC 1
 
@@ -64,7 +60,7 @@ struct geode_aes_op {
 	u32 flags;
 	int len;
 
-	u8 key[AES_KEY_LENGTH];
+	u8 key[AES_KEYSIZE_128];
 	u8 *iv;
 
 	union {
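
Several drivers in this pull (cesa, sahara, padlock, n2, dcp, ccp, geode) stop reaching into tfm->__crt_alg->cra_name and use the crypto_tfm_alg_name() accessor instead; __crt_alg is crypto-core internals that drivers should not dereference directly. A minimal sketch of the fallback-allocation pattern with the accessor (context struct and field names are hypothetical):

#include <linux/crypto.h>
#include <linux/err.h>

struct my_cipher_ctx {
	struct crypto_cipher *fallback;
};

static int my_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);	/* e.g. "aes" */
	struct my_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	/* request a software implementation of the same algorithm */
	ctx->fallback = crypto_alloc_cipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	return 0;
}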
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 8d1e6f8e9e9c..29d0ee504907 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -622,8 +622,8 @@ static int queue_manag(void *data)
 		}
 
 		if (async_req) {
-			if (async_req->tfm->__crt_alg->cra_type !=
-			    &crypto_ahash_type) {
+			if (crypto_tfm_alg_type(async_req->tfm) !=
+			    CRYPTO_ALG_TYPE_AHASH) {
 				struct ablkcipher_request *req =
 				    ablkcipher_request_cast(async_req);
 				mv_start_new_crypt_req(req);
@@ -843,7 +843,7 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
 static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
 			    enum hash_op op, int count_add)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_shash *fallback_tfm = NULL;
 	struct crypto_shash *base_hash = NULL;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 7bbe0ab21eca..b5f7e6db24d4 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -104,7 +104,6 @@ struct dcp_sha_req_ctx {
  * design of Linux Crypto API.
  */
 static struct dcp *global_sdcp;
-static DEFINE_MUTEX(global_mutex);
 
 /* DCP register layout. */
 #define MXS_DCP_CTRL				0x00
@@ -482,7 +481,7 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
 static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 	struct crypto_ablkcipher *blk;
@@ -907,60 +906,49 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	struct resource *iores;
 	int dcp_vmi_irq, dcp_irq;
 
-	mutex_lock(&global_mutex);
 	if (global_sdcp) {
 		dev_err(dev, "Only one DCP instance allowed!\n");
-		ret = -ENODEV;
-		goto err_mutex;
+		return -ENODEV;
 	}
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dcp_vmi_irq = platform_get_irq(pdev, 0);
-	if (dcp_vmi_irq < 0) {
-		ret = dcp_vmi_irq;
-		goto err_mutex;
-	}
+	if (dcp_vmi_irq < 0)
+		return dcp_vmi_irq;
 
 	dcp_irq = platform_get_irq(pdev, 1);
-	if (dcp_irq < 0) {
-		ret = dcp_irq;
-		goto err_mutex;
-	}
+	if (dcp_irq < 0)
+		return dcp_irq;
 
 	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
-	if (!sdcp) {
-		ret = -ENOMEM;
-		goto err_mutex;
-	}
+	if (!sdcp)
+		return -ENOMEM;
 
 	sdcp->dev = dev;
 	sdcp->base = devm_ioremap_resource(dev, iores);
-	if (IS_ERR(sdcp->base)) {
-		ret = PTR_ERR(sdcp->base);
-		goto err_mutex;
-	}
+	if (IS_ERR(sdcp->base))
+		return PTR_ERR(sdcp->base);
+
 
 	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
 			       "dcp-vmi-irq", sdcp);
 	if (ret) {
 		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
-		goto err_mutex;
+		return ret;
 	}
 
 	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
 			       "dcp-irq", sdcp);
 	if (ret) {
 		dev_err(dev, "Failed to claim DCP IRQ!\n");
-		goto err_mutex;
+		return ret;
 	}
 
 	/* Allocate coherent helper block. */
 	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
 				   GFP_KERNEL);
-	if (!sdcp->coh) {
-		ret = -ENOMEM;
-		goto err_mutex;
-	}
+	if (!sdcp->coh)
+		return -ENOMEM;
 
 	/* Re-align the structure so it fits the DCP constraints. */
 	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
@@ -968,7 +956,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	/* Restart the DCP block. */
 	ret = stmp_reset_block(sdcp->base);
 	if (ret)
-		goto err_mutex;
+		return ret;
 
 	/* Initialize control register. */
 	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
@@ -1006,8 +994,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 						      NULL, "mxs_dcp_chan/sha");
 	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
 		dev_err(dev, "Error starting SHA thread!\n");
-		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
-		goto err_mutex;
+		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
 	}
 
 	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1064,9 +1051,6 @@ err_destroy_aes_thread:
 
 err_destroy_sha_thread:
 	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
-
-err_mutex:
-	mutex_unlock(&global_mutex);
 	return ret;
 }
 
@@ -1088,9 +1072,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, NULL);
 
-	mutex_lock(&global_mutex);
 	global_sdcp = NULL;
-	mutex_unlock(&global_mutex);
 
 	return 0;
 }
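
Because every resource in mxs_dcp_probe() is obtained through devm_* helpers, a failing step has nothing to unwind, so the goto err_mutex bookkeeping collapses into plain returns and the mutex that only guarded those paths becomes unnecessary. A hedged sketch of that devm_ probe style (device, sizes and names are placeholders):

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	void *priv;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;		/* nothing allocated yet, just return */

	priv = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* devres frees it on detach anyway */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* mapping is managed, no unwind needed */

	return devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
				"my-dev-irq", priv);
}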
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index e1f0ab413c3b..7263c10a56ee 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -356,7 +356,7 @@ static int n2_hash_async_finup(struct ahash_request *req)
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct crypto_ahash *fallback_tfm;
@@ -391,7 +391,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 
 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
 {
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 5ce8b5765121..502edf0a2933 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1229,7 +1229,7 @@ static int __exit nx842_remove(struct vio_dev *viodev)
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
 	of_reconfig_notifier_unregister(&nx842_of_nb);
-	rcu_assign_pointer(devdata, NULL);
+	RCU_INIT_POINTER(devdata, NULL);
 	spin_unlock_irqrestore(&devdata_mutex, flags);
 	synchronize_rcu();
 	dev_set_drvdata(&viodev->dev, NULL);
@@ -1280,7 +1280,7 @@ static void __exit nx842_exit(void)
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
-	rcu_assign_pointer(devdata, NULL);
+	RCU_INIT_POINTER(devdata, NULL);
 	spin_unlock_irqrestore(&devdata_mutex, flags);
 	synchronize_rcu();
 	if (old_devdata)
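
rcu_assign_pointer() contains the write barrier needed to publish a freshly initialized object to RCU readers; when the stored value is NULL there is nothing to order, so the lighter-weight RCU_INIT_POINTER() is sufficient, which is what the two hunks above switch to. A small sketch of the tear-down pattern (names are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_data {
	int value;
};

static struct my_data __rcu *my_global;

/* caller holds the update-side lock protecting my_global */
static void my_teardown(void)
{
	struct my_data *old;

	old = rcu_dereference_protected(my_global, 1);
	RCU_INIT_POINTER(my_global, NULL);	/* NULL needs no publish barrier */
	synchronize_rcu();			/* wait out readers still using old */
	kfree(old);
}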
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index ec5f13162b73..b8bc84be8741 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -223,12 +223,19 @@ static void omap_des_write_n(struct omap_des_dev *dd, u32 offset,
 
 static int omap_des_hw_init(struct omap_des_dev *dd)
 {
+	int err;
+
 	/*
 	 * clocks are enabled when request starts and disabled when finished.
 	 * It may be long delays between requests.
 	 * Device might go to off mode to save power.
 	 */
-	pm_runtime_get_sync(dd->dev);
+	err = pm_runtime_get_sync(dd->dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dd->dev);
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		return err;
+	}
 
 	if (!(dd->flags & FLAGS_INIT)) {
 		dd->flags |= FLAGS_INIT;
@@ -1074,16 +1081,20 @@ static int omap_des_probe(struct platform_device *pdev)
 	if (err)
 		goto err_res;
 
-	dd->io_base = devm_request_and_ioremap(dev, res);
-	if (!dd->io_base) {
-		dev_err(dev, "can't ioremap\n");
-		err = -ENOMEM;
+	dd->io_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dd->io_base)) {
+		err = PTR_ERR(dd->io_base);
 		goto err_res;
 	}
 	dd->phys_base = res->start;
 
 	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dev);
+		dev_err(dd->dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		goto err_get;
+	}
 
 	omap_des_dma_stop(dd);
 
@@ -1148,6 +1159,7 @@ err_algs:
 err_irq:
 	tasklet_kill(&dd->done_task);
 	tasklet_kill(&dd->queue_task);
+err_get:
 	pm_runtime_disable(dev);
 err_res:
 	dd = NULL;
@@ -1191,7 +1203,14 @@ static int omap_des_suspend(struct device *dev)
 
 static int omap_des_resume(struct device *dev)
 {
-	pm_runtime_get_sync(dev);
+	int err;
+
+	err = pm_runtime_get_sync(dev);
+	if (err < 0) {
+		pm_runtime_put_noidle(dev);
+		dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err);
+		return err;
+	}
 	return 0;
 }
 #endif
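
pm_runtime_get_sync() increments the device's usage counter even when the resume callback fails, so a failed call must still be balanced with pm_runtime_put_noidle() before bailing out; that is what the added error handling above does in hw_init, probe and resume. A minimal sketch of the pattern (the wrapper name is hypothetical):

#include <linux/pm_runtime.h>
#include <linux/device.h>

static int my_runtime_get(struct device *dev)
{
	int err;

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		/* usage count was bumped even on failure: drop it again */
		pm_runtime_put_noidle(dev);
		return err;
	}

	return 0;	/* device is powered up and usage count held */
}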
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 9266c0e25492..bace885634f2 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -211,7 +211,7 @@ static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
 static int padlock_cra_init(struct crypto_tfm *tfm)
 {
 	struct crypto_shash *hash = __crypto_shash_cast(tfm);
-	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
 	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct crypto_shash *fallback_tfm;
 	int err = -ENOMEM;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index be45762f390a..4197ad9a711b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
+#include <linux/of.h>
 #include <linux/crypto.h>
 #include <linux/interrupt.h>
 
@@ -29,9 +30,6 @@
 #include <crypto/aes.h>
 #include <crypto/ctr.h>
 
-#include <plat/cpu.h>
-#include <mach/dma.h>
-
 #define _SBF(s, v)                      ((v) << (s))
 #define _BIT(b)                         _SBF(b, 1)
 
@@ -105,7 +103,7 @@
 #define SSS_REG_FCPKDMAO                0x005C
 
 /* AES registers */
-#define SSS_REG_AES_CONTROL             0x4000
+#define SSS_REG_AES_CONTROL		0x00
 #define SSS_AES_BYTESWAP_DI             _BIT(11)
 #define SSS_AES_BYTESWAP_DO             _BIT(10)
 #define SSS_AES_BYTESWAP_IV             _BIT(9)
@@ -121,21 +119,25 @@
 #define SSS_AES_CHAIN_MODE_CTR          _SBF(1, 0x02)
 #define SSS_AES_MODE_DECRYPT            _BIT(0)
 
-#define SSS_REG_AES_STATUS              0x4004
+#define SSS_REG_AES_STATUS		0x04
 #define SSS_AES_BUSY                    _BIT(2)
 #define SSS_AES_INPUT_READY             _BIT(1)
 #define SSS_AES_OUTPUT_READY            _BIT(0)
 
-#define SSS_REG_AES_IN_DATA(s)          (0x4010 + (s << 2))
-#define SSS_REG_AES_OUT_DATA(s)         (0x4020 + (s << 2))
-#define SSS_REG_AES_IV_DATA(s)          (0x4030 + (s << 2))
-#define SSS_REG_AES_CNT_DATA(s)         (0x4040 + (s << 2))
-#define SSS_REG_AES_KEY_DATA(s)         (0x4080 + (s << 2))
+#define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
+#define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
+#define SSS_REG_AES_IV_DATA(s)		(0x30 + (s << 2))
+#define SSS_REG_AES_CNT_DATA(s)		(0x40 + (s << 2))
+#define SSS_REG_AES_KEY_DATA(s)		(0x80 + (s << 2))
 
 #define SSS_REG(dev, reg)               ((dev)->ioaddr + (SSS_REG_##reg))
 #define SSS_READ(dev, reg)              __raw_readl(SSS_REG(dev, reg))
 #define SSS_WRITE(dev, reg, val)        __raw_writel((val), SSS_REG(dev, reg))
 
+#define SSS_AES_REG(dev, reg)           ((dev)->aes_ioaddr + SSS_REG_##reg)
+#define SSS_AES_WRITE(dev, reg, val)    __raw_writel((val), \
+						SSS_AES_REG(dev, reg))
+
 /* HW engine modes */
 #define FLAGS_AES_DECRYPT               _BIT(0)
 #define FLAGS_AES_MODE_MASK             _SBF(1, 0x03)
@@ -145,6 +147,20 @@
 #define AES_KEY_LEN         16
 #define CRYPTO_QUEUE_LEN    1
 
+/**
+ * struct samsung_aes_variant - platform specific SSS driver data
+ * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
+ * @aes_offset: AES register offset from SSS module's base.
+ *
+ * Specifies platform specific configuration of SSS module.
+ * Note: a dedicated structure (rather than a bare offset) is used for the
+ * driver's platform data so that future variants can extend it.
+ */
+struct samsung_aes_variant {
+	bool			    has_hash_irq;
+	unsigned int		    aes_offset;
+};
+
 struct s5p_aes_reqctx {
 	unsigned long mode;
 };
@@ -161,6 +177,7 @@ struct s5p_aes_dev {
 	struct device              *dev;
 	struct clk                 *clk;
 	void __iomem               *ioaddr;
+	void __iomem               *aes_ioaddr;
 	int                         irq_hash;
 	int                         irq_fc;
 
@@ -173,10 +190,48 @@ struct s5p_aes_dev {
 	struct crypto_queue         queue;
 	bool                        busy;
 	spinlock_t                  lock;
+
+	struct samsung_aes_variant *variant;
 };
 
 static struct s5p_aes_dev *s5p_dev;
 
+static const struct samsung_aes_variant s5p_aes_data = {
+	.has_hash_irq	= true,
+	.aes_offset	= 0x4000,
+};
+
+static const struct samsung_aes_variant exynos_aes_data = {
+	.has_hash_irq	= false,
+	.aes_offset	= 0x200,
+};
+
+static const struct of_device_id s5p_sss_dt_match[] = {
+	{
+		.compatible = "samsung,s5pv210-secss",
+		.data = &s5p_aes_data,
+	},
+	{
+		.compatible = "samsung,exynos4210-secss",
+		.data = &exynos_aes_data,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
+
+static inline struct samsung_aes_variant *
+find_s5p_sss_version(struct platform_device *pdev)
+{
+	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
+		const struct of_device_id *match;
+		match = of_match_node(s5p_sss_dt_match,
+					pdev->dev.of_node);
+		return (struct samsung_aes_variant *)match->data;
+	}
+	return (struct samsung_aes_variant *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
 static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
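
find_s5p_sss_version() is the usual dual-probe helper: when the device came from the device tree, the per-variant data is taken from of_match_node(); otherwise it falls back to platform_get_device_id(), which implies a platform_device_id table whose driver_data points at the same variant structures. That table sits outside this hunk's context; a hedged sketch of what such a table conventionally looks like (entry name assumed, not taken from the patch):

/* Sketch only: a legacy (non-DT) id table carrying the variant data. */
static const struct platform_device_id s5p_sss_ids[] = {
	{
		.name		= "s5p-secss",
		.driver_data	= (kernel_ulong_t)&s5p_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(platform, s5p_sss_ids);
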
@@ -272,8 +327,12 @@ static void s5p_aes_tx(struct s5p_aes_dev *dev)
 		}
 
 		s5p_set_dma_outdata(dev, dev->sg_dst);
-	} else
+	} else {
 		s5p_aes_complete(dev, err);
+
+		dev->busy = true;
+		tasklet_schedule(&dev->tasklet);
+	}
 }
 
 static void s5p_aes_rx(struct s5p_aes_dev *dev)
@@ -322,14 +381,15 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
 {
 	void __iomem *keystart;
 
-	memcpy(dev->ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+	if (iv)
+		memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
 
 	if (keylen == AES_KEYSIZE_256)
-		keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(0);
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
 	else if (keylen == AES_KEYSIZE_192)
-		keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(2);
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
 	else
-		keystart = dev->ioaddr + SSS_REG_AES_KEY_DATA(4);
+		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
 
 	memcpy(keystart, key, keylen);
 }
@@ -379,7 +439,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 	if (err)
 		goto outdata_error;
 
-	SSS_WRITE(dev, AES_CONTROL, aes_control);
+	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
 	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
 
 	s5p_set_dma_indata(dev,  req->src);
@@ -410,10 +470,13 @@ static void s5p_tasklet_cb(unsigned long data)
 	spin_lock_irqsave(&dev->lock, flags);
 	backlog   = crypto_get_backlog(&dev->queue);
 	async_req = crypto_dequeue_request(&dev->queue);
-	spin_unlock_irqrestore(&dev->lock, flags);
 
-	if (!async_req)
+	if (!async_req) {
+		dev->busy = false;
+		spin_unlock_irqrestore(&dev->lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -432,14 +495,13 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
 	int err;
 
 	spin_lock_irqsave(&dev->lock, flags);
+	err = ablkcipher_enqueue_request(&dev->queue, req);
 	if (dev->busy) {
-		err = -EAGAIN;
 		spin_unlock_irqrestore(&dev->lock, flags);
 		goto exit;
 	}
 	dev->busy = true;
 
-	err = ablkcipher_enqueue_request(&dev->queue, req);
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	tasklet_schedule(&dev->tasklet);
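
The three s5p-sss hunks above (s5p_aes_tx, s5p_tasklet_cb and s5p_aes_handle_req) are one logical fix to request scheduling: instead of bouncing new requests with -EAGAIN while the engine is busy, every request is now enqueued under the queue lock, the busy flag is cleared only while that lock is held and the queue is empty, and the completion path reschedules the tasklet so queued work keeps draining. A condensed sketch of the submit side after the fix, with a hypothetical struct my_dev mirroring the driver's fields:

/* Sketch only: always enqueue; kick the tasklet only when the engine was idle. */
static int my_handle_req(struct my_dev *dd, struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->busy) {			/* tasklet is running and will see it */
		spin_unlock_irqrestore(&dd->lock, flags);
		return err;
	}
	dd->busy = true;		/* claim the engine before dropping the lock */
	spin_unlock_irqrestore(&dd->lock, flags);

	tasklet_schedule(&dd->tasklet);
	return err;
}
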
@@ -564,6 +626,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	struct s5p_aes_dev *pdata;
 	struct device      *dev = &pdev->dev;
 	struct resource    *res;
+	struct samsung_aes_variant *variant;
 
 	if (s5p_dev)
 		return -EEXIST;
@@ -577,30 +640,25 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	if (IS_ERR(pdata->ioaddr))
 		return PTR_ERR(pdata->ioaddr);
 
+	variant = find_s5p_sss_version(pdev);
+
 	pdata->clk = devm_clk_get(dev, "secss");
 	if (IS_ERR(pdata->clk)) {
 		dev_err(dev, "failed to find secss clock source\n");
 		return -ENOENT;
 	}
 
-	clk_enable(pdata->clk);
+	err = clk_prepare_enable(pdata->clk);
+	if (err < 0) {
+		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
+		return err;
+	}
 
 	spin_lock_init(&pdata->lock);
 
-	pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
-	if (pdata->irq_hash < 0) {
-		err = pdata->irq_hash;
-		dev_warn(dev, "hash interrupt is not available.\n");
-		goto err_irq;
-	}
-	err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
-	if (err < 0) {
-		dev_warn(dev, "hash interrupt is not available.\n");
-		goto err_irq;
-	}
+	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
 
-	pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
+	pdata->irq_fc = platform_get_irq(pdev, 0);
 	if (pdata->irq_fc < 0) {
 		err = pdata->irq_fc;
 		dev_warn(dev, "feed control interrupt is not available.\n");
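
Two smaller conversions sit in this probe hunk. clk_enable() becomes clk_prepare_enable(), with the return value checked: under the common clock framework a clock must be prepared (a possibly sleeping step) before it can be enabled, so the combined helper is the right call in process context, and the teardown paths below switch to the matching clk_disable_unprepare(). The interrupt lookup also moves from platform_get_irq_byname() to index-based platform_get_irq(), since the hash IRQ is now requested only when the variant has one. A tiny sketch of the balanced clock calls:

	/* Sketch: prepare + enable at probe time ... */
	err = clk_prepare_enable(pdata->clk);
	if (err) {
		dev_err(dev, "enabling clock failed: %d\n", err);
		return err;
	}

	/* ... and the mirror call on the error and remove paths. */
	clk_disable_unprepare(pdata->clk);
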
@@ -613,6 +671,23 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
+	if (variant->has_hash_irq) {
+		pdata->irq_hash = platform_get_irq(pdev, 1);
+		if (pdata->irq_hash < 0) {
+			err = pdata->irq_hash;
+			dev_warn(dev, "hash interrupt is not available.\n");
+			goto err_irq;
+		}
+		err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
+				       IRQF_SHARED, pdev->name, pdev);
+		if (err < 0) {
+			dev_warn(dev, "hash interrupt is not available.\n");
+			goto err_irq;
+		}
+	}
+
+	pdata->busy = false;
+	pdata->variant = variant;
 	pdata->dev = dev;
 	platform_set_drvdata(pdev, pdata);
 	s5p_dev = pdata;
@@ -639,7 +714,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	tasklet_kill(&pdata->tasklet);
 
  err_irq:
-	clk_disable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
 
 	s5p_dev = NULL;
 
@@ -659,7 +734,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
 
 	tasklet_kill(&pdata->tasklet);
 
-	clk_disable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
 
 	s5p_dev = NULL;
 
@@ -672,6 +747,7 @@ static struct platform_driver s5p_aes_crypto = {
 	.driver	= {
 		.owner	= THIS_MODULE,
 		.name	= "s5p-secss",
+		.of_match_table = s5p_sss_dt_match,
 	},
 };
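
Wiring s5p_sss_dt_match into .of_match_table is the piece that lets the OF core bind the compatibles declared earlier ("samsung,s5pv210-secss", "samsung,exynos4210-secss") to this driver; MODULE_DEVICE_TABLE additionally exports them for module autoloading. A hedged sketch of the complete registration, filling in the probe/remove members that sit outside this hunk (s5p_aes_probe and s5p_aes_remove are the handlers named in the hunk headers above):

/* Sketch only: how the patch's pieces meet at driver registration. */
static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "s5p-secss",
		.of_match_table	= s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);
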
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 07a5987ce67d..164e1ec624e3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -728,7 +728,7 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 
 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 {
-	const char *name = tfm->__crt_alg->cra_name;
+	const char *name = crypto_tfm_alg_name(tfm);
 	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	ctx->fallback = crypto_alloc_ablkcipher(name, 0,