Diffstat (limited to 'drivers')
 drivers/char/hw_random/arm_smccc_trng.c             |    4
 drivers/char/hw_random/core.c                       |   55
 drivers/char/hw_random/imx-rngc.c                   |   51
 drivers/crypto/Kconfig                              |    3
 drivers/crypto/Makefile                             |    1
 drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c   |   16
 drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c   |    6
 drivers/crypto/amlogic/amlogic-gxl-cipher.c         |    6
 drivers/crypto/aspeed/Kconfig                       |   48
 drivers/crypto/aspeed/Makefile                      |    7
 drivers/crypto/aspeed/aspeed-hace-crypto.c          | 1133
 drivers/crypto/aspeed/aspeed-hace-hash.c            | 1391
 drivers/crypto/aspeed/aspeed-hace.c                 |  284
 drivers/crypto/aspeed/aspeed-hace.h                 |  298
 drivers/crypto/axis/artpec6_crypto.c                |    6
 drivers/crypto/bcm/cipher.c                         |    4
 drivers/crypto/bcm/cipher.h                         |    2
 drivers/crypto/cavium/cpt/cpt_hw_types.h            |    2
 drivers/crypto/cavium/cpt/cptpf_main.c              |    6
 drivers/crypto/cavium/zip/zip_crypto.c              |   30
 drivers/crypto/ccp/ccp-crypto-des3.c                |    5
 drivers/crypto/ccp/ccp-dmaengine.c                  |    6
 drivers/crypto/ccp/sev-dev.c                        |   78
 drivers/crypto/ccree/cc_buffer_mgr.c                |    2
 drivers/crypto/hisilicon/hpre/hpre.h                |    8
 drivers/crypto/hisilicon/hpre/hpre_crypto.c         |  250
 drivers/crypto/hisilicon/hpre/hpre_main.c           |  216
 drivers/crypto/hisilicon/qm.c                       |  906
 drivers/crypto/hisilicon/sec2/sec.h                 |   34
 drivers/crypto/hisilicon/sec2/sec_crypto.c          |  456
 drivers/crypto/hisilicon/sec2/sec_main.c            |  160
 drivers/crypto/hisilicon/zip/zip.h                  |    3
 drivers/crypto/hisilicon/zip/zip_crypto.c           |  134
 drivers/crypto/hisilicon/zip/zip_main.c             |  266
 drivers/crypto/inside-secure/safexcel_cipher.c      |   60
 drivers/crypto/inside-secure/safexcel_hash.c        |   67
 drivers/crypto/keembay/Kconfig                      |    4
 drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h  |    2
 drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c   |   24
 drivers/crypto/marvell/octeontx/otx_cptvf_main.c    |    8
 drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c    |   20
 drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c |    4
 drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c  |    5
 drivers/crypto/n2_core.c                            |    2
 drivers/crypto/nx/nx-aes-ccm.c                      |    5
 drivers/crypto/qat/qat_common/adf_cfg.c             |    6
 drivers/crypto/qat/qat_common/adf_ctl_drv.c         |   10
 drivers/crypto/qat/qat_common/adf_gen4_hw_data.h    |    2
 drivers/crypto/qat/qat_common/adf_transport_debug.c |    2
 drivers/crypto/qat/qat_common/icp_qat_uclo.h        |    3
 drivers/crypto/qat/qat_common/qat_algs.c            |   18
 drivers/crypto/qat/qat_common/qat_asym_algs.c       |   24
 drivers/crypto/qat/qat_common/qat_uclo.c            |   56
 drivers/crypto/qce/aead.c                           |    4
 drivers/crypto/qce/sha.c                            |    8
 drivers/crypto/qce/skcipher.c                       |    8
 drivers/crypto/qcom-rng.c                           |    7
 drivers/crypto/sahara.c                             |   22
 drivers/net/Kconfig                                 |    2
 drivers/net/wireless/ath/ath9k/rng.c                |    3
 60 files changed, 5137 insertions(+), 1116 deletions(-)
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index b24ac39a903b..e34c3ea692b6 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 				  MAX_BITS_PER_CALL);
 
 		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
-		if ((int)res.a0 < 0)
-			return (int)res.a0;
 
 		switch ((int)res.a0) {
 		case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 				return copied;
 			cond_resched();
 			break;
+		default:
+			return -EIO;
 		}
 	}
 
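The rework above moves every SMCCC status code into the switch, so an
unrecognized (including negative) res.a0 value now maps to -EIO instead of
leaking back to the caller as a raw firmware value. A condensed sketch of the
resulting loop; the non-SUCCESS cases are elided here, the hunk above carries
the driver's actual set:

	while (copied < max) {
		size_t bits = min_t(size_t, (max - copied) * BITS_PER_BYTE,
				    MAX_BITS_PER_CALL);

		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);

		switch ((int)res.a0) {
		case SMCCC_RET_SUCCESS:
			/* copy the returned entropy words into the buffer */
			break;
		/* ... other recognized status codes, as in the hunk above ... */
		default:
			return -EIO;	/* unknown or negative codes alike */
		}
	}
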
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 16f227b995e8..cc002b0c2f0c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(default_quality,
 
 static void drop_current_rng(void);
 static int hwrng_init(struct hwrng *rng);
-static void hwrng_manage_rngd(struct hwrng *rng);
+static int hwrng_fillfn(void *unused);
 
 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
 			       int wait);
@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
 	drop_current_rng();
 	current_rng = rng;
 
+	/* if necessary, start hwrng thread */
+	if (!hwrng_fill) {
+		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+		if (IS_ERR(hwrng_fill)) {
+			pr_err("hwrng_fill thread creation failed\n");
+			hwrng_fill = NULL;
+		}
+	}
+
 	return 0;
 }
 
@@ -167,8 +176,6 @@ skip_init:
 		rng->quality = 1024;
 	current_quality = rng->quality; /* obsolete */
 
-	hwrng_manage_rngd(rng);
-
 	return 0;
 }
 
@@ -454,10 +461,6 @@ static ssize_t rng_quality_store(struct device *dev,
 	/* the best available RNG may have changed */
 	ret = enable_best_rng();
 
-	/* start/stop rngd if necessary */
-	if (current_rng)
-		hwrng_manage_rngd(current_rng);
-
 out:
 	mutex_unlock(&rng_mutex);
 	return ret ? ret : len;
@@ -507,16 +510,14 @@ static int hwrng_fillfn(void *unused)
 			rng->quality = current_quality; /* obsolete */
 		quality = rng->quality;
 		mutex_unlock(&reading_mutex);
-		put_rng(rng);
 
-		if (!quality)
-			break;
+		if (rc <= 0)
+			hwrng_msleep(rng, 10000);
 
-		if (rc <= 0) {
-			pr_warn("hwrng: no data available\n");
-			msleep_interruptible(10000);
+		put_rng(rng);
+
+		if (rc <= 0)
 			continue;
-		}
 
 		/* If we cannot credit at least one bit of entropy,
 		 * keep track of the remainder for the next iteration
@@ -533,22 +534,6 @@ static int hwrng_fillfn(void *unused)
 	return 0;
 }
 
-static void hwrng_manage_rngd(struct hwrng *rng)
-{
-	if (WARN_ON(!mutex_is_locked(&rng_mutex)))
-		return;
-
-	if (rng->quality == 0 && hwrng_fill)
-		kthread_stop(hwrng_fill);
-	if (rng->quality > 0 && !hwrng_fill) {
-		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-		if (IS_ERR(hwrng_fill)) {
-			pr_err("hwrng_fill thread creation failed\n");
-			hwrng_fill = NULL;
-		}
-	}
-}
-
 int hwrng_register(struct hwrng *rng)
 {
 	int err = -EINVAL;
@@ -570,6 +555,7 @@ int hwrng_register(struct hwrng *rng)
 
 	init_completion(&rng->cleanup_done);
 	complete(&rng->cleanup_done);
+	init_completion(&rng->dying);
 
 	if (!current_rng ||
 	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
@@ -617,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
 
 	old_rng = current_rng;
 	list_del(&rng->list);
+	complete_all(&rng->dying);
 	if (current_rng == rng) {
 		err = enable_best_rng();
 		if (err) {
@@ -685,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
 }
 EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
 
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
 static int __init hwrng_modinit(void)
 {
 	int ret;
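
Two of the core.c changes work together: hwrng_register() now initializes a
"dying" completion that hwrng_unregister() fires, and the new hwrng_msleep()
helper sleeps on that completion instead of an uninterruptible timer, so both
the hwrng kthread and driver read hooks wake the moment their device goes
away. A minimal sketch of a driver read callback built on it; the foo_*
device, ready-check and copy helpers are hypothetical:

	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
	{
		struct foo_rng *priv = container_of(rng, struct foo_rng, rng);

		while (!foo_data_ready(priv)) {		/* hypothetical */
			if (!wait)
				return 0;
			/*
			 * Sleep up to 1s; a non-zero return means we were woken
			 * early, either by a signal or because hwrng_unregister()
			 * completed rng->dying.
			 */
			if (hwrng_msleep(rng, 1000))
				return -ENODEV;
		}

		return foo_copy_random(priv, buf, max);	/* hypothetical */
	}
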
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index b05d676ca814..a1c24148ed31 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
 	if (IS_ERR(rngc->base))
 		return PTR_ERR(rngc->base);
 
-	rngc->clk = devm_clk_get(&pdev->dev, NULL);
+	rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
 	if (IS_ERR(rngc->clk)) {
 		dev_err(&pdev->dev, "Can not get rng_clk\n");
 		return PTR_ERR(rngc->clk);
@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
 	if (irq < 0)
 		return irq;
 
-	ret = clk_prepare_enable(rngc->clk);
-	if (ret)
-		return ret;
-
 	ver_id = readl(rngc->base + RNGC_VER_ID);
 	rng_type = ver_id >> RNGC_TYPE_SHIFT;
 	/*
 	 * This driver supports only RNGC and RNGB. (There's a different
 	 * driver for RNGA.)
 	 */
-	if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
-		ret = -ENODEV;
-		goto err;
-	}
-
-	ret = devm_request_irq(&pdev->dev,
-			irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
-	if (ret) {
-		dev_err(rngc->dev, "Can't get interrupt working.\n");
-		goto err;
-	}
+	if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+		return -ENODEV;
 
 	init_completion(&rngc->rng_op_done);
 
@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
 
 	imx_rngc_irq_mask_clear(rngc);
 
+	ret = devm_request_irq(&pdev->dev,
+			irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+	if (ret) {
+		dev_err(rngc->dev, "Can't get interrupt working.\n");
+		return ret;
+	}
+
 	if (self_test) {
 		ret = imx_rngc_self_test(rngc);
 		if (ret) {
 			dev_err(rngc->dev, "self test failed\n");
-			goto err;
+			return ret;
 		}
 	}
 
-	ret = hwrng_register(&rngc->rng);
+	ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
 	if (ret) {
 		dev_err(&pdev->dev, "hwrng registration failed\n");
-		goto err;
+		return ret;
 	}
 
 	dev_info(&pdev->dev,
@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
 		rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
 		(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
 	return 0;
-
-err:
-	clk_disable_unprepare(rngc->clk);
-
-	return ret;
-}
-
-static int __exit imx_rngc_remove(struct platform_device *pdev)
-{
-	struct imx_rngc *rngc = platform_get_drvdata(pdev);
-
-	hwrng_unregister(&rngc->rng);
-
-	clk_disable_unprepare(rngc->clk);
-
-	return 0;
 }
 
 static int __maybe_unused imx_rngc_suspend(struct device *dev)
@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
 
 static struct platform_driver imx_rngc_driver = {
 	.driver = {
-		.name = "imx_rngc",
+		.name = KBUILD_MODNAME,
 		.pm = &imx_rngc_pm_ops,
 		.of_match_table = imx_rngc_dt_ids,
 	},
-	.remove = __exit_p(imx_rngc_remove),
 };
 
 module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
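
The probe conversion above is the standard devm pattern: once the clock
(devm_clk_get_enabled), IRQ (devm_request_irq) and hwrng registration
(devm_hwrng_register) are all device-managed, every error path becomes a plain
return and the remove callback can be deleted outright. A minimal sketch of
the shape this leaves behind; the foo_* names and the priv/irq setup are
hypothetical:

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		/* acquired and enabled now; disabled and put automatically */
		clk = devm_clk_get_enabled(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* from here on, a bare return unwinds everything via devm */
		ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				       pdev->name, priv);
		if (ret)
			return ret;

		/* unregistered automatically when the device is unbound */
		return devm_hwrng_register(&pdev->dev, &priv->rng);
	}
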
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3e6aa319920b..55e75fbb658e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -802,9 +802,7 @@ source "drivers/crypto/amlogic/Kconfig"
 config CRYPTO_DEV_SA2UL
 	tristate "Support for TI security accelerator"
 	depends on ARCH_K3 || COMPILE_TEST
-	select ARM64_CRYPTO
 	select CRYPTO_AES
-	select CRYPTO_AES_ARM64
 	select CRYPTO_ALGAPI
 	select CRYPTO_AUTHENC
 	select CRYPTO_SHA1
@@ -818,5 +816,6 @@ config CRYPTO_DEV_SA2UL
 	  acceleration for cryptographic algorithms on these devices.
 
 source "drivers/crypto/keembay/Kconfig"
+source "drivers/crypto/aspeed/Kconfig"
 
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f81703a86b98..116de173a66c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 44b8fc4b786d..006e40133c28 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
 #endif
 };
 
-static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
 {
 	unsigned int i;
 
@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
 	}
 	return 0;
 }
-
-static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun4i_ss_debugfs_fops = {
-	.owner = THIS_MODULE,
-	.open = sun4i_ss_dbgfs_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
 
 /*
  * Power management strategy: The device is suspended unless a TFM exists for
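
DEFINE_SHOW_ATTRIBUTE() generates exactly the boilerplate deleted above: given
a function named <name>_show, it emits a <name>_open wrapper around
single_open() plus a const <name>_fops ready to hand to debugfs. A simplified
equivalent of what DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs) expands to:

	static int sun4i_ss_debugfs_open(struct inode *inode, struct file *file)
	{
		return single_open(file, sun4i_ss_debugfs_show, inode->i_private);
	}

	static const struct file_operations sun4i_ss_debugfs_fops = {
		.owner		= THIS_MODULE,
		.open		= sun4i_ss_debugfs_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
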
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 19cd2e52f89d..c4b0a8b58842 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
 		goto err_dst;
 	}
 
-	err = pm_runtime_get_sync(ce->dev);
-	if (err < 0) {
-		pm_runtime_put_noidle(ce->dev);
+	err = pm_runtime_resume_and_get(ce->dev);
+	if (err < 0)
 		goto err_pm;
-	}
 
 	mutex_lock(&ce->rnglock);
 	chan = &ce->chanlist[flow];
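
pm_runtime_resume_and_get() bundles pm_runtime_get_sync() with the
pm_runtime_put_noidle() that a failed resume requires, which is why the error
branch above shrinks to a bare goto. The before/after pattern in isolation:

	/* before: the usage count must be dropped by hand on failure */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		return err;
	}

	/* after: the helper drops the count itself when resume fails */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;
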
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e79514fce731..af017a087ebf 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq)
 	if (areq->src == areq->dst) {
 		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
 				    DMA_BIDIRECTIONAL);
-		if (nr_sgs < 0) {
+		if (!nr_sgs) {
 			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
 			err = -EINVAL;
 			goto theend;
@@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq)
 	} else {
 		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
 				    DMA_TO_DEVICE);
-		if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+		if (!nr_sgs || nr_sgs > MAXDESC - 3) {
 			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
 			err = -EINVAL;
 			goto theend;
 		}
 		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
 				    DMA_FROM_DEVICE);
-		if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+		if (!nr_sgd || nr_sgd > MAXDESC - 3) {
 			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
 			err = -EINVAL;
 			goto theend;
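
The checks above change because dma_map_sg() returns the number of entries
actually mapped, with 0 on failure; it never returns a negative errno, so the
old "nr_sgs < 0" tests were dead code. The correct idiom:

	/* dma_map_sg() returns 0 on failure, never a negative value */
	nr_sgs = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
	if (!nr_sgs || nr_sgs > MAXDESC - 3)	/* failed or too many segments */
		return -EINVAL;
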
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
new file mode 100644
index 000000000000..ae2710ae8d8f
--- /dev/null
+++ b/drivers/crypto/aspeed/Kconfig
@@ -0,0 +1,48 @@
+config CRYPTO_DEV_ASPEED
+	tristate "Support for Aspeed cryptographic engine driver"
+	depends on ARCH_ASPEED || COMPILE_TEST
+	select CRYPTO_ENGINE
+	help
+	  Hash and Crypto Engine (HACE) is designed to accelerate the
+	  throughput of hash data digest, encryption and decryption.
+
+	  Select y here to enable support for the cryptographic engine
+	  available on Aspeed SoCs.
+
+config CRYPTO_DEV_ASPEED_DEBUG
+	bool "Enable Aspeed crypto debug messages"
+	depends on CRYPTO_DEV_ASPEED
+	help
+	  Enable this option to have the Aspeed crypto driver print
+	  debugging messages.
+	  Avoid enabling this option in production builds; the extra
+	  messages add overhead to the driver's timing.
+
+config CRYPTO_DEV_ASPEED_HACE_HASH
+	bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
+	depends on CRYPTO_DEV_ASPEED
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
+	select CRYPTO_HMAC
+	help
+	  Select here to enable Aspeed Hash & Crypto Engine (HACE)
+	  hash driver.
+	  Supports multiple message digest standards, including
+	  SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on.
+
+config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+	bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
+	depends on CRYPTO_DEV_ASPEED
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_ECB
+	select CRYPTO_CBC
+	select CRYPTO_CFB
+	select CRYPTO_OFB
+	select CRYPTO_CTR
+	help
+	  Select here to enable Aspeed Hash & Crypto Engine (HACE)
+	  crypto driver.
+	  Supports AES/DES symmetric-key encryption and decryption
+	  with ECB/CBC/CFB/OFB/CTR options.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
new file mode 100644
index 000000000000..a0ed40ddaad1
--- /dev/null
+++ b/drivers/crypto/aspeed/Makefile
@@ -0,0 +1,7 @@
+hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
+hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
+aspeed_crypto-objs := aspeed-hace.o	\
+		      $(hace-hash-y)	\
+		      $(hace-crypto-y)
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
new file mode 100644
index 000000000000..ef73b0028b4d
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define CIPHER_DBG(h, fmt, ...)	\
+	dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define CIPHER_DBG(h, fmt, ...)	\
+	dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
+{
+	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err;
+
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+				      areq->base.complete, areq->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+				   areq->cryptlen, areq->iv);
+
+	if (rctx->enc_cmd & HACE_CMD_ENCRYPT)
+		err = crypto_skcipher_encrypt(&rctx->fallback_req);
+	else
+		err = crypto_skcipher_decrypt(&rctx->fallback_req);
+
+	return err;
+}
+
+static bool aspeed_crypto_need_fallback(struct skcipher_request *areq)
+{
+	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+
+	if (areq->cryptlen == 0)
+		return true;
+
+	if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) &&
+	    !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE))
+		return true;
+
+	if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) &&
+	    !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE))
+		return true;
+
+	return false;
+}
+
+static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev,
+					   struct skcipher_request *req)
+{
+	if (hace_dev->version == AST2500_VERSION &&
+	    aspeed_crypto_need_fallback(req)) {
+		CIPHER_DBG(hace_dev, "SW fallback\n");
+		return aspeed_crypto_do_fallback(req);
+	}
+
+	return crypto_transfer_skcipher_request_to_engine(
+			hace_dev->crypt_engine_crypto, req);
+}
+
+static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq)
+{
+	struct skcipher_request *req = skcipher_request_cast(areq);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+	struct aspeed_engine_crypto *crypto_engine;
+	int rc;
+
+	crypto_engine = &hace_dev->crypto_engine;
+	crypto_engine->req = req;
+	crypto_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+	rc = ctx->start(hace_dev);
+
+	if (rc != -EINPROGRESS)
+		return -EIO;
+
+	return 0;
+}
+
+static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err)
+{
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_cipher_reqctx *rctx;
+	struct skcipher_request *req;
+
+	CIPHER_DBG(hace_dev, "\n");
+
+	req = crypto_engine->req;
+	rctx = skcipher_request_ctx(req);
+
+	if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+		if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+			memcpy(req->iv, crypto_engine->cipher_ctx +
+			       DES_KEY_SIZE, DES_KEY_SIZE);
+		else
+			memcpy(req->iv, crypto_engine->cipher_ctx,
+			       AES_BLOCK_SIZE);
+	}
+
+	crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+	crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req,
+					 err);
+
+	return err;
+}
+
+static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct device *dev = hace_dev->dev;
+	struct aspeed_cipher_reqctx *rctx;
+	struct skcipher_request *req;
+
+	CIPHER_DBG(hace_dev, "\n");
+
+	req = crypto_engine->req;
+	rctx = skcipher_request_ctx(req);
+
+	if (req->src == req->dst) {
+		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE);
+	}
+
+	return aspeed_sk_complete(hace_dev, 0);
+}
+
+static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_cipher_reqctx *rctx;
+	struct skcipher_request *req;
+	struct scatterlist *out_sg;
+	int nbytes = 0;
+	int rc = 0;
+
+	req = crypto_engine->req;
+	rctx = skcipher_request_ctx(req);
+	out_sg = req->dst;
+
+	/* Copy output buffer to dst scatter-gather lists */
+	nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents,
+				     crypto_engine->cipher_addr, req->cryptlen);
+	if (!nbytes) {
+		dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+			 "nbytes", nbytes, "cryptlen", req->cryptlen);
+		rc = -EINVAL;
+	}
+
+	CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+		   "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+		   "nb_out_sg", rctx->dst_nents,
+		   "cipher addr", crypto_engine->cipher_addr);
+
+	return aspeed_sk_complete(hace_dev, rc);
+}
+
+static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_cipher_reqctx *rctx;
+	struct skcipher_request *req;
+	struct scatterlist *in_sg;
+	int nbytes;
+
+	req = crypto_engine->req;
+	rctx = skcipher_request_ctx(req);
+	in_sg = req->src;
+
+	nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents,
+				   crypto_engine->cipher_addr, req->cryptlen);
+
+	CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+		   "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+		   "nb_in_sg", rctx->src_nents,
+		   "cipher addr", crypto_engine->cipher_addr);
+
+	if (!nbytes) {
+		dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+			 "nbytes", nbytes, "cryptlen", req->cryptlen);
+		return -EINVAL;
+	}
+
+	crypto_engine->resume = aspeed_sk_transfer;
+
+	/* Trigger engines */
+	ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+		       ASPEED_HACE_SRC);
+	ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+		       ASPEED_HACE_DEST);
+	ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+	ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+	return -EINPROGRESS;
+}
+
+static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_sg_list *src_list, *dst_list;
+	dma_addr_t src_dma_addr, dst_dma_addr;
+	struct aspeed_cipher_reqctx *rctx;
+	struct skcipher_request *req;
+	struct scatterlist *s;
+	int src_sg_len;
+	int dst_sg_len;
+	int total, i;
+	int rc;
+
+	CIPHER_DBG(hace_dev, "\n");
+
+	req = crypto_engine->req;
+	rctx = skcipher_request_ctx(req);
+
+	rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
+			 HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
+
+	/* BIDIRECTIONAL */
+	if (req->dst == req->src) {
+		src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+					rctx->src_nents, DMA_BIDIRECTIONAL);
+		dst_sg_len = src_sg_len;
+		if (!src_sg_len) {
+			dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+			return -EINVAL;
+		}
+
+	} else {
+		src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+					rctx->src_nents, DMA_TO_DEVICE);
+		if (!src_sg_len) {
+			dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+			return -EINVAL;
+		}
+
+		dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
+					rctx->dst_nents, DMA_FROM_DEVICE);
+		if (!dst_sg_len) {
+			dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
+			rc = -EINVAL;
+			goto free_req_src;
+		}
+	}
+
+	src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
+	src_dma_addr = crypto_engine->cipher_dma_addr;
+	total = req->cryptlen;
+
+	for_each_sg(req->src, s, src_sg_len, i) {
+		u32 phy_addr = sg_dma_address(s);
+		u32 len = sg_dma_len(s);
+
+		if (total > len)
+			total -= len;
+		else {
+			/* last sg list */
+			len = total;
+			len |= BIT(31);
+			total = 0;
+		}
+
+		src_list[i].phy_addr = cpu_to_le32(phy_addr);
+		src_list[i].len = cpu_to_le32(len);
+	}
+
+	if (total != 0) {
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	if (req->dst == req->src) {
+		dst_list = src_list;
+		dst_dma_addr = src_dma_addr;
+
+	} else {
+		dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
+		dst_dma_addr = crypto_engine->dst_sg_dma_addr;
+		total = req->cryptlen;
+
+		for_each_sg(req->dst, s, dst_sg_len, i) {
+			u32 phy_addr = sg_dma_address(s);
+			u32 len = sg_dma_len(s);
+
+			if (total > len)
+				total -= len;
+			else {
+				/* last sg list */
+				len = total;
+				len |= BIT(31);
+				total = 0;
+			}
+
+			dst_list[i].phy_addr = cpu_to_le32(phy_addr);
+			dst_list[i].len = cpu_to_le32(len);
+
+		}
+
+		dst_list[dst_sg_len].phy_addr = 0;
+		dst_list[dst_sg_len].len = 0;
+	}
+
+	if (total != 0) {
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	crypto_engine->resume = aspeed_sk_transfer_sg;
+
+	/* Memory barrier to ensure all data setup before engine starts */
+	mb();
+
+	/* Trigger engines */
+	ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
+	ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
+	ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+	ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+	return -EINPROGRESS;
+
+free_req:
+	if (req->dst == req->src) {
+		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+			     DMA_BIDIRECTIONAL);
+
+	} else {
+		dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+			     DMA_TO_DEVICE);
+	}
+
+	return rc;
+
+free_req_src:
+	dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+
+	return rc;
+}
+
+static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_cipher_reqctx *rctx;
+	struct crypto_skcipher *cipher;
+	struct aspeed_cipher_ctx *ctx;
+	struct skcipher_request *req;
+
+	CIPHER_DBG(hace_dev, "\n");
+
+	req = crypto_engine->req;
+	rctx = skcipher_request_ctx(req);
+	cipher = crypto_skcipher_reqtfm(req);
+	ctx = crypto_skcipher_ctx(cipher);
+
+	/* enable interrupt */
+	rctx->enc_cmd |= HACE_CMD_ISR_EN;
+
+	rctx->dst_nents = sg_nents(req->dst);
+	rctx->src_nents = sg_nents(req->src);
+
+	ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
+		       ASPEED_HACE_CONTEXT);
+
+	if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+		if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+			memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE,
+			       req->iv, DES_BLOCK_SIZE);
+		else
+			memcpy(crypto_engine->cipher_ctx, req->iv,
+			       AES_BLOCK_SIZE);
+	}
+
+	if (hace_dev->version == AST2600_VERSION) {
+		memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);
+
+		return aspeed_sk_start_sg(hace_dev);
+	}
+
+	memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH);
+
+	return aspeed_sk_start(hace_dev);
+}
+
+static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd)
+{
+	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+	u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+	CIPHER_DBG(hace_dev, "\n");
+
+	if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+		if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
+			return -EINVAL;
+	}
+
+	rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+			HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE |
+			HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+	return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+	int rc;
+
+	CIPHER_DBG(hace_dev, "keylen: %d bytes\n", keylen);
+
+	if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+		dev_warn(hace_dev->dev, "invalid keylen: %d bytes\n", keylen);
+		return -EINVAL;
+	}
+
+	if (keylen == DES_KEY_SIZE) {
+		rc = crypto_des_verify_key(tfm, key);
+		if (rc)
+			return rc;
+
+	} else if (keylen == DES3_EDE_KEY_SIZE) {
+		rc = crypto_des3_ede_verify_key(tfm, key);
+		if (rc)
+			return rc;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_len = keylen;
+
+	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+				  CRYPTO_TFM_REQ_MASK);
+
+	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+				HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_des_ctr_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+				HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd)
+{
+	struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+	u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+	if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+		if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+			return -EINVAL;
+	}
+
+	CIPHER_DBG(hace_dev, "%s\n",
+		   (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt");
+
+	cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+	       HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+	switch (ctx->key_len) {
+	case AES_KEYSIZE_128:
+		cmd |= HACE_CMD_AES128;
+		break;
+	case AES_KEYSIZE_192:
+		cmd |= HACE_CMD_AES192;
+		break;
+	case AES_KEYSIZE_256:
+		cmd |= HACE_CMD_AES256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rctx->enc_cmd = cmd;
+
+	return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+			     unsigned int keylen)
+{
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+	struct crypto_aes_ctx gen_aes_key;
+
+	CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
+
+	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+	    keylen != AES_KEYSIZE_256)
+		return -EINVAL;
+
+	if (ctx->hace_dev->version == AST2500_VERSION) {
+		aes_expandkey(&gen_aes_key, key, keylen);
+		memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
+
+	} else {
+		memcpy(ctx->key, key, keylen);
+	}
+
+	ctx->key_len = keylen;
+
+	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+				  CRYPTO_TFM_REQ_MASK);
+
+	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_aes_ctr_decrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_cbc_encrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_ecb_decrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_aes_ecb_encrypt(struct skcipher_request *req)
+{
+	return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
+{
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	const char *name = crypto_tfm_alg_name(&tfm->base);
+	struct aspeed_hace_alg *crypto_alg;
+
+	crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+	ctx->hace_dev = crypto_alg->hace_dev;
+	ctx->start = aspeed_hace_skcipher_trigger;
+
+	CIPHER_DBG(ctx->hace_dev, "%s\n", name);
+
+	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC |
+						  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->fallback_tfm)) {
+		dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+			name, PTR_ERR(ctx->fallback_tfm));
+		return PTR_ERR(ctx->fallback_tfm);
+	}
+
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
+			 crypto_skcipher_reqsize(ctx->fallback_tfm));
+
+	ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
+	ctx->enginectx.op.prepare_request = NULL;
+	ctx->enginectx.op.unprepare_request = NULL;
+
+	return 0;
+}
+
+static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
+{
+	struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+
+	CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base));
+	crypto_free_skcipher(ctx->fallback_tfm);
+}
+
+static struct aspeed_hace_alg aspeed_crypto_algs[] = {
+	{
+		.alg.skcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aspeed_aes_setkey,
+			.encrypt	= aspeed_aes_ecb_encrypt,
+			.decrypt	= aspeed_aes_ecb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ecb(aes)",
+				.cra_driver_name	= "aspeed-ecb-aes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= AES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aspeed_aes_setkey,
+			.encrypt	= aspeed_aes_cbc_encrypt,
+			.decrypt	= aspeed_aes_cbc_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "cbc(aes)",
+				.cra_driver_name	= "aspeed-cbc-aes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= AES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aspeed_aes_setkey,
+			.encrypt	= aspeed_aes_cfb_encrypt,
+			.decrypt	= aspeed_aes_cfb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "cfb(aes)",
+				.cra_driver_name	= "aspeed-cfb-aes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= 1,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aspeed_aes_setkey,
+			.encrypt	= aspeed_aes_ofb_encrypt,
+			.decrypt	= aspeed_aes_ofb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ofb(aes)",
+				.cra_driver_name	= "aspeed-ofb-aes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= 1,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_des_ecb_encrypt,
+			.decrypt	= aspeed_des_ecb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ecb(des)",
+				.cra_driver_name	= "aspeed-ecb-des",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_des_cbc_encrypt,
+			.decrypt	= aspeed_des_cbc_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "cbc(des)",
+				.cra_driver_name	= "aspeed-cbc-des",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_des_cfb_encrypt,
+			.decrypt	= aspeed_des_cfb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "cfb(des)",
+				.cra_driver_name	= "aspeed-cfb-des",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_des_ofb_encrypt,
+			.decrypt	= aspeed_des_ofb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ofb(des)",
+				.cra_driver_name	= "aspeed-ofb-des",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_tdes_ecb_encrypt,
+			.decrypt	= aspeed_tdes_ecb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ecb(des3_ede)",
+				.cra_driver_name	= "aspeed-ecb-tdes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_tdes_cbc_encrypt,
+			.decrypt	= aspeed_tdes_cbc_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "cbc(des3_ede)",
+				.cra_driver_name	= "aspeed-cbc-tdes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_tdes_cfb_encrypt,
+			.decrypt	= aspeed_tdes_cfb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "cfb(des3_ede)",
+				.cra_driver_name	= "aspeed-cfb-tdes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_tdes_ofb_encrypt,
+			.decrypt	= aspeed_tdes_ofb_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ofb(des3_ede)",
+				.cra_driver_name	= "aspeed-ofb-tdes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC |
+							  CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize		= DES_BLOCK_SIZE,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+};
+
+static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
+	{
+		.alg.skcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= aspeed_aes_setkey,
+			.encrypt	= aspeed_aes_ctr_encrypt,
+			.decrypt	= aspeed_aes_ctr_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ctr(aes)",
+				.cra_driver_name	= "aspeed-ctr-aes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= 1,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES_KEY_SIZE,
+			.max_keysize	= DES_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_des_ctr_encrypt,
+			.decrypt	= aspeed_des_ctr_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ctr(des)",
+				.cra_driver_name	= "aspeed-ctr-des",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= 1,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+	{
+		.alg.skcipher = {
+			.ivsize		= DES_BLOCK_SIZE,
+			.min_keysize	= DES3_EDE_KEY_SIZE,
+			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.setkey		= aspeed_des_setkey,
+			.encrypt	= aspeed_tdes_ctr_encrypt,
+			.decrypt	= aspeed_tdes_ctr_decrypt,
+			.init		= aspeed_crypto_cra_init,
+			.exit		= aspeed_crypto_cra_exit,
+			.base = {
+				.cra_name		= "ctr(des3_ede)",
+				.cra_driver_name	= "aspeed-ctr-tdes",
+				.cra_priority		= 300,
+				.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
+							  CRYPTO_ALG_ASYNC,
+				.cra_blocksize		= 1,
+				.cra_ctxsize		= sizeof(struct aspeed_cipher_ctx),
+				.cra_alignmask		= 0x0f,
+				.cra_module		= THIS_MODULE,
+			}
+		}
+	},
+
+};
+
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
+		crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+
+	if (hace_dev->version != AST2600_VERSION)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
+		crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+}
+
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+	int rc, i;
+
+	CIPHER_DBG(hace_dev, "\n");
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
+		aspeed_crypto_algs[i].hace_dev = hace_dev;
+		rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+		if (rc) {
+			CIPHER_DBG(hace_dev, "Failed to register %s\n",
+				   aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+		}
+	}
+
+	if (hace_dev->version != AST2600_VERSION)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
+		aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
+		rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+		if (rc) {
+			CIPHER_DBG(hace_dev, "Failed to register %s\n",
+				   aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+		}
+	}
+}
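
Once registered, these algorithms are exercised through the generic skcipher
API; the engine's do_one_request() callback runs asynchronously and the HACE
interrupt path finalizes the request. A minimal sketch of one CBC-AES
encryption against the "cbc(aes)" name; the all-zero key and the stack
buffers are illustrative only, real callers should use DMA-safe heap memory:

	static int example_cbc_aes(void)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		DECLARE_CRYPTO_WAIT(wait);
		struct scatterlist sg;
		u8 key[16] = {}, iv[16] = {}, buf[64] = {};
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, sizeof(key));
		if (err)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);
		sg_init_one(&sg, buf, sizeof(buf));
		skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

		/* block until the async engine signals crypto_req_done() */
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}
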
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
new file mode 100644
index 000000000000..935135229ebd
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define AHASH_DBG(h, fmt, ...)	\
+	dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define AHASH_DBG(h, fmt, ...)	\
+	dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* Initialization Vectors for SHA-family */
+static const __be32 sha1_iv[8] = {
+	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+	cpu_to_be32(SHA1_H4), 0, 0, 0
+};
+
+static const __be32 sha224_iv[8] = {
+	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_iv[8] = {
+	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+static const __be64 sha384_iv[8] = {
+	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
+};
+
+static const __be64 sha512_iv[8] = {
+	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
+};
+
+static const __be32 sha512_224_iv[16] = {
+	cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
+	cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
+	cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
+	cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
+	cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
+	cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
+	cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
+	cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
+};
+
+static const __be32 sha512_256_iv[16] = {
+	cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
+	cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
+	cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
+	cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
+	cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
+	cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
+	cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
+	cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
+};
+
+/* The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message, followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
+ * 128-bit block (SHA384/SHA512) containing the message length in bits
+ * is appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
+ *  - if message length < 56 bytes then padlen = 56 - message length
+ *  - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ *  - if message length < 112 bytes then padlen = 112 - message length
+ *  - else padlen = 128 + 112 - message length
+ */
+static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+				      struct aspeed_sham_reqctx *rctx)
+{
+	unsigned int index, padlen;
+	__be64 bits[2];
+
+	AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
+
+	switch (rctx->flags & SHA_FLAGS_MASK) {
+	case SHA_FLAGS_SHA1:
+	case SHA_FLAGS_SHA224:
+	case SHA_FLAGS_SHA256:
+		bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
+		index = rctx->bufcnt & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+		*(rctx->buffer + rctx->bufcnt) = 0x80;
+		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
+		rctx->bufcnt += padlen + 8;
+		break;
+	default:
+		bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
+		bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
+				      rctx->digcnt[0] >> 61);
+		index = rctx->bufcnt & 0x7f;
+		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+		*(rctx->buffer + rctx->bufcnt) = 0x80;
+		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
+		rctx->bufcnt += padlen + 16;
+		break;
+	}
+}
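
A quick worked instance of the padlen formula in the comment above, as a
self-contained helper; the 60-byte message length is hypothetical:

	/* padlen for the SHA-1/224/256 branch, mirroring the driver's formula */
	static unsigned int sha256_padlen(unsigned int bufcnt)
	{
		unsigned int index = bufcnt & 0x3f;

		return (index < 56) ? (56 - index) : ((64 + 56) - index);
	}

	/*
	 * e.g. sha256_padlen(60) == 60: 60 bytes of message + 60 bytes of
	 * padding + 8 bytes of length = 128 bytes, i.e. two 64-byte blocks.
	 */
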
+
+/*
+ * Prepare DMA buffer before hardware engine
+ * processing.
+ */
+static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	int length, remain;
+
+	length = rctx->total + rctx->bufcnt;
+	remain = length % rctx->block_size;
+
+	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
+
+	if (rctx->bufcnt)
+		memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
+
+	if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
+		scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+					 rctx->bufcnt, rctx->src_sg,
+					 rctx->offset, rctx->total - remain, 0);
+		rctx->offset += rctx->total - remain;
+
+	} else {
+		dev_warn(hace_dev->dev, "Hash data length is too large\n");
+		return -EINVAL;
+	}
+
+	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
+				 rctx->offset, remain, 0);
+
+	rctx->bufcnt = remain;
+	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+					       SHA512_DIGEST_SIZE,
+					       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+		return -ENOMEM;
+	}
+
+	hash_engine->src_length = length - remain;
+	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+	hash_engine->digest_dma = rctx->digest_dma_addr;
+
+	return 0;
+}
+
+/*
+ * Prepare DMA buffer as SG list buffer before
+ * hardware engine processing.
+ */
+static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct aspeed_sg_list *src_list;
+	struct scatterlist *s;
+	int length, remain, sg_len, i;
+	int rc = 0;
+
+	remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
+	length = rctx->total + rctx->bufcnt - remain;
+
+	AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
+		  "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+		  "length", length, "remain", remain);
+
+	sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+			    DMA_TO_DEVICE);
+	if (!sg_len) {
+		dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
+	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+					       SHA512_DIGEST_SIZE,
+					       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+		rc = -ENOMEM;
+		goto free_src_sg;
+	}
+
+	if (rctx->bufcnt != 0) {
+		u32 phy_addr;
+		u32 len;
+
+		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+						       rctx->buffer,
+						       rctx->block_size * 2,
+						       DMA_TO_DEVICE);
+		if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+			dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+			rc = -ENOMEM;
+			goto free_rctx_digest;
+		}
+
+		phy_addr = rctx->buffer_dma_addr;
+		len = rctx->bufcnt;
+		length -= len;
+
+		/* Last sg list */
+		if (length == 0)
+			len |= HASH_SG_LAST_LIST;
+
+		src_list[0].phy_addr = cpu_to_le32(phy_addr);
+		src_list[0].len = cpu_to_le32(len);
+		src_list++;
+	}
+
+	if (length != 0) {
+		for_each_sg(rctx->src_sg, s, sg_len, i) {
+			u32 phy_addr = sg_dma_address(s);
+			u32 len = sg_dma_len(s);
+
+			if (length > len)
+				length -= len;
+			else {
+				/* Last sg list */
+				len = length;
+				len |= HASH_SG_LAST_LIST;
+				length = 0;
+			}
+
+			src_list[i].phy_addr = cpu_to_le32(phy_addr);
+			src_list[i].len = cpu_to_le32(len);
+		}
+	}
+
+	if (length != 0) {
+		rc = -EINVAL;
+		goto free_rctx_buffer;
+	}
+
+	rctx->offset = rctx->total - remain;
+	hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+	hash_engine->digest_dma = rctx->digest_dma_addr;
+
+	return 0;
+
+free_rctx_buffer:
+	if (rctx->bufcnt != 0)
+		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+				 rctx->block_size * 2, DMA_TO_DEVICE);
+free_rctx_digest:
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+free_src_sg:
+	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+		     DMA_TO_DEVICE);
+end:
+	return rc;
+}
+
+static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+
+	AHASH_DBG(hace_dev, "\n");
+
+	hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+	crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
+
+	return 0;
+}
+
+/*
+ * Copy the digest into the corresponding request result.
+ * This function is called at the final() stage.
+ */
+static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	AHASH_DBG(hace_dev, "\n");
+
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+			 rctx->block_size * 2, DMA_TO_DEVICE);
+
+	memcpy(req->result, rctx->digest, rctx->digsize);
+
+	return aspeed_ahash_complete(hace_dev);
+}
+
+/*
+ * Trigger hardware engines to do the math.
+ */
+static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+				     aspeed_hace_fn_t resume)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
+		  &hash_engine->src_dma, &hash_engine->digest_dma,
+		  hash_engine->src_length);
+
+	rctx->cmd |= HASH_CMD_INT_ENABLE;
+	hash_engine->resume = resume;
+
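+	/*
+	 * The digest buffer address is also programmed into the key buffer
+	 * register, so accumulative mode picks up the intermediate digest.
+	 */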
+	ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
+	ast_hace_write(hace_dev, hash_engine->digest_dma,
+		       ASPEED_HACE_HASH_DIGEST_BUFF);
+	ast_hace_write(hace_dev, hash_engine->digest_dma,
+		       ASPEED_HACE_HASH_KEY_BUFF);
+	ast_hace_write(hace_dev, hash_engine->src_length,
+		       ASPEED_HACE_HASH_DATA_LEN);
+
+	/* Memory barrier to ensure all data setup before engine starts */
+	mb();
+
+	ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
+
+	return -EINPROGRESS;
+}
+
+/*
+ * HMAC resume performs the second hash pass, producing the
+ * final HMAC value from the outer key pad and the inner
+ * hash result.
+ */
+static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+	int rc = 0;
+
+	AHASH_DBG(hace_dev, "\n");
+
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+			 rctx->block_size * 2, DMA_TO_DEVICE);
+
+	/* Outer pass input: opad followed by the inner hash result */
+	memcpy(rctx->buffer, bctx->opad, rctx->block_size);
+	memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
+
+	rctx->bufcnt = rctx->block_size + rctx->digsize;
+	rctx->digcnt[0] = rctx->block_size + rctx->digsize;
+
+	aspeed_ahash_fill_padding(hace_dev, rctx);
+	memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
+
+	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+					       SHA512_DIGEST_SIZE,
+					       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
+					       rctx->block_size * 2,
+					       DMA_TO_DEVICE);
+	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+		rc = -ENOMEM;
+		goto free_rctx_digest;
+	}
+
+	hash_engine->src_dma = rctx->buffer_dma_addr;
+	hash_engine->src_length = rctx->bufcnt;
+	hash_engine->digest_dma = rctx->digest_dma_addr;
+
+	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+	return rc;
+}
+
+static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	int rc = 0;
+
+	AHASH_DBG(hace_dev, "\n");
+
+	aspeed_ahash_fill_padding(hace_dev, rctx);
+
+	rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
+					       rctx->digest,
+					       SHA512_DIGEST_SIZE,
+					       DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+					       rctx->buffer,
+					       rctx->block_size * 2,
+					       DMA_TO_DEVICE);
+	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+		rc = -ENOMEM;
+		goto free_rctx_digest;
+	}
+
+	hash_engine->src_dma = rctx->buffer_dma_addr;
+	hash_engine->src_length = rctx->bufcnt;
+	hash_engine->digest_dma = rctx->digest_dma_addr;
+
+	if (rctx->flags & SHA_FLAGS_HMAC)
+		return aspeed_hace_ahash_trigger(hace_dev,
+						 aspeed_ahash_hmac_resume);
+
+	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+	return rc;
+}
+
+static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	AHASH_DBG(hace_dev, "\n");
+
+	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+		     DMA_TO_DEVICE);
+
+	if (rctx->bufcnt != 0)
+		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+				 rctx->block_size * 2,
+				 DMA_TO_DEVICE);
+
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
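+	/* Copy back the tail the engine did not process for the next round */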
+	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
+				 rctx->total - rctx->offset, 0);
+
+	rctx->bufcnt = rctx->total - rctx->offset;
+	rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
+
+	if (rctx->flags & SHA_FLAGS_FINUP)
+		return aspeed_ahash_req_final(hace_dev);
+
+	return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	AHASH_DBG(hace_dev, "\n");
+
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+	if (rctx->flags & SHA_FLAGS_FINUP)
+		return aspeed_ahash_req_final(hace_dev);
+
+	return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
+{
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	aspeed_hace_fn_t resume;
+	int ret;
+
+	AHASH_DBG(hace_dev, "\n");
+
+	if (hace_dev->version == AST2600_VERSION) {
+		rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
+		resume = aspeed_ahash_update_resume_sg;
+
+	} else {
+		resume = aspeed_ahash_update_resume;
+	}
+
+	ret = hash_engine->dma_prepare(hace_dev);
+	if (ret)
+		return ret;
+
+	return aspeed_hace_ahash_trigger(hace_dev, resume);
+}
+
+static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
+				  struct ahash_request *req)
+{
+	return crypto_transfer_hash_request_to_engine(
+			hace_dev->crypt_engine_hash, req);
+}
+
+static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
+{
+	struct ahash_request *req = ahash_request_cast(areq);
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+	struct aspeed_engine_hash *hash_engine;
+	int ret = 0;
+
+	hash_engine = &hace_dev->hash_engine;
+	hash_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+	if (rctx->op == SHA_OP_UPDATE)
+		ret = aspeed_ahash_req_update(hace_dev);
+	else if (rctx->op == SHA_OP_FINAL)
+		ret = aspeed_ahash_req_final(hace_dev);
+
+	if (ret != -EINPROGRESS)
+		return ret;
+
+	return 0;
+}
+
+static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
+					void *areq)
+{
+	struct ahash_request *req = ahash_request_cast(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+	struct aspeed_engine_hash *hash_engine;
+
+	hash_engine = &hace_dev->hash_engine;
+	hash_engine->req = req;
+
+	if (hace_dev->version == AST2600_VERSION)
+		hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+	else
+		hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+
+	return 0;
+}
+
+static int aspeed_sham_update(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+	rctx->total = req->nbytes;
+	rctx->src_sg = req->src;
+	rctx->offset = 0;
+	rctx->src_nents = sg_nents(req->src);
+	rctx->op = SHA_OP_UPDATE;
+
+	rctx->digcnt[0] += rctx->total;
+	if (rctx->digcnt[0] < rctx->total)
+		rctx->digcnt[1]++;
+
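+	/* Less than one block in total: just buffer the data for later */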
+	if (rctx->bufcnt + rctx->total < rctx->block_size) {
+		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
+					 rctx->src_sg, rctx->offset,
+					 rctx->total, 0);
+		rctx->bufcnt += rctx->total;
+
+		return 0;
+	}
+
+	return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
+				    const u8 *data, unsigned int len, u8 *out)
+{
+	SHASH_DESC_ON_STACK(shash, tfm);
+
+	shash->tfm = tfm;
+
+	return crypto_shash_digest(shash, data, len, out);
+}
+
+static int aspeed_sham_final(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+	AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
+		  req->nbytes, rctx->total);
+	rctx->op = SHA_OP_FINAL;
+
+	return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_finup(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+	int rc1, rc2;
+
+	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+	rctx->flags |= SHA_FLAGS_FINUP;
+
+	rc1 = aspeed_sham_update(req);
+	if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
+		return rc1;
+
+	/*
+	 * final() must always be called to clean up resources even if
+	 * update() failed, except when it returned -EINPROGRESS.
+	 */
+	rc2 = aspeed_sham_final(req);
+
+	return rc1 ? : rc2;
+}
+
+static int aspeed_sham_init(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+	AHASH_DBG(hace_dev, "%s: digest size:%d\n",
+		  crypto_tfm_alg_name(&tfm->base),
+		  crypto_ahash_digestsize(tfm));
+
+	rctx->cmd = HASH_CMD_ACC_MODE;
+	rctx->flags = 0;
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA1;
+		rctx->digsize = SHA1_DIGEST_SIZE;
+		rctx->block_size = SHA1_BLOCK_SIZE;
+		rctx->sha_iv = sha1_iv;
+		rctx->ivsize = 32;
+		memcpy(rctx->digest, sha1_iv, rctx->ivsize);
+		break;
+	case SHA224_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA224;
+		rctx->digsize = SHA224_DIGEST_SIZE;
+		rctx->block_size = SHA224_BLOCK_SIZE;
+		rctx->sha_iv = sha224_iv;
+		rctx->ivsize = 32;
+		memcpy(rctx->digest, sha224_iv, rctx->ivsize);
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA256;
+		rctx->digsize = SHA256_DIGEST_SIZE;
+		rctx->block_size = SHA256_BLOCK_SIZE;
+		rctx->sha_iv = sha256_iv;
+		rctx->ivsize = 32;
+		memcpy(rctx->digest, sha256_iv, rctx->ivsize);
+		break;
+	case SHA384_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
+			     HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA384;
+		rctx->digsize = SHA384_DIGEST_SIZE;
+		rctx->block_size = SHA384_BLOCK_SIZE;
+		rctx->sha_iv = (const __be32 *)sha384_iv;
+		rctx->ivsize = 64;
+		memcpy(rctx->digest, sha384_iv, rctx->ivsize);
+		break;
+	case SHA512_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
+			     HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA512;
+		rctx->digsize = SHA512_DIGEST_SIZE;
+		rctx->block_size = SHA512_BLOCK_SIZE;
+		rctx->sha_iv = (const __be32 *)sha512_iv;
+		rctx->ivsize = 64;
+		memcpy(rctx->digest, sha512_iv, rctx->ivsize);
+		break;
+	default:
+		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+			 crypto_ahash_digestsize(tfm));
+		return -EINVAL;
+	}
+
+	rctx->bufcnt = 0;
+	rctx->total = 0;
+	rctx->digcnt[0] = 0;
+	rctx->digcnt[1] = 0;
+
+	/* HMAC init */
+	if (tctx->flags & SHA_FLAGS_HMAC) {
+		rctx->digcnt[0] = rctx->block_size;
+		rctx->bufcnt = rctx->block_size;
+		memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+		rctx->flags |= SHA_FLAGS_HMAC;
+	}
+
+	return 0;
+}
+
+static int aspeed_sha512s_init(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+	AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
+
+	rctx->cmd = HASH_CMD_ACC_MODE;
+	rctx->flags = 0;
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA224_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
+			     HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA512_224;
+		rctx->digsize = SHA224_DIGEST_SIZE;
+		rctx->block_size = SHA512_BLOCK_SIZE;
+		rctx->sha_iv = sha512_224_iv;
+		rctx->ivsize = 64;
+		memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
+			     HASH_CMD_SHA_SWAP;
+		rctx->flags |= SHA_FLAGS_SHA512_256;
+		rctx->digsize = SHA256_DIGEST_SIZE;
+		rctx->block_size = SHA512_BLOCK_SIZE;
+		rctx->sha_iv = sha512_256_iv;
+		rctx->ivsize = 64;
+		memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
+		break;
+	default:
+		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+			 crypto_ahash_digestsize(tfm));
+		return -EINVAL;
+	}
+
+	rctx->bufcnt = 0;
+	rctx->total = 0;
+	rctx->digcnt[0] = 0;
+	rctx->digcnt[1] = 0;
+
+	/* HMAC init */
+	if (tctx->flags & SHA_FLAGS_HMAC) {
+		rctx->digcnt[0] = rctx->block_size;
+		rctx->bufcnt = rctx->block_size;
+		memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+		rctx->flags |= SHA_FLAGS_HMAC;
+	}
+
+	return 0;
+}
+
+static int aspeed_sham_digest(struct ahash_request *req)
+{
+	return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
+}
+
+static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+			      unsigned int keylen)
+{
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+	int ds = crypto_shash_digestsize(bctx->shash);
+	int bs = crypto_shash_blocksize(bctx->shash);
+	int err = 0;
+	int i;
+
+	AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
+		  keylen);
+
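+	/* Per HMAC (RFC 2104): keys longer than one block are hashed first */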
+	if (keylen > bs) {
+		err = aspeed_sham_shash_digest(bctx->shash,
+					       crypto_shash_get_flags(bctx->shash),
+					       key, keylen, bctx->ipad);
+		if (err)
+			return err;
+		keylen = ds;
+
+	} else {
+		memcpy(bctx->ipad, key, keylen);
+	}
+
+	memset(bctx->ipad + keylen, 0, bs - keylen);
+	memcpy(bctx->opad, bctx->ipad, bs);
+
+	for (i = 0; i < bs; i++) {
+		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+		bctx->opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	return err;
+}
+
+static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
+{
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct aspeed_hace_alg *ast_alg;
+
+	ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+	tctx->hace_dev = ast_alg->hace_dev;
+	tctx->flags = 0;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct aspeed_sham_reqctx));
+
+	if (ast_alg->alg_base) {
+		/* hmac related */
+		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+		tctx->flags |= SHA_FLAGS_HMAC;
+		bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(bctx->shash)) {
+			dev_warn(ast_alg->hace_dev->dev,
+				 "base driver '%s' could not be loaded.\n",
+				 ast_alg->alg_base);
+			return PTR_ERR(bctx->shash);
+		}
+	}
+
+	tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
+	tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
+	tctx->enginectx.op.unprepare_request = NULL;
+
+	return 0;
+}
+
+static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
+{
+	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+	AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
+
+	if (tctx->flags & SHA_FLAGS_HMAC) {
+		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+		crypto_free_shash(bctx->shash);
+	}
+}
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, rctx, sizeof(*rctx));
+
+	return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(rctx, in, sizeof(*rctx));
+
+	return 0;
+}
+
+static struct aspeed_hace_alg aspeed_ahash_algs[] = {
+	{
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha1",
+					.cra_driver_name	= "aspeed-sha1",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA1_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha256",
+					.cra_driver_name	= "aspeed-sha256",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA256_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha224",
+					.cra_driver_name	= "aspeed-sha224",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA224_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha1",
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA1_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha1)",
+					.cra_driver_name	= "aspeed-hmac-sha1",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA1_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha224",
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha224)",
+					.cra_driver_name	= "aspeed-hmac-sha224",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA224_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha256",
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha256)",
+					.cra_driver_name	= "aspeed-hmac-sha256",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA256_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+};
+
+static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
+	{
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha384",
+					.cra_driver_name	= "aspeed-sha384",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA384_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha512",
+					.cra_driver_name	= "aspeed-sha512",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA512_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg.ahash = {
+			.init	= aspeed_sha512s_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha512_224",
+					.cra_driver_name	= "aspeed-sha512_224",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA512_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg.ahash = {
+			.init	= aspeed_sha512s_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "sha512_256",
+					.cra_driver_name	= "aspeed-sha512_256",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA512_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha384",
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA384_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha384)",
+					.cra_driver_name	= "aspeed-hmac-sha384",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA384_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha512",
+		.alg.ahash = {
+			.init	= aspeed_sham_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA512_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha512)",
+					.cra_driver_name	= "aspeed-hmac-sha512",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA512_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha512_224",
+		.alg.ahash = {
+			.init	= aspeed_sha512s_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA224_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha512_224)",
+					.cra_driver_name	= "aspeed-hmac-sha512_224",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA512_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+	{
+		.alg_base = "sha512_256",
+		.alg.ahash = {
+			.init	= aspeed_sha512s_init,
+			.update	= aspeed_sham_update,
+			.final	= aspeed_sham_final,
+			.finup	= aspeed_sham_finup,
+			.digest	= aspeed_sham_digest,
+			.setkey	= aspeed_sham_setkey,
+			.export	= aspeed_sham_export,
+			.import	= aspeed_sham_import,
+			.halg = {
+				.digestsize = SHA256_DIGEST_SIZE,
+				.statesize = sizeof(struct aspeed_sham_reqctx),
+				.base = {
+					.cra_name		= "hmac(sha512_256)",
+					.cra_driver_name	= "aspeed-hmac-sha512_256",
+					.cra_priority		= 300,
+					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_ALG_KERN_DRIVER_ONLY,
+					.cra_blocksize		= SHA512_BLOCK_SIZE,
+					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
+								sizeof(struct aspeed_sha_hmac_ctx),
+					.cra_alignmask		= 0,
+					.cra_module		= THIS_MODULE,
+					.cra_init		= aspeed_sham_cra_init,
+					.cra_exit		= aspeed_sham_cra_exit,
+				}
+			}
+		},
+	},
+};
+
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
+		crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+
+	if (hace_dev->version != AST2600_VERSION)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
+		crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+}
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+	int rc, i;
+
+	AHASH_DBG(hace_dev, "\n");
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
+		aspeed_ahash_algs[i].hace_dev = hace_dev;
+		rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+		if (rc) {
+			AHASH_DBG(hace_dev, "Failed to register %s\n",
+				  aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+		}
+	}
+
+	if (hace_dev->version != AST2600_VERSION)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
+		aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
+		rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+		if (rc) {
+			AHASH_DBG(hace_dev, "Failed to register %s\n",
+				  aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+		}
+	}
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
new file mode 100644
index 000000000000..656cb92c8bb6
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define HACE_DBG(d, fmt, ...)	\
+	dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define HACE_DBG(d, fmt, ...)	\
+	dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* HACE interrupt service routine */
+static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+{
+	struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+	u32 sts;
+
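+	/* Acknowledge the pending interrupts by writing the status back */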
+	sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
+	ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);
+
+	HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);
+
+	if (sts & HACE_HASH_ISR) {
+		if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
+			tasklet_schedule(&hash_engine->done_task);
+		else
+			dev_warn(hace_dev->dev, "HASH interrupt with no active request\n");
+	}
+
+	if (sts & HACE_CRYPTO_ISR) {
+		if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
+			tasklet_schedule(&crypto_engine->done_task);
+		else
+			dev_warn(hace_dev->dev, "CRYPTO interrupt with no active request\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void aspeed_hace_crypto_done_task(unsigned long data)
+{
+	struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+
+	crypto_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_hash_done_task(unsigned long data)
+{
+	struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+	hash_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+	aspeed_register_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+	aspeed_register_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+	aspeed_unregister_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+	aspeed_unregister_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static const struct of_device_id aspeed_hace_of_matches[] = {
+	{ .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
+	{ .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
+	{},
+};
+
+static int aspeed_hace_probe(struct platform_device *pdev)
+{
+	struct aspeed_engine_crypto *crypto_engine;
+	const struct of_device_id *hace_dev_id;
+	struct aspeed_engine_hash *hash_engine;
+	struct aspeed_hace_dev *hace_dev;
+	struct resource *res;
+	int rc;
+
+	hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
+				GFP_KERNEL);
+	if (!hace_dev)
+		return -ENOMEM;
+
+	hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
+	if (!hace_dev_id) {
+		dev_err(&pdev->dev, "Failed to match hace dev id\n");
+		return -EINVAL;
+	}
+
+	hace_dev->dev = &pdev->dev;
+	hace_dev->version = (unsigned long)hace_dev_id->data;
+	hash_engine = &hace_dev->hash_engine;
+	crypto_engine = &hace_dev->crypto_engine;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	platform_set_drvdata(pdev, hace_dev);
+
+	hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hace_dev->regs))
+		return PTR_ERR(hace_dev->regs);
+
+	/* Get irq number and register it */
+	hace_dev->irq = platform_get_irq(pdev, 0);
+	if (hace_dev->irq < 0)
+		return -ENXIO;
+
+	rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
+			      dev_name(&pdev->dev), hace_dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to request interrupt\n");
+		return rc;
+	}
+
+	/* Get clk and enable it */
+	hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hace_dev->clk)) {
+		dev_err(&pdev->dev, "Failed to get clk\n");
+		return -ENODEV;
+	}
+
+	rc = clk_prepare_enable(hace_dev->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to enable clock, error %d\n", rc);
+		return rc;
+	}
+
+	/* Initialize crypto hardware engine structure for hash */
+	hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
+							       true);
+	if (!hace_dev->crypt_engine_hash) {
+		rc = -ENOMEM;
+		goto clk_exit;
+	}
+
+	rc = crypto_engine_start(hace_dev->crypt_engine_hash);
+	if (rc)
+		goto err_engine_hash_start;
+
+	tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
+		     (unsigned long)hace_dev);
+
+	/* Initialize crypto hardware engine structure for crypto */
+	hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
+								 true);
+	if (!hace_dev->crypt_engine_crypto) {
+		rc = -ENOMEM;
+		goto err_engine_hash_start;
+	}
+
+	rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
+	if (rc)
+		goto err_engine_crypto_start;
+
+	tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
+		     (unsigned long)hace_dev);
+
+	/* Allocate DMA buffer for hash engine input used */
+	hash_engine->ahash_src_addr =
+		dmam_alloc_coherent(&pdev->dev,
+				    ASPEED_HASH_SRC_DMA_BUF_LEN,
+				    &hash_engine->ahash_src_dma_addr,
+				    GFP_KERNEL);
+	if (!hash_engine->ahash_src_addr) {
+		dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
+		rc = -ENOMEM;
+		goto err_engine_crypto_start;
+	}
+
+	/* Allocate DMA buffer for crypto engine context used */
+	crypto_engine->cipher_ctx =
+		dmam_alloc_coherent(&pdev->dev,
+				    PAGE_SIZE,
+				    &crypto_engine->cipher_ctx_dma,
+				    GFP_KERNEL);
+	if (!crypto_engine->cipher_ctx) {
+		dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
+		rc = -ENOMEM;
+		goto err_engine_crypto_start;
+	}
+
+	/* Allocate DMA buffer for crypto engine input used */
+	crypto_engine->cipher_addr =
+		dmam_alloc_coherent(&pdev->dev,
+				    ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
+				    &crypto_engine->cipher_dma_addr,
+				    GFP_KERNEL);
+	if (!crypto_engine->cipher_addr) {
+		dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
+		rc = -ENOMEM;
+		goto err_engine_crypto_start;
+	}
+
+	/* Allocate DMA buffer for crypto engine output used */
+	if (hace_dev->version == AST2600_VERSION) {
+		crypto_engine->dst_sg_addr =
+			dmam_alloc_coherent(&pdev->dev,
+					    ASPEED_CRYPTO_DST_DMA_BUF_LEN,
+					    &crypto_engine->dst_sg_dma_addr,
+					    GFP_KERNEL);
+		if (!crypto_engine->dst_sg_addr) {
+			dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
+			rc = -ENOMEM;
+			goto err_engine_crypto_start;
+		}
+	}
+
+	aspeed_hace_register(hace_dev);
+
+	dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
+
+	return 0;
+
+err_engine_crypto_start:
+	crypto_engine_exit(hace_dev->crypt_engine_crypto);
+err_engine_hash_start:
+	crypto_engine_exit(hace_dev->crypt_engine_hash);
+clk_exit:
+	clk_disable_unprepare(hace_dev->clk);
+
+	return rc;
+}
+
+static int aspeed_hace_remove(struct platform_device *pdev)
+{
+	struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
+	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+	aspeed_hace_unregister(hace_dev);
+
+	crypto_engine_exit(hace_dev->crypt_engine_hash);
+	crypto_engine_exit(hace_dev->crypt_engine_crypto);
+
+	tasklet_kill(&hash_engine->done_task);
+	tasklet_kill(&crypto_engine->done_task);
+
+	clk_disable_unprepare(hace_dev->clk);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
+
+static struct platform_driver aspeed_hace_driver = {
+	.probe		= aspeed_hace_probe,
+	.remove		= aspeed_hace_remove,
+	.driver         = {
+		.name   = KBUILD_MODNAME,
+		.of_match_table = aspeed_hace_of_matches,
+	},
+};
+
+module_platform_driver(aspeed_hace_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("Aspeed HACE Crypto Accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
new file mode 100644
index 000000000000..f2cde23b56ae
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __ASPEED_HACE_H__
+#define __ASPEED_HACE_H__
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <linux/dma-mapping.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+/*****************************
+ *                           *
+ * HACE register definitions *
+ *                           *
+ *****************************/
+#define ASPEED_HACE_SRC			0x00	/* Crypto Data Source Base Address Register */
+#define ASPEED_HACE_DEST		0x04	/* Crypto Data Destination Base Address Register */
+#define ASPEED_HACE_CONTEXT		0x08	/* Crypto Context Buffer Base Address Register */
+#define ASPEED_HACE_DATA_LEN		0x0C	/* Crypto Data Length Register */
+#define ASPEED_HACE_CMD			0x10	/* Crypto Engine Command Register */
+
+/* G5 */
+#define ASPEED_HACE_TAG			0x18	/* HACE Tag Register */
+/* G6 */
+#define ASPEED_HACE_GCM_ADD_LEN		0x14	/* Crypto AES-GCM Additional Data Length Register */
+#define ASPEED_HACE_GCM_TAG_BASE_ADDR	0x18	/* Crypto AES-GCM Tag Write Buff Base Address Reg */
+
+#define ASPEED_HACE_STS			0x1C	/* HACE Status Register */
+
+#define ASPEED_HACE_HASH_SRC		0x20	/* Hash Data Source Base Address Register */
+#define ASPEED_HACE_HASH_DIGEST_BUFF	0x24	/* Hash Digest Write Buffer Base Address Register */
+#define ASPEED_HACE_HASH_KEY_BUFF	0x28	/* Hash HMAC Key Buffer Base Address Register */
+#define ASPEED_HACE_HASH_DATA_LEN	0x2C	/* Hash Data Length Register */
+#define ASPEED_HACE_HASH_CMD		0x30	/* Hash Engine Command Register */
+
+/* crypto cmd */
+#define  HACE_CMD_SINGLE_DES		0
+#define  HACE_CMD_TRIPLE_DES		BIT(17)
+#define  HACE_CMD_AES_SELECT		0
+#define  HACE_CMD_DES_SELECT		BIT(16)
+#define  HACE_CMD_ISR_EN		BIT(12)
+#define  HACE_CMD_CONTEXT_SAVE_ENABLE	(0)
+#define  HACE_CMD_CONTEXT_SAVE_DISABLE	BIT(9)
+#define  HACE_CMD_AES			(0)
+#define  HACE_CMD_DES			(0)
+#define  HACE_CMD_RC4			BIT(8)
+#define  HACE_CMD_DECRYPT		(0)
+#define  HACE_CMD_ENCRYPT		BIT(7)
+
+#define  HACE_CMD_ECB			(0x0 << 4)
+#define  HACE_CMD_CBC			(0x1 << 4)
+#define  HACE_CMD_CFB			(0x2 << 4)
+#define  HACE_CMD_OFB			(0x3 << 4)
+#define  HACE_CMD_CTR			(0x4 << 4)
+#define  HACE_CMD_OP_MODE_MASK		(0x7 << 4)
+
+#define  HACE_CMD_AES128		(0x0 << 2)
+#define  HACE_CMD_AES192		(0x1 << 2)
+#define  HACE_CMD_AES256		(0x2 << 2)
+#define  HACE_CMD_OP_CASCADE		(0x3)
+#define  HACE_CMD_OP_INDEPENDENT	(0x1)
+
+/* G5 */
+#define  HACE_CMD_RI_WO_DATA_ENABLE	(0)
+#define  HACE_CMD_RI_WO_DATA_DISABLE	BIT(11)
+#define  HACE_CMD_CONTEXT_LOAD_ENABLE	(0)
+#define  HACE_CMD_CONTEXT_LOAD_DISABLE	BIT(10)
+/* G6 */
+#define  HACE_CMD_AES_KEY_FROM_OTP	BIT(24)
+#define  HACE_CMD_GHASH_TAG_XOR_EN	BIT(23)
+#define  HACE_CMD_GHASH_PAD_LEN_INV	BIT(22)
+#define  HACE_CMD_GCM_TAG_ADDR_SEL	BIT(21)
+#define  HACE_CMD_MBUS_REQ_SYNC_EN	BIT(20)
+#define  HACE_CMD_DES_SG_CTRL		BIT(19)
+#define  HACE_CMD_SRC_SG_CTRL		BIT(18)
+#define  HACE_CMD_CTR_IV_AES_96		(0x1 << 14)
+#define  HACE_CMD_CTR_IV_DES_32		(0x1 << 14)
+#define  HACE_CMD_CTR_IV_AES_64		(0x2 << 14)
+#define  HACE_CMD_CTR_IV_AES_32		(0x3 << 14)
+#define  HACE_CMD_AES_KEY_HW_EXP	BIT(13)
+#define  HACE_CMD_GCM			(0x5 << 4)
+
+/* interrupt status reg */
+#define  HACE_CRYPTO_ISR		BIT(12)
+#define  HACE_HASH_ISR			BIT(9)
+#define  HACE_HASH_BUSY			BIT(0)
+
+/* hash cmd reg */
+#define  HASH_CMD_MBUS_REQ_SYNC_EN	BIT(20)
+#define  HASH_CMD_HASH_SRC_SG_CTRL	BIT(18)
+#define  HASH_CMD_SHA512_224		(0x3 << 10)
+#define  HASH_CMD_SHA512_256		(0x2 << 10)
+#define  HASH_CMD_SHA384		(0x1 << 10)
+#define  HASH_CMD_SHA512		(0)
+#define  HASH_CMD_INT_ENABLE		BIT(9)
+#define  HASH_CMD_HMAC			(0x1 << 7)
+#define  HASH_CMD_ACC_MODE		(0x2 << 7)
+#define  HASH_CMD_HMAC_KEY		(0x3 << 7)
+#define  HASH_CMD_SHA1			(0x2 << 4)
+#define  HASH_CMD_SHA224		(0x4 << 4)
+#define  HASH_CMD_SHA256		(0x5 << 4)
+#define  HASH_CMD_SHA512_SER		(0x6 << 4)
+#define  HASH_CMD_SHA_SWAP		(0x2 << 2)
+
+#define HASH_SG_LAST_LIST		BIT(31)	/* final entry in an SG list */
+
+#define CRYPTO_FLAGS_BUSY		BIT(1)
+
+#define SHA_OP_UPDATE			1
+#define SHA_OP_FINAL			2
+
+#define SHA_FLAGS_SHA1			BIT(0)
+#define SHA_FLAGS_SHA224		BIT(1)
+#define SHA_FLAGS_SHA256		BIT(2)
+#define SHA_FLAGS_SHA384		BIT(3)
+#define SHA_FLAGS_SHA512		BIT(4)
+#define SHA_FLAGS_SHA512_224		BIT(5)
+#define SHA_FLAGS_SHA512_256		BIT(6)
+#define SHA_FLAGS_HMAC			BIT(8)
+#define SHA_FLAGS_FINUP			BIT(9)
+#define SHA_FLAGS_MASK			(0xff)
+
+#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN	0xa000
+#define ASPEED_CRYPTO_DST_DMA_BUF_LEN	0xa000
+#define ASPEED_CRYPTO_GCM_TAG_OFFSET	0x9ff0
+#define ASPEED_HASH_SRC_DMA_BUF_LEN	0xa000
+#define ASPEED_HASH_QUEUE_LENGTH	50
+
+#define HACE_CMD_IV_REQUIRE		(HACE_CMD_CBC | HACE_CMD_CFB | \
+					 HACE_CMD_OFB | HACE_CMD_CTR)
+
+struct aspeed_hace_dev;
+
+typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
+
+struct aspeed_sg_list {
+	__le32 len;
+	__le32 phy_addr;
+};
+
+struct aspeed_engine_hash {
+	struct tasklet_struct		done_task;
+	unsigned long			flags;
+	struct ahash_request		*req;
+
+	/* input buffer */
+	void				*ahash_src_addr;
+	dma_addr_t			ahash_src_dma_addr;
+
+	dma_addr_t			src_dma;
+	dma_addr_t			digest_dma;
+
+	size_t				src_length;
+
+	/* callback func */
+	aspeed_hace_fn_t		resume;
+	aspeed_hace_fn_t		dma_prepare;
+};
+
+struct aspeed_sha_hmac_ctx {
+	struct crypto_shash *shash;
+	u8 ipad[SHA512_BLOCK_SIZE];
+	u8 opad[SHA512_BLOCK_SIZE];
+};
+
+struct aspeed_sham_ctx {
+	struct crypto_engine_ctx	enginectx;
+
+	struct aspeed_hace_dev		*hace_dev;
+	unsigned long			flags;	/* hmac flag */
+
+	struct aspeed_sha_hmac_ctx	base[];
+};
+
+struct aspeed_sham_reqctx {
+	unsigned long		flags;		/* SHA_FLAGS_* state flags */
+	unsigned long		op;		/* final or update */
+	u32			cmd;		/* trigger cmd */
+
+	/* walk state */
+	struct scatterlist	*src_sg;
+	int			src_nents;
+	unsigned int		offset;		/* offset in current sg */
+	unsigned int		total;		/* per update length */
+
+	size_t			digsize;
+	size_t			block_size;
+	size_t			ivsize;
+	const __be32		*sha_iv;
+
+	/* remain data buffer */
+	u8			buffer[SHA512_BLOCK_SIZE * 2];
+	dma_addr_t		buffer_dma_addr;
+	size_t			bufcnt;		/* buffer counter */
+
+	/* output buffer */
+	u8			digest[SHA512_DIGEST_SIZE] __aligned(64);
+	dma_addr_t		digest_dma_addr;
+	u64			digcnt[2];
+};
+
+struct aspeed_engine_crypto {
+	struct tasklet_struct		done_task;
+	unsigned long			flags;
+	struct skcipher_request		*req;
+
+	/* context buffer */
+	void				*cipher_ctx;
+	dma_addr_t			cipher_ctx_dma;
+
+	/* input buffer, could be single/scatter-gather lists */
+	void				*cipher_addr;
+	dma_addr_t			cipher_dma_addr;
+
+	/* output buffer, only used in scatter-gather lists */
+	void				*dst_sg_addr;
+	dma_addr_t			dst_sg_dma_addr;
+
+	/* callback func */
+	aspeed_hace_fn_t		resume;
+};
+
+struct aspeed_cipher_ctx {
+	struct crypto_engine_ctx	enginectx;
+
+	struct aspeed_hace_dev		*hace_dev;
+	int				key_len;
+	u8				key[AES_MAX_KEYLENGTH];
+
+	/* callback func */
+	aspeed_hace_fn_t		start;
+
+	struct crypto_skcipher          *fallback_tfm;
+};
+
+struct aspeed_cipher_reqctx {
+	int enc_cmd;
+	int src_nents;
+	int dst_nents;
+
+	struct skcipher_request         fallback_req;   /* keep at the end */
+};
+
+struct aspeed_hace_dev {
+	void __iomem			*regs;
+	struct device			*dev;
+	int				irq;
+	struct clk			*clk;
+	unsigned long			version;
+
+	struct crypto_engine		*crypt_engine_hash;
+	struct crypto_engine		*crypt_engine_crypto;
+
+	struct aspeed_engine_hash	hash_engine;
+	struct aspeed_engine_crypto	crypto_engine;
+};
+
+struct aspeed_hace_alg {
+	struct aspeed_hace_dev		*hace_dev;
+
+	const char			*alg_base;
+
+	union {
+		struct skcipher_alg	skcipher;
+		struct ahash_alg	ahash;
+	} alg;
+};
+
+enum aspeed_version {
+	AST2500_VERSION = 5,
+	AST2600_VERSION
+};
+
+#define ast_hace_write(hace, val, offset)	\
+	writel((val), (hace)->regs + (offset))
+#define ast_hace_read(hace, offset)		\
+	readl((hace)->regs + (offset))
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+
+#endif /* __ASPEED_HACE_H__ */
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 9ad188cffd0d..51c66afbe677 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
 		cipher_len = regk_crypto_key_256;
 		break;
 	default:
-		pr_err("%s: Invalid key length %d!\n",
+		pr_err("%s: Invalid key length %zu!\n",
 			MODULE_NAME, ctx->key_length);
 		return -EINVAL;
 	}
@@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data)
 		return;
 	}
 
-	spin_lock_bh(&ac->queue_lock);
+	spin_lock(&ac->queue_lock);
 
 	list_for_each_entry_safe(req, n, &ac->pending, list) {
 		struct artpec6_crypto_dma_descriptors *dma = req->dma;
@@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data)
 
 	artpec6_crypto_process_queue(ac, &complete_in_progress);
 
-	spin_unlock_bh(&ac->queue_lock);
+	spin_unlock(&ac->queue_lock);
 
 	/* Perform the completion callbacks without holding the queue lock
 	 * to allow new request submissions from the callbacks.
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 053315e260c2..c8c799428fe0 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
 	/* SPU2 hardware does not compute hash of zero length data */
 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
-		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+		alg_name = crypto_ahash_alg_name(tfm);
 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
 			 rctx->is_final ? "" : "non-", alg_name);
 		err = do_shash((unsigned char *)alg_name, req->result,
@@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
 		 * supported by the hardware, we need to handle it in software
 		 * by calling synchronous hash functions.
 		 */
-		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+		alg_name = crypto_ahash_alg_name(tfm);
 		hash = crypto_alloc_shash(alg_name, 0, 0);
 		if (IS_ERR(hash)) {
 			ret = PTR_ERR(hash);
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 71281a3bdbdc..d6d87332140a 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -231,7 +231,7 @@ struct iproc_ctx_s {
 
 	/*
 	 * shash descriptor - needed to perform incremental hashing in
-	 * in software, when hw doesn't support it.
+	 * software, when hw doesn't support it.
 	 */
 	struct shash_desc *shash;
 
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
index 8ec6edc69f3f..ae4791a8ec4a 100644
--- a/drivers/crypto/cavium/cpt/cpt_hw_types.h
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s {
  * Word0
  *  reserved_20_63:44 [63:20] Reserved.
  *  dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- *	to the CPT instruction doorbell count. Readback value is the the
+ *	to the CPT instruction doorbell count. Readback value is the
  *	current number of pending doorbell requests. If counter overflows
  *	CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
  *	zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 8c32d0eb8fcf..6872ac344001 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
 	const struct firmware *fw_entry;
 	struct device *dev = &cpt->pdev->dev;
 	struct ucode_header *ucode;
+	unsigned int code_length;
 	struct microcode *mcode;
 	int j, ret = 0;
 
@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
 	ucode = (struct ucode_header *)fw_entry->data;
 	mcode = &cpt->mcode[cpt->next_mc_idx];
 	memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
-	mcode->code_size = ntohl(ucode->code_length) * 2;
-	if (!mcode->code_size) {
+	code_length = ntohl(ucode->code_length);
+	if (code_length == 0 || code_length >= INT_MAX / 2) {
 		ret = -EINVAL;
 		goto fw_release;
 	}
+	mcode->code_size = code_length * 2;
 
 	mcode->is_ae = is_ae;
 	mcode->core_mask = 0ULL;
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 7df71fcebe8f..1046a746d36f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen,
 /* Legacy Compress framework start */
 int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
 {
-	int ret;
 	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
 
-	ret = zip_ctx_init(zip_ctx, 0);
-
-	return ret;
+	return zip_ctx_init(zip_ctx, 0);
 }
 
 int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
 {
-	int ret;
 	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
 
-	ret = zip_ctx_init(zip_ctx, 1);
-
-	return ret;
+	return zip_ctx_init(zip_ctx, 1);
 }
 
 void zip_free_comp_ctx(struct crypto_tfm *tfm)
@@ -227,24 +221,18 @@ int  zip_comp_compress(struct crypto_tfm *tfm,
 		       const u8 *src, unsigned int slen,
 		       u8 *dst, unsigned int *dlen)
 {
-	int ret;
 	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
 
-	ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
-	return ret;
+	return zip_compress(src, slen, dst, dlen, zip_ctx);
 }
 
 int  zip_comp_decompress(struct crypto_tfm *tfm,
 			 const u8 *src, unsigned int slen,
 			 u8 *dst, unsigned int *dlen)
 {
-	int ret;
 	struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
 
-	ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
-	return ret;
+	return zip_decompress(src, slen, dst, dlen, zip_ctx);
 } /* Legacy compress framework end */
 
 /* SCOMP framework start */
@@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm,
 		       const u8 *src, unsigned int slen,
 		       u8 *dst, unsigned int *dlen, void *ctx)
 {
-	int ret;
 	struct zip_kernel_ctx *zip_ctx  = ctx;
 
-	ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
-	return ret;
+	return zip_compress(src, slen, dst, dlen, zip_ctx);
 }
 
 int zip_scomp_decompress(struct crypto_scomp *tfm,
 			 const u8 *src, unsigned int slen,
 			 u8 *dst, unsigned int *dlen, void *ctx)
 {
-	int ret;
 	struct zip_kernel_ctx *zip_ctx = ctx;
 
-	ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
-	return ret;
+	return zip_decompress(src, slen, dst, dlen, zip_ctx);
 } /* SCOMP framework end */
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ec97daf0fcb7..278636ed251a 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
 	struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
 	struct scatterlist *iv_sg = NULL;
 	unsigned int iv_len = 0;
-	int ret;
 
 	if (!ctx->u.des3.key_len)
 		return -EINVAL;
@@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
 	rctx->cmd.u.des3.src_len = req->cryptlen;
 	rctx->cmd.u.des3.dst = req->dst;
 
-	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
-	return ret;
+	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 }
 
 static int ccp_des3_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 7d4b4ad1db1f..9f753cb4f5f1 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp)
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		chan = ccp->ccp_dma_chan + i;
 		dma_chan = &chan->dma_chan;
+
+		if (dma_chan->client_count)
+			dma_release_channel(dma_chan);
+
 		tasklet_kill(&chan->cleanup_tasklet);
 		list_del_rcu(&dma_chan->device_node);
 	}
@@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
 	if (!dmaengine)
 		return;
 
-	dma_async_device_unregister(dma_dev);
 	ccp_dma_release(ccp);
+	dma_async_device_unregister(dma_dev);
 
 	kmem_cache_destroy(ccp->dma_desc_cache);
 	kmem_cache_destroy(ccp->dma_cmd_cache);
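
Together, the two ccp-dmaengine.c hunks enforce a safe teardown order: channels still held by dmaengine clients are released first, the provider is unregistered second, and the caches are freed last. A condensed, illustrative sketch of that order:

	/* 1. drop channels that dmaengine clients still hold */
	if (dma_chan->client_count)
		dma_release_channel(dma_chan);

	/* 2. only then remove the provider from the dmaengine core */
	dma_async_device_unregister(dma_dev);

	/* 3. finally free the caches once nothing can submit descriptors */
	kmem_cache_destroy(ccp->dma_desc_cache);
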
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9f588c9728f8..06fc7156c04f 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -211,18 +211,24 @@ static int sev_read_init_ex_file(void)
 	if (IS_ERR(fp)) {
 		int ret = PTR_ERR(fp);
 
-		dev_err(sev->dev,
-			"SEV: could not open %s for read, error %d\n",
-			init_ex_path, ret);
+		if (ret == -ENOENT) {
+			dev_info(sev->dev,
+				"SEV: %s does not exist and will be created later.\n",
+				init_ex_path);
+			ret = 0;
+		} else {
+			dev_err(sev->dev,
+				"SEV: could not open %s for read, error %d\n",
+				init_ex_path, ret);
+		}
 		return ret;
 	}
 
 	nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
 	if (nread != NV_LENGTH) {
-		dev_err(sev->dev,
-			"SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
+		dev_info(sev->dev,
+			"SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
 			NV_LENGTH, nread);
-		return -EIO;
 	}
 
 	dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
@@ -231,7 +237,7 @@ static int sev_read_init_ex_file(void)
 	return 0;
 }
 
-static void sev_write_init_ex_file(void)
+static int sev_write_init_ex_file(void)
 {
 	struct sev_device *sev = psp_master->sev_data;
 	struct file *fp;
@@ -241,14 +247,16 @@ static void sev_write_init_ex_file(void)
 	lockdep_assert_held(&sev_cmd_mutex);
 
 	if (!sev_init_ex_buffer)
-		return;
+		return 0;
 
 	fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
 	if (IS_ERR(fp)) {
+		int ret = PTR_ERR(fp);
+
 		dev_err(sev->dev,
-			"SEV: could not open file for write, error %ld\n",
-			PTR_ERR(fp));
-		return;
+			"SEV: could not open file for write, error %d\n",
+			ret);
+		return ret;
 	}
 
 	nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
@@ -259,18 +267,20 @@ static void sev_write_init_ex_file(void)
 		dev_err(sev->dev,
 			"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
 			NV_LENGTH, nwrite);
-		return;
+		return -EIO;
 	}
 
 	dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+
+	return 0;
 }
 
-static void sev_write_init_ex_file_if_required(int cmd_id)
+static int sev_write_init_ex_file_if_required(int cmd_id)
 {
 	lockdep_assert_held(&sev_cmd_mutex);
 
 	if (!sev_init_ex_buffer)
-		return;
+		return 0;
 
 	/*
 	 * Only a few platform commands modify the SPI/NV area, but none of the
@@ -285,10 +295,10 @@ static void sev_write_init_ex_file_if_required(int cmd_id)
 	case SEV_CMD_PEK_GEN:
 		break;
 	default:
-		return;
+		return 0;
 	}
 
-	sev_write_init_ex_file();
+	return sev_write_init_ex_file();
 }
 
 static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
@@ -361,7 +371,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 			cmd, reg & PSP_CMDRESP_ERR_MASK);
 		ret = -EIO;
 	} else {
-		sev_write_init_ex_file_if_required(cmd);
+		ret = sev_write_init_ex_file_if_required(cmd);
 	}
 
 	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -410,17 +420,12 @@ static int __sev_init_locked(int *error)
 static int __sev_init_ex_locked(int *error)
 {
 	struct sev_data_init_ex data;
-	int ret;
 
 	memset(&data, 0, sizeof(data));
 	data.length = sizeof(data);
 	data.nv_address = __psp_pa(sev_init_ex_buffer);
 	data.nv_len = NV_LENGTH;
 
-	ret = sev_read_init_ex_file();
-	if (ret)
-		return ret;
-
 	if (sev_es_tmr) {
 		/*
 		 * Do not include the encryption mask on the physical
@@ -439,7 +444,7 @@ static int __sev_platform_init_locked(int *error)
 {
 	struct psp_device *psp = psp_master;
 	struct sev_device *sev;
-	int rc, psp_ret = -1;
+	int rc = 0, psp_ret = -1;
 	int (*init_function)(int *error);
 
 	if (!psp || !psp->sev_data)
@@ -450,8 +455,15 @@ static int __sev_platform_init_locked(int *error)
 	if (sev->state == SEV_STATE_INIT)
 		return 0;
 
-	init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
-			__sev_init_locked;
+	if (sev_init_ex_buffer) {
+		init_function = __sev_init_ex_locked;
+		rc = sev_read_init_ex_file();
+		if (rc)
+			return rc;
+	} else {
+		init_function = __sev_init_locked;
+	}
+
 	rc = init_function(&psp_ret);
 	if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
 		/*
@@ -744,6 +756,11 @@ static int sev_update_firmware(struct device *dev)
 	struct page *p;
 	u64 data_size;
 
+	if (!sev_version_greater_or_equal(0, 15)) {
+		dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
+		return -1;
+	}
+
 	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
 		dev_dbg(dev, "No SEV firmware file present\n");
 		return -1;
@@ -776,6 +793,14 @@ static int sev_update_firmware(struct device *dev)
 	data->len = firmware->size;
 
 	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
+	/*
+	 * A quirk to fix the committed TCB version when upgrading from a
+	 * firmware version earlier than 1.50.
+	 */
+	if (!ret && !sev_version_greater_or_equal(1, 50))
+		ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
 	if (ret)
 		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
 	else
@@ -1285,8 +1310,7 @@ void sev_pci_init(void)
 	if (sev_get_api_version())
 		goto err;
 
-	if (sev_version_greater_or_equal(0, 15) &&
-	    sev_update_firmware(sev->dev) == 0)
+	if (sev_update_firmware(sev->dev) == 0)
 		sev_get_api_version();
 
 	/* If an init_ex_path is provided rely on INIT_EX for PSP initialization
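
The read path above now treats a missing NV file as a first-boot condition rather than a hard failure. A hedged sketch of the pattern; open_nv_optional() is a hypothetical name (the driver goes through its open_file_as_root() helper):

	static int open_nv_optional(const char *path, struct file **fpp)
	{
		struct file *fp = filp_open(path, O_RDONLY, 0);

		if (IS_ERR(fp)) {
			int ret = PTR_ERR(fp);

			/* not created yet: succeed, write path creates it later */
			return ret == -ENOENT ? 0 : ret;
		}

		*fpp = fp;
		return 1;	/* caller reads, then filp_close(fp, NULL) */
	}
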
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 6140e4927322..9efd88f871d1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	ret = dma_map_sg(dev, sg, *nents, direction);
-	if (dma_mapping_error(dev, ret)) {
+	if (!ret) {
 		*nents = 0;
 		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
 		return -ENOMEM;
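
This one is a genuine API-misuse fix: dma_map_sg() returns the number of entries mapped, with 0 meaning failure, while dma_mapping_error() is only defined for dma_map_single()/dma_map_page() handles. The corrected check, in minimal form:

	int mapped = dma_map_sg(dev, sg, nents, direction);

	if (!mapped)		/* 0 entries mapped: the call failed */
		return -ENOMEM;

	/* on success, iterate 'mapped' (not 'nents') entries from here on */
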
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9a0558ed82f9..9f0b94c8e03d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -22,7 +22,8 @@ enum {
 	HPRE_CLUSTER0,
 	HPRE_CLUSTER1,
 	HPRE_CLUSTER2,
-	HPRE_CLUSTER3
+	HPRE_CLUSTER3,
+	HPRE_CLUSTERS_NUM_MAX
 };
 
 enum hpre_ctrl_dbgfs_file {
@@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file {
 	HPRE_DFX_FILE_NUM
 };
 
-#define HPRE_CLUSTERS_NUM_V2		(HPRE_CLUSTER3 + 1)
-#define HPRE_CLUSTERS_NUM_V3		1
-#define HPRE_CLUSTERS_NUM_MAX		HPRE_CLUSTERS_NUM_V2
 #define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
 
 struct hpre_debugfs_file {
@@ -105,5 +103,5 @@ struct hpre_sqe {
 struct hisi_qp *hpre_create_qp(u8 type);
 int hpre_algs_register(struct hisi_qm *qm);
 void hpre_algs_unregister(struct hisi_qm *qm);
-
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg);
 #endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 3ba6f15deafc..ef02dadd6217 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -51,6 +51,12 @@ struct hpre_ctx;
 #define HPRE_ECC_HW256_KSZ_B	32
 #define HPRE_ECC_HW384_KSZ_B	48
 
+/* capability register mask of driver */
+#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
+#define HPRE_DRV_DH_MASK_CAP		BIT(1)
+#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
+#define HPRE_DRV_X25519_MASK_CAP	BIT(5)
+
 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
 
 struct hpre_rsa_ctx {
@@ -147,7 +153,7 @@ static int hpre_alloc_req_id(struct hpre_ctx *ctx)
 	int id;
 
 	spin_lock_irqsave(&ctx->req_lock, flags);
-	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+	id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
 	spin_unlock_irqrestore(&ctx->req_lock, flags);
 
 	return id;
@@ -488,7 +494,7 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
 	qp->qp_ctx = ctx;
 	qp->req_cb = hpre_alg_cb;
 
-	ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+	ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
 	if (ret)
 		hpre_stop_qp_and_put(qp);
 
@@ -2002,55 +2008,53 @@ static struct kpp_alg dh = {
 	},
 };
 
-static struct kpp_alg ecdh_nist_p192 = {
-	.set_secret = hpre_ecdh_set_secret,
-	.generate_public_key = hpre_ecdh_compute_value,
-	.compute_shared_secret = hpre_ecdh_compute_value,
-	.max_size = hpre_ecdh_max_size,
-	.init = hpre_ecdh_nist_p192_init_tfm,
-	.exit = hpre_ecdh_exit_tfm,
-	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
-	.base = {
-		.cra_ctxsize = sizeof(struct hpre_ctx),
-		.cra_priority = HPRE_CRYPTO_ALG_PRI,
-		.cra_name = "ecdh-nist-p192",
-		.cra_driver_name = "hpre-ecdh-nist-p192",
-		.cra_module = THIS_MODULE,
-	},
-};
-
-static struct kpp_alg ecdh_nist_p256 = {
-	.set_secret = hpre_ecdh_set_secret,
-	.generate_public_key = hpre_ecdh_compute_value,
-	.compute_shared_secret = hpre_ecdh_compute_value,
-	.max_size = hpre_ecdh_max_size,
-	.init = hpre_ecdh_nist_p256_init_tfm,
-	.exit = hpre_ecdh_exit_tfm,
-	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
-	.base = {
-		.cra_ctxsize = sizeof(struct hpre_ctx),
-		.cra_priority = HPRE_CRYPTO_ALG_PRI,
-		.cra_name = "ecdh-nist-p256",
-		.cra_driver_name = "hpre-ecdh-nist-p256",
-		.cra_module = THIS_MODULE,
-	},
-};
-
-static struct kpp_alg ecdh_nist_p384 = {
-	.set_secret = hpre_ecdh_set_secret,
-	.generate_public_key = hpre_ecdh_compute_value,
-	.compute_shared_secret = hpre_ecdh_compute_value,
-	.max_size = hpre_ecdh_max_size,
-	.init = hpre_ecdh_nist_p384_init_tfm,
-	.exit = hpre_ecdh_exit_tfm,
-	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
-	.base = {
-		.cra_ctxsize = sizeof(struct hpre_ctx),
-		.cra_priority = HPRE_CRYPTO_ALG_PRI,
-		.cra_name = "ecdh-nist-p384",
-		.cra_driver_name = "hpre-ecdh-nist-p384",
-		.cra_module = THIS_MODULE,
-	},
+static struct kpp_alg ecdh_curves[] = {
+	{
+		.set_secret = hpre_ecdh_set_secret,
+		.generate_public_key = hpre_ecdh_compute_value,
+		.compute_shared_secret = hpre_ecdh_compute_value,
+		.max_size = hpre_ecdh_max_size,
+		.init = hpre_ecdh_nist_p192_init_tfm,
+		.exit = hpre_ecdh_exit_tfm,
+		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+		.base = {
+			.cra_ctxsize = sizeof(struct hpre_ctx),
+			.cra_priority = HPRE_CRYPTO_ALG_PRI,
+			.cra_name = "ecdh-nist-p192",
+			.cra_driver_name = "hpre-ecdh-nist-p192",
+			.cra_module = THIS_MODULE,
+		},
+	}, {
+		.set_secret = hpre_ecdh_set_secret,
+		.generate_public_key = hpre_ecdh_compute_value,
+		.compute_shared_secret = hpre_ecdh_compute_value,
+		.max_size = hpre_ecdh_max_size,
+		.init = hpre_ecdh_nist_p256_init_tfm,
+		.exit = hpre_ecdh_exit_tfm,
+		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+		.base = {
+			.cra_ctxsize = sizeof(struct hpre_ctx),
+			.cra_priority = HPRE_CRYPTO_ALG_PRI,
+			.cra_name = "ecdh-nist-p256",
+			.cra_driver_name = "hpre-ecdh-nist-p256",
+			.cra_module = THIS_MODULE,
+		},
+	}, {
+		.set_secret = hpre_ecdh_set_secret,
+		.generate_public_key = hpre_ecdh_compute_value,
+		.compute_shared_secret = hpre_ecdh_compute_value,
+		.max_size = hpre_ecdh_max_size,
+		.init = hpre_ecdh_nist_p384_init_tfm,
+		.exit = hpre_ecdh_exit_tfm,
+		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+		.base = {
+			.cra_ctxsize = sizeof(struct hpre_ctx),
+			.cra_priority = HPRE_CRYPTO_ALG_PRI,
+			.cra_name = "ecdh-nist-p384",
+			.cra_driver_name = "hpre-ecdh-nist-p384",
+			.cra_module = THIS_MODULE,
+		},
+	}
 };
 
 static struct kpp_alg curve25519_alg = {
@@ -2070,78 +2074,144 @@ static struct kpp_alg curve25519_alg = {
 	},
 };
 
-
-static int hpre_register_ecdh(void)
+static int hpre_register_rsa(struct hisi_qm *qm)
 {
 	int ret;
 
-	ret = crypto_register_kpp(&ecdh_nist_p192);
-	if (ret)
-		return ret;
+	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+		return 0;
 
-	ret = crypto_register_kpp(&ecdh_nist_p256);
+	rsa.base.cra_flags = 0;
+	ret = crypto_register_akcipher(&rsa);
 	if (ret)
-		goto unregister_ecdh_p192;
+		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
 
-	ret = crypto_register_kpp(&ecdh_nist_p384);
+	return ret;
+}
+
+static void hpre_unregister_rsa(struct hisi_qm *qm)
+{
+	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+		return;
+
+	crypto_unregister_akcipher(&rsa);
+}
+
+static int hpre_register_dh(struct hisi_qm *qm)
+{
+	int ret;
+
+	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+		return 0;
+
+	ret = crypto_register_kpp(&dh);
 	if (ret)
-		goto unregister_ecdh_p256;
+		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
+
+	return ret;
+}
+
+static void hpre_unregister_dh(struct hisi_qm *qm)
+{
+	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+		return;
+
+	crypto_unregister_kpp(&dh);
+}
+
+static int hpre_register_ecdh(struct hisi_qm *qm)
+{
+	int ret, i;
+
+	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
+		ret = crypto_register_kpp(&ecdh_curves[i]);
+		if (ret) {
+			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
+				ecdh_curves[i].base.cra_name, ret);
+			goto unreg_kpp;
+		}
+	}
 
 	return 0;
 
-unregister_ecdh_p256:
-	crypto_unregister_kpp(&ecdh_nist_p256);
-unregister_ecdh_p192:
-	crypto_unregister_kpp(&ecdh_nist_p192);
+unreg_kpp:
+	for (--i; i >= 0; --i)
+		crypto_unregister_kpp(&ecdh_curves[i]);
+
 	return ret;
 }
 
-static void hpre_unregister_ecdh(void)
+static void hpre_unregister_ecdh(struct hisi_qm *qm)
 {
-	crypto_unregister_kpp(&ecdh_nist_p384);
-	crypto_unregister_kpp(&ecdh_nist_p256);
-	crypto_unregister_kpp(&ecdh_nist_p192);
+	int i;
+
+	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+		return;
+
+	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
+		crypto_unregister_kpp(&ecdh_curves[i]);
+}
+
+static int hpre_register_x25519(struct hisi_qm *qm)
+{
+	int ret;
+
+	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+		return 0;
+
+	ret = crypto_register_kpp(&curve25519_alg);
+	if (ret)
+		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
+
+	return ret;
+}
+
+static void hpre_unregister_x25519(struct hisi_qm *qm)
+{
+	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+		return;
+
+	crypto_unregister_kpp(&curve25519_alg);
 }
 
 int hpre_algs_register(struct hisi_qm *qm)
 {
 	int ret;
 
-	rsa.base.cra_flags = 0;
-	ret = crypto_register_akcipher(&rsa);
+	ret = hpre_register_rsa(qm);
 	if (ret)
 		return ret;
 
-	ret = crypto_register_kpp(&dh);
+	ret = hpre_register_dh(qm);
 	if (ret)
 		goto unreg_rsa;
 
-	if (qm->ver >= QM_HW_V3) {
-		ret = hpre_register_ecdh();
-		if (ret)
-			goto unreg_dh;
-		ret = crypto_register_kpp(&curve25519_alg);
-		if (ret)
-			goto unreg_ecdh;
-	}
-	return 0;
+	ret = hpre_register_ecdh(qm);
+	if (ret)
+		goto unreg_dh;
+
+	ret = hpre_register_x25519(qm);
+	if (ret)
+		goto unreg_ecdh;
+
+	return ret;
 
 unreg_ecdh:
-	hpre_unregister_ecdh();
+	hpre_unregister_ecdh(qm);
 unreg_dh:
-	crypto_unregister_kpp(&dh);
+	hpre_unregister_dh(qm);
 unreg_rsa:
-	crypto_unregister_akcipher(&rsa);
+	hpre_unregister_rsa(qm);
 	return ret;
 }
 
 void hpre_algs_unregister(struct hisi_qm *qm)
 {
-	if (qm->ver >= QM_HW_V3) {
-		crypto_unregister_kpp(&curve25519_alg);
-		hpre_unregister_ecdh();
-	}
-
-	crypto_unregister_kpp(&dh);
-	crypto_unregister_akcipher(&rsa);
+	hpre_unregister_x25519(qm);
+	hpre_unregister_ecdh(qm);
+	hpre_unregister_dh(qm);
+	hpre_unregister_rsa(qm);
 }
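
Every register/unregister pair above is gated on the same capability bit, which keeps unregistration symmetric with registration. A condensed sketch of the gate, using the real HPRE_DRV_DH_MASK_CAP mask with error handling trimmed:

	static int hpre_register_dh_sketch(struct hisi_qm *qm)
	{
		/* bit clear in the HW capability bitmap: silently skip */
		if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
			return 0;

		return crypto_register_kpp(&dh);
	}
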
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 9d529df0eab9..471e5ca720f5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -53,9 +53,7 @@
 #define HPRE_CORE_IS_SCHD_OFFSET	0x90
 
 #define HPRE_RAS_CE_ENB			0x301410
-#define HPRE_HAC_RAS_CE_ENABLE		(BIT(0) | BIT(22) | BIT(23))
 #define HPRE_RAS_NFE_ENB		0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE		0x3ffffe
 #define HPRE_RAS_FE_ENB			0x301418
 #define HPRE_OOO_SHUTDOWN_SEL		0x301a3c
 #define HPRE_HAC_RAS_FE_ENABLE		0
@@ -79,8 +77,6 @@
 #define HPRE_QM_AXI_CFG_MASK		GENMASK(15, 0)
 #define HPRE_QM_VFG_AX_MASK		GENMASK(7, 0)
 #define HPRE_BD_USR_MASK		GENMASK(1, 0)
-#define HPRE_CLUSTER_CORE_MASK_V2	GENMASK(3, 0)
-#define HPRE_CLUSTER_CORE_MASK_V3	GENMASK(7, 0)
 #define HPRE_PREFETCH_CFG		0x301130
 #define HPRE_SVA_PREFTCH_DFX		0x30115C
 #define HPRE_PREFETCH_ENABLE		(~(BIT(0) | BIT(30)))
@@ -122,6 +118,8 @@
 #define HPRE_DFX_COMMON2_LEN		0xE
 #define HPRE_DFX_CORE_LEN		0x43
 
+#define HPRE_DEV_ALG_MAX_LEN	256
+
 static const char hpre_name[] = "hisi_hpre";
 static struct dentry *hpre_debugfs_root;
 static const struct pci_device_id hpre_dev_ids[] = {
@@ -137,6 +135,38 @@ struct hpre_hw_error {
 	const char *msg;
 };
 
+struct hpre_dev_alg {
+	u32 alg_msk;
+	const char *alg;
+};
+
+static const struct hpre_dev_alg hpre_dev_algs[] = {
+	{
+		.alg_msk = BIT(0),
+		.alg = "rsa\n"
+	}, {
+		.alg_msk = BIT(1),
+		.alg = "dh\n"
+	}, {
+		.alg_msk = BIT(2),
+		.alg = "ecdh\n"
+	}, {
+		.alg_msk = BIT(3),
+		.alg = "ecdsa\n"
+	}, {
+		.alg_msk = BIT(4),
+		.alg = "sm2\n"
+	}, {
+		.alg_msk = BIT(5),
+		.alg = "x25519\n"
+	}, {
+		.alg_msk = BIT(6),
+		.alg = "x448\n"
+	}, {
+		/* sentinel */
+	}
+};
+
 static struct hisi_qm_list hpre_devices = {
 	.register_to_crypto	= hpre_algs_register,
 	.unregister_from_crypto	= hpre_algs_unregister,
@@ -147,6 +177,62 @@ static const char * const hpre_debug_file_name[] = {
 	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
 };
 
+enum hpre_cap_type {
+	HPRE_QM_NFE_MASK_CAP,
+	HPRE_QM_RESET_MASK_CAP,
+	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
+	HPRE_QM_CE_MASK_CAP,
+	HPRE_NFE_MASK_CAP,
+	HPRE_RESET_MASK_CAP,
+	HPRE_OOO_SHUTDOWN_MASK_CAP,
+	HPRE_CE_MASK_CAP,
+	HPRE_CLUSTER_NUM_CAP,
+	HPRE_CORE_TYPE_NUM_CAP,
+	HPRE_CORE_NUM_CAP,
+	HPRE_CLUSTER_CORE_NUM_CAP,
+	HPRE_CORE_ENABLE_BITMAP_CAP,
+	HPRE_DRV_ALG_BITMAP_CAP,
+	HPRE_DEV_ALG_BITMAP_CAP,
+	HPRE_CORE1_ALG_BITMAP_CAP,
+	HPRE_CORE2_ALG_BITMAP_CAP,
+	HPRE_CORE3_ALG_BITMAP_CAP,
+	HPRE_CORE4_ALG_BITMAP_CAP,
+	HPRE_CORE5_ALG_BITMAP_CAP,
+	HPRE_CORE6_ALG_BITMAP_CAP,
+	HPRE_CORE7_ALG_BITMAP_CAP,
+	HPRE_CORE8_ALG_BITMAP_CAP,
+	HPRE_CORE9_ALG_BITMAP_CAP,
+	HPRE_CORE10_ALG_BITMAP_CAP
+};
+
+static const struct hisi_qm_cap_info hpre_basic_info[] = {
+	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
+	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
+	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
+	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
+	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0,  0x4, 0x1},
+	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
+	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
+	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
+	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
+	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
+	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
+	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
+	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+};
+
 static const struct hpre_hw_error hpre_hw_errors[] = {
 	{
 		.int_msk = BIT(0),
@@ -262,6 +348,46 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
 	},
 };
 
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+{
+	u32 cap_val;
+
+	cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
+	if (alg & cap_val)
+		return true;
+
+	return false;
+}
+
+static int hpre_set_qm_algs(struct hisi_qm *qm)
+{
+	struct device *dev = &qm->pdev->dev;
+	char *algs, *ptr;
+	u32 alg_msk;
+	int i;
+
+	if (!qm->use_sva)
+		return 0;
+
+	algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+	if (!algs)
+		return -ENOMEM;
+
+	alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+
+	for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+		if (alg_msk & hpre_dev_algs[i].alg_msk)
+			strcat(algs, hpre_dev_algs[i].alg);
+
+	ptr = strrchr(algs, '\n');
+	if (ptr)
+		*ptr = '\0';
+
+	qm->uacce->algs = algs;
+
+	return 0;
+}
+
 static int hpre_diff_regs_show(struct seq_file *s, void *unused)
 {
 	struct hisi_qm *qm = s->private;
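
hpre_set_qm_algs() assembles the uacce "algs" string from newline-terminated tokens and trims the final newline. A worked example of the result, assuming only BIT(0) and BIT(1) are set in the device bitmap:

	char algs[HPRE_DEV_ALG_MAX_LEN] = "";

	strcat(algs, "rsa\n");		/* BIT(0) set */
	strcat(algs, "dh\n");		/* BIT(1) set */
	/* strrchr()/'\0' trim leaves "rsa\ndh" for sysfs */
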
@@ -330,14 +456,12 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
 
 static inline int hpre_cluster_num(struct hisi_qm *qm)
 {
-	return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
-		HPRE_CLUSTERS_NUM_V2;
+	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
 }
 
 static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
 {
-	return (qm->ver >= QM_HW_V3) ?
-		HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
 }
 
 struct hisi_qp *hpre_create_qp(u8 type)
@@ -457,7 +581,7 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
 	u32 val;
 	int ret;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	/* Enable prefetch */
@@ -478,7 +602,7 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
 	u32 val;
 	int ret;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
@@ -630,7 +754,8 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
 	if (enable) {
 		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
-		val2 = HPRE_HAC_RAS_NFE_ENABLE;
+		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
+					   HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
 	} else {
 		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
 		val2 = 0x0;
@@ -644,21 +769,30 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 
 static void hpre_hw_error_disable(struct hisi_qm *qm)
 {
-	/* disable hpre hw error interrupts */
-	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+	u32 ce, nfe;
+
+	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
 
+	/* disable hpre hw error interrupts */
+	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
 	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
 	hpre_master_ooo_ctrl(qm, false);
 }
 
 static void hpre_hw_error_enable(struct hisi_qm *qm)
 {
+	u32 ce, nfe;
+
+	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+
 	/* clear HPRE hw error source if having */
-	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
 
 	/* configure error type */
-	writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
-	writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
+	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
 	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
 
 	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
@@ -708,7 +842,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
 	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
 }
 
-static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
 {
 	struct hisi_qm *qm = hpre_file_to_qm(file);
 	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
@@ -716,8 +850,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
 			       HPRE_CLSTR_ADDR_INTRVL;
 
 	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
-
-	return 0;
 }
 
 static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -792,9 +924,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
 			goto err_input;
 		break;
 	case HPRE_CLUSTER_CTRL:
-		ret = hpre_cluster_inqry_write(file, val);
-		if (ret)
-			goto err_input;
+		hpre_cluster_inqry_write(file, val);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1006,15 +1136,13 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
 
 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
+	int ret;
+
 	if (pdev->revision == QM_HW_V1) {
 		pci_warn(pdev, "HPRE version 1 is not supported!\n");
 		return -EINVAL;
 	}
 
-	if (pdev->revision >= QM_HW_V3)
-		qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
-	else
-		qm->algs = "rsa\ndh";
 	qm->mode = uacce_mode;
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
@@ -1030,7 +1158,19 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 		qm->qm_list = &hpre_devices;
 	}
 
-	return hisi_qm_init(qm);
+	ret = hisi_qm_init(qm);
+	if (ret) {
+		pci_err(pdev, "Failed to init hpre qm configures!\n");
+		return ret;
+	}
+
+	ret = hpre_set_qm_algs(qm);
+	if (ret) {
+		pci_err(pdev, "Failed to set hpre algs!\n");
+		hisi_qm_uninit(qm);
+	}
+
+	return ret;
 }
 
 static int hpre_show_last_regs_init(struct hisi_qm *qm)
@@ -1129,7 +1269,11 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
 
 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
 {
+	u32 nfe;
+
 	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
 }
 
 static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,14 +1291,20 @@ static void hpre_err_info_init(struct hisi_qm *qm)
 {
 	struct hisi_qm_err_info *err_info = &qm->err_info;
 
-	err_info->ce = QM_BASE_CE;
-	err_info->fe = 0;
-	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
-				   HPRE_OOO_ECC_2BIT_ERR;
-	err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+	err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
+	err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+	err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
+	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+			HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+			HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+			HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+			HPRE_RESET_MASK_CAP, qm->cap_ver);
 	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
 	err_info->acpi_rst = "HRST";
-	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
 }
 
 static const struct hisi_qm_err_ini hpre_err_ini = {
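
The err_info fields are now resolved through the hpre_basic_info table instead of per-version constants. A worked example of one lookup, assuming a V3 part whose capability registers are readable: the HPRE_CE_MASK_CAP row is {0x3138, shift 0, GENMASK(31, 0), ...}, so the value reduces to a plain register read:

	/* V3 with readable caps: straight register read */
	u32 ce = (readl(qm->io_base + 0x3138) >> 0) & GENMASK(31, 0);

	/* V2, or caps not readable: the row's fixed default (0x1) is used */
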
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad83c194d664..8b387de69d22 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -22,20 +22,17 @@
 #define QM_VF_AEQ_INT_MASK		0x4
 #define QM_VF_EQ_INT_SOURCE		0x8
 #define QM_VF_EQ_INT_MASK		0xc
-#define QM_IRQ_NUM_V1			1
-#define QM_IRQ_NUM_PF_V2		4
-#define QM_IRQ_NUM_VF_V2		2
-#define QM_IRQ_NUM_VF_V3		3
 
-#define QM_EQ_EVENT_IRQ_VECTOR		0
-#define QM_AEQ_EVENT_IRQ_VECTOR		1
-#define QM_CMD_EVENT_IRQ_VECTOR		2
-#define QM_ABNORMAL_EVENT_IRQ_VECTOR	3
+#define QM_IRQ_VECTOR_MASK		GENMASK(15, 0)
+#define QM_IRQ_TYPE_MASK		GENMASK(15, 0)
+#define QM_IRQ_TYPE_SHIFT		16
+#define QM_ABN_IRQ_TYPE_MASK		GENMASK(7, 0)
 
 /* mailbox */
 #define QM_MB_PING_ALL_VFS		0xffff
 #define QM_MB_CMD_DATA_SHIFT		32
 #define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)
+#define QM_MB_STATUS_MASK		GENMASK(12, 9)
 
 /* sqc shift */
 #define QM_SQ_HOP_NUM_SHIFT		0
@@ -77,6 +74,9 @@
 #define QM_EQ_OVERFLOW			1
 #define QM_CQE_ERROR			2
 
+#define QM_XQ_DEPTH_SHIFT		16
+#define QM_XQ_DEPTH_MASK		GENMASK(15, 0)
+
 #define QM_DOORBELL_CMD_SQ		0
 #define QM_DOORBELL_CMD_CQ		1
 #define QM_DOORBELL_CMD_EQ		2
@@ -86,11 +86,7 @@
 #define QM_DB_CMD_SHIFT_V1		16
 #define QM_DB_INDEX_SHIFT_V1		32
 #define QM_DB_PRIORITY_SHIFT_V1		48
-#define QM_QUE_ISO_CFG_V		0x0030
 #define QM_PAGE_SIZE			0x0034
-#define QM_QUE_ISO_EN			0x100154
-#define QM_CAPBILITY			0x100158
-#define QM_QP_NUN_MASK			GENMASK(10, 0)
 #define QM_QP_DB_INTERVAL		0x10000
 
 #define QM_MEM_START_INIT		0x100040
@@ -126,7 +122,6 @@
 #define QM_DFX_CNT_CLR_CE		0x100118
 
 #define QM_ABNORMAL_INT_SOURCE		0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR	GENMASK(14, 0)
 #define QM_ABNORMAL_INT_MASK		0x100004
 #define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
 #define QM_ABNORMAL_INT_STATUS		0x100008
@@ -144,8 +139,10 @@
 #define QM_RAS_NFE_ENABLE		0x1000f4
 #define QM_RAS_CE_THRESHOLD		0x1000f8
 #define QM_RAS_CE_TIMES_PER_IRQ		1
-#define QM_RAS_MSI_INT_SEL		0x1040f4
 #define QM_OOO_SHUTDOWN_SEL		0x1040f8
+#define QM_ECC_MBIT			BIT(2)
+#define QM_DB_TIMEOUT			BIT(10)
+#define QM_OF_FIFO_OF			BIT(11)
 
 #define QM_RESET_WAIT_TIMEOUT		400
 #define QM_PEH_VENDOR_ID		0x1000d8
@@ -205,6 +202,8 @@
 #define MAX_WAIT_COUNTS			1000
 #define QM_CACHE_WB_START		0x204
 #define QM_CACHE_WB_DONE		0x208
+#define QM_FUNC_CAPS_REG		0x3100
+#define QM_CAPBILITY_VERSION		GENMASK(7, 0)
 
 #define PCI_BAR_2			2
 #define PCI_BAR_4			4
@@ -221,7 +220,6 @@
 #define WAIT_PERIOD			20
 #define REMOVE_WAIT_DELAY		10
 #define QM_SQE_ADDR_MASK		GENMASK(7, 0)
-#define QM_EQ_DEPTH			(1024 * 2)
 
 #define QM_DRIVER_REMOVING		0
 #define QM_RST_SCHED			1
@@ -270,8 +268,8 @@
 	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
 	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
 
-#define QM_MK_CQC_DW3_V2(cqe_sz) \
-	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
+#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
+	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
 
 #define QM_MK_SQC_W13(priority, orders, alg_type) \
 	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
@@ -284,8 +282,8 @@
 	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
 	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
 
-#define QM_MK_SQC_DW3_V2(sqe_sz) \
-	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
+#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
+	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
 
 #define INIT_QC_COMMON(qc, base, pasid) do {			\
 	(qc)->head = 0;						\
@@ -329,6 +327,48 @@ enum qm_mb_cmd {
 	QM_VF_GET_QOS,
 };
 
+enum qm_basic_type {
+	QM_TOTAL_QP_NUM_CAP = 0x0,
+	QM_FUNC_MAX_QP_CAP,
+	QM_XEQ_DEPTH_CAP,
+	QM_QP_DEPTH_CAP,
+	QM_EQ_IRQ_TYPE_CAP,
+	QM_AEQ_IRQ_TYPE_CAP,
+	QM_ABN_IRQ_TYPE_CAP,
+	QM_PF2VF_IRQ_TYPE_CAP,
+	QM_PF_IRQ_NUM_CAP,
+	QM_VF_IRQ_NUM_CAP,
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+	{QM_SUPPORT_DB_ISOLATION, 0x30,   0, BIT(0),  0x0, 0x0, 0x0},
+	{QM_SUPPORT_FUNC_QOS,     0x3100, 0, BIT(8),  0x0, 0x0, 0x1},
+	{QM_SUPPORT_STOP_QP,      0x3100, 0, BIT(9),  0x0, 0x0, 0x1},
+	{QM_SUPPORT_MB_COMMAND,   0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
+	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
+	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
+};
+
+static const struct hisi_qm_cap_info qm_basic_info[] = {
+	{QM_TOTAL_QP_NUM_CAP,   0x100158, 0,  GENMASK(10, 0), 0x1000,    0x400,     0x400},
+	{QM_FUNC_MAX_QP_CAP,    0x100158, 11, GENMASK(10, 0), 0x1000,    0x400,     0x400},
+	{QM_XEQ_DEPTH_CAP,      0x3104,   0,  GENMASK(15, 0), 0x800,     0x4000800, 0x4000800},
+	{QM_QP_DEPTH_CAP,       0x3108,   0,  GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+	{QM_EQ_IRQ_TYPE_CAP,    0x310c,   0,  GENMASK(31, 0), 0x10000,   0x10000,   0x10000},
+	{QM_AEQ_IRQ_TYPE_CAP,   0x3110,   0,  GENMASK(31, 0), 0x0,       0x10001,   0x10001},
+	{QM_ABN_IRQ_TYPE_CAP,   0x3114,   0,  GENMASK(31, 0), 0x0,       0x10003,   0x10003},
+	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118,   0,  GENMASK(31, 0), 0x0,       0x0,       0x10002},
+	{QM_PF_IRQ_NUM_CAP,     0x311c,   16, GENMASK(15, 0), 0x1,       0x4,       0x4},
+	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
+};
+
 struct qm_cqe {
 	__le32 rsvd0;
 	__le16 cmd_id;
@@ -421,15 +461,11 @@ struct hisi_qm_hw_ops {
 	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
 	void (*qm_db)(struct hisi_qm *qm, u16 qn,
 		      u8 cmd, u16 index, u8 priority);
-	u32 (*get_irq_num)(struct hisi_qm *qm);
 	int (*debug_init)(struct hisi_qm *qm);
-	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
+	void (*hw_error_init)(struct hisi_qm *qm);
 	void (*hw_error_uninit)(struct hisi_qm *qm);
 	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
-	int (*stop_qp)(struct hisi_qp *qp);
 	int (*set_msi)(struct hisi_qm *qm, bool set);
-	int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
-	int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
 };
 
 struct qm_dfx_item {
@@ -533,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
 	{50100, 100000, 19}
 };
 
+static void qm_irqs_unregister(struct hisi_qm *qm);
+
 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
 {
 	enum qm_state curr = atomic_read(&qm->status.flags);
@@ -623,22 +661,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
 }
 
 /* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
+static bool qm_check_dev_error(struct hisi_qm *qm)
 {
 	u32 val, dev_val;
 
 	if (qm->fun_type == QM_HW_VF)
-		return 0;
+		return false;
 
-	val = qm_get_hw_error_status(qm);
-	dev_val = qm_get_dev_err_status(qm);
+	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
+	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
 
-	if (qm->ver < QM_HW_V3)
-		return (val & QM_ECC_MBIT) ||
-		       (dev_val & qm->err_info.ecc_2bits_mask);
-
-	return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
-	       (dev_val & (~qm->err_info.dev_ce_mask));
+	return val || dev_val;
 }
 
 static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -728,8 +761,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
 
 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 {
+	int ret;
+	u32 val;
+
 	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
 		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+		ret = -EBUSY;
 		goto mb_busy;
 	}
 
@@ -737,6 +774,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 
 	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
 		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+		ret = -ETIMEDOUT;
+		goto mb_busy;
+	}
+
+	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
+	if (val & QM_MB_STATUS_MASK) {
+		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+		ret = -EIO;
 		goto mb_busy;
 	}
 
@@ -744,7 +789,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 
 mb_busy:
 	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
-	return -EBUSY;
+	return ret;
 }
 
 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
@@ -828,25 +873,52 @@ static int qm_dev_mem_reset(struct hisi_qm *qm)
 					  POLL_TIMEOUT);
 }
 
-static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
+/**
+ * hisi_qm_get_hw_info() - Get device information.
+ * @qm: The qm whose information is being queried.
+ * @info_table: Capability table describing where each item lives.
+ * @index: Index of the wanted item in @info_table.
+ * @is_read: Whether to read the value from hardware; if false, the
+ *	     fixed per-version default from the table is returned.
+ *
+ * This function returns the device information the caller needs.
+ */
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+			const struct hisi_qm_cap_info *info_table,
+			u32 index, bool is_read)
 {
-	return QM_IRQ_NUM_V1;
+	u32 val;
+
+	switch (qm->ver) {
+	case QM_HW_V1:
+		return info_table[index].v1_val;
+	case QM_HW_V2:
+		return info_table[index].v2_val;
+	default:
+		if (!is_read)
+			return info_table[index].v3_val;
+
+		val = readl(qm->io_base + info_table[index].offset);
+		return (val >> info_table[index].shift) & info_table[index].mask;
+	}
 }
+EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
 
-static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
+static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+			     u16 *high_bits, enum qm_basic_type type)
 {
-	if (qm->fun_type == QM_HW_PF)
-		return QM_IRQ_NUM_PF_V2;
-	else
-		return QM_IRQ_NUM_VF_V2;
+	u32 depth;
+
+	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+	*low_bits = depth & QM_XQ_DEPTH_MASK;
+	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
 }
 
-static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+static u32 qm_get_irq_num(struct hisi_qm *qm)
 {
 	if (qm->fun_type == QM_HW_PF)
-		return QM_IRQ_NUM_PF_V2;
+		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
 
-	return QM_IRQ_NUM_VF_V3;
+	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
 }
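
qm_get_xqc_depth() splits one capability word into two u16 depths. A worked example using the QM_XEQ_DEPTH_CAP default of 0x4000800 from qm_basic_info:

	u16 eq_depth, aeq_depth;

	qm_get_xqc_depth(qm, &eq_depth, &aeq_depth, QM_XEQ_DEPTH_CAP);
	/* 0x4000800: eq_depth = 0x800 (2048), aeq_depth = 0x400 (1024) */
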
 
 static int qm_pm_get_sync(struct hisi_qm *qm)
@@ -854,7 +926,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm)
 	struct device *dev = &qm->pdev->dev;
 	int ret;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return 0;
 
 	ret = pm_runtime_resume_and_get(dev);
@@ -870,7 +942,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return;
 
 	pm_runtime_mark_last_busy(dev);
@@ -879,7 +951,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
 
 static void qm_cq_head_update(struct hisi_qp *qp)
 {
-	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
 		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
 		qp->qp_status.cq_head = 0;
 	} else {
@@ -911,6 +983,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 {
 	struct hisi_qm *qm = poll_data->qm;
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+	u16 eq_depth = qm->eq_depth;
 	int eqe_num = 0;
 	u16 cqn;
 
@@ -919,7 +992,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 		poll_data->qp_finish_id[eqe_num] = cqn;
 		eqe_num++;
 
-		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
+		if (qm->status.eq_head == eq_depth - 1) {
 			qm->status.eqc_phase = !qm->status.eqc_phase;
 			eqe = qm->eqe;
 			qm->status.eq_head = 0;
@@ -928,7 +1001,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 			qm->status.eq_head++;
 		}
 
-		if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+		if (eqe_num == (eq_depth >> 1) - 1)
 			break;
 	}
 
@@ -1068,6 +1141,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 {
 	struct hisi_qm *qm = data;
 	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+	u16 aeq_depth = qm->aeq_depth;
 	u32 type, qp_id;
 
 	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
@@ -1092,7 +1166,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
 			break;
 		}
 
-		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
+		if (qm->status.aeq_head == aeq_depth - 1) {
 			qm->status.aeqc_phase = !qm->status.aeqc_phase;
 			aeqe = qm->aeqe;
 			qm->status.aeq_head = 0;
@@ -1118,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-static void qm_irq_unregister(struct hisi_qm *qm)
-{
-	struct pci_dev *pdev = qm->pdev;
-
-	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-
-	if (qm->ver > QM_HW_V1) {
-		free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-
-		if (qm->fun_type == QM_HW_PF)
-			free_irq(pci_irq_vector(pdev,
-				 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-	}
-
-	if (qm->ver > QM_HW_V2)
-		free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
-}
-
 static void qm_init_qp_status(struct hisi_qp *qp)
 {
 	struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1151,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm)
 	struct device *dev = &qm->pdev->dev;
 	u32 page_type = 0x0;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	switch (PAGE_SIZE) {
@@ -1270,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 			}
 			break;
 		case SHAPER_VFT:
-			if (qm->ver >= QM_HW_V3) {
+			if (factor) {
 				tmp = factor->cir_b |
 				(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
 				(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
@@ -1288,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
 			     u32 fun_num, u32 base, u32 number)
 {
-	struct qm_shaper_factor *factor = &qm->factor[fun_num];
+	struct qm_shaper_factor *factor = NULL;
 	unsigned int val;
 	int ret;
 
+	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		factor = &qm->factor[fun_num];
+
 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
 					 val & BIT(0), POLL_PERIOD,
 					 POLL_TIMEOUT);
@@ -1349,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
 	}
 
 	/* init default shaper qos val */
-	if (qm->ver >= QM_HW_V3) {
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
 		ret = qm_shaper_init_vft(qm, fun_num);
 		if (ret)
 			goto back_sqc_cqc;
@@ -1357,11 +1416,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
 
 	return 0;
 back_sqc_cqc:
-	for (i = SQC_VFT; i <= CQC_VFT; i++) {
-		ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
-		if (ret)
-			return ret;
-	}
+	for (i = SQC_VFT; i <= CQC_VFT; i++)
+		qm_set_vft_common(qm, i, fun_num, 0, 0);
+
 	return ret;
 }
 
@@ -1857,39 +1914,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
 	kfree(ctx_addr);
 }
 
-static int dump_show(struct hisi_qm *qm, void *info,
+static void dump_show(struct hisi_qm *qm, void *info,
 		     unsigned int info_size, char *info_name)
 {
 	struct device *dev = &qm->pdev->dev;
-	u8 *info_buf, *info_curr = info;
+	u8 *info_curr = info;
 	u32 i;
 #define BYTE_PER_DW	4
 
-	info_buf = kzalloc(info_size, GFP_KERNEL);
-	if (!info_buf)
-		return -ENOMEM;
-
-	for (i = 0; i < info_size; i++, info_curr++) {
-		if (i % BYTE_PER_DW == 0)
-			info_buf[i + 3UL] = *info_curr;
-		else if (i % BYTE_PER_DW == 1)
-			info_buf[i + 1UL] = *info_curr;
-		else if (i % BYTE_PER_DW == 2)
-			info_buf[i - 1] = *info_curr;
-		else if (i % BYTE_PER_DW == 3)
-			info_buf[i - 3] = *info_curr;
-	}
-
 	dev_info(dev, "%s DUMP\n", info_name);
-	for (i = 0; i < info_size; i += BYTE_PER_DW) {
+	for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
 		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
-			info_buf[i], info_buf[i + 1UL],
-			info_buf[i + 2UL], info_buf[i + 3UL]);
+			*(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
 	}
-
-	kfree(info_buf);
-
-	return 0;
 }
 
 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
@@ -1929,23 +1966,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
 		if (qm->sqc) {
 			sqc_curr = qm->sqc + qp_id;
 
-			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
-					"SOFT SQC");
-			if (ret)
-				dev_info(dev, "Show soft sqc failed!\n");
+			dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
 		}
 		up_read(&qm->qps_lock);
 
-		goto err_free_ctx;
+		goto free_ctx;
 	}
 
-	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
-	if (ret)
-		dev_info(dev, "Show hw sqc failed!\n");
+	dump_show(qm, sqc, sizeof(*sqc), "SQC");
 
-err_free_ctx:
+free_ctx:
 	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
-	return ret;
+	return 0;
 }
 
 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
@@ -1975,23 +2007,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
 		if (qm->cqc) {
 			cqc_curr = qm->cqc + qp_id;
 
-			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
-					"SOFT CQC");
-			if (ret)
-				dev_info(dev, "Show soft cqc failed!\n");
+			dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
 		}
 		up_read(&qm->qps_lock);
 
-		goto err_free_ctx;
+		goto free_ctx;
 	}
 
-	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
-	if (ret)
-		dev_info(dev, "Show hw cqc failed!\n");
+	dump_show(qm, cqc, sizeof(*cqc), "CQC");
 
-err_free_ctx:
+free_ctx:
 	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
-	return ret;
+	return 0;
 }
 
 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
@@ -2015,9 +2042,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
 	if (ret)
 		goto err_free_ctx;
 
-	ret = dump_show(qm, xeqc, size, name);
-	if (ret)
-		dev_info(dev, "Show hw %s failed!\n", name);
+	dump_show(qm, xeqc, size, name);
 
 err_free_ctx:
 	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
@@ -2025,7 +2050,7 @@ err_free_ctx:
 }
 
 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
-			      u32 *e_id, u32 *q_id)
+			      u32 *e_id, u32 *q_id, u16 q_depth)
 {
 	struct device *dev = &qm->pdev->dev;
 	unsigned int qp_num = qm->qp_num;
@@ -2051,8 +2076,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
 	}
 
 	ret = kstrtou32(presult, 0, e_id);
-	if (ret || *e_id >= QM_Q_DEPTH) {
-		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+	if (ret || *e_id >= q_depth) {
+		dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
 		return -EINVAL;
 	}
 
@@ -2066,54 +2091,49 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
 
 static int qm_sq_dump(struct hisi_qm *qm, char *s)
 {
-	struct device *dev = &qm->pdev->dev;
+	u16 sq_depth = qm->qp_array->sq_depth;
 	void *sqe, *sqe_curr;
 	struct hisi_qp *qp;
 	u32 qp_id, sqe_id;
 	int ret;
 
-	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
 	if (ret)
 		return ret;
 
-	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+	sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
 	if (!sqe)
 		return -ENOMEM;
 
 	qp = &qm->qp_array[qp_id];
-	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+	memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
 	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
 	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
 	       qm->debug.sqe_mask_len);
 
-	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
-	if (ret)
-		dev_info(dev, "Show sqe failed!\n");
+	dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
 
 	kfree(sqe);
 
-	return ret;
+	return 0;
 }
 
 static int qm_cq_dump(struct hisi_qm *qm, char *s)
 {
-	struct device *dev = &qm->pdev->dev;
 	struct qm_cqe *cqe_curr;
 	struct hisi_qp *qp;
 	u32 qp_id, cqe_id;
 	int ret;
 
-	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
 	if (ret)
 		return ret;
 
 	qp = &qm->qp_array[qp_id];
 	cqe_curr = qp->cqe + cqe_id;
-	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
-	if (ret)
-		dev_info(dev, "Show cqe failed!\n");
+	dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
 
-	return ret;
+	return 0;
 }
 
 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
@@ -2131,11 +2151,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
 	if (ret)
 		return -EINVAL;
 
-	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
-		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+	if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+		dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
 		return -EINVAL;
-	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
-		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+	} else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+		dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
 		return -EINVAL;
 	}
 
@@ -2150,9 +2170,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
 		goto err_unlock;
 	}
 
-	ret = dump_show(qm, xeqe, size, name);
-	if (ret)
-		dev_info(dev, "Show %s failed!\n", name);
+	dump_show(qm, xeqe, size, name);
 
 err_unlock:
 	up_read(&qm->qps_lock);
@@ -2245,8 +2263,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
 		return ret;
 
 	/* Judge if the instance is being reset. */
-	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
-		return 0;
+	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+		ret = 0;
+		goto put_dfx_access;
+	}
 
 	if (count > QM_DBG_WRITE_LEN) {
 		ret = -ENOSPC;
@@ -2300,58 +2320,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
 	file->debug = &qm->debug;
 }
 
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v1(struct hisi_qm *qm)
 {
 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
-static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm)
 {
-	qm->error_mask = ce | nfe | fe;
+	struct hisi_qm_err_info *err_info = &qm->err_info;
+
+	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
 	/* clear QM hw residual error source */
-	writel(QM_ABNORMAL_INT_SOURCE_CLR,
-	       qm->io_base + QM_ABNORMAL_INT_SOURCE);
+	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
 
 	/* configure error type */
-	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
+	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
 	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
-	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
-	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
 }
 
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v2(struct hisi_qm *qm)
 {
-	u32 irq_enable = ce | nfe | fe;
-	u32 irq_unmask = ~irq_enable;
+	u32 irq_unmask;
 
-	qm_hw_error_cfg(qm, ce, nfe, fe);
+	qm_hw_error_cfg(qm);
 
+	irq_unmask = ~qm->error_mask;
 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
 	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
 {
-	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+	u32 irq_mask = qm->error_mask;
+
+	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
-static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v3(struct hisi_qm *qm)
 {
-	u32 irq_enable = ce | nfe | fe;
-	u32 irq_unmask = ~irq_enable;
+	u32 irq_unmask;
 
-	qm_hw_error_cfg(qm, ce, nfe, fe);
+	qm_hw_error_cfg(qm);
 
 	/* enable close master ooo when hardware error happened */
-	writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
 
+	irq_unmask = ~qm->error_mask;
 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
 	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
 }
 
 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
 {
-	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+	u32 irq_mask = qm->error_mask;
+
+	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
 
 	/* disable close master ooo when hardware error happened */
 	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
@@ -2396,7 +2423,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
 
 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 {
-	u32 error_status, tmp, val;
+	u32 error_status, tmp;
 
 	/* read err sts */
 	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -2407,17 +2434,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
 			qm->err_status.is_qm_ecc_mbit = true;
 
 		qm_log_hw_error(qm, error_status);
-		val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
-		/* ce error does not need to be reset */
-		if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
-			writel(error_status, qm->io_base +
-			       QM_ABNORMAL_INT_SOURCE);
-			writel(qm->err_info.nfe,
-			       qm->io_base + QM_RAS_NFE_ENABLE);
-			return ACC_ERR_RECOVERED;
-		}
+		if (error_status & qm->err_info.qm_reset_mask)
+			return ACC_ERR_NEED_RESET;
 
-		return ACC_ERR_NEED_RESET;
+		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
 	}
 
 	return ACC_ERR_RECOVERED;
@@ -2493,7 +2514,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
 	u64 val;
 	u32 i;
 
-	if (!qm->vfs_num || qm->ver < QM_HW_V3)
+	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		return 0;
 
 	while (true) {
@@ -2756,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
 
 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
 	.qm_db = qm_db_v1,
-	.get_irq_num = qm_get_irq_num_v1,
 	.hw_error_init = qm_hw_error_init_v1,
 	.set_msi = qm_set_msi,
 };
@@ -2764,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
 	.get_vft = qm_get_vft_v2,
 	.qm_db = qm_db_v2,
-	.get_irq_num = qm_get_irq_num_v2,
 	.hw_error_init = qm_hw_error_init_v2,
 	.hw_error_uninit = qm_hw_error_uninit_v2,
 	.hw_error_handle = qm_hw_error_handle_v2,
@@ -2774,14 +2793,10 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
 	.get_vft = qm_get_vft_v2,
 	.qm_db = qm_db_v2,
-	.get_irq_num = qm_get_irq_num_v3,
 	.hw_error_init = qm_hw_error_init_v3,
 	.hw_error_uninit = qm_hw_error_uninit_v3,
 	.hw_error_handle = qm_hw_error_handle_v2,
-	.stop_qp = qm_stop_qp,
 	.set_msi = qm_set_msi_v3,
-	.ping_all_vfs = qm_ping_all_vfs,
-	.ping_pf = qm_ping_pf,
 };
 
 static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2789,7 +2804,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
 	struct hisi_qp_status *qp_status = &qp->qp_status;
 	u16 sq_tail = qp_status->sq_tail;
 
-	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
+	if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
 		return NULL;
 
 	return qp->sqe + sq_tail * qp->qm->sqe_size;
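
Queue-depth arithmetic throughout this file is now parameterised by the per-qp depth instead of the fixed QM_Q_DEPTH. A minimal sketch of the submission-ring invariants (helper names are illustrative); one slot stays free so that full and empty states remain distinguishable:

	static bool qp_sq_full(struct hisi_qp *qp)
	{
		return atomic_read(&qp->qp_status.used) == qp->sq_depth - 1;
	}

	static u16 qp_sq_next(struct hisi_qp *qp, u16 tail)
	{
		return (tail + 1) % qp->sq_depth;	/* wrap at the ring depth */
	}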
@@ -2830,7 +2845,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
 
 	qp = &qm->qp_array[qp_id];
 	hisi_qm_unset_hw_reset(qp);
-	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+	memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
 
 	qp->event_cb = NULL;
 	qp->req_cb = NULL;
@@ -2911,9 +2926,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
 	if (ver == QM_HW_V1) {
 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
-		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+		sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
 	} else {
-		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
+		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
 		sqc->w8 = 0; /* rand_qc */
 	}
 	sqc->cq_num = cpu_to_le16(qp_id);
@@ -2954,9 +2969,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
 	if (ver == QM_HW_V1) {
 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
 							QM_QC_CQE_SIZE));
-		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+		cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
 	} else {
-		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
+		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
 		cqc->w8 = 0; /* rand_qc */
 	}
 	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
@@ -3043,13 +3058,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
 {
 	int qp_used = atomic_read(&qp->qp_status.used);
 	u16 cur_tail = qp->qp_status.sq_tail;
-	u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+	u16 sq_depth = qp->sq_depth;
+	u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
 	struct hisi_qm *qm = qp->qm;
 	u16 pos;
 	int i;
 
 	for (i = 0; i < qp_used; i++) {
-		pos = (i + cur_head) % QM_Q_DEPTH;
+		pos = (i + cur_head) % sq_depth;
 		qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
 		atomic_dec(&qp->qp_status.used);
 	}
@@ -3078,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
 		return 0;
 
 	/* Kunpeng930 supports drain qp by device */
-	if (qm->ops->stop_qp) {
-		ret = qm->ops->stop_qp(qp);
+	if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+		ret = qm_stop_qp(qp);
 		if (ret)
 			dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
 		return ret;
@@ -3197,7 +3213,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
 {
 	struct hisi_qp_status *qp_status = &qp->qp_status;
 	u16 sq_tail = qp_status->sq_tail;
-	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+	u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
 	void *sqe = qm_get_avail_sqe(qp);
 
 	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
@@ -3286,7 +3302,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
 {
 	struct hisi_qp *qp = q->priv;
 
-	hisi_qm_cache_wb(qp->qm);
 	hisi_qm_release_qp(qp);
 }
 
@@ -3310,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 		if (qm->ver == QM_HW_V1) {
 			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
 				return -EINVAL;
-		} else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
+		} else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
 			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
 			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
 				return -EINVAL;
@@ -3387,6 +3402,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
 				unsigned long arg)
 {
 	struct hisi_qp *qp = q->priv;
+	struct hisi_qp_info qp_info;
 	struct hisi_qp_ctx qp_ctx;
 
 	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
@@ -3403,11 +3419,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
 		if (copy_to_user((void __user *)arg, &qp_ctx,
 				 sizeof(struct hisi_qp_ctx)))
 			return -EFAULT;
-	} else {
-		return -EINVAL;
+
+		return 0;
+	} else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
+		if (copy_from_user(&qp_info, (void __user *)arg,
+				   sizeof(struct hisi_qp_info)))
+			return -EFAULT;
+
+		qp_info.sqe_size = qp->qm->sqe_size;
+		qp_info.sq_depth = qp->sq_depth;
+		qp_info.cq_depth = qp->cq_depth;
+
+		if (copy_to_user((void __user *)arg, &qp_info,
+				  sizeof(struct hisi_qp_info)))
+			return -EFAULT;
+
+		return 0;
 	}
 
-	return 0;
+	return -EINVAL;
 }
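
A hedged sketch of how userspace could query the new queue geometry through UACCE_CMD_QM_SET_QP_INFO; the open uacce queue fd and the UAPI header providing struct hisi_qp_info are assumptions for illustration:

	#include <stdio.h>
	#include <sys/ioctl.h>

	struct hisi_qp_info qp_info = {0};

	/* the kernel fills in sqe_size and the sq/cq depths */
	if (!ioctl(queue_fd, UACCE_CMD_QM_SET_QP_INFO, &qp_info))
		printf("sqe %u bytes, sq depth %u, cq depth %u\n",
		       qp_info.sqe_size, qp_info.sq_depth, qp_info.cq_depth);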
 
 static const struct uacce_ops uacce_qm_ops = {
@@ -3427,6 +3457,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 	struct uacce_device *uacce;
 	unsigned long mmio_page_nr;
 	unsigned long dus_page_nr;
+	u16 sq_depth, cq_depth;
 	struct uacce_interface interface = {
 		.flags = UACCE_DEV_SVA,
 		.ops = &uacce_qm_ops,
@@ -3453,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 
 	uacce->is_vf = pdev->is_virtfn;
 	uacce->priv = qm;
-	uacce->algs = qm->algs;
 
 	if (qm->ver == QM_HW_V1)
 		uacce->api_ver = HISI_QM_API_VER_BASE;
@@ -3464,15 +3494,17 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
 
 	if (qm->ver == QM_HW_V1)
 		mmio_page_nr = QM_DOORBELL_PAGE_NR;
-	else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
+	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
 		mmio_page_nr = QM_DOORBELL_PAGE_NR +
 			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
 	else
 		mmio_page_nr = qm->db_interval / PAGE_SIZE;
 
+	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
 	/* Add one more page for device or qp status */
-	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
-		       sizeof(struct qm_cqe) * QM_Q_DEPTH  + PAGE_SIZE) >>
+	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+		       sizeof(struct qm_cqe) * cq_depth  + PAGE_SIZE) >>
 					 PAGE_SHIFT;
 
 	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
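
A worked instance of the DUS sizing above, assuming sqe_size = 128, sizeof(struct qm_cqe) = 16 and sq_depth = cq_depth = 1024 on a 4 KiB page (all values illustrative):

	/* sqe area: 128 * 1024 = 131072 B (32 pages)
	 * cqe area:  16 * 1024 =  16384 B ( 4 pages)
	 * + (PAGE_SIZE - 1) rounds up, + PAGE_SIZE adds the status page
	 * => dus_page_nr = (4095 + 131072 + 16384 + 4096) >> 12 = 37 pages
	 */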
@@ -3577,10 +3609,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
 	kfree(qm->qp_array);
 }
 
-static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
+			       u16 sq_depth, u16 cq_depth)
 {
 	struct device *dev = &qm->pdev->dev;
-	size_t off = qm->sqe_size * QM_Q_DEPTH;
+	size_t off = qm->sqe_size * sq_depth;
 	struct hisi_qp *qp;
 	int ret = -ENOMEM;
 
@@ -3600,6 +3633,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
 	qp->cqe = qp->qdma.va + off;
 	qp->cqe_dma = qp->qdma.dma + off;
 	qp->qdma.size = dma_size;
+	qp->sq_depth = sq_depth;
+	qp->cq_depth = cq_depth;
 	qp->qm = qm;
 	qp->qp_id = id;
 
@@ -3626,7 +3661,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
 	init_rwsem(&qm->qps_lock);
 	qm->qp_in_used = 0;
 	qm->misc_ctl = false;
-	if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+	if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
 		if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
 			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
 	}
@@ -3636,7 +3671,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm)
 {
 	u32 val;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		return;
 
 	val = readl(qm->io_base + QM_IFC_INT_MASK);
@@ -3648,7 +3683,7 @@ static void qm_cmd_init(struct hisi_qm *qm)
 {
 	u32 val;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		return;
 
 	/* Clear communication interrupt source */
@@ -3664,7 +3699,7 @@ static void qm_put_pci_res(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
 
-	if (qm->use_db_isolation)
+	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
 		iounmap(qm->db_io_base);
 
 	iounmap(qm->io_base);
@@ -3714,7 +3749,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
 	}
 
 	idr_destroy(&qm->qp_idr);
-	kfree(qm->factor);
+
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		kfree(qm->factor);
 }
 
 /**
@@ -3740,7 +3777,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
 	hisi_qm_set_state(qm, QM_NOT_READY);
 	up_write(&qm->qps_lock);
 
-	qm_irq_unregister(qm);
+	qm_irqs_unregister(qm);
 	hisi_qm_pci_uninit(qm);
 	if (qm->use_sva) {
 		uacce_remove(qm->uacce);
@@ -3841,7 +3878,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
 	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
 	if (qm->ver == QM_HW_V1)
 		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
-	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+	eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
 	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
 				 DMA_TO_DEVICE);
@@ -3870,7 +3907,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
 
 	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
 	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
-	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+	aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
 	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
 				  DMA_TO_DEVICE);
@@ -4136,14 +4173,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
 
 static void qm_hw_error_init(struct hisi_qm *qm)
 {
-	struct hisi_qm_err_info *err_info = &qm->err_info;
-
 	if (!qm->ops->hw_error_init) {
 		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
 		return;
 	}
 
-	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
+	qm->ops->hw_error_init(qm);
 }
 
 static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -4497,12 +4532,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm)
 	qm->mb_qos = 0;
 
 	/* vf ping pf to get function qos */
-	if (qm->ops->ping_pf) {
-		ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
-		if (ret) {
-			pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
-			return ret;
-		}
+	ret = qm_ping_pf(qm, QM_VF_GET_QOS);
+	if (ret) {
+		pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+		return ret;
 	}
 
 	while (true) {
@@ -4674,14 +4707,14 @@ static const struct file_operations qm_algqos_fops = {
  * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
  * @qm: The qm for which we want to add debugfs files.
  *
- * Create function qos debugfs files.
+ * Create function qos debugfs files; the VF pings the PF to get the function qos.
  */
 static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
 {
 	if (qm->fun_type == QM_HW_PF)
 		debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
 				    qm, &qm_algqos_fops);
-	else
+	else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
 		debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
 				    qm, &qm_algqos_fops);
 }
@@ -4729,7 +4762,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
 			&qm_atomic64_ops);
 	}
 
-	if (qm->ver >= QM_HW_V3)
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
 		hisi_qm_set_algqos_init(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
@@ -4768,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
 }
 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
 
+static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+{
+	int i;
+
+	for (i = 1; i <= total_func; i++)
+		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+}
+
 /**
  * hisi_qm_sriov_enable() - enable virtual functions
  * @pdev: the PCIe device
@@ -4794,7 +4835,17 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
 		goto err_put_sync;
 	}
 
-	num_vfs = min_t(int, max_vfs, total_vfs);
+	if (max_vfs > total_vfs) {
+		pci_err(pdev, "requested VF count %d exceeds the supported total %d!\n", max_vfs, total_vfs);
+		ret = -ERANGE;
+		goto err_put_sync;
+	}
+
+	num_vfs = max_vfs;
+
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		hisi_qm_init_vf_qos(qm, num_vfs);
+
 	ret = qm_vf_q_assign(qm, num_vfs);
 	if (ret) {
 		pci_err(pdev, "Can't assign queues for VF!\n");
@@ -4830,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 {
 	struct hisi_qm *qm = pci_get_drvdata(pdev);
-	int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
 	int ret;
 
 	if (pci_vfs_assigned(pdev)) {
@@ -4845,8 +4895,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
 	}
 
 	pci_disable_sriov(pdev);
-	/* clear vf function shaper configure array */
-	memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
 	ret = qm_clear_vft_config(qm);
 	if (ret)
 		return ret;
@@ -4891,17 +4940,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
 		if (qm->err_ini->log_dev_hw_err)
 			qm->err_ini->log_dev_hw_err(qm, err_sts);
 
-		/* ce error does not need to be reset */
-		if ((err_sts | qm->err_info.dev_ce_mask) ==
-		     qm->err_info.dev_ce_mask) {
-			if (qm->err_ini->clear_dev_hw_err_status)
-				qm->err_ini->clear_dev_hw_err_status(qm,
-								err_sts);
+		if (err_sts & qm->err_info.dev_reset_mask)
+			return ACC_ERR_NEED_RESET;
 
-			return ACC_ERR_RECOVERED;
-		}
-
-		return ACC_ERR_NEED_RESET;
+		if (qm->err_ini->clear_dev_hw_err_status)
+			qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
 	}
 
 	return ACC_ERR_RECOVERED;
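
qm_hw_error_handle_v2() above and qm_dev_err_handle() here now share one classification rule: any status bit inside the capability-derived reset mask forces a reset, everything else is cleared and reported as recovered. A condensed sketch (the helper is illustrative):

	static enum acc_err_result acc_classify_err(u32 err_sts, u32 reset_mask)
	{
		if (err_sts & reset_mask)
			return ACC_ERR_NEED_RESET;

		/* non-fatal: the caller clears the source and re-arms NFE */
		return ACC_ERR_RECOVERED;
	}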
@@ -5070,8 +5113,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
 		return 0;
 
 	/* Kunpeng930 supports to notify VFs to stop before PF reset */
-	if (qm->ops->ping_all_vfs) {
-		ret = qm->ops->ping_all_vfs(qm, cmd);
+	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+		ret = qm_ping_all_vfs(qm, cmd);
 		if (ret)
 			pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
 	} else {
@@ -5262,8 +5305,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
 	}
 
 	/* Kunpeng930 supports to notify VFs to start after PF reset. */
-	if (qm->ops->ping_all_vfs) {
-		ret = qm->ops->ping_all_vfs(qm, cmd);
+	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+		ret = qm_ping_all_vfs(qm, cmd);
 		if (ret)
 			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
 	} else {
@@ -5466,8 +5509,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
 	if (pdev->is_virtfn)
 		return PCI_ERS_RESULT_RECOVERED;
 
-	pci_aer_clear_nonfatal_status(pdev);
-
 	/* reset pcie device controller */
 	ret = qm_controller_reset(qm);
 	if (ret) {
@@ -5599,51 +5640,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int qm_irq_register(struct hisi_qm *qm)
-{
-	struct pci_dev *pdev = qm->pdev;
-	int ret;
-
-	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
-			  qm_irq, 0, qm->dev_name, qm);
-	if (ret)
-		return ret;
-
-	if (qm->ver > QM_HW_V1) {
-		ret = request_threaded_irq(pci_irq_vector(pdev,
-					   QM_AEQ_EVENT_IRQ_VECTOR),
-					   qm_aeq_irq, qm_aeq_thread,
-					   0, qm->dev_name, qm);
-		if (ret)
-			goto err_aeq_irq;
-
-		if (qm->fun_type == QM_HW_PF) {
-			ret = request_irq(pci_irq_vector(pdev,
-					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
-					  qm_abnormal_irq, 0, qm->dev_name, qm);
-			if (ret)
-				goto err_abonormal_irq;
-		}
-	}
-
-	if (qm->ver > QM_HW_V2) {
-		ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
-				qm_mb_cmd_irq, 0, qm->dev_name, qm);
-		if (ret)
-			goto err_mb_cmd_irq;
-	}
-
-	return 0;
-
-err_mb_cmd_irq:
-	if (qm->fun_type == QM_HW_PF)
-		free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-err_abonormal_irq:
-	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
-	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-	return ret;
-}
 
 /**
  * hisi_qm_dev_shutdown() - Shutdown device.
@@ -5711,7 +5707,7 @@ err_prepare:
 	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
 out:
 	pci_save_state(pdev);
-	ret = qm->ops->ping_pf(qm, cmd);
+	ret = qm_ping_pf(qm, cmd);
 	if (ret)
 		dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
 }
@@ -5729,7 +5725,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
 		cmd = QM_VF_START_FAIL;
 	}
 
-	ret = qm->ops->ping_pf(qm, cmd);
+	ret = qm_ping_pf(qm, cmd);
 	if (ret)
 		dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
 
@@ -5924,21 +5920,193 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
 }
 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
 
+static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	if (qm->fun_type == QM_HW_VF)
+		return;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_abnormal_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	if (qm->fun_type == QM_HW_VF)
+		return 0;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d\n", ret);
+
+	return ret;
+}
+
+static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d\n", ret);
+
+	return ret;
+}
+
+static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_aeq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+				   qm_aeq_thread, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d\n", ret);
+
+	return ret;
+}
+
+static void qm_unregister_eq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_eq_irq(struct hisi_qm *qm)
+{
+	struct pci_dev *pdev = qm->pdev;
+	u32 irq_vector, val;
+	int ret;
+
+	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+		return 0;
+
+	irq_vector = val & QM_IRQ_VECTOR_MASK;
+	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+	if (ret)
+		dev_err(&pdev->dev, "failed to request eq irq, ret = %d\n", ret);
+
+	return ret;
+}
+
+static void qm_irqs_unregister(struct hisi_qm *qm)
+{
+	qm_unregister_mb_cmd_irq(qm);
+	qm_unregister_abnormal_irq(qm);
+	qm_unregister_aeq_irq(qm);
+	qm_unregister_eq_irq(qm);
+}
+
+static int qm_irqs_register(struct hisi_qm *qm)
+{
+	int ret;
+
+	ret = qm_register_eq_irq(qm);
+	if (ret)
+		return ret;
+
+	ret = qm_register_aeq_irq(qm);
+	if (ret)
+		goto free_eq_irq;
+
+	ret = qm_register_abnormal_irq(qm);
+	if (ret)
+		goto free_aeq_irq;
+
+	ret = qm_register_mb_cmd_irq(qm);
+	if (ret)
+		goto free_abnormal_irq;
+
+	return 0;
+
+free_abnormal_irq:
+	qm_unregister_abnormal_irq(qm);
+free_aeq_irq:
+	qm_unregister_aeq_irq(qm);
+free_eq_irq:
+	qm_unregister_eq_irq(qm);
+	return ret;
+}
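
All four register/unregister pairs above decode the same capability word: the low bits carry the MSI vector, and the bits above QM_IRQ_TYPE_SHIFT say whether the interrupt exists on this hardware. A condensed sketch of that decode (helper name is illustrative):

	static int qm_irq_vector_from_cap(struct hisi_qm *qm, u32 cap_type)
	{
		u32 val = hisi_qm_get_hw_info(qm, qm_basic_info, cap_type, qm->cap_ver);

		if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
			return -ENOENT;		/* irq not wired on this hardware */

		return val & QM_IRQ_VECTOR_MASK;
	}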
+
 static int qm_get_qp_num(struct hisi_qm *qm)
 {
-	if (qm->ver == QM_HW_V1)
-		qm->ctrl_qp_num = QM_QNUM_V1;
-	else if (qm->ver == QM_HW_V2)
-		qm->ctrl_qp_num = QM_QNUM_V2;
-	else
-		qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
-					QM_QP_NUN_MASK;
+	bool is_db_isolation;
 
-	if (qm->use_db_isolation)
-		qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
-				  QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
-	else
-		qm->max_qp_num = qm->ctrl_qp_num;
+	/* The VF's qp_num is assigned by the PF, and the VF can read it from the vft. */
+	if (qm->fun_type == QM_HW_VF) {
+		if (qm->ver != QM_HW_V1)
+			/* v2 starts to support getting the vft by mailbox */
+			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+
+		return 0;
+	}
+
+	is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+	qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
+	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+					     QM_FUNC_MAX_QP_CAP, is_db_isolation);
 
 	/* check if qp number is valid */
 	if (qm->qp_num > qm->max_qp_num) {
@@ -5950,6 +6118,39 @@ static int qm_get_qp_num(struct hisi_qm *qm)
 	return 0;
 }
 
+static void qm_get_hw_caps(struct hisi_qm *qm)
+{
+	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+						  qm_cap_info_pf : qm_cap_info_vf;
+	u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
+				   ARRAY_SIZE(qm_cap_info_vf);
+	u32 val, i;
+
+	/* The doorbell isolation register is an independent register. */
+	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
+	if (val)
+		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+
+	if (qm->ver >= QM_HW_V3) {
+		val = readl(qm->io_base + QM_FUNC_CAPS_REG);
+		qm->cap_ver = val & QM_CAPBILITY_VERSION;
+	}
+
+	/* Get the capabilities common to PF and VF */
+	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
+		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
+		if (val)
+			set_bit(qm_cap_info_comm[i].type, &qm->caps);
+	}
+
+	/* Get the PF- or VF-specific capabilities */
+	for (i = 0; i < size; i++) {
+		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
+		if (val)
+			set_bit(cap_info[i].type, &qm->caps);
+	}
+}
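
With the capabilities cached in qm->caps, feature checks elsewhere in this patch reduce to bit tests, as in qm_drain_qp() above:

	if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps))
		ret = qm_stop_qp(qp);	/* Kunpeng930-style drain by device */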
+
 static int qm_get_pci_res(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
@@ -5969,16 +6170,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
 		goto err_request_mem_regions;
 	}
 
-	if (qm->ver > QM_HW_V2) {
-		if (qm->fun_type == QM_HW_PF)
-			qm->use_db_isolation = readl(qm->io_base +
-						     QM_QUE_ISO_EN) & BIT(0);
-		else
-			qm->use_db_isolation = readl(qm->io_base +
-						     QM_QUE_ISO_CFG_V) & BIT(0);
-	}
-
-	if (qm->use_db_isolation) {
+	qm_get_hw_caps(qm);
+	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
 		qm->db_interval = QM_QP_DB_INTERVAL;
 		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
 		qm->db_io_base = ioremap(qm->db_phys_base,
@@ -5993,16 +6186,14 @@ static int qm_get_pci_res(struct hisi_qm *qm)
 		qm->db_interval = 0;
 	}
 
-	if (qm->fun_type == QM_HW_PF) {
-		ret = qm_get_qp_num(qm);
-		if (ret)
-			goto err_db_ioremap;
-	}
+	ret = qm_get_qp_num(qm);
+	if (ret)
+		goto err_db_ioremap;
 
 	return 0;
 
 err_db_ioremap:
-	if (qm->use_db_isolation)
+	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
 		iounmap(qm->db_io_base);
 err_ioremap:
 	iounmap(qm->io_base);
@@ -6033,11 +6224,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
 		goto err_get_pci_res;
 	pci_set_master(pdev);
 
-	if (!qm->ops->get_irq_num) {
-		ret = -EOPNOTSUPP;
-		goto err_get_pci_res;
-	}
-	num_vec = qm->ops->get_irq_num(qm);
+	num_vec = qm_get_irq_num(qm);
 	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
 	if (ret < 0) {
 		dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -6080,6 +6267,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm)
 static int hisi_qp_alloc_memory(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
+	u16 sq_depth, cq_depth;
 	size_t qp_dma_size;
 	int i, ret;
 
@@ -6093,13 +6281,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
 		return -ENOMEM;
 	}
 
+	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
 	/* one more page for device or qp statuses */
-	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
-		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
+	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
 	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
 	for (i = 0; i < qm->qp_num; i++) {
 		qm->poll_data[i].qm = qm;
-		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
 		if (ret)
 			goto err_init_qp_mem;
 
@@ -6116,15 +6305,18 @@ err_init_qp_mem:
 static int hisi_qm_memory_init(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
-	int ret, total_func, i;
+	int ret, total_func;
 	size_t off = 0;
 
-	total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
-	qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
-	if (!qm->factor)
-		return -ENOMEM;
-	for (i = 0; i < total_func; i++)
-		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+		if (!qm->factor)
+			return -ENOMEM;
+
+		/* Only the PF value needs to be initialized */
+		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
+	}
 
 #define QM_INIT_BUF(qm, type, num) do { \
 	(qm)->type = ((qm)->qdma.va + (off)); \
@@ -6133,20 +6325,21 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
 } while (0)
 
 	idr_init(&qm->qp_idr);
-	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
-			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
+	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
+			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
 			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
 			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
 	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
 					 GFP_ATOMIC);
 	dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
 	if (!qm->qdma.va) {
-		ret =  -ENOMEM;
-		goto err_alloc_qdma;
+		ret = -ENOMEM;
+		goto err_destroy_idr;
 	}
 
-	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
-	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+	QM_INIT_BUF(qm, eqe, qm->eq_depth);
+	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
 	QM_INIT_BUF(qm, sqc, qm->qp_num);
 	QM_INIT_BUF(qm, cqc, qm->qp_num);
 
@@ -6158,8 +6351,10 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
 
 err_alloc_qp_array:
 	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-err_alloc_qdma:
-	kfree(qm->factor);
+err_destroy_idr:
+	idr_destroy(&qm->qp_idr);
+	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+		kfree(qm->factor);
 
 	return ret;
 }
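
QM_INIT_BUF() carves the single coherent allocation into the four queue areas in order; a sketch of the resulting layout for the sizes chosen above:

	/* qdma.va +0 ..................... eqe  (eq_depth entries)
	 *         +QMC_ALIGN(eqe area) ... aeqe (aeq_depth entries)
	 *         +QMC_ALIGN(aeqe area) .. sqc  (qp_num entries)
	 *         +QMC_ALIGN(sqc area) ... cqc  (qp_num entries)
	 */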
@@ -6202,17 +6397,10 @@ int hisi_qm_init(struct hisi_qm *qm)
 	if (ret)
 		return ret;
 
-	ret = qm_irq_register(qm);
+	ret = qm_irqs_register(qm);
 	if (ret)
 		goto err_pci_init;
 
-	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
-		/* v2 starts to support get vft by mailbox */
-		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
-		if (ret)
-			goto err_irq_register;
-	}
-
 	if (qm->fun_type == QM_HW_PF) {
 		qm_disable_clock_gate(qm);
 		ret = qm_dev_mem_reset(qm);
@@ -6251,7 +6439,7 @@ err_alloc_uacce:
 		qm->uacce = NULL;
 	}
 err_irq_register:
-	qm_irq_unregister(qm);
+	qm_irqs_unregister(qm);
 err_pci_init:
 	hisi_qm_pci_uninit(qm);
 	return ret;
@@ -6302,7 +6490,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return;
 
 	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
@@ -6321,7 +6509,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm)
 {
 	struct device *dev = &qm->pdev->dev;
 
-	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
 		return;
 
 	pm_runtime_get_noresume(dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index d2a0bc93e752..3e57fc04b377 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -17,6 +17,7 @@ struct sec_alg_res {
 	dma_addr_t a_ivin_dma;
 	u8 *out_mac;
 	dma_addr_t out_mac_dma;
+	u16 depth;
 };
 
 /* Cipher request of SEC private */
@@ -115,9 +116,9 @@ struct sec_cipher_ctx {
 /* SEC queue context which defines queue's relatives */
 struct sec_qp_ctx {
 	struct hisi_qp *qp;
-	struct sec_req *req_list[QM_Q_DEPTH];
+	struct sec_req **req_list;
 	struct idr req_idr;
-	struct sec_alg_res res[QM_Q_DEPTH];
+	struct sec_alg_res *res;
 	struct sec_ctx *ctx;
 	spinlock_t req_lock;
 	struct list_head backlog;
@@ -191,8 +192,37 @@ struct sec_dev {
 	bool iommu_used;
 };
 
+enum sec_cap_type {
+	SEC_QM_NFE_MASK_CAP = 0x0,
+	SEC_QM_RESET_MASK_CAP,
+	SEC_QM_OOO_SHUTDOWN_MASK_CAP,
+	SEC_QM_CE_MASK_CAP,
+	SEC_NFE_MASK_CAP,
+	SEC_RESET_MASK_CAP,
+	SEC_OOO_SHUTDOWN_MASK_CAP,
+	SEC_CE_MASK_CAP,
+	SEC_CLUSTER_NUM_CAP,
+	SEC_CORE_TYPE_NUM_CAP,
+	SEC_CORE_NUM_CAP,
+	SEC_CORES_PER_CLUSTER_NUM_CAP,
+	SEC_CORE_ENABLE_BITMAP,
+	SEC_DRV_ALG_BITMAP_LOW,
+	SEC_DRV_ALG_BITMAP_HIGH,
+	SEC_DEV_ALG_BITMAP_LOW,
+	SEC_DEV_ALG_BITMAP_HIGH,
+	SEC_CORE1_ALG_BITMAP_LOW,
+	SEC_CORE1_ALG_BITMAP_HIGH,
+	SEC_CORE2_ALG_BITMAP_LOW,
+	SEC_CORE2_ALG_BITMAP_HIGH,
+	SEC_CORE3_ALG_BITMAP_LOW,
+	SEC_CORE3_ALG_BITMAP_HIGH,
+	SEC_CORE4_ALG_BITMAP_LOW,
+	SEC_CORE4_ALG_BITMAP_HIGH,
+};
+
 void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
 struct hisi_qp **sec_create_qps(void);
 int sec_register_to_crypto(struct hisi_qm *qm);
 void sec_unregister_from_crypto(struct hisi_qm *qm);
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
 #endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 77c9f13cf69a..84ae8ddd1a13 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -59,14 +59,14 @@
 #define SEC_ICV_MASK		0x000E
 #define SEC_SQE_LEN_RATE_MASK	0x3
 
-#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
 #define SEC_SGL_SGE_NR		128
 #define SEC_CIPHER_AUTH		0xfe
 #define SEC_AUTH_CIPHER		0x1
 #define SEC_MAX_MAC_LEN		64
 #define SEC_MAX_AAD_LEN		65535
 #define SEC_MAX_CCM_AAD_LEN	65279
-#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
 
 #define SEC_PBUF_SZ			512
 #define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
@@ -74,11 +74,11 @@
 #define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
 			SEC_MAX_MAC_LEN * 2)
 #define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
-#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
-#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
-			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
-#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
-			SEC_PBUF_LEFT_SZ)
+#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
+				SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
+				SEC_PBUF_LEFT_SZ(depth))
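
A worked instance of the pbuf sizing, assuming SEC_IV_SIZE = 24 and a 4 KiB page with depth = 1024 (illustrative values):

	/* SEC_PBUF_PKG  = 512 + 24 + 64 * 2             = 664 B
	 * SEC_PBUF_NUM  = 4096 / 664                    = 6 pbufs per page
	 * SEC_PBUF_PAGE_NUM(1024) = 1024 / 6            = 170 full pages
	 * SEC_PBUF_LEFT_SZ(1024)  = 664 * (1024 - 1020) = 2656 B
	 * SEC_TOTAL_PBUF_SZ(1024) = 170 * 4096 + 2656 B
	 */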
 
 #define SEC_SQE_LEN_RATE	4
 #define SEC_SQE_CFLAG		2
@@ -104,6 +104,16 @@
 #define IV_CTR_INIT		0x1
 #define IV_BYTE_OFFSET		0x8
 
+struct sec_skcipher {
+	u64 alg_msk;
+	struct skcipher_alg alg;
+};
+
+struct sec_aead {
+	u64 alg_msk;
+	struct aead_alg alg;
+};
+
 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
 {
@@ -128,9 +138,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
 	int req_id;
 
 	spin_lock_bh(&qp_ctx->req_lock);
-
-	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
-				  0, QM_Q_DEPTH, GFP_ATOMIC);
+	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
 	spin_unlock_bh(&qp_ctx->req_lock);
 	if (unlikely(req_id < 0)) {
 		dev_err(req->ctx->dev, "alloc req id fail!\n");
@@ -148,7 +156,7 @@ static void sec_free_req_id(struct sec_req *req)
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
 	int req_id = req->req_id;
 
-	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
 		dev_err(req->ctx->dev, "free request id invalid!\n");
 		return;
 	}
@@ -300,14 +308,15 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
 /* Get DMA memory resources */
 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
 {
+	u16 q_depth = res->depth;
 	int i;
 
-	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
 					 &res->c_ivin_dma, GFP_KERNEL);
 	if (!res->c_ivin)
 		return -ENOMEM;
 
-	for (i = 1; i < QM_Q_DEPTH; i++) {
+	for (i = 1; i < q_depth; i++) {
 		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
 		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
 	}
@@ -318,20 +327,21 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
 {
 	if (res->c_ivin)
-		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
 				  res->c_ivin, res->c_ivin_dma);
 }
 
 static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
 {
+	u16 q_depth = res->depth;
 	int i;
 
-	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
 					 &res->a_ivin_dma, GFP_KERNEL);
 	if (!res->a_ivin)
 		return -ENOMEM;
 
-	for (i = 1; i < QM_Q_DEPTH; i++) {
+	for (i = 1; i < q_depth; i++) {
 		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
 		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
 	}
@@ -342,20 +352,21 @@ static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
 static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
 {
 	if (res->a_ivin)
-		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
 				  res->a_ivin, res->a_ivin_dma);
 }
 
 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
 {
+	u16 q_depth = res->depth;
 	int i;
 
-	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
 					  &res->out_mac_dma, GFP_KERNEL);
 	if (!res->out_mac)
 		return -ENOMEM;
 
-	for (i = 1; i < QM_Q_DEPTH; i++) {
+	for (i = 1; i < q_depth; i++) {
 		res[i].out_mac_dma = res->out_mac_dma +
 				     i * (SEC_MAX_MAC_LEN << 1);
 		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
@@ -367,14 +378,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
 {
 	if (res->out_mac)
-		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
 				  res->out_mac, res->out_mac_dma);
 }
 
 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
 {
 	if (res->pbuf)
-		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
 				  res->pbuf, res->pbuf_dma);
 }
 
@@ -384,10 +395,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
  */
 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
 {
+	u16 q_depth = res->depth;
+	int size = SEC_PBUF_PAGE_NUM(q_depth);
 	int pbuf_page_offset;
 	int i, j, k;
 
-	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
 				&res->pbuf_dma, GFP_KERNEL);
 	if (!res->pbuf)
 		return -ENOMEM;
@@ -400,11 +413,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
 	 * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
 	 * for the SEC_TOTAL_PBUF_SZ
 	 */
-	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+	for (i = 0; i <= size; i++) {
 		pbuf_page_offset = PAGE_SIZE * i;
 		for (j = 0; j < SEC_PBUF_NUM; j++) {
 			k = i * SEC_PBUF_NUM + j;
-			if (k == QM_Q_DEPTH)
+			if (k == q_depth)
 				break;
 			res[k].pbuf = res->pbuf +
 				j * SEC_PBUF_PKG + pbuf_page_offset;
@@ -470,36 +483,29 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
 		sec_free_mac_resource(dev, qp_ctx->res);
 }
 
-static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
-			     int qp_ctx_id, int alg_type)
+static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+				     struct sec_qp_ctx *qp_ctx)
 {
+	u16 q_depth = qp_ctx->qp->sq_depth;
 	struct device *dev = ctx->dev;
-	struct sec_qp_ctx *qp_ctx;
-	struct hisi_qp *qp;
 	int ret = -ENOMEM;
 
-	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
-	qp = ctx->qps[qp_ctx_id];
-	qp->req_type = 0;
-	qp->qp_ctx = qp_ctx;
-	qp_ctx->qp = qp;
-	qp_ctx->ctx = ctx;
-
-	qp->req_cb = sec_req_cb;
+	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
+	if (!qp_ctx->req_list)
+		return ret;
 
-	spin_lock_init(&qp_ctx->req_lock);
-	idr_init(&qp_ctx->req_idr);
-	INIT_LIST_HEAD(&qp_ctx->backlog);
+	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
+	if (!qp_ctx->res)
+		goto err_free_req_list;
+	qp_ctx->res->depth = q_depth;
 
-	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
-						     SEC_SGL_SGE_NR);
+	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
 	if (IS_ERR(qp_ctx->c_in_pool)) {
 		dev_err(dev, "fail to create sgl pool for input!\n");
-		goto err_destroy_idr;
+		goto err_free_res;
 	}
 
-	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
-						      SEC_SGL_SGE_NR);
+	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
 	if (IS_ERR(qp_ctx->c_out_pool)) {
 		dev_err(dev, "fail to create sgl pool for output!\n");
 		goto err_free_c_in_pool;
@@ -509,34 +515,72 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
 	if (ret)
 		goto err_free_c_out_pool;
 
-	ret = hisi_qm_start_qp(qp, 0);
-	if (ret < 0)
-		goto err_queue_free;
-
 	return 0;
 
-err_queue_free:
-	sec_alg_resource_free(ctx, qp_ctx);
 err_free_c_out_pool:
 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
 err_free_c_in_pool:
 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_destroy_idr:
-	idr_destroy(&qp_ctx->req_idr);
+err_free_res:
+	kfree(qp_ctx->res);
+err_free_req_list:
+	kfree(qp_ctx->req_list);
 	return ret;
 }
 
-static void sec_release_qp_ctx(struct sec_ctx *ctx,
-			       struct sec_qp_ctx *qp_ctx)
+static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
 {
 	struct device *dev = ctx->dev;
 
-	hisi_qm_stop_qp(qp_ctx->qp);
 	sec_alg_resource_free(ctx, qp_ctx);
-
 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+	kfree(qp_ctx->res);
+	kfree(qp_ctx->req_list);
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+			     int qp_ctx_id, int alg_type)
+{
+	struct sec_qp_ctx *qp_ctx;
+	struct hisi_qp *qp;
+	int ret;
 
+	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+	qp = ctx->qps[qp_ctx_id];
+	qp->req_type = 0;
+	qp->qp_ctx = qp_ctx;
+	qp_ctx->qp = qp;
+	qp_ctx->ctx = ctx;
+
+	qp->req_cb = sec_req_cb;
+
+	spin_lock_init(&qp_ctx->req_lock);
+	idr_init(&qp_ctx->req_idr);
+	INIT_LIST_HEAD(&qp_ctx->backlog);
+
+	ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
+	if (ret)
+		goto err_destroy_idr;
+
+	ret = hisi_qm_start_qp(qp, 0);
+	if (ret < 0)
+		goto err_resource_free;
+
+	return 0;
+
+err_resource_free:
+	sec_free_qp_ctx_resource(ctx, qp_ctx);
+err_destroy_idr:
+	idr_destroy(&qp_ctx->req_idr);
+	return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+			       struct sec_qp_ctx *qp_ctx)
+{
+	hisi_qm_stop_qp(qp_ctx->qp);
+	sec_free_qp_ctx_resource(ctx, qp_ctx);
 	idr_destroy(&qp_ctx->req_idr);
 }
 
@@ -559,7 +603,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
 	ctx->pbuf_supported = ctx->sec->iommu_used;
 
 	/* Half of queue depth is taken as fake requests limit in the queue. */
-	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
 	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
 			      GFP_KERNEL);
 	if (!ctx->qp_ctx) {
@@ -1679,7 +1723,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 					  aead_req->out_mac,
 					  authsize, a_req->cryptlen +
 					  a_req->assoclen);
-
 		if (unlikely(sz != authsize)) {
 			dev_err(c->dev, "copy out mac err!\n");
 			err = -EINVAL;
@@ -1966,7 +2009,6 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
 	return sec_aead_ctx_init(tfm, "sha512");
 }
 
-
 static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
 	struct sec_req *sreq)
 {
@@ -2126,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
 	.min_keysize = sec_min_key_size,\
 	.max_keysize = sec_max_key_size,\
 	.ivsize = iv_size,\
-},
+}
 
 #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
 	max_key_size, blk_size, iv_size) \
 	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
 	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
 
-static struct skcipher_alg sec_skciphers[] = {
-	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 AES_BLOCK_SIZE, 0)
-
-	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
-			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
-			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
-			 DES3_EDE_BLOCK_SIZE, 0)
-
-	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
-			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
-			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
-			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct skcipher_alg sec_skciphers_v3[] = {
-	SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
-			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
-	SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
-			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
-			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+static struct sec_skcipher sec_skciphers[] = {
+	{
+		.alg_msk = BIT(0),
+		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+	},
+	{
+		.alg_msk = BIT(1),
+		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(2),
+		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,	AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(3),
+		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,	SEC_XTS_MIN_KEY_SIZE,
+					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(4),
+		.alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,	AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(5),
+		.alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,	AES_MIN_KEY_SIZE,
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(12),
+		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,	AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(13),
+		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(14),
+		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,	SEC_XTS_MIN_KEY_SIZE,
+					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(15),
+		.alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,	AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(16),
+		.alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,	AES_MIN_KEY_SIZE,
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(23),
+		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
+					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+	},
+	{
+		.alg_msk = BIT(24),
+		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
+					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
+					DES3_EDE_BLOCK_SIZE),
+	},
 };
 
 static int aead_iv_demension_check(struct aead_request *aead_req)
@@ -2380,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 	.maxauthsize = max_authsize,\
 }
 
-static struct aead_alg sec_aeads[] = {
-	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
-		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
-		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
-		     AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+static struct sec_aead sec_aeads[] = {
+	{
+		.alg_msk = BIT(6),
+		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(7),
+		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(17),
+		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(18),
+		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+				    AES_BLOCK_SIZE),
+	},
+	{
+		.alg_msk = BIT(43),
+		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
+				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+	},
+	{
+		.alg_msk = BIT(44),
+		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
+				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+	},
+	{
+		.alg_msk = BIT(45),
+		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
+				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+	},
+};
 
-	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
-		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
-		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
-		     AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+static void sec_unregister_skcipher(u64 alg_mask, int end)
+{
+	int i;
 
-	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
-		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
-		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
-		     AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+	for (i = 0; i < end; i++)
+		if (sec_skciphers[i].alg_msk & alg_mask)
+			crypto_unregister_skcipher(&sec_skciphers[i].alg);
+}
 
-	SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+static int sec_register_skcipher(u64 alg_mask)
+{
+	int i, ret, count;
 
-	SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+	count = ARRAY_SIZE(sec_skciphers);
 
-static struct aead_alg sec_aeads_v3[] = {
-	SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+	for (i = 0; i < count; i++) {
+		if (!(sec_skciphers[i].alg_msk & alg_mask))
+			continue;
 
-	SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
-		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
-		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	sec_unregister_skcipher(alg_mask, i);
+
+	return ret;
+}
+
+static void sec_unregister_aead(u64 alg_mask, int end)
+{
+	int i;
+
+	for (i = 0; i < end; i++)
+		if (sec_aeads[i].alg_msk & alg_mask)
+			crypto_unregister_aead(&sec_aeads[i].alg);
+}
+
+static int sec_register_aead(u64 alg_mask)
+{
+	int i, ret, count;
+
+	count = ARRAY_SIZE(sec_aeads);
+
+	for (i = 0; i < count; i++) {
+		if (!(sec_aeads[i].alg_msk & alg_mask))
+			continue;
+
+		ret = crypto_register_aead(&sec_aeads[i].alg);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	sec_unregister_aead(alg_mask, i);
+
+	return ret;
+}
 
 int sec_register_to_crypto(struct hisi_qm *qm)
 {
+	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
 	int ret;
 
-	/* To avoid repeat register */
-	ret = crypto_register_skciphers(sec_skciphers,
-					ARRAY_SIZE(sec_skciphers));
+	ret = sec_register_skcipher(alg_mask);
 	if (ret)
 		return ret;
 
-	if (qm->ver > QM_HW_V2) {
-		ret = crypto_register_skciphers(sec_skciphers_v3,
-						ARRAY_SIZE(sec_skciphers_v3));
-		if (ret)
-			goto reg_skcipher_fail;
-	}
-
-	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+	ret = sec_register_aead(alg_mask);
 	if (ret)
-		goto reg_aead_fail;
-	if (qm->ver > QM_HW_V2) {
-		ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
-		if (ret)
-			goto reg_aead_v3_fail;
-	}
-	return ret;
+		sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
 
-reg_aead_v3_fail:
-	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
-reg_aead_fail:
-	if (qm->ver > QM_HW_V2)
-		crypto_unregister_skciphers(sec_skciphers_v3,
-					    ARRAY_SIZE(sec_skciphers_v3));
-reg_skcipher_fail:
-	crypto_unregister_skciphers(sec_skciphers,
-				    ARRAY_SIZE(sec_skciphers));
 	return ret;
 }
 
 void sec_unregister_from_crypto(struct hisi_qm *qm)
 {
-	if (qm->ver > QM_HW_V2)
-		crypto_unregister_aeads(sec_aeads_v3,
-					ARRAY_SIZE(sec_aeads_v3));
-	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
 
-	if (qm->ver > QM_HW_V2)
-		crypto_unregister_skciphers(sec_skciphers_v3,
-					    ARRAY_SIZE(sec_skciphers_v3));
-	crypto_unregister_skciphers(sec_skciphers,
-				    ARRAY_SIZE(sec_skciphers));
+	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
 }
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2c0be91c0b09..3705412bac5f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -27,7 +27,6 @@
 #define SEC_BD_ERR_CHK_EN3		0xffffbfff
 
 #define SEC_SQE_SIZE			128
-#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
 #define SEC_PF_DEF_Q_NUM		256
 #define SEC_PF_DEF_Q_BASE		0
 #define SEC_CTX_Q_NUM_DEF		2
@@ -42,16 +41,11 @@
 #define SEC_ECC_NUM			16
 #define SEC_ECC_MASH			0xFF
 #define SEC_CORE_INT_DISABLE		0x0
-#define SEC_CORE_INT_ENABLE		0x7c1ff
-#define SEC_CORE_INT_CLEAR		0x7c1ff
-#define SEC_SAA_ENABLE			0x17f
 
 #define SEC_RAS_CE_REG			0x301050
 #define SEC_RAS_FE_REG			0x301054
 #define SEC_RAS_NFE_REG			0x301058
-#define SEC_RAS_CE_ENB_MSK		0x88
 #define SEC_RAS_FE_ENB_MSK		0x0
-#define SEC_RAS_NFE_ENB_MSK		0x7c177
 #define SEC_OOO_SHUTDOWN_SEL		0x301014
 #define SEC_RAS_DISABLE		0x0
 #define SEC_MEM_START_INIT_REG	0x301100
@@ -119,6 +113,16 @@
 #define SEC_DFX_COMMON1_LEN		0x45
 #define SEC_DFX_COMMON2_LEN		0xBA
 
+#define SEC_ALG_BITMAP_SHIFT		32
+
+#define SEC_CIPHER_BITMAP		(GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
+					GENMASK(24, 21))
+#define SEC_DIGEST_BITMAP		(GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
+					GENMASK_ULL(42, 25))
+#define SEC_AEAD_BITMAP			(GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+					GENMASK_ULL(45, 43))
+#define SEC_DEV_ALG_MAX_LEN		256
+
 struct sec_hw_error {
 	u32 int_msk;
 	const char *msg;
@@ -129,6 +133,11 @@ struct sec_dfx_item {
 	u32 offset;
 };
 
+struct sec_dev_alg {
+	u64 alg_msk;
+	const char *algs;
+};
+
 static const char sec_name[] = "hisi_sec2";
 static struct dentry *sec_debugfs_root;
 
@@ -137,6 +146,46 @@ static struct hisi_qm_list sec_devices = {
 	.unregister_from_crypto	= sec_unregister_from_crypto,
 };
 
+static const struct hisi_qm_cap_info sec_basic_info[] = {
+	{SEC_QM_NFE_MASK_CAP,   0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
+	{SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
+	{SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+	{SEC_QM_CE_MASK_CAP,    0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+	{SEC_NFE_MASK_CAP,      0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
+	{SEC_RESET_MASK_CAP,    0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
+	{SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
+	{SEC_CE_MASK_CAP,       0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
+	{SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
+	{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+	{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+	{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+	{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+	{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+	{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+	{SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+	{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+};
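
Each row above follows the hisi_qm_cap_info shape: a capability id, a register offset, a bit shift and mask, and what read as three per-hardware-version defaults. A hedged sketch of how such a row might be consumed (the field names are assumptions inferred from the table, not taken from this patch):

	static u32 sec_cap_value(struct hisi_qm *qm, const struct hisi_qm_cap_info *row)
	{
		/* when the device advertises a capability version, trust the register */
		if (qm->cap_ver)
			return (readl(qm->io_base + row->offset) >> row->shift) & row->mask;

		return row->v2_val;	/* illustrative fallback to a per-version default */
	}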
+
+static const struct sec_dev_alg sec_dev_algs[] = { {
+		.alg_msk = SEC_CIPHER_BITMAP,
+		.algs = "cipher\n",
+	}, {
+		.alg_msk = SEC_DIGEST_BITMAP,
+		.algs = "digest\n",
+	}, {
+		.alg_msk = SEC_AEAD_BITMAP,
+		.algs = "aead\n",
+	},
+};
+
 static const struct sec_hw_error sec_hw_errors[] = {
 	{
 		.int_msk = BIT(0),
@@ -339,6 +388,16 @@ struct hisi_qp **sec_create_qps(void)
 	return NULL;
 }
 
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+{
+	u32 cap_val_h, cap_val_l;
+
+	cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+	cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+
+	return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+}
+
 static const struct kernel_param_ops sec_uacce_mode_ops = {
 	.set = uacce_mode_set,
 	.get = param_get_int,
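
The sec_get_alg_bitmap() helper added above stitches the HIGH and LOW 32-bit capability words into one 64-bit algorithm mask. A minimal userspace sketch of the same arithmetic, using the v2 defaults from the table (0x3FFF / 0xFFFFFFFF) as made-up inputs rather than hardware reads; get_alg_bitmap() and ALG_BITMAP_SHIFT here are illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

#define ALG_BITMAP_SHIFT 32

static uint64_t get_alg_bitmap(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << ALG_BITMAP_SHIFT) | (uint64_t)low;
}

int main(void)
{
	/* v2 defaults from the capability table: high 0x3FFF, low 0xFFFFFFFF */
	uint64_t bitmap = get_alg_bitmap(0x3FFF, 0xFFFFFFFF);

	printf("dev alg bitmap: 0x%llx\n", (unsigned long long)bitmap);
	return 0;
}
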
@@ -415,7 +474,7 @@ static void sec_open_sva_prefetch(struct hisi_qm *qm)
 	u32 val;
 	int ret;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	/* Enable prefetch */
@@ -435,7 +494,7 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
 	u32 val;
 	int ret;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
@@ -506,7 +565,8 @@ static int sec_engine_init(struct hisi_qm *qm)
 	writel(SEC_SINGLE_PORT_MAX_TRANS,
 	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
 
-	writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
+	reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
+	writel(reg, qm->io_base + SEC_SAA_EN_REG);
 
 	if (qm->ver < QM_HW_V3) {
 		/* HW V2 enable sm4 extra mode, as ctr/ecb */
@@ -576,7 +636,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 	val1 = readl(qm->io_base + SEC_CONTROL_REG);
 	if (enable) {
 		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
-		val2 = SEC_RAS_NFE_ENB_MSK;
+		val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
+					   SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
 	} else {
 		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
 		val2 = 0x0;
@@ -590,25 +651,30 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 
 static void sec_hw_error_enable(struct hisi_qm *qm)
 {
+	u32 ce, nfe;
+
 	if (qm->ver == QM_HW_V1) {
 		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
 		pci_info(qm->pdev, "V1 not support hw error handle\n");
 		return;
 	}
 
+	ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+	nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+
 	/* clear SEC hw error source if having */
-	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
+	writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
 
 	/* enable RAS int */
-	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+	writel(ce, qm->io_base + SEC_RAS_CE_REG);
 	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
-	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+	writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
 
 	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
 	sec_master_ooo_ctrl(qm, true);
 
 	/* enable SEC hw error interrupts */
-	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+	writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
 }
 
 static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -939,7 +1005,11 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
 
 static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
 {
+	u32 nfe;
+
 	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+	nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+	writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
 }
 
 static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -955,14 +1025,20 @@ static void sec_err_info_init(struct hisi_qm *qm)
 {
 	struct hisi_qm_err_info *err_info = &qm->err_info;
 
-	err_info->ce = QM_BASE_CE;
-	err_info->fe = 0;
+	err_info->fe = SEC_RAS_FE_ENB_MSK;
+	err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+	err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
 	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
-	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+				     SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+			SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+			SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+			SEC_RESET_MASK_CAP, qm->cap_ver);
 	err_info->msi_wr_port = BIT(0);
 	err_info->acpi_rst = "SRST";
-	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
-			QM_ACC_WB_NOT_READY_TIMEOUT;
 }
 
 static const struct hisi_qm_err_ini sec_err_ini = {
@@ -1001,11 +1077,41 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 	return ret;
 }
 
+static int sec_set_qm_algs(struct hisi_qm *qm)
+{
+	struct device *dev = &qm->pdev->dev;
+	char *algs, *ptr;
+	u64 alg_mask;
+	int i;
+
+	if (!qm->use_sva)
+		return 0;
+
+	algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+	if (!algs)
+		return -ENOMEM;
+
+	alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+
+	for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+		if (alg_mask & sec_dev_algs[i].alg_msk)
+			strcat(algs, sec_dev_algs[i].algs);
+
+	ptr = strrchr(algs, '\n');
+	if (ptr)
+		*ptr = '\0';
+
+	qm->uacce->algs = algs;
+
+	return 0;
+}
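
The sec_set_qm_algs() function above turns the device bitmap into the newline-separated algs string exported through uacce, then trims the trailing separator. A compilable userspace sketch of that string construction; the table masks below are single illustrative bits, not the driver's GENMASK_ULL() ranges:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEV_ALG_MAX_LEN 256

struct dev_alg { uint64_t msk; const char *name; };

/* Single illustrative bits; the real class masks span GENMASK_ULL() ranges. */
static const struct dev_alg tbl[] = {
	{ 1ULL << 0, "cipher\n" },
	{ 1ULL << 6, "aead\n" },
	{ 1ULL << 8, "digest\n" },
};

int main(void)
{
	char algs[DEV_ALG_MAX_LEN] = "";
	uint64_t alg_mask = (1ULL << 0) | (1ULL << 8);	/* pretend HW bitmap */
	char *ptr;
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (alg_mask & tbl[i].msk)
			strcat(algs, tbl[i].name);

	ptr = strrchr(algs, '\n');	/* trim the trailing separator */
	if (ptr)
		*ptr = '\0';

	printf("uacce algs: \"%s\"\n", algs);	/* prints cipher and digest */
	return 0;
}
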
+
 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
+	int ret;
+
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
-	qm->algs = "cipher\ndigest\naead";
 	qm->mode = uacce_mode;
 	qm->sqe_size = SEC_SQE_SIZE;
 	qm->dev_name = sec_name;
@@ -1028,7 +1134,19 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
 	}
 
-	return hisi_qm_init(qm);
+	ret = hisi_qm_init(qm);
+	if (ret) {
+		pci_err(qm->pdev, "Failed to init sec qm configuration!\n");
+		return ret;
+	}
+
+	ret = sec_set_qm_algs(qm);
+	if (ret) {
+		pci_err(qm->pdev, "Failed to set sec algs!\n");
+		hisi_qm_uninit(qm);
+	}
+
+	return ret;
 }
 
 static void sec_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 3dfd3bac5a33..f2e6da3240ae 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,7 +81,8 @@ struct hisi_zip_sqe {
 	u32 rsvd1[4];
 };
 
-int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
 int hisi_zip_register_to_crypto(struct hisi_qm *qm);
 void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
 #endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index ad35434a3fdb..6608971d10cd 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -39,6 +39,9 @@
 #define HZIP_ALG_PRIORITY			300
 #define HZIP_SGL_SGE_NR				10
 
+#define HZIP_ALG_ZLIB				GENMASK(1, 0)
+#define HZIP_ALG_GZIP				GENMASK(3, 2)
+
 static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
 static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
 	0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
@@ -123,19 +126,19 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
 	if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
 		return -EINVAL;
 
-	return param_set_int(val, kp);
+	return param_set_ushort(val, kp);
 }
 
 static const struct kernel_param_ops sgl_sge_nr_ops = {
 	.set = sgl_sge_nr_set,
-	.get = param_get_int,
+	.get = param_get_ushort,
 };
 
 static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
 module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
 MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
 
-static u16 get_extra_field_size(const u8 *start)
+static u32 get_extra_field_size(const u8 *start)
 {
 	return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
 }
@@ -167,7 +170,7 @@ static u32 __get_gzip_head_size(const u8 *src)
 	return size;
 }
 
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
 {
 	char buf[HZIP_GZIP_HEAD_BUF];
 
@@ -183,7 +186,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
 	int ret;
 
 	ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
-	if (ret != head_size) {
+	if (unlikely(ret != head_size)) {
 		pr_err("the head size of buffer is wrong (%d)!\n", ret);
 		return -ENOMEM;
 	}
@@ -193,11 +196,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
 
 static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
 {
-	if (!acomp_req->src || !acomp_req->slen)
+	if (unlikely(!acomp_req->src || !acomp_req->slen))
 		return -EINVAL;
 
-	if (req_type == HZIP_ALG_TYPE_GZIP &&
-	    acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+	if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
+		     acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
 		return -EINVAL;
 
 	switch (req_type) {
@@ -230,6 +233,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
 	}
 	set_bit(req_id, req_q->req_bitmap);
 
+	write_unlock(&req_q->req_lock);
+
 	req_cache = q + req_id;
 	req_cache->req_id = req_id;
 	req_cache->req = req;
@@ -242,8 +247,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
 		req_cache->dskip = 0;
 	}
 
-	write_unlock(&req_q->req_lock);
-
 	return req_cache;
 }
 
@@ -254,7 +257,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
 
 	write_lock(&req_q->req_lock);
 	clear_bit(req->req_id, req_q->req_bitmap);
-	memset(req, 0, sizeof(struct hisi_zip_req));
 	write_unlock(&req_q->req_lock);
 }
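
These two hunks narrow the writer-side critical sections: once a request's bit is set in the bitmap, the slot belongs to this caller, so it can be populated after dropping the lock, and clearing the bit alone is enough to recycle it. The reserve-then-fill pattern in a generic, compilable form, with a POSIX mutex standing in for the kernel rwlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t bitmap;		/* one bit per slot */
static int slots[NSLOTS];

static int reserve_slot(void)
{
	int id;

	pthread_mutex_lock(&lock);
	for (id = 0; id < NSLOTS; id++)
		if (!(bitmap & (1ULL << id)))
			break;
	if (id < NSLOTS)
		bitmap |= 1ULL << id;
	pthread_mutex_unlock(&lock);

	return id < NSLOTS ? id : -1;
}

int main(void)
{
	int id = reserve_slot();

	if (id >= 0)
		slots[id] = 42;	/* filled outside the lock: the set bit made it ours */
	printf("got slot %d\n", id);
	return 0;
}
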
 
@@ -339,7 +341,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
 	struct hisi_zip_sqe zip_sqe;
 	int ret;
 
-	if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
 		return -EINVAL;
 
 	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
@@ -365,7 +367,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
 	/* send command to start a task */
 	atomic64_inc(&dfx->send_cnt);
 	ret = hisi_qp_send(qp, &zip_sqe);
-	if (ret < 0) {
+	if (unlikely(ret < 0)) {
 		atomic64_inc(&dfx->send_busy_cnt);
 		ret = -EAGAIN;
 		dev_dbg_ratelimited(dev, "failed to send request!\n");
@@ -417,7 +419,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
 
 	atomic64_inc(&dfx->recv_cnt);
 	status = ops->get_status(sqe);
-	if (status != 0 && status != HZIP_NC_ERR) {
+	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
 		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
 			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
 			sqe->produced);
@@ -450,7 +452,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
 
 	/* let's output compression head now */
 	head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
-	if (head_size < 0) {
+	if (unlikely(head_size < 0)) {
 		dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
 				    head_size);
 		return head_size;
@@ -461,7 +463,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
 		return PTR_ERR(req);
 
 	ret = hisi_zip_do_work(req, qp_ctx);
-	if (ret != -EINPROGRESS) {
+	if (unlikely(ret != -EINPROGRESS)) {
 		dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
 		hisi_zip_remove_req(qp_ctx, req);
 	}
@@ -478,7 +480,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
 	int head_size, ret;
 
 	head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
-	if (head_size < 0) {
+	if (unlikely(head_size < 0)) {
 		dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
 				    head_size);
 		return head_size;
@@ -489,7 +491,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
 		return PTR_ERR(req);
 
 	ret = hisi_zip_do_work(req, qp_ctx);
-	if (ret != -EINPROGRESS) {
+	if (unlikely(ret != -EINPROGRESS)) {
 		dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
 				     ret);
 		hisi_zip_remove_req(qp_ctx, req);
@@ -498,7 +500,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
 	return ret;
 }
 
-static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
 			     int alg_type, int req_type)
 {
 	struct device *dev = &qp->qm->pdev->dev;
@@ -506,7 +508,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
 
 	qp->req_type = req_type;
 	qp->alg_type = alg_type;
-	qp->qp_ctx = ctx;
+	qp->qp_ctx = qp_ctx;
 
 	ret = hisi_qm_start_qp(qp, 0);
 	if (ret < 0) {
@@ -514,15 +516,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
 		return ret;
 	}
 
-	ctx->qp = qp;
+	qp_ctx->qp = qp;
 
 	return 0;
 }
 
-static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
+static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
 {
-	hisi_qm_stop_qp(ctx->qp);
-	hisi_qm_free_qps(&ctx->qp, 1);
+	hisi_qm_stop_qp(qp_ctx->qp);
+	hisi_qm_free_qps(&qp_ctx->qp, 1);
 }
 
 static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
@@ -594,18 +596,19 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
 {
 	int i;
 
-	for (i = 1; i >= 0; i--)
+	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
 		hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
 }
 
 static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
 {
+	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
 	struct hisi_zip_req_q *req_q;
 	int i, ret;
 
 	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
 		req_q = &ctx->qp_ctx[i].req_q;
-		req_q->size = QM_Q_DEPTH;
+		req_q->size = q_depth;
 
 		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
 		if (!req_q->req_bitmap) {
@@ -613,7 +616,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
 			if (i == 0)
 				return ret;
 
-			goto err_free_loop0;
+			goto err_free_comp_q;
 		}
 		rwlock_init(&req_q->req_lock);
 
@@ -622,19 +625,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
 		if (!req_q->q) {
 			ret = -ENOMEM;
 			if (i == 0)
-				goto err_free_bitmap;
+				goto err_free_comp_bitmap;
 			else
-				goto err_free_loop1;
+				goto err_free_decomp_bitmap;
 		}
 	}
 
 	return 0;
 
-err_free_loop1:
+err_free_decomp_bitmap:
 	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
-err_free_loop0:
+err_free_comp_q:
 	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
-err_free_bitmap:
+err_free_comp_bitmap:
 	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
 	return ret;
 }
@@ -651,6 +654,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
 
 static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
 {
+	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
 	struct hisi_zip_qp_ctx *tmp;
 	struct device *dev;
 	int i;
@@ -658,7 +662,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
 	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
 		tmp = &ctx->qp_ctx[i];
 		dev = &tmp->qp->qm->pdev->dev;
-		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
 							 sgl_sge_nr);
 		if (IS_ERR(tmp->sgl_pool)) {
 			if (i == 1)
@@ -755,6 +759,28 @@ static struct acomp_alg hisi_zip_acomp_zlib = {
 	}
 };
 
+static int hisi_zip_register_zlib(struct hisi_qm *qm)
+{
+	int ret;
+
+	if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+		return 0;
+
+	ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
+	if (ret)
+		dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret);
+
+	return ret;
+}
+
+static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
+{
+	if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+		return;
+
+	crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
 static struct acomp_alg hisi_zip_acomp_gzip = {
 	.init			= hisi_zip_acomp_init,
 	.exit			= hisi_zip_acomp_exit,
@@ -769,27 +795,45 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
 	}
 };
 
-int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+static int hisi_zip_register_gzip(struct hisi_qm *qm)
 {
 	int ret;
 
-	ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
-	if (ret) {
-		pr_err("failed to register to zlib (%d)!\n", ret);
-		return ret;
-	}
+	if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+		return 0;
 
 	ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
-	if (ret) {
-		pr_err("failed to register to gzip (%d)!\n", ret);
-		crypto_unregister_acomp(&hisi_zip_acomp_zlib);
-	}
+	if (ret)
+		dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret);
 
 	return ret;
 }
 
-void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
 {
+	if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+		return;
+
 	crypto_unregister_acomp(&hisi_zip_acomp_gzip);
-	crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+{
+	int ret = 0;
+
+	ret = hisi_zip_register_zlib(qm);
+	if (ret)
+		return ret;
+
+	ret = hisi_zip_register_gzip(qm);
+	if (ret)
+		hisi_zip_unregister_zlib(qm);
+
+	return ret;
+}
+
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+{
+	hisi_zip_unregister_zlib(qm);
+	hisi_zip_unregister_gzip(qm);
 }
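
The reworked registration path above registers each format only when the capability bitmap advertises it, and unwinds zlib if gzip registration fails afterwards. The unwind pattern reduced to a standalone sketch; all function names and the -22 (-EINVAL) stand-in are made up:

#include <stdio.h>

static int zlib_registered;
static int gzip_should_fail = 1;

static int register_zlib(void)    { zlib_registered = 1; return 0; }
static void unregister_zlib(void) { zlib_registered = 0; }
static int register_gzip(void)    { return gzip_should_fail ? -22 : 0; }

static int register_all(void)
{
	int ret = register_zlib();

	if (ret)
		return ret;

	ret = register_gzip();
	if (ret)
		unregister_zlib();	/* unwind the earlier success */

	return ret;
}

int main(void)
{
	int ret = register_all();

	printf("register_all: %d, zlib still registered: %d\n",
	       ret, zlib_registered);
	return 0;
}
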
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c3303d99acac..c863435e8c75 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -20,18 +20,6 @@
 #define HZIP_QUEUE_NUM_V1		4096
 
 #define HZIP_CLOCK_GATE_CTRL		0x301004
-#define COMP0_ENABLE			BIT(0)
-#define COMP1_ENABLE			BIT(1)
-#define DECOMP0_ENABLE			BIT(2)
-#define DECOMP1_ENABLE			BIT(3)
-#define DECOMP2_ENABLE			BIT(4)
-#define DECOMP3_ENABLE			BIT(5)
-#define DECOMP4_ENABLE			BIT(6)
-#define DECOMP5_ENABLE			BIT(7)
-#define HZIP_ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE | \
-					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
-					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
-					 DECOMP4_ENABLE | DECOMP5_ENABLE)
 #define HZIP_DECOMP_CHECK_ENABLE	BIT(16)
 #define HZIP_FSM_MAX_CNT		0x301008
 
@@ -69,20 +57,14 @@
 #define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
 #define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
 #define HZIP_CORE_INT_RAS_CE_ENB	0x301160
-#define HZIP_CORE_INT_RAS_CE_ENABLE	0x1
 #define HZIP_CORE_INT_RAS_NFE_ENB	0x301164
 #define HZIP_CORE_INT_RAS_FE_ENB        0x301168
+#define HZIP_CORE_INT_RAS_FE_ENB_MASK	0x0
 #define HZIP_OOO_SHUTDOWN_SEL		0x30120C
-#define HZIP_CORE_INT_RAS_NFE_ENABLE	0x1FFE
 #define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
 #define HZIP_CORE_INT_MASK_ALL		GENMASK(12, 0)
-#define HZIP_COMP_CORE_NUM		2
-#define HZIP_DECOMP_CORE_NUM		6
-#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
-					 HZIP_DECOMP_CORE_NUM)
 #define HZIP_SQE_SIZE			128
-#define HZIP_SQ_SIZE			(HZIP_SQE_SIZE * QM_Q_DEPTH)
 #define HZIP_PF_DEF_Q_NUM		64
 #define HZIP_PF_DEF_Q_BASE		0
 
@@ -92,6 +74,12 @@
 #define HZIP_AXI_SHUTDOWN_ENABLE	BIT(14)
 #define HZIP_WR_PORT			BIT(11)
 
+#define HZIP_DEV_ALG_MAX_LEN		256
+#define HZIP_ALG_ZLIB_BIT		GENMASK(1, 0)
+#define HZIP_ALG_GZIP_BIT		GENMASK(3, 2)
+#define HZIP_ALG_DEFLATE_BIT		GENMASK(5, 4)
+#define HZIP_ALG_LZ77_BIT		GENMASK(7, 6)
+
 #define HZIP_BUF_SIZE			22
 #define HZIP_SQE_MASK_OFFSET		64
 #define HZIP_SQE_MASK_LEN		48
@@ -132,6 +120,26 @@ struct zip_dfx_item {
 	u32 offset;
 };
 
+struct zip_dev_alg {
+	u32 alg_msk;
+	const char *algs;
+};
+
+static const struct zip_dev_alg zip_dev_algs[] = { {
+		.alg_msk = HZIP_ALG_ZLIB_BIT,
+		.algs = "zlib\n",
+	}, {
+		.alg_msk = HZIP_ALG_GZIP_BIT,
+		.algs = "gzip\n",
+	}, {
+		.alg_msk = HZIP_ALG_DEFLATE_BIT,
+		.algs = "deflate\n",
+	}, {
+		.alg_msk = HZIP_ALG_LZ77_BIT,
+		.algs = "lz77_zstd\n",
+	},
+};
+
 static struct hisi_qm_list zip_devices = {
 	.register_to_crypto	= hisi_zip_register_to_crypto,
 	.unregister_from_crypto	= hisi_zip_unregister_from_crypto,
@@ -187,6 +195,58 @@ struct hisi_zip_ctrl {
 	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
 };
 
+enum zip_cap_type {
+	ZIP_QM_NFE_MASK_CAP = 0x0,
+	ZIP_QM_RESET_MASK_CAP,
+	ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
+	ZIP_QM_CE_MASK_CAP,
+	ZIP_NFE_MASK_CAP,
+	ZIP_RESET_MASK_CAP,
+	ZIP_OOO_SHUTDOWN_MASK_CAP,
+	ZIP_CE_MASK_CAP,
+	ZIP_CLUSTER_NUM_CAP,
+	ZIP_CORE_TYPE_NUM_CAP,
+	ZIP_CORE_NUM_CAP,
+	ZIP_CLUSTER_COMP_NUM_CAP,
+	ZIP_CLUSTER_DECOMP_NUM_CAP,
+	ZIP_DECOMP_ENABLE_BITMAP,
+	ZIP_COMP_ENABLE_BITMAP,
+	ZIP_DRV_ALG_BITMAP,
+	ZIP_DEV_ALG_BITMAP,
+	ZIP_CORE1_ALG_BITMAP,
+	ZIP_CORE2_ALG_BITMAP,
+	ZIP_CORE3_ALG_BITMAP,
+	ZIP_CORE4_ALG_BITMAP,
+	ZIP_CORE5_ALG_BITMAP,
+	ZIP_CAP_MAX
+};
+
+static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+	{ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
+	{ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
+	{ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+	{ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+	{ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
+	{ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
+	{ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
+	{ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+	{ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
+	{ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
+	{ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
+	{ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
+	{ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
+	{ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
+	{ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
+	{ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
+	{ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
+	{ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+	{ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+	{ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+	{ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+	{ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+	{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+};
+
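
Each row of zip_basic_cap_info pairs a register offset and bit field with per-version fallback defaults in the last three columns. The extraction below assumes the common QM helper does a plain shift-and-mask on the register value; that detail is an assumption of this sketch, not quoted from qm.c:

#include <stdint.h>
#include <stdio.h>

struct cap_info {
	uint32_t offset;	/* register offset in the BAR */
	uint32_t shift;		/* field position within the register */
	uint32_t mask;		/* field width, applied after the shift */
	uint32_t v1, v2, v3;	/* per-hardware-version fallback defaults */
};

static uint32_t cap_field(uint32_t reg_val, const struct cap_info *c)
{
	return (reg_val >> c->shift) & c->mask;
}

int main(void)
{
	/* shaped like the ZIP_CORE_NUM_CAP row: bits 23:16 of reg 0x313C */
	const struct cap_info core_num = { 0x313C, 16, 0xFF, 8, 8, 5 };
	uint32_t reg = 0x00050000;	/* illustrative raw register value */

	printf("core num = %u\n", cap_field(reg, &core_num));	/* 5 */
	return 0;
}
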
 enum {
 	HZIP_COMP_CORE0,
 	HZIP_COMP_CORE1,
@@ -343,12 +403,52 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
 	return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
 }
 
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+{
+	u32 cap_val;
+
+	cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
+	if ((alg & cap_val) == alg)
+		return true;
+
+	return false;
+}
+
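
hisi_zip_alg_support() is a subset test: every bit of the algorithm mask must be present in the capability word, not merely some overlap. A standalone illustration using the driver's zlib/gzip/deflate bit pairs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool alg_supported(uint32_t cap, uint32_t alg)
{
	return (alg & cap) == alg;	/* all bits of alg must be present */
}

int main(void)
{
	uint32_t cap = 0xF;	/* zlib (bits 1:0) + gzip (bits 3:2), as on v2 */

	printf("zlib:    %d\n", alg_supported(cap, 0x03));	/* 1 */
	printf("deflate: %d\n", alg_supported(cap, 0x30));	/* 0 */
	return 0;
}
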
+static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
+{
+	struct device *dev = &qm->pdev->dev;
+	char *algs, *ptr;
+	u32 alg_mask;
+	int i;
+
+	if (!qm->use_sva)
+		return 0;
+
+	algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+	if (!algs)
+		return -ENOMEM;
+
+	alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+
+	for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+		if (alg_mask & zip_dev_algs[i].alg_msk)
+			strcat(algs, zip_dev_algs[i].algs);
+
+	ptr = strrchr(algs, '\n');
+	if (ptr)
+		*ptr = '\0';
+
+	qm->uacce->algs = algs;
+
+	return 0;
+}
+
 static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
 {
 	u32 val;
 	int ret;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	/* Enable prefetch */
@@ -368,7 +468,7 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
 	u32 val;
 	int ret;
 
-	if (qm->ver < QM_HW_V3)
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
 		return;
 
 	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
@@ -401,6 +501,7 @@ static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
 static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 {
 	void __iomem *base = qm->io_base;
+	u32 dcomp_bm, comp_bm;
 
 	/* qm user domain */
 	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -438,8 +539,11 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 	}
 
 	/* let's open all compression/decompression cores */
-	writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
-	       base + HZIP_CLOCK_GATE_CTRL);
+	dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+				       ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+	comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+				      ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
+	writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
 
 	/* enable sqc,cqc writeback */
 	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
@@ -458,7 +562,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 	val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
 	if (enable) {
 		val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
-		val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+		val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+				ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
 	} else {
 		val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
 		val2 = 0x0;
@@ -472,6 +577,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
 
 static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
 {
+	u32 nfe, ce;
+
 	if (qm->ver == QM_HW_V1) {
 		writel(HZIP_CORE_INT_MASK_ALL,
 		       qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -479,17 +586,17 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
 		return;
 	}
 
+	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+
 	/* clear ZIP hw error source if having */
-	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
 
 	/* configure error type */
-	writel(HZIP_CORE_INT_RAS_CE_ENABLE,
-	       qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
-	writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
-	writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
-	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+	writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+	writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+	writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
 
-	/* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
 	hisi_zip_master_ooo_ctrl(qm, true);
 
 	/* enable ZIP hw error interrupts */
@@ -498,10 +605,13 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
 
 static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
 {
+	u32 nfe, ce;
+
 	/* disable ZIP hw error interrupts */
-	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
 
-	/* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
 	hisi_zip_master_ooo_ctrl(qm, false);
 }
 
@@ -586,8 +696,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
 		return len;
 
 	tbuf[len] = '\0';
-	if (kstrtoul(tbuf, 0, &val))
-		return -EFAULT;
+	ret = kstrtoul(tbuf, 0, &val);
+	if (ret)
+		return ret;
 
 	ret = hisi_qm_get_dfx_access(qm);
 	if (ret)
@@ -651,18 +762,23 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
 
 static int hisi_zip_core_debug_init(struct hisi_qm *qm)
 {
+	u32 zip_core_num, zip_comp_core_num;
 	struct device *dev = &qm->pdev->dev;
 	struct debugfs_regset32 *regset;
 	struct dentry *tmp_d;
 	char buf[HZIP_BUF_SIZE];
 	int i;
 
-	for (i = 0; i < HZIP_CORE_NUM; i++) {
-		if (i < HZIP_COMP_CORE_NUM)
+	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+	zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+						qm->cap_ver);
+
+	for (i = 0; i < zip_core_num; i++) {
+		if (i < zip_comp_core_num)
 			scnprintf(buf, sizeof(buf), "comp_core%d", i);
 		else
 			scnprintf(buf, sizeof(buf), "decomp_core%d",
-				  i - HZIP_COMP_CORE_NUM);
+				  i - zip_comp_core_num);
 
 		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
 		if (!regset)
@@ -675,7 +791,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
 
 		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
 		debugfs_create_file("regs", 0444, tmp_d, regset,
-				     &hisi_zip_regs_fops);
+				    &hisi_zip_regs_fops);
 	}
 
 	return 0;
@@ -795,10 +911,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
 	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
 	struct qm_debug *debug = &qm->debug;
 	void __iomem *io_base;
+	u32 zip_core_num;
 	int i, j, idx;
 
-	debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
-			com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+
+	debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+				    sizeof(unsigned int), GFP_KERNEL);
 	if (!debug->last_words)
 		return -ENOMEM;
 
@@ -807,7 +926,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
 		debug->last_words[i] = readl_relaxed(io_base);
 	}
 
-	for (i = 0; i < HZIP_CORE_NUM; i++) {
+	for (i = 0; i < zip_core_num; i++) {
 		io_base = qm->io_base + core_offsets[i];
 		for (j = 0; j < core_dfx_regs_num; j++) {
 			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
@@ -834,6 +953,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
 {
 	int core_dfx_regs_num =  ARRAY_SIZE(hzip_dump_dfx_regs);
 	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+	u32 zip_core_num, zip_comp_core_num;
 	struct qm_debug *debug = &qm->debug;
 	char buf[HZIP_BUF_SIZE];
 	void __iomem *base;
@@ -847,15 +967,18 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
 		val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
 		if (debug->last_words[i] != val)
 			pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
-				hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+				 hzip_com_dfx_regs[i].name, debug->last_words[i], val);
 	}
 
-	for (i = 0; i < HZIP_CORE_NUM; i++) {
-		if (i < HZIP_COMP_CORE_NUM)
+	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+	zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+						qm->cap_ver);
+	for (i = 0; i < zip_core_num; i++) {
+		if (i < zip_comp_core_num)
 			scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
 		else
 			scnprintf(buf, sizeof(buf), "Decomp_core-%d",
-				  i - HZIP_COMP_CORE_NUM);
+				  i - zip_comp_core_num);
 		base = qm->io_base + core_offsets[i];
 
 		pci_info(qm->pdev, "==>%s:\n", buf);
@@ -865,7 +988,8 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
 			val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
 			if (debug->last_words[idx] != val)
 				pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
-					hzip_dump_dfx_regs[j].name, debug->last_words[idx], val);
+					 hzip_dump_dfx_regs[j].name,
+					 debug->last_words[idx], val);
 		}
 	}
 }
@@ -900,7 +1024,11 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
 
 static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
 {
+	u32 nfe;
+
 	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+	writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
 }
 
 static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -934,16 +1062,21 @@ static void hisi_zip_err_info_init(struct hisi_qm *qm)
 {
 	struct hisi_qm_err_info *err_info = &qm->err_info;
 
-	err_info->ce = QM_BASE_CE;
-	err_info->fe = 0;
+	err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+	err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+	err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+					    ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
 	err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
-	err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+	err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+							 ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+							  ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+	err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+						      ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+	err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+						       ZIP_RESET_MASK_CAP, qm->cap_ver);
 	err_info->msi_wr_port = HZIP_WR_PORT;
 	err_info->acpi_rst = "ZRST";
-	err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
-
-	if (qm->ver >= QM_HW_V3)
-		err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
 }
 
 static const struct hisi_qm_err_ini hisi_zip_err_ini = {
@@ -976,7 +1109,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 	qm->err_ini = &hisi_zip_err_ini;
 	qm->err_ini->err_info_init(qm);
 
-	hisi_zip_set_user_domain_and_cache(qm);
+	ret = hisi_zip_set_user_domain_and_cache(qm);
+	if (ret)
+		return ret;
+
 	hisi_zip_open_sva_prefetch(qm);
 	hisi_qm_dev_err_init(qm);
 	hisi_zip_debug_regs_clear(qm);
@@ -990,12 +1126,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 
 static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
+	int ret;
+
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
-	if (pdev->revision >= QM_HW_V3)
-		qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
-	else
-		qm->algs = "zlib\ngzip";
 	qm->mode = uacce_mode;
 	qm->sqe_size = HZIP_SQE_SIZE;
 	qm->dev_name = hisi_zip_name;
@@ -1019,7 +1153,19 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
 	}
 
-	return hisi_qm_init(qm);
+	ret = hisi_qm_init(qm);
+	if (ret) {
+		pci_err(qm->pdev, "Failed to init zip qm configuration!\n");
+		return ret;
+	}
+
+	ret = hisi_zip_set_qm_algs(qm);
+	if (ret) {
+		pci_err(qm->pdev, "Failed to set zip algs!\n");
+		hisi_qm_uninit(qm);
+	}
+
+	return ret;
 }
 
 static void hisi_zip_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d68ef16650d4..32a37e3850c5 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -63,7 +63,6 @@ struct safexcel_cipher_ctx {
 	u32 hash_alg;
 	u32 state_sz;
 
-	struct crypto_cipher *hkaes;
 	struct crypto_aead *fback;
 };
 
@@ -642,10 +641,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	safexcel_complete(priv, ring);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
+		if (sreq->nr_dst > 0)
+			dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+				     DMA_FROM_DEVICE);
 	}
 
 	/*
@@ -737,23 +742,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 				max(totlen_src, totlen_dst));
 			return -EINVAL;
 		}
-		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+		if (sreq->nr_src > 0)
+			dma_map_sg(priv->dev, src, sreq->nr_src,
+				   DMA_BIDIRECTIONAL);
 	} else {
 		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
 			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
 				totlen_src);
 			return -EINVAL;
 		}
-		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+
+		if (sreq->nr_src > 0)
+			dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
 
 		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
 			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
 				totlen_dst);
-			dma_unmap_sg(priv->dev, src, sreq->nr_src,
-				     DMA_TO_DEVICE);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto unmap;
 		}
-		dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+
+		if (sreq->nr_dst > 0)
+			dma_map_sg(priv->dev, dst, sreq->nr_dst,
+				   DMA_FROM_DEVICE);
 	}
 
 	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
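
The added nr_src/nr_dst guards keep dma_map_sg() and dma_unmap_sg() from being called with zero entries, which the DMA API does not allow (zero-length requests yield empty scatterlists). The same idea as a hypothetical wrapper, sketched for kernel context; map_sg_if_any() is not part of the driver:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical wrapper: an empty scatterlist is simply skipped. */
static int map_sg_if_any(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir)
{
	if (nents <= 0)
		return 0;	/* nothing to map */

	return dma_map_sg(dev, sg, nents, dir);
}
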
@@ -883,12 +894,18 @@ rdesc_rollback:
 cdesc_rollback:
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-
+unmap:
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+		if (sreq->nr_src > 0)
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
+		if (sreq->nr_dst > 0)
+			dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+				     DMA_FROM_DEVICE);
 	}
 
 	return ret;
@@ -2589,15 +2606,8 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
 	ctx->key_len = len;
 
 	/* Compute hash key by encrypting zeroes with cipher key */
-	crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
-				CRYPTO_TFM_REQ_MASK);
-	ret = crypto_cipher_setkey(ctx->hkaes, key, len);
-	if (ret)
-		return ret;
-
 	memset(hashkey, 0, AES_BLOCK_SIZE);
-	crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
+	aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey);
 
 	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
 		for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
@@ -2626,15 +2636,11 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
 	ctx->xcm = EIP197_XCM_MODE_GCM;
 	ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
 
-	ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
-	return PTR_ERR_OR_ZERO(ctx->hkaes);
+	return 0;
 }
 
 static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
 {
-	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_cipher(ctx->hkaes);
 	safexcel_aead_cra_exit(tfm);
 }
 
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index bc60b5802256..103fc551d2af 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -30,7 +30,7 @@ struct safexcel_ahash_ctx {
 	bool fb_init_done;
 	bool fb_do_setkey;
 
-	struct crypto_cipher *kaes;
+	struct crypto_aes_ctx *aes;
 	struct crypto_ahash *fback;
 	struct crypto_shash *shpre;
 	struct shash_desc *shdesc;
@@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 					u32 x;
 
 					x = ipad[i] ^ ipad[i + 4];
-					cache[i] ^= swab(x);
+					cache[i] ^= swab32(x);
 				}
 			}
 			cache_len = AES_BLOCK_SIZE;
@@ -821,10 +821,10 @@ static int safexcel_ahash_final(struct ahash_request *areq)
 			u32 *result = (void *)areq->result;
 
 			/* K3 */
-			result[i] = swab(ctx->base.ipad.word[i + 4]);
+			result[i] = swab32(ctx->base.ipad.word[i + 4]);
 		}
 		areq->result[0] ^= 0x80;			// 10- padding
-		crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+		aes_encrypt(ctx->aes, areq->result, areq->result);
 		return 0;
 	} else if (unlikely(req->hmac &&
 			    (req->len == req->block_sz) &&
@@ -2083,37 +2083,26 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 				 unsigned int len)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-	struct crypto_aes_ctx aes;
 	u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
 	int ret, i;
 
-	ret = aes_expandkey(&aes, key, len);
+	ret = aes_expandkey(ctx->aes, key, len);
 	if (ret)
 		return ret;
 
 	/* precompute the XCBC key material */
-	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
-				CRYPTO_TFM_REQ_MASK);
-	ret = crypto_cipher_setkey(ctx->kaes, key, len);
-	if (ret)
-		return ret;
-
-	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
-		"\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
-	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
-		"\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
-	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
-		"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+	aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+		    "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+	aes_encrypt(ctx->aes, (u8 *)key_tmp,
+		    "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+	aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+		    "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
 	for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
-		ctx->base.ipad.word[i] = swab(key_tmp[i]);
-
-	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
-				CRYPTO_TFM_REQ_MASK);
-	ret = crypto_cipher_setkey(ctx->kaes,
-				   (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
-				   AES_MIN_KEY_SIZE);
+		ctx->base.ipad.word[i] = swab32(key_tmp[i]);
+
+	ret = aes_expandkey(ctx->aes,
+			    (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+			    AES_MIN_KEY_SIZE);
 	if (ret)
 		return ret;
 
@@ -2121,7 +2110,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 	ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
 	ctx->cbcmac = false;
 
-	memzero_explicit(&aes, sizeof(aes));
 	return 0;
 }
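
The XCBC key material is three AES encryptions of fixed constants under the user key, now computed with the library API instead of a crypto_cipher transform. A hypothetical standalone version of just the derivation (xcbc_derive_keys() is made up; the driver's byte-swapping into its ipad layout is omitted):

#include <crypto/aes.h>
#include <linux/types.h>

/* K1/K2/K3 = E_K(0x01..01), E_K(0x02..02), E_K(0x03..03) */
static int xcbc_derive_keys(struct crypto_aes_ctx *aes, const u8 *key,
			    unsigned int keylen, u8 out[3][AES_BLOCK_SIZE])
{
	static const u8 c1[AES_BLOCK_SIZE] = { [0 ... 15] = 0x01 };
	static const u8 c2[AES_BLOCK_SIZE] = { [0 ... 15] = 0x02 };
	static const u8 c3[AES_BLOCK_SIZE] = { [0 ... 15] = 0x03 };
	int ret;

	ret = aes_expandkey(aes, key, keylen);
	if (ret)
		return ret;

	aes_encrypt(aes, out[0], c1);
	aes_encrypt(aes, out[1], c2);
	aes_encrypt(aes, out[2], c3);
	return 0;
}
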
 
@@ -2130,15 +2118,15 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	safexcel_ahash_cra_init(tfm);
-	ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
-	return PTR_ERR_OR_ZERO(ctx->kaes);
+	ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
+	return ctx->aes ? 0 : -ENOMEM;
 }
 
 static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypto_free_cipher(ctx->kaes);
+	kfree(ctx->aes);
 	safexcel_ahash_cra_exit(tfm);
 }
 
@@ -2178,31 +2166,23 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 				unsigned int len)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-	struct crypto_aes_ctx aes;
 	__be64 consts[4];
 	u64 _const[2];
 	u8 msb_mask, gfmask;
 	int ret, i;
 
-	ret = aes_expandkey(&aes, key, len);
+	/* precompute the CMAC key material */
+	ret = aes_expandkey(ctx->aes, key, len);
 	if (ret)
 		return ret;
 
 	for (i = 0; i < len / sizeof(u32); i++)
-		ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
-
-	/* precompute the CMAC key material */
-	crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
-				CRYPTO_TFM_REQ_MASK);
-	ret = crypto_cipher_setkey(ctx->kaes, key, len);
-	if (ret)
-		return ret;
+		ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
 
 	/* code below borrowed from crypto/cmac.c */
 	/* encrypt the zero block */
 	memset(consts, 0, AES_BLOCK_SIZE);
-	crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+	aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts);
 
 	gfmask = 0x87;
 	_const[0] = be64_to_cpu(consts[1]);
@@ -2234,7 +2214,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
 	}
 	ctx->cbcmac = false;
 
-	memzero_explicit(&aes, sizeof(aes));
 	return 0;
 }
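
The "code below borrowed from crypto/cmac.c" in this function is the GF(2^128) doubling that turns L = E_K(0) into the CMAC subkeys: shift the 128-bit block left one bit and fold in the constant 0x87 when a bit falls off the top. A compilable userspace version of that single step:

#include <stdint.h>
#include <stdio.h>

static void gf128_double(uint8_t out[16], const uint8_t in[16])
{
	uint8_t carry = 0;
	int i;

	/* big-endian block: the carry ripples from byte 15 toward byte 0 */
	for (i = 15; i >= 0; i--) {
		uint8_t b = in[i];

		out[i] = (uint8_t)((b << 1) | carry);
		carry = b >> 7;
	}
	if (carry)
		out[15] ^= 0x87;	/* reduction constant for GF(2^128) */
}

int main(void)
{
	uint8_t L[16] = { 0x80 };	/* illustrative E_K(0) with its MSB set */
	uint8_t K1[16];

	gf128_double(K1, L);
	printf("K1[0]=0x%02x K1[15]=0x%02x\n", K1[0], K1[15]);	/* 0x00 0x87 */
	return 0;
}
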
 
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 7942b48dd55a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
 config CRYPTO_DEV_KEEMBAY_OCS_ECC
 	tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
 	depends on ARCH_KEEMBAY || COMPILE_TEST
-	depends on OF || COMPILE_TEST
+	depends on OF
 	depends on HAS_IOMEM
 	select CRYPTO_ECDH
 	select CRYPTO_ENGINE
@@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU
 	select CRYPTO_ENGINE
 	depends on HAS_IOMEM
 	depends on ARCH_KEEMBAY || COMPILE_TEST
-	depends on OF || COMPILE_TEST
+	depends on OF
 	help
 	  Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
 	  Control Unit (HCU) hardware acceleration for use with Crypto API.
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index b8bdb9f134f3..205eacac4a34 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status {
  *	big-endian format in memory.
  *  iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
  *	0 = The hardware issues NCB transient load (LDT) towards the cache,
- *	which if the line hits and is is dirty will cause the line to be
+ *	which if the line hits and is dirty will cause the line to be
  *	written back before being replaced.
  *	1 = The hardware issues NCB LDWB read-and-invalidate command towards
  *	the cache when fetching the last word of instructions; as a result the
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b482198ebc..df9c2b8747e6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -97,7 +97,7 @@ static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
 static void set_ucode_filename(struct otx_cpt_ucode *ucode,
 			       const char *filename)
 {
-	strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+	strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
 }
 
 static char *get_eng_type_str(int eng_type)
@@ -138,7 +138,7 @@ static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
 	u32 i, val = 0;
 	u8 nn;
 
-	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
 	for (i = 0; i < strlen(tmp_ver_str); i++)
 		tmp_ver_str[i] = tolower(tmp_ver_str[i]);
 
@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev,
 	struct tar_ucode_info_t *tar_info;
 	struct otx_cpt_ucode_hdr *ucode_hdr;
 	int ucode_type, ucode_size;
+	unsigned int code_length;
 
 	/*
 	 * If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev,
 	if (get_ucode_type(ucode_hdr, &ucode_type))
 		return 0;
 
-	ucode_size = ntohl(ucode_hdr->code_length) * 2;
+	code_length = ntohl(ucode_hdr->code_length);
+	if (code_length >= INT_MAX / 2) {
+		dev_err(dev, "Invalid code_length %u\n", code_length);
+		return -EINVAL;
+	}
+
+	ucode_size = code_length * 2;
 	if (!ucode_size || (size < round_up(ucode_size, 16) +
 	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
 		dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
 {
 	struct otx_cpt_ucode_hdr *ucode_hdr;
 	const struct firmware *fw;
+	unsigned int code_length;
 	int ret;
 
 	set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
 	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
 	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
 	ucode->ver_num = ucode_hdr->ver_num;
-	ucode->size = ntohl(ucode_hdr->code_length) * 2;
+	code_length = ntohl(ucode_hdr->code_length);
+	if (code_length >= INT_MAX / 2) {
+		dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+		ret = -EINVAL;
+		goto release_fw;
+	}
+	ucode->size = code_length * 2;
 	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
 	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
 		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
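
The new bound in both load paths exists because code_length is a 32-bit header field that gets doubled into a signed int: any value at or above INT_MAX/2 would overflow, and a crafted image could wrap the size to a small positive number that slips past the later checks. A standalone demonstration of the guard:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t code_length = 0x80000001u;	/* hostile header field */
	int ucode_size;

	if (code_length >= INT_MAX / 2) {
		printf("rejected code_length %u\n", code_length);
		return 1;
	}

	ucode_size = (int)(code_length * 2);	/* now provably < INT_MAX */
	printf("ucode_size = %d\n", ucode_size);
	return 0;
}
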
@@ -1328,7 +1342,7 @@ static ssize_t ucode_load_store(struct device *dev,
 
 	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
 	err_msg = "Invalid engine group format";
-	strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+	strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
 	start = tmp_buf;
 
 	has_se = has_ie = has_ae = false;
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index 36d72e35ebeb..88a41d1ca5f6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -661,7 +661,7 @@ static ssize_t vf_type_show(struct device *dev,
 		msg = "Invalid";
 	}
 
-	return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+	return sysfs_emit(buf, "%s\n", msg);
 }
 
 static ssize_t vf_engine_group_show(struct device *dev,
@@ -670,7 +670,7 @@ static ssize_t vf_engine_group_show(struct device *dev,
 {
 	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+	return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
 }
 
 static ssize_t vf_engine_group_store(struct device *dev,
@@ -706,7 +706,7 @@ static ssize_t vf_coalesc_time_wait_show(struct device *dev,
 {
 	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n",
+	return sysfs_emit(buf, "%d\n",
 			 cptvf_read_vq_done_timewait(cptvf));
 }
 
@@ -716,7 +716,7 @@ static ssize_t vf_coalesc_num_wait_show(struct device *dev,
 {
 	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%d\n",
+	return sysfs_emit(buf, "%d\n",
 			 cptvf_read_vq_done_numwait(cptvf));
 }
 
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
index 5663787c7a62..90fdafb7c468 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -159,12 +159,10 @@ static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
 int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
 {
 	struct otx_cpt_mbox mbx = {};
-	int ret;
 
 	mbx.msg = OTX_CPT_MSG_READY;
-	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 
-	return ret;
+	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 }
 
 /*
@@ -174,13 +172,11 @@ int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
 int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
 {
 	struct otx_cpt_mbox mbx = {};
-	int ret;
 
 	mbx.msg = OTX_CPT_MSG_QLEN;
 	mbx.data = cptvf->qsize;
-	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 
-	return ret;
+	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 }
 
 /*
@@ -208,14 +204,12 @@ int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
 int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
 {
 	struct otx_cpt_mbox mbx = {};
-	int ret;
 
 	mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
 	/* Convey group of the VF */
 	mbx.data = cptvf->priority;
-	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 
-	return ret;
+	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 }
 
 /*
@@ -224,12 +218,10 @@ int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
 int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
 {
 	struct otx_cpt_mbox mbx = {};
-	int ret;
 
 	mbx.msg = OTX_CPT_MSG_VF_UP;
-	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 
-	return ret;
+	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 }
 
 /*
@@ -238,10 +230,8 @@ int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
 int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
 {
 	struct otx_cpt_mbox mbx = {};
-	int ret;
 
 	mbx.msg = OTX_CPT_MSG_VF_DOWN;
-	ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 
-	return ret;
+	return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
 }
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index f10050fead16..1577986677f6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -68,7 +68,7 @@ static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
 static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
 			       const char *filename)
 {
-	strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+	strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
 }
 
 static char *get_eng_type_str(int eng_type)
@@ -126,7 +126,7 @@ static int get_ucode_type(struct device *dev,
 	int i, val = 0;
 	u8 nn;
 
-	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
 	for (i = 0; i < strlen(tmp_ver_str); i++)
 		tmp_ver_str[i] = tolower(tmp_ver_str[i]);
 
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 02cb9e44afd8..75c403f2b1d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -191,7 +191,6 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
 	struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
 	struct pci_dev *pdev = cptvf->pdev;
 	struct mbox_msghdr *req;
-	int ret;
 
 	req = (struct mbox_msghdr *)
 	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
@@ -204,7 +203,5 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
 	req->sig = OTX2_MBOX_REQ_SIG;
 	req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
 
-	ret = otx2_cpt_send_mbox_msg(mbox, pdev);
-
-	return ret;
+	return otx2_cpt_send_mbox_msg(mbox, pdev);
 }
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 3b0bf6fea491..31e24df18877 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void)
  *
  * So we have to back-translate, going through the 'intr' and 'ino'
  * property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to to figure out which
+ * 'interrupts' property entries, in order to figure out which
  * devino goes to which already-translated IRQ.
  */
 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 3793885f928d..c843f4c6f684 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -134,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
 		       unsigned int cryptlen, u8 *b0)
 {
 	unsigned int l, lp, m = authsize;
-	int rc;
 
 	memcpy(b0, iv, 16);
 
@@ -148,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
 	if (assoclen)
 		*b0 |= 64;
 
-	rc = set_msg_len(b0 + 16 - l, cryptlen, l);
-
-	return rc;
+	return set_msg_len(b0 + 16 - l, cryptlen, l);
 }
 
 static int generate_pat(u8                   *iv,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index e61b3e13db3b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -251,13 +251,13 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&key_val->list);
-	strlcpy(key_val->key, key, sizeof(key_val->key));
+	strscpy(key_val->key, key, sizeof(key_val->key));
 
 	if (type == ADF_DEC) {
 		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
 			 "%ld", (*((long *)val)));
 	} else if (type == ADF_STR) {
-		strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+		strscpy(key_val->val, (char *)val, sizeof(key_val->val));
 	} else if (type == ADF_HEX) {
 		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
 			 "0x%lx", (unsigned long)val);
@@ -315,7 +315,7 @@ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
 	if (!sec)
 		return -ENOMEM;
 
-	strlcpy(sec->name, name, sizeof(sec->name));
+	strscpy(sec->name, name, sizeof(sec->name));
 	INIT_LIST_HEAD(&sec->param_head);
 	down_write(&cfg->lock);
 	list_add_tail(&sec->list, &cfg->sec_list);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e8ac932bbaab..82b69e1f725b 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -16,6 +16,9 @@
 #include "adf_cfg_common.h"
 #include "adf_cfg_user.h"
 
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
 #define DEVICE_NAME "qat_adf_ctl"
 
 static DEFINE_MUTEX(adf_ctl_lock);
@@ -137,10 +140,11 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
 	struct adf_user_cfg_key_val key_val;
 	struct adf_user_cfg_key_val *params_head;
 	struct adf_user_cfg_section section, *section_head;
+	int i, j;
 
 	section_head = ctl_data->config_section;
 
-	while (section_head) {
+	for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
 		if (copy_from_user(&section, (void __user *)section_head,
 				   sizeof(*section_head))) {
 			dev_err(&GET_DEV(accel_dev),
@@ -156,7 +160,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
 
 		params_head = section.params;
 
-		while (params_head) {
+		for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
 			if (copy_from_user(&key_val, (void __user *)params_head,
 					   sizeof(key_val))) {
 				dev_err(&GET_DEV(accel_dev),
@@ -363,7 +367,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
 	dev_info.num_logical_accel = hw_data->num_logical_accel;
 	dev_info.banks_per_accel = hw_data->num_banks
 					/ hw_data->num_logical_accel;
-	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+	strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
 	dev_info.instance_id = hw_data->instance_id;
 	dev_info.type = hw_data->dev_class->type;
 	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
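
The two bounded loops above replace open-ended walks over linked lists whose next pointers come straight from userspace; a crafted or accidentally cyclic list could previously keep the ioctl spinning in the kernel indefinitely. Capping the iteration count (512 sections, 256 key/value pairs per section) turns that into a bounded traversal. The pattern, sketched with hypothetical names:

    #define MAX_NODES 256

    struct unode {
            struct unode __user *next;
            /* payload ... */
    };

    static int walk_user_list(struct unode __user *head)
    {
            struct unode node;
            int i;

            for (i = 0; head && i < MAX_NODES; i++) {
                    if (copy_from_user(&node, head, sizeof(node)))
                            return -EFAULT;
                    /* ... validate and process the copied node ... */
                    head = node.next;
            }
            return 0;
    }
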
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 43b8f864806b..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -107,7 +107,7 @@ do { \
  * Timeout is in cycles. Clock speed may vary across products but this
  * value should be a few milli-seconds.
  */
-#define ADF_SSM_WDT_DEFAULT_VALUE	0x200000
+#define ADF_SSM_WDT_DEFAULT_VALUE	0x7000000ULL
 #define ADF_SSM_WDT_PKE_DEFAULT_VALUE	0x8000000
 #define ADF_SSMWDTL_OFFSET		0x54
 #define ADF_SSMWDTH_OFFSET		0x5C
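
For scale: the old default of 0x200000 is 2,097,152 cycles, roughly 4 ms at an assumed 500 MHz clock, while the new 0x7000000 is 117,440,512 cycles, roughly 235 ms at the same assumed clock. The actual wall-clock timeout depends on the product's clock rate, as the adjacent comment notes.
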
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e69e5907f595..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -96,7 +96,7 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
 	if (!ring_debug)
 		return -ENOMEM;
 
-	strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+	strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
 	snprintf(entry_name, sizeof(entry_name), "ring_%02d",
 		 ring->ring_number);
 
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 4b36869bf460..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -86,7 +86,8 @@
 					ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
 					ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
 					ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_MAX_IMAGE_LEN   0x40000
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN    0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN    0x30000
 
 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
 #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index fb45fa83841c..cad9c58caab1 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -673,11 +673,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blpout = qat_req->buf.bloutp;
 	size_t sz = qat_req->buf.sz;
 	size_t sz_out = qat_req->buf.sz_out;
+	int bl_dma_dir;
 	int i;
 
+	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
 	for (i = 0; i < bl->num_bufs; i++)
 		dma_unmap_single(dev, bl->bufers[i].addr,
-				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
+				 bl->bufers[i].len, bl_dma_dir);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 
@@ -691,7 +694,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 		for (i = bufless; i < blout->num_bufs; i++) {
 			dma_unmap_single(dev, blout->bufers[i].addr,
 					 blout->bufers[i].len,
-					 DMA_BIDIRECTIONAL);
+					 DMA_FROM_DEVICE);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
 
@@ -716,6 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	struct scatterlist *sg;
 	size_t sz_out, sz = struct_size(bufl, bufers, n);
 	int node = dev_to_node(&GET_DEV(inst->accel_dev));
+	int bufl_dma_dir;
 
 	if (unlikely(!n))
 		return -EINVAL;
@@ -733,6 +737,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		qat_req->buf.sgl_src_valid = true;
 	}
 
+	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
 
@@ -744,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 
 		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
 						      sg->length,
-						      DMA_BIDIRECTIONAL);
+						      bufl_dma_dir);
 		bufl->bufers[y].len = sg->length;
 		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
 			goto err_in;
@@ -787,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 
 			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
 							sg->length,
-							DMA_BIDIRECTIONAL);
+							DMA_FROM_DEVICE);
 			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
 				goto err_out;
 			bufers[y].len = sg->length;
@@ -817,7 +823,7 @@ err_out:
 		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
-					 DMA_BIDIRECTIONAL);
+					 DMA_FROM_DEVICE);
 
 	if (!qat_req->buf.sgl_dst_valid)
 		kfree(buflout);
@@ -831,7 +837,7 @@ err_in:
 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
 			dma_unmap_single(dev, bufl->bufers[i].addr,
 					 bufl->bufers[i].len,
-					 DMA_BIDIRECTIONAL);
+					 bufl_dma_dir);
 
 	if (!qat_req->buf.sgl_src_valid)
 		kfree(bufl);
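
The qat_algs.c hunks tighten the DMA directions: when the source and destination scatterlists differ, input buffers only travel to the device and output buffers only come back, so DMA_TO_DEVICE and DMA_FROM_DEVICE suffice, and DMA_BIDIRECTIONAL (which can cost cache maintenance on both map and unmap on non-coherent systems) is kept only for the in-place case. The unmap direction must match the map direction, which is why qat_alg_free_bufl() recomputes it the same way. A condensed sketch of the idea:

    enum dma_data_direction dir;
    dma_addr_t addr;

    dir = (src_sgl != dst_sgl) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
    addr = dma_map_single(dev, buf, len, dir);
    if (dma_mapping_error(dev, addr))
            return -ENOMEM;
    /* ... run the operation ... */
    dma_unmap_single(dev, addr, len, dir);  /* same direction as at map time */
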
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 095ed2a404d2..94a26702aeae 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,14 +332,14 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	qat_req->in.dh.in_tab[n_input_params] = 0;
 	qat_req->out.dh.out_tab[1] = 0;
 	/* Mapping in.in.b or in.in_g2.xa is the same */
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
-					 sizeof(qat_req->in.dh.in.b),
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+					 sizeof(struct qat_dh_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
-					  sizeof(qat_req->out.dh.r),
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+					  sizeof(struct qat_dh_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
 		goto unmap_in_params;
@@ -729,14 +729,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
 
 	qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
-					 sizeof(qat_req->in.rsa.enc.m),
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+					 sizeof(struct qat_rsa_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
-					  sizeof(qat_req->out.rsa.enc.c),
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+					  sizeof(struct qat_rsa_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
 		goto unmap_in_params;
@@ -875,14 +875,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	else
 		qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
-					 sizeof(qat_req->in.rsa.dec.c),
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+					 sizeof(struct qat_rsa_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
-					  sizeof(qat_req->out.rsa.dec.m),
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+					  sizeof(struct qat_rsa_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
 		goto unmap_in_params;
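
Previously only the first member of each parameter block was passed to dma_map_single(), even though the firmware consumes the whole in_tab/out_tab; mapping the enclosing struct with its full sizeof keeps the mapped length in step with what the device actually reads. The shape of the change, sketched with a hypothetical struct:

    struct params { u64 a, b, c; };
    struct params *p;
    dma_addr_t h;

    /* Before: maps 8 bytes while the device reads all 24. */
    h = dma_map_single(dev, &p->a, sizeof(p->a), DMA_TO_DEVICE);

    /* After: map the whole block the device will access. */
    h = dma_map_single(dev, p, sizeof(*p), DMA_TO_DEVICE);
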
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 0fe5a474aa45..b7f7869ef8b2 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
 	}
 }
 
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+				char *image, unsigned int size,
+				unsigned int fw_type)
+{
+	char *fw_type_name = fw_type ? "MMP" : "AE";
+	unsigned int css_dword_size = sizeof(u32);
+
+	if (handle->chip_info->fw_auth) {
+		struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+		unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+		if ((css_hdr->header_len * css_dword_size) != header_len)
+			goto err;
+		if ((css_hdr->size * css_dword_size) != size)
+			goto err;
+		if (fw_type != css_hdr->fw_type)
+			goto err;
+		if (size <= header_len)
+			goto err;
+		size -= header_len;
+	}
+
+	if (fw_type == CSS_AE_FIRMWARE) {
+		if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+		    ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+			goto err;
+		if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+			goto err;
+	} else if (fw_type == CSS_MMP_FIRMWARE) {
+		if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+			goto err;
+	} else {
+		pr_err("QAT: Unsupported firmware type\n");
+		return -EINVAL;
+	}
+	return 0;
+
+err:
+	pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+	return -EINVAL;
+}
+
 static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
 				char *image, unsigned int size,
 				struct icp_qat_fw_auth_desc **desc)
@@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
 	struct icp_qat_simg_ae_mode *simg_ae_mode;
 	struct icp_firml_dram_desc img_desc;
 
-	if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+	if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
 		pr_err("QAT: error, input image size overflow %d\n", size);
 		return -EINVAL;
 	}
@@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
 {
 	struct icp_qat_fw_auth_desc *desc = NULL;
 	int status = 0;
+	int ret;
+
+	ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+	if (ret)
+		return ret;
 
 	if (handle->chip_info->fw_auth) {
 		status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
@@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
 	struct icp_qat_fw_auth_desc *desc = NULL;
 	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
 	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+	int ret;
 
 	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+		ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+					   simg_hdr[i].simg_len,
+					   CSS_AE_FIRMWARE);
+		if (ret)
+			return ret;
+
 		if (qat_uclo_map_auth_fw(handle,
 					 (char *)simg_hdr[i].simg_buf,
 					 (unsigned int)
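
qat_uclo_check_image() now runs before any firmware image is handed over for authentication: for signed images it cross-checks the CSS header's own length fields against the buffer size and the expected firmware type, then enforces per-type caps of 0x40000 (256 KiB) for AE images and 0x30000 (192 KiB) for MMP images, matching the RSA4K/RSA3K split introduced in icp_qat_uclo.h above. Both qat_uclo_wr_mimage() and qat_uclo_wr_suof_img() reject images that fail these checks instead of passing them to the device.
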
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 97a530171f07..6eb4d2e35629 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -450,8 +450,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
 	if (ret)
 		return ret;
 	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-	if (dst_nents < 0) {
-		ret = dst_nents;
+	if (!dst_nents) {
+		ret = -EIO;
 		goto error_free;
 	}
 
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 59159f5e64e5..37bafd7aeb79 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -97,14 +97,16 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 	}
 
 	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
-	if (ret < 0)
-		return ret;
+	if (!ret)
+		return -EIO;
 
 	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
 
 	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
-	if (ret < 0)
+	if (!ret) {
+		ret = -EIO;
 		goto error_unmap_src;
+	}
 
 	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
 			       &rctx->result_sg, 1, qce_ahash_done, async_req);
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 3d27cd5210ef..5b493fdc1e74 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -124,15 +124,15 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 	rctx->dst_sg = rctx->dst_tbl.sgl;
 
 	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-	if (dst_nents < 0) {
-		ret = dst_nents;
+	if (!dst_nents) {
+		ret = -EIO;
 		goto error_free;
 	}
 
 	if (diff_dst) {
 		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
-		if (src_nents < 0) {
-			ret = src_nents;
+		if (!src_nents) {
+			ret = -EIO;
 			goto error_unmap_dst;
 		}
 		rctx->src_sg = req->src;
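
All three qce hunks (aead, sha, skcipher) fix the same misuse: dma_map_sg() returns the number of segments actually mapped, or 0 on failure, and never a negative errno, so the old `< 0` checks were dead code. The corrected pattern, as a minimal sketch with a hypothetical transfer helper:

    int nents;

    nents = dma_map_sg(dev, sgl, sg_count, DMA_TO_DEVICE);
    if (!nents)
            return -EIO;    /* mapping failed */

    /* nents may be smaller than sg_count if entries were merged */
    do_transfer(dev, sgl, nents);   /* hypothetical */

    dma_unmap_sg(dev, sgl, sg_count, DMA_TO_DEVICE);    /* original count */
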
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 031b5f701a0a..72dd1a4ebac4 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -9,6 +9,7 @@
 #include <linux/crypto.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_ACPI)
-static const struct acpi_device_id qcom_rng_acpi_match[] = {
+static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
 	{ .id = "QCOM8160", .driver_data = 1 },
 	{}
 };
 MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
-#endif
 
-static const struct of_device_id qcom_rng_of_match[] = {
+static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
 	{ .compatible = "qcom,prng", .data = (void *)0},
 	{ .compatible = "qcom,prng-ee", .data = (void *)1},
 	{}
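
Instead of fencing the ACPI match table behind #if IS_ENABLED(CONFIG_ACPI), both ID tables are marked __maybe_unused. This idiom works when the driver structure wraps the references in ACPI_PTR()/of_match_ptr() (not shown in this hunk): a build without CONFIG_ACPI or CONFIG_OF compiles the reference away, and the annotation suppresses the resulting defined-but-not-used warning. A hedged sketch with hypothetical names:

    static const struct of_device_id foo_of_match[] __maybe_unused = {
            { .compatible = "vendor,foo" },
            { }
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name           = "foo",
                    .of_match_table = of_match_ptr(foo_of_match),
            },
    };
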
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 457084b344c1..7ab20fb95166 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -26,10 +26,10 @@
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
 #define SHA_BUFFER_LEN		PAGE_SIZE
 #define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
@@ -196,7 +196,7 @@ struct sahara_dev {
 	void __iomem		*regs_base;
 	struct clk		*clk_ipg;
 	struct clk		*clk_ahb;
-	struct mutex		queue_mutex;
+	spinlock_t		queue_spinlock;
 	struct task_struct	*kthread;
 	struct completion	dma_completion;
 
@@ -487,13 +487,13 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 
 	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 			 DMA_TO_DEVICE);
-	if (ret != dev->nb_in_sg) {
+	if (!ret) {
 		dev_err(dev->device, "couldn't map in sg\n");
 		goto unmap_in;
 	}
 	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 			 DMA_FROM_DEVICE);
-	if (ret != dev->nb_out_sg) {
+	if (!ret) {
 		dev_err(dev->device, "couldn't map out sg\n");
 		goto unmap_out;
 	}
@@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 
 	rctx->mode = mode;
 
-	mutex_lock(&dev->queue_mutex);
+	spin_lock_bh(&dev->queue_spinlock);
 	err = crypto_enqueue_request(&dev->queue, &req->base);
-	mutex_unlock(&dev->queue_mutex);
+	spin_unlock_bh(&dev->queue_spinlock);
 
 	wake_up_process(dev->kthread);
 
@@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data)
 	do {
 		__set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&dev->queue_mutex);
+		spin_lock_bh(&dev->queue_spinlock);
 		backlog = crypto_get_backlog(&dev->queue);
 		async_req = crypto_dequeue_request(&dev->queue);
-		mutex_unlock(&dev->queue_mutex);
+		spin_unlock_bh(&dev->queue_spinlock);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
 		rctx->first = 1;
 	}
 
-	mutex_lock(&dev->queue_mutex);
+	spin_lock_bh(&dev->queue_spinlock);
 	ret = crypto_enqueue_request(&dev->queue, &req->base);
-	mutex_unlock(&dev->queue_mutex);
+	spin_unlock_bh(&dev->queue_spinlock);
 
 	wake_up_process(dev->kthread);
 
@@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev)
 
 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
 
-	mutex_init(&dev->queue_mutex);
+	spin_lock_init(&dev->queue_spinlock);
 
 	dev_ptr = dev;
 
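
The sahara queue lock changes from a mutex to a spinlock taken with spin_lock_bh(). Crypto request submission can happen from softirq context, where a mutex (which may sleep) is not allowed, and the critical sections here are short enqueue/dequeue operations, so a sleeping lock buys nothing. A condensed restatement of the enqueue paths above:

    static int enqueue_req(struct sahara_dev *dev,
                           struct crypto_async_request *req)
    {
            int err;

            spin_lock_bh(&dev->queue_spinlock);     /* safe from softirq context */
            err = crypto_enqueue_request(&dev->queue, req);
            spin_unlock_bh(&dev->queue_spinlock);

            wake_up_process(dev->kthread);
            return err;
    }
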
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 15d4a38b1351..cd4c410da5a5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -85,8 +85,6 @@ config WIREGUARD
 	select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
 	select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
 	select CRYPTO_CURVE25519_X86 if X86 && 64BIT
-	select ARM_CRYPTO if ARM
-	select ARM64_CRYPTO if ARM64
 	select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
 	select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_POLY1305_ARM if ARM
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index cb5414265a9b..58c0ab01771b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -83,7 +83,8 @@ static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 		if (!wait || !max || likely(bytes_read) || fail_stats > 110)
 			break;
 
-		msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
+		if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats)))
+			break;
 	}
 
 	if (wait && !bytes_read && max)
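
hwrng_msleep() behaves like an interruptible msleep() but is also woken when the hwrng core starts tearing the device down, so a read callback waiting for entropy cannot stall unregistration; a nonzero return means the sleep ended early and the loop should give up, which is exactly how the hunk uses it. A minimal sketch of the pattern inside a .read callback, with hypothetical readiness and fill helpers:

    static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            while (!foo_entropy_ready()) {          /* hypothetical */
                    if (!wait)
                            return 0;
                    if (hwrng_msleep(rng, 10))
                            return 0;       /* interrupted or device going away */
            }
            return foo_fill(buf, max);              /* hypothetical */
    }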