Diffstat (limited to 'drivers/crypto/inside-secure')
 drivers/crypto/inside-secure/safexcel.c        | 91
 drivers/crypto/inside-secure/safexcel.h        | 31
 drivers/crypto/inside-secure/safexcel_cipher.c | 39
 drivers/crypto/inside-secure/safexcel_hash.c   | 31
 drivers/crypto/inside-secure/safexcel_ring.c   | 48
 5 files changed, 119 insertions(+), 121 deletions(-)
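
The core of the change: instead of kzalloc()'ing a struct safexcel_request for
every crypto request and chaining it on a list under egress_lock, each ring now
carries a preallocated array of crypto_async_request pointers indexed by
result-descriptor slot, so the hot path needs no allocation and no list lock.
A standalone sketch of that technique (RING_SIZE, struct req and the helper
names are illustrative stand-ins, not driver code):

    #define RING_SIZE 512                   /* stand-in for EIP197_DEFAULT_RING_SIZE */

    struct req;                             /* stand-in for struct crypto_async_request */

    static struct req *rdr_req[RING_SIZE];  /* one slot per result descriptor */

    /* send path: remember which request owns result slot 'idx' */
    static void rdr_req_set(int idx, struct req *r)
    {
            rdr_req[idx] = r;
    }

    /* completion path: the oldest in-flight request sits at the read index */
    static struct req *rdr_req_get(int read_idx)
    {
            return rdr_req[read_idx];
    }
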
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 7bfdba1ada26..bd3635cb1e50 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -509,7 +509,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct crypto_async_request *req, *backlog;
 	struct safexcel_context *ctx;
-	struct safexcel_request *request;
 	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
 	/* If a request wasn't properly dequeued because of a lack of resources,
@@ -533,16 +532,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 		}
 
 handle_req:
-		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-		if (!request)
-			goto request_failed;
-
 		ctx = crypto_tfm_ctx(req->tfm);
-		ret = ctx->send(req, ring, request, &commands, &results);
-		if (ret) {
-			kfree(request);
+		ret = ctx->send(req, ring, &commands, &results);
+		if (ret)
 			goto request_failed;
-		}
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -551,14 +544,8 @@ handle_req:
 		 * to the engine because the input data was cached, continue to
 		 * dequeue other requests as this is valid and not an error.
 		 */
-		if (!commands && !results) {
-			kfree(request);
+		if (!commands && !results)
 			continue;
-		}
-
-		spin_lock_bh(&priv->ring[ring].egress_lock);
-		list_add_tail(&request->list, &priv->ring[ring].list);
-		spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 		cdesc += commands;
 		rdesc += results;
@@ -576,7 +563,7 @@ finalize:
 	if (!nreq)
 		return;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
+	spin_lock_bh(&priv->ring[ring].lock);
 
 	priv->ring[ring].requests += nreq;
 
@@ -585,7 +572,7 @@ finalize:
 		priv->ring[ring].busy = true;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	spin_unlock_bh(&priv->ring[ring].lock);
 
 	/* let the RDR know we have pending descriptors */
 	writel((rdesc * priv->config.rd_offset) << 2,
@@ -617,6 +604,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 	return -EINVAL;
 }
 
+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+				 int ring,
+				 struct safexcel_result_desc *rdesc,
+				 struct crypto_async_request *req)
+{
+	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+	priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+	int i = safexcel_ring_first_rdr_index(priv, ring);
+
+	return priv->ring[ring].rdr_req[i];
+}
+
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct safexcel_command_desc *cdesc;
@@ -645,21 +650,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
 
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
-			      dma_addr_t ctxr_dma, int ring,
-			      struct safexcel_request *request)
+			      dma_addr_t ctxr_dma, int ring)
 {
 	struct safexcel_command_desc *cdesc;
 	struct safexcel_result_desc *rdesc;
 	int ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* Prepare command descriptor */
 	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
-	if (IS_ERR(cdesc)) {
-		ret = PTR_ERR(cdesc);
-		goto unlock;
-	}
+	if (IS_ERR(cdesc))
+		return PTR_ERR(cdesc);
 
 	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
 	cdesc->control_data.options = 0;
@@ -674,21 +674,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
 		goto cdesc_rollback;
 	}
 
-	request->req = async;
-	goto unlock;
+	safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+	return ret;
 
 cdesc_rollback:
 	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
-unlock:
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }
 
 static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
 						     int ring)
 {
-	struct safexcel_request *sreq;
+	struct crypto_async_request *req;
 	struct safexcel_context *ctx;
 	int ret, i, nreq, ndesc, tot_descs, handled = 0;
 	bool should_complete;
@@ -703,28 +702,22 @@ handle_results:
 		goto requests_left;
 
 	for (i = 0; i < nreq; i++) {
-		spin_lock_bh(&priv->ring[ring].egress_lock);
-		sreq = list_first_entry(&priv->ring[ring].list,
-					struct safexcel_request, list);
-		list_del(&sreq->list);
-		spin_unlock_bh(&priv->ring[ring].egress_lock);
-
-		ctx = crypto_tfm_ctx(sreq->req->tfm);
-		ndesc = ctx->handle_result(priv, ring, sreq->req,
+		req = safexcel_rdr_req_get(priv, ring);
+
+		ctx = crypto_tfm_ctx(req->tfm);
+		ndesc = ctx->handle_result(priv, ring, req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
-			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
 			goto acknowledge;
 		}
 
 		if (should_complete) {
 			local_bh_disable();
-			sreq->req->complete(sreq->req, ret);
+			req->complete(req, ret);
 			local_bh_enable();
 		}
 
-		kfree(sreq);
 		tot_descs += ndesc;
 		handled++;
 	}
@@ -743,7 +736,7 @@ acknowledge:
 		goto handle_results;
 
 requests_left:
-	spin_lock_bh(&priv->ring[ring].egress_lock);
+	spin_lock_bh(&priv->ring[ring].lock);
 
 	priv->ring[ring].requests -= handled;
 	safexcel_try_push_requests(priv, ring);
@@ -751,7 +744,7 @@ requests_left:
 	if (!priv->ring[ring].requests)
 		priv->ring[ring].busy = false;
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	spin_unlock_bh(&priv->ring[ring].lock);
 }
 
 static void safexcel_dequeue_work(struct work_struct *work)
@@ -1073,6 +1066,14 @@ static int safexcel_probe(struct platform_device *pdev)
 		if (ret)
 			goto err_reg_clk;
 
+		priv->ring[i].rdr_req = devm_kzalloc(dev,
+			sizeof(*priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+			GFP_KERNEL);
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
 		if (!ring_irq) {
 			ret = -ENOMEM;
@@ -1108,9 +1109,7 @@ static int safexcel_probe(struct platform_device *pdev)
 		crypto_init_queue(&priv->ring[i].queue,
 				  EIP197_DEFAULT_RING_SIZE);
 
-		INIT_LIST_HEAD(&priv->ring[i].list);
 		spin_lock_init(&priv->ring[i].lock);
-		spin_lock_init(&priv->ring[i].egress_lock);
 		spin_lock_init(&priv->ring[i].queue_lock);
 	}
 
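
The safexcel_rdr_req_set()/safexcel_rdr_req_get() helpers added above map a
descriptor pointer back to its slot with pointer arithmetic over the ring's
element stride, mirroring safexcel_ring_rdr_rdesc_index(). A self-contained
illustration of that mapping (sizes are arbitrary):

    #include <assert.h>
    #include <stddef.h>

    /* slot index of 'desc' in a ring of equally sized elements starting
     * at 'base' with an element stride of 'offset' bytes */
    static int desc_index(const void *base, size_t offset, const void *desc)
    {
            return (int)(((const char *)desc - (const char *)base) / offset);
    }

    int main(void)
    {
            char ring[8 * 64];      /* 8 elements, 64-byte stride */

            assert(desc_index(ring, 64, ring + 3 * 64) == 3);
            return 0;
    }
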
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 839f63ef1556..2f2834e67608 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -512,8 +512,7 @@ struct safexcel_desc_ring {
 	void *write;
 	void *read;
 
-	/* number of elements used in the ring */
-	unsigned nr;
+	/* descriptor element offset */
 	unsigned offset;
 };
 
@@ -523,11 +522,6 @@ enum safexcel_alg_type {
 	SAFEXCEL_ALG_TYPE_AHASH,
 };
 
-struct safexcel_request {
-	struct list_head list;
-	struct crypto_async_request *req;
-};
-
 struct safexcel_config {
 	u32 pes;
 	u32 rings;
@@ -547,9 +541,7 @@ struct safexcel_work_data {
 
 struct safexcel_ring {
 	spinlock_t lock;
-	spinlock_t egress_lock;
 
-	struct list_head list;
 	struct workqueue_struct *workqueue;
 	struct safexcel_work_data work_data;
 
@@ -557,6 +549,9 @@ struct safexcel_ring {
 	struct safexcel_desc_ring cdr;
 	struct safexcel_desc_ring rdr;
 
+	/* result ring crypto API requests, indexed by rdesc slot */
+	struct crypto_async_request **rdr_req;
+
 	/* queue */
 	struct crypto_queue queue;
 	spinlock_t queue_lock;
@@ -618,8 +613,7 @@ struct safexcel_crypto_priv {
 
 struct safexcel_context {
 	int (*send)(struct crypto_async_request *req, int ring,
-		    struct safexcel_request *request, int *commands,
-		    int *results);
+		    int *commands, int *results);
 	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
 			     struct crypto_async_request *req, bool *complete,
 			     int *ret);
@@ -668,14 +662,14 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
-			      dma_addr_t ctxr_dma, int ring,
-			      struct safexcel_request *request);
+			      dma_addr_t ctxr_dma, int ring);
 int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
 				   struct safexcel_desc_ring *cdr,
 				   struct safexcel_desc_ring *rdr);
 int safexcel_select_ring(struct safexcel_crypto_priv *priv);
 void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
 			      struct safexcel_desc_ring *ring);
+void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
 				 struct safexcel_desc_ring *ring);
 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -688,6 +682,17 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
 						 int ring_id,
 						bool first, bool last,
 						dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+				  int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+				  int ring,
+				  struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+			  int ring,
+			  struct safexcel_result_desc *rdesc,
+			  struct crypto_async_request *req);
+struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_inv_complete(struct crypto_async_request *req, int error);
 int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
 			 void *istate, void *ostate);
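
With struct safexcel_request gone, the send() callback contract reduces to
reporting how many command and result descriptors were queued; the request
itself is registered against its result descriptor via safexcel_rdr_req_set().
A compilable sketch of a conforming callback (the opaque type mirrors the
kernel's; the trivial body is hypothetical):

    #include <stdio.h>

    struct crypto_async_request;    /* opaque, as in the crypto API */

    /* Returning 0 with both counts zero means "input was cached, nothing
     * submitted" -- the dequeue loop treats that as valid and moves on,
     * with no per-request node left to free. */
    static int sketch_send(struct crypto_async_request *req, int ring,
                           int *commands, int *results)
    {
            (void)req;
            (void)ring;
            *commands = 0;
            *results = 0;
            return 0;
    }

    int main(void)
    {
            int c, r;

            sketch_send(NULL, 0, &c, &r);
            printf("queued %d cdesc, %d rdesc\n", c, r);
            return 0;
    }
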
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index e175d0bf69d8..9d292427a488 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -336,7 +336,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	do {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
@@ -353,7 +352,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	} while (!rdesc->last_seg);
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (src == dst) {
 		dma_unmap_sg(priv->dev, src,
@@ -374,7 +372,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 }
 
 static int safexcel_send_req(struct crypto_async_request *base, int ring,
-			     struct safexcel_request *request,
 			     struct safexcel_cipher_req *sreq,
 			     struct scatterlist *src, struct scatterlist *dst,
 			     unsigned int cryptlen, unsigned int assoclen,
@@ -384,7 +381,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
-	struct safexcel_result_desc *rdesc;
+	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
 	struct scatterlist *sg;
 	unsigned int totlen = cryptlen + assoclen;
 	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -424,8 +421,6 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		       ctx->opad, ctx->state_sz);
 	}
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* command descriptors */
 	for_each_sg(src, sg, nr_src, i) {
 		int len = sg_dma_len(sg);
@@ -472,12 +467,12 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			ret = PTR_ERR(rdesc);
 			goto rdesc_rollback;
 		}
+		if (first)
+			first_rdesc = rdesc;
 		n_rdesc++;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
-
-	request->req = base;
+	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
 
 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -490,8 +485,6 @@ cdesc_rollback:
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
-
 	if (src == dst) {
 		dma_unmap_sg(priv->dev, src,
 			     sg_nents_for_len(src, totlen),
@@ -519,7 +512,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	do {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
@@ -536,7 +528,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	} while (!rdesc->last_seg);
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (ctx->base.exit_inv) {
 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -612,15 +603,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
 }
 
 static int safexcel_cipher_send_inv(struct crypto_async_request *base,
-				    int ring, struct safexcel_request *request,
-				    int *commands, int *results)
+				    int ring, int *commands, int *results)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;
 
-	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
-					request);
+	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
 	if (unlikely(ret))
 		return ret;
 
@@ -631,7 +620,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
 }
 
 static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
-				  struct safexcel_request *request,
 				  int *commands, int *results)
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
@@ -643,18 +631,16 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
 	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
 
 	if (sreq->needs_inv)
-		ret = safexcel_cipher_send_inv(async, ring, request, commands,
-					       results);
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_send_req(async, ring, request, sreq, req->src,
+		ret = safexcel_send_req(async, ring, sreq, req->src,
 					req->dst, req->cryptlen, 0, 0, req->iv,
 					commands, results);
 	return ret;
 }
 
 static int safexcel_aead_send(struct crypto_async_request *async, int ring,
-			      struct safexcel_request *request, int *commands,
-			      int *results)
+			      int *commands, int *results)
 {
 	struct aead_request *req = aead_request_cast(async);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -666,11 +652,10 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
 	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
 
 	if (sreq->needs_inv)
-		ret = safexcel_cipher_send_inv(async, ring, request, commands,
-					       results);
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_send_req(async, ring, request, sreq, req->src,
-					req->dst, req->cryptlen, req->assoclen,
+		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+					req->cryptlen, req->assoclen,
 					crypto_aead_authsize(tfm), req->iv,
 					commands, results);
 	return ret;
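
Note that safexcel_send_req() records the request on first_rdesc only: a
request whose output scatters across several result descriptors occupies
consecutive slots, but the completion path looks up the owner once, at the
current read index, then keeps consuming descriptors until last_seg. An
illustrative, runnable sketch (indices and the owner table are made up):

    #include <assert.h>

    #define RING_SIZE 8

    static const void *owner[RING_SIZE];

    int main(void)
    {
            int read = 2, n_rdesc = 3;      /* request occupies slots 2..4 */
            static const int some_req = 42;

            owner[read] = &some_req;        /* set on the first rdesc only */

            const void *req = owner[read];          /* look up the owner once... */
            read = (read + n_rdesc) % RING_SIZE;    /* ...then consume every segment */

            assert(req == &some_req && read == 5);
            return 0;
    }
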
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 46aac55c0b31..83bd49d1249b 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -160,7 +160,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 	if (IS_ERR(rdesc)) {
 		dev_err(priv->dev,
@@ -171,7 +170,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	}
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (sreq->nents) {
 		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -204,7 +202,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 }
 
 static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
-				   struct safexcel_request *request,
 				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
@@ -251,16 +248,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 		}
 	}
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* Add a command descriptor for the cached data, if any */
 	if (cache_len) {
 		req->cache_dma = dma_map_single(priv->dev, req->cache,
 						cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, req->cache_dma)) {
-			spin_unlock_bh(&priv->ring[ring].egress_lock);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
 			return -EINVAL;
-		}
 
 		req->cache_sz = cache_len;
 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -333,14 +326,12 @@ send_command:
 		goto unmap_result;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
 
 	req->processed[0] += len;
 	if (req->processed[0] < len)
 		req->processed[1]++;
 
-	request->req = &areq->base;
-
 	*commands = n_cdesc;
 	*results = 1;
 	return 0;
@@ -360,7 +351,6 @@ unmap_cache:
 		req->cache_sz = 0;
 	}
 
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }
 
@@ -398,7 +388,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 
 	*ret = 0;
 
-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 	if (IS_ERR(rdesc)) {
 		dev_err(priv->dev,
@@ -409,7 +398,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	}
 
 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	if (ctx->base.exit_inv) {
 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -460,15 +448,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 }
 
 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
-				   int ring, struct safexcel_request *request,
-				   int *commands, int *results)
+				   int ring, int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;
 
 	ret = safexcel_invalidate_cache(async, ctx->priv,
-					ctx->base.ctxr_dma, ring, request);
+					ctx->base.ctxr_dma, ring);
 	if (unlikely(ret))
 		return ret;
 
@@ -479,19 +466,17 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 }
 
 static int safexcel_ahash_send(struct crypto_async_request *async,
-			       int ring, struct safexcel_request *request,
-			       int *commands, int *results)
+			       int ring, int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	int ret;
 
 	if (req->needs_inv)
-		ret = safexcel_ahash_send_inv(async, ring, request,
-					      commands, results);
+		ret = safexcel_ahash_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_ahash_send_req(async, ring, request,
-					      commands, results);
+		ret = safexcel_ahash_send_req(async, ring, commands, results);
+
 	return ret;
 }
 
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index cfd843b834f1..a1e81cfdc1f1 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -24,7 +24,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
 	if (!cdr->base)
 		return -ENOMEM;
 	cdr->write = cdr->base;
-	cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
 	cdr->read = cdr->base;
 
 	rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +34,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
 	if (!rdr->base)
 		return -ENOMEM;
 	rdr->write = rdr->base;
-	rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
 	rdr->read = rdr->base;
 
 	return 0;
@@ -50,14 +50,15 @@ static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
 {
 	void *ptr = ring->write;
 
-	if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+	if ((ring->write == ring->read - ring->offset) ||
+	    (ring->read == ring->base && ring->write == ring->base_end))
 		return ERR_PTR(-ENOMEM);
 
-	ring->write += ring->offset;
 	if (ring->write == ring->base_end)
 		ring->write = ring->base;
+	else
+		ring->write += ring->offset;
 
-	ring->nr++;
 	return ptr;
 }
 
@@ -66,29 +67,52 @@ void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
 {
 	void *ptr = ring->read;
 
-	if (!ring->nr)
+	if (ring->write == ring->read)
 		return ERR_PTR(-ENOENT);
 
-	ring->read += ring->offset;
 	if (ring->read == ring->base_end)
 		ring->read = ring->base;
+	else
+		ring->read += ring->offset;
 
-	ring->nr--;
 	return ptr;
 }
 
+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+				     int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+					 int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+					 int ring,
+					 struct safexcel_result_desc *rdesc)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
 				 struct safexcel_desc_ring *ring)
 {
-	if (!ring->nr)
+	if (ring->write == ring->read)
 		return;
 
 	if (ring->write == ring->base)
-		ring->write = ring->base_end - ring->offset;
+		ring->write = ring->base_end;
 	else
 		ring->write -= ring->offset;
-
-	ring->nr--;
 }
 
 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
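
The safexcel_ring.c hunks trade the 'nr' element counter for read/write
pointer comparison, which is why base_end now names the last element instead
of one past it: an empty ring is exactly read == write, and a full ring is
detected one element before write would catch up with read. A compilable
sketch of the two tests under that convention (the struct is a placeholder,
not the driver's):

    #include <stdbool.h>
    #include <stddef.h>

    struct ring {
            char *base, *base_end;  /* base_end points at the LAST element */
            char *read, *write;
            size_t offset;          /* element stride in bytes */
    };

    static bool ring_empty(const struct ring *r)
    {
            return r->write == r->read;
    }

    static bool ring_full(const struct ring *r)
    {
            /* write is one element behind read, either directly or across
             * the wrap point (read on first element, write on last) */
            return r->write == r->read - r->offset ||
                   (r->read == r->base && r->write == r->base_end);
    }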