Diffstat (limited to 'drivers/crypto/qat/qat_common/qat_algs.c')
 drivers/crypto/qat/qat_common/qat_algs.c | 248 +++++++++++++++-----
 1 file changed, 185 insertions(+), 63 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d552dbcfe0a0..31c7a206a629 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -6,11 +6,13 @@
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/aes.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <crypto/hash.h>
 #include <crypto/hmac.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
@@ -31,6 +33,15 @@
 				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
 				       ICP_QAT_HW_CIPHER_DECRYPT)
 
+#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+				       ICP_QAT_HW_CIPHER_DECRYPT)
+
+#define HW_CAP_AES_V2(accel_dev) \
+	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
+	 ICP_ACCEL_CAPABILITIES_AES_V2)
+
 static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
 
@@ -89,7 +100,9 @@ struct qat_alg_skcipher_ctx {
 	struct icp_qat_fw_la_bulk_req dec_fw_req;
 	struct qat_crypto_instance *inst;
 	struct crypto_skcipher *ftfm;
+	struct crypto_cipher *tweak;
 	bool fallback;
+	int mode;
 };
 
 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -103,7 +116,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 		return ICP_QAT_HW_SHA512_STATE1_SZ;
 	default:
 		return -EFAULT;
-	};
+	}
 	return -EFAULT;
 }
 
@@ -214,24 +227,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	return 0;
 }
 
-static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-{
-	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-				       ICP_QAT_FW_LA_UPDATE_STATE);
-}
-
-static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
-{
-	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
-				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
-}
-
-static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-				    int aead)
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
 {
 	header->hdr_flags =
 		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
@@ -241,12 +237,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 					    QAT_COMN_PTR_TYPE_SGL);
 	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
 				  ICP_QAT_FW_LA_PARTIAL_NONE);
-	if (aead)
-		qat_alg_init_hdr_no_iv_updt(header);
-	else
-		qat_alg_init_hdr_iv_updt(header);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
 				ICP_QAT_FW_LA_NO_PROTO);
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
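
Note: the two header-init variants collapse into one because every request now
carries its IV by value (ICP_QAT_FW_CIPH_IV_16BYTE_DATA, i.e. the IV is
embedded in the request's cipher parameters) instead of via a 64-bit pointer,
and firmware-side IV state updates are disabled: the driver now tracks the IV
itself (see qat_alg_update_iv() further down). This is what lets the
encrypt/decrypt paths below drop the per-request dma_alloc_coherent() of the
IV buffer.
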
@@ -281,7 +277,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
 		return -EFAULT;
 
 	/* Request setup */
-	qat_alg_init_common_hdr(header, 1);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -368,7 +364,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
 		return -EFAULT;
 
 	/* Request setup */
-	qat_alg_init_common_hdr(header, 1);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
 	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
@@ -430,12 +426,32 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
 	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+	int mode = ctx->mode;
 
-	memcpy(cd->aes.key, key, keylen);
-	qat_alg_init_common_hdr(header, 0);
+	qat_alg_init_common_hdr(header);
 	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
 	cd_pars->u.s.content_desc_params_sz =
 				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+
+	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+
+		/* Store both XTS keys in CD, only the first key is sent
+		 * to the HW, the second key is used for tweak calculation
+		 */
+		memcpy(cd->ucs_aes.key, key, keylen);
+		keylen = keylen / 2;
+	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+		keylen = round_up(keylen, 16);
+		memcpy(cd->ucs_aes.key, key, keylen);
+	} else {
+		memcpy(cd->aes.key, key, keylen);
+	}
+
 	/* Cipher CD config setup */
 	cd_ctrl->cipher_key_sz = keylen >> 3;
 	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
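
Note: with the AES_V2 capability the cipher runs on the UCS slice and the key
layout in the content descriptor changes. For XTS the full two-key blob is
kept in the CD, but keylen is halved before cd_ctrl->cipher_key_sz is set, so
the hardware is programmed with key1 only; key2 stays in the CD and feeds the
software tweak derivation added later in this patch. For CTR the key is padded
to a 16-byte multiple, presumably because the UCS slice loads key material in
128-bit words.
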
@@ -457,6 +473,28 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
 	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
 }
 
+static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
+				    u8 *key_reverse)
+{
+	struct crypto_aes_ctx aes_expanded;
+	int nrounds;
+	u8 *key;
+
+	aes_expandkey(&aes_expanded, key_forward, keylen);
+	if (keylen == AES_KEYSIZE_128) {
+		nrounds = 10;
+		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+		memcpy(key_reverse, key, AES_BLOCK_SIZE);
+	} else {
+		/* AES_KEYSIZE_256 */
+		nrounds = 14;
+		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+		memcpy(key_reverse, key, AES_BLOCK_SIZE);
+		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+	}
+}
+
 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
 				      int alg, const u8 *key,
 				      unsigned int keylen, int mode)
@@ -464,16 +502,26 @@ static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
 	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
 
 	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
 
-	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+		/* Key reversing not supported, set no convert */
+		dec_cd->aes.cipher_config.val =
+				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
+
+		/* In-place key reversal */
+		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
+					dec_cd->ucs_aes.key);
+	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
 		dec_cd->aes.cipher_config.val =
 					QAT_AES_HW_CONFIG_DEC(alg, mode);
-	else
+	} else {
 		dec_cd->aes.cipher_config.val =
 					QAT_AES_HW_CONFIG_ENC(alg, mode);
+	}
 }
 
 static int qat_alg_validate_key(int key_len, int *alg, int mode)
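
Note: qat_alg_xts_reverse_key() relies on a standard AES property: the
decryption key schedule is the encryption schedule walked backwards, so
hardware that cannot convert keys itself (hence
ICP_QAT_HW_CIPHER_NO_CONVERT above) can be seeded with the last round key(s)
of the expanded encryption key. A minimal standalone sketch, assuming only
<crypto/aes.h>; the nrounds formula (6 + keylen/4) is a hypothetical
generalization that folds the driver's two branches into one and would also
cover AES-192, which XTS never uses:

	#include <crypto/aes.h>
	#include <linux/string.h>

	static int xts_reverse_key_sketch(const u8 *key_forward,
					  unsigned int keylen, u8 *key_reverse)
	{
		struct crypto_aes_ctx aes_expanded;
		int nrounds = 6 + keylen / 4;	/* 10, 12 or 14 rounds */
		const u8 *last_rk;
		int ret;

		ret = aes_expandkey(&aes_expanded, key_forward, keylen);
		if (ret)
			return ret;

		/* last encryption round key = first decryption round key */
		last_rk = (const u8 *)aes_expanded.key_enc +
			  AES_BLOCK_SIZE * nrounds;
		memcpy(key_reverse, last_rk, AES_BLOCK_SIZE);

		/* a 256-bit key needs the last two round keys */
		if (keylen == AES_KEYSIZE_256)
			memcpy(key_reverse + AES_BLOCK_SIZE,
			       last_rk - AES_BLOCK_SIZE, AES_BLOCK_SIZE);

		memzero_explicit(&aes_expanded, sizeof(aes_expanded));
		return 0;
	}
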
@@ -787,6 +835,61 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 	areq->base.complete(&areq->base, res);
 }
 
+static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
+{
+	struct skcipher_request *sreq = qat_req->skcipher_req;
+	u64 iv_lo_prev;
+	u64 iv_lo;
+	u64 iv_hi;
+
+	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
+
+	iv_lo = be64_to_cpu(qat_req->iv_lo);
+	iv_hi = be64_to_cpu(qat_req->iv_hi);
+
+	iv_lo_prev = iv_lo;
+	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
+	if (iv_lo < iv_lo_prev)
+		iv_hi++;
+
+	qat_req->iv_lo = cpu_to_be64(iv_lo);
+	qat_req->iv_hi = cpu_to_be64(iv_hi);
+}
+
+static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
+{
+	struct skcipher_request *sreq = qat_req->skcipher_req;
+	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
+	struct scatterlist *sgl;
+
+	if (qat_req->encryption)
+		sgl = sreq->dst;
+	else
+		sgl = sreq->src;
+
+	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
+}
+
+static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
+{
+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	switch (ctx->mode) {
+	case ICP_QAT_HW_CIPHER_CTR_MODE:
+		qat_alg_update_iv_ctr_mode(qat_req);
+		break;
+	case ICP_QAT_HW_CIPHER_CBC_MODE:
+		qat_alg_update_iv_cbc_mode(qat_req);
+		break;
+	case ICP_QAT_HW_CIPHER_XTS_MODE:
+		break;
+	default:
+		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
+			 ctx->mode);
+	}
+}
+
 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 				      struct qat_crypto_request *qat_req)
 {
@@ -794,16 +897,16 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct skcipher_request *sreq = qat_req->skcipher_req;
 	u8 stat_filed = qat_resp->comn_resp.comn_status;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
 
 	qat_alg_free_bufl(inst, qat_req);
 	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
 		res = -EINVAL;
 
+	if (qat_req->encryption)
+		qat_alg_update_iv(qat_req);
+
 	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
-	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-			  qat_req->iv_paddr);
 
 	sreq->base.complete(&sreq->base, res);
 }
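
Note: qat_alg_update_iv_ctr_mode() is a 128-bit big-endian add: the counter
advances by the number of AES blocks processed, with a manual carry from the
low into the high 64-bit word (iv_lo/iv_hi evidently overlay qat_req->iv in
struct qat_crypto_request). The same operation as a self-contained sketch,
assuming a plain 16-byte big-endian counter:

	#include <asm/unaligned.h>
	#include <linux/types.h>

	static void ctr_iv_add_blocks(u8 iv[16], u64 nblocks)
	{
		u64 hi = get_unaligned_be64(iv);
		u64 lo = get_unaligned_be64(iv + 8);

		put_unaligned_be64(lo + nblocks, iv + 8);
		if (lo + nblocks < lo)	/* low word wrapped: carry up */
			put_unaligned_be64(hi + 1, iv);
	}

For CBC the next IV is simply the last ciphertext block, which is why
qat_alg_update_iv_cbc_mode() copies it from dst on encryption and from src on
decryption. XTS needs no chaining across requests, so it is a no-op here.
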
@@ -981,6 +1084,8 @@ static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
 {
 	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
+	ctx->mode = mode;
+
 	if (ctx->enc_cd)
 		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
 	else
@@ -1023,8 +1128,33 @@ static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
 
 	ctx->fallback = false;
 
-	return qat_alg_skcipher_setkey(tfm, key, keylen,
-				       ICP_QAT_HW_CIPHER_XTS_MODE);
+	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
+				      ICP_QAT_HW_CIPHER_XTS_MODE);
+	if (ret)
+		return ret;
+
+	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
+		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
+					   keylen / 2);
+
+	return ret;
+}
+
+static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+	u8 *iv = qat_req->skcipher_req->iv;
+
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+
+	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
+		crypto_cipher_encrypt_one(ctx->tweak,
+					  (u8 *)cipher_param->u.cipher_IV_array,
+					  iv);
+	else
+		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
 }
 
 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
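
Note: since AES_V2 hardware is programmed with key1 of the XTS pair only
(keylen was halved in qat_alg_skcipher_init_com()), the initial tweak
T = AES-enc(key2, IV) is computed in software with the ctx->tweak cipher and
handed to the hardware in place of the raw IV. Condensed into a standalone
sketch; the driver, more efficiently, allocates the tweak tfm once per
skcipher tfm and keys it in setkey rather than per call:

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <crypto/aes.h>

	static int xts_make_initial_tweak(const u8 *key2, unsigned int key2len,
					  const u8 iv[AES_BLOCK_SIZE],
					  u8 tweak[AES_BLOCK_SIZE])
	{
		struct crypto_cipher *aes = crypto_alloc_cipher("aes", 0, 0);
		int ret;

		if (IS_ERR(aes))
			return PTR_ERR(aes);

		ret = crypto_cipher_setkey(aes, key2, key2len);
		if (!ret)
			crypto_cipher_encrypt_one(aes, tweak, iv);

		crypto_free_cipher(aes);
		return ret;
	}
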
@@ -1035,23 +1165,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int ret, ctr = 0;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-					 &qat_req->iv_paddr, GFP_ATOMIC);
-	if (!qat_req->iv)
-		return -ENOMEM;
-
 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
-	if (unlikely(ret)) {
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
+	if (unlikely(ret))
 		return ret;
-	}
 
 	msg = &qat_req->req;
 	*msg = ctx->enc_fw_req;
@@ -1061,19 +1182,19 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	qat_req->encryption = true;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+
+	qat_alg_set_req_iv(qat_req);
+
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
 
 	if (ret == -EAGAIN) {
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
 		return -EBUSY;
 	}
 	return -EINPROGRESS;
@@ -1113,23 +1234,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
 	int ret, ctr = 0;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
-					 &qat_req->iv_paddr, GFP_ATOMIC);
-	if (!qat_req->iv)
-		return -ENOMEM;
-
 	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
-	if (unlikely(ret)) {
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
+	if (unlikely(ret))
 		return ret;
-	}
 
 	msg = &qat_req->req;
 	*msg = ctx->dec_fw_req;
@@ -1139,19 +1251,20 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
 	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+	qat_req->encryption = false;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
-	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
+
+	qat_alg_set_req_iv(qat_req);
+	qat_alg_update_iv(qat_req);
+
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
 
 	if (ret == -EAGAIN) {
 		qat_alg_free_bufl(ctx->inst, qat_req);
-		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
-				  qat_req->iv_paddr);
 		return -EBUSY;
 	}
 	return -EINPROGRESS;
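
Note: the IV update is ordered differently in the two directions. Encryption
updates it in the completion callback, since the last ciphertext block only
exists once the hardware is done; decryption calls qat_alg_update_iv() before
submitting, because for CBC the next IV is the last block of the ciphertext
input, which an in-place operation would have overwritten by completion time.
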
@@ -1253,6 +1366,12 @@ static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
 	if (IS_ERR(ctx->ftfm))
 		return PTR_ERR(ctx->ftfm);
 
+	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->tweak)) {
+		crypto_free_skcipher(ctx->ftfm);
+		return PTR_ERR(ctx->tweak);
+	}
+
 	reqsize = max(sizeof(struct qat_crypto_request),
 		      sizeof(struct skcipher_request) +
 		      crypto_skcipher_reqsize(ctx->ftfm));
@@ -1295,6 +1414,9 @@ static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
 	if (ctx->ftfm)
 		crypto_free_skcipher(ctx->ftfm);
 
+	if (ctx->tweak)
+		crypto_free_cipher(ctx->tweak);
+
 	qat_alg_skcipher_exit_tfm(tfm);
 }