DPDK patches and discussions
From: Arek Kusztal <arkadiuszx.kusztal@intel.com>
To: dev@dpdk.org
Cc: akhil.goyal@nxp.com, fiona.trahe@intel.com,
	Arek Kusztal <arkadiuszx.kusztal@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/2] crypto/qat: add chacha poly implementation
Date: Wed, 15 Jan 2020 18:55:23 +0100	[thread overview]
Message-ID: <20200115175524.15796-2-arkadiuszx.kusztal@intel.com> (raw)
In-Reply-To: <20200115175524.15796-1-arkadiuszx.kusztal@intel.com>

This patch adds a Chacha20-Poly1305 AEAD implementation to the Intel
QuickAssist Technology (QAT) PMD.
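
For reference, an application requests the new algorithm through the
regular cryptodev AEAD transform. A minimal sketch follows (the key,
nonce and digest sizes mirror the capability entry added below; the
IV_OFFSET placement is an illustrative assumption borrowed from the
cryptodev test applications, not something this patch defines):

  #include <rte_crypto.h>
  #include <rte_cryptodev.h>

  /* 256-bit key, 96-bit nonce, 128-bit Poly1305 tag, per the QAT
   * capability added in this patch. */
  static uint8_t key[32];

  /* Assumption: IV stored in the crypto op private data, right after
   * the symmetric op, as commonly done in the cryptodev test apps. */
  #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

  struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.key = { .data = key, .length = sizeof(key) },
		.iv = { .offset = IV_OFFSET, .length = 12 },
		.digest_length = 16,
		.aad_length = 0,	/* QAT accepts 0..240 bytes of AAD */
	},
  };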

Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
---
 doc/guides/cryptodevs/features/qat.ini    |  13 +--
 doc/guides/cryptodevs/qat.rst             |   1 +
 doc/guides/rel_notes/release_20_02.rst    |   5 ++
 drivers/common/qat/qat_adf/icp_qat_hw.h   |  10 ++-
 drivers/crypto/qat/qat_sym_capabilities.h |  32 +++++++
 drivers/crypto/qat/qat_sym_pmd.c          |  11 ++-
 drivers/crypto/qat/qat_sym_session.c      | 144 ++++++++++++++++--------------
 7 files changed, 139 insertions(+), 77 deletions(-)

diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index 6e350eb..32591b5 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -60,12 +60,13 @@ AES CMAC (128) = Y
 ; Supported AEAD algorithms of the 'qat' crypto driver.
 ;
 [AEAD]
-AES GCM (128) = Y
-AES GCM (192) = Y
-AES GCM (256) = Y
-AES CCM (128) = Y
-AES CCM (192) = Y
-AES CCM (256) = Y
+AES GCM (128)     = Y
+AES GCM (192)     = Y
+AES GCM (256)     = Y
+AES CCM (128)     = Y
+AES CCM (192)     = Y
+AES CCM (256)     = Y
+CHACHA20-POLY1305 = Y
 
 ;
 ; Supported Asymmetric algorithms of the 'qat' crypto driver.
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 98ea9b5..fbc67e2 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -70,6 +70,7 @@ Supported AEAD algorithms:
 
 * ``RTE_CRYPTO_AEAD_AES_GCM``
 * ``RTE_CRYPTO_AEAD_AES_CCM``
+* ``RTE_CRYPTO_AEAD_CHACHA20_POLY1305``
 
 
 Supported Chains
diff --git a/doc/guides/rel_notes/release_20_02.rst b/doc/guides/rel_notes/release_20_02.rst
index 6207410..6cbe457 100644
--- a/doc/guides/rel_notes/release_20_02.rst
+++ b/doc/guides/rel_notes/release_20_02.rst
@@ -78,6 +78,11 @@ New Features
   Such algorithm combinations are not supported on GEN1/GEN2 hardware
   and executing the request returns RTE_CRYPTO_OP_STATUS_INVALID_SESSION.
 
+* **Updated the Intel QuickAssist Technology (QAT) symmetric crypto PMD.**
+
+  Added Chacha20-Poly1305 AEAD algorithm.
+
+
 Removed Items
 -------------
 
diff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h
index cef6486..fdc0f19 100644
--- a/drivers/common/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -204,7 +204,9 @@ enum icp_qat_hw_cipher_algo {
 	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
 	ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
 	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
-	ICP_QAT_HW_CIPHER_DELIMITER = 10
+	ICP_QAT_HW_CIPHER_ALGO_SM4 = 10,
+	ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 = 11,
+	ICP_QAT_HW_CIPHER_DELIMITER = 12
 };
 
 enum icp_qat_hw_cipher_mode {
@@ -306,6 +308,12 @@ enum icp_qat_hw_cipher_convert {
 #define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
 #define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
 #define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define ICP_QAT_HW_CHACHAPOLY_KEY_SZ 32
+#define ICP_QAT_HW_CHACHAPOLY_IV_SZ 12
+#define ICP_QAT_HW_CHACHAPOLY_BLK_SZ 64
+#define ICP_QAT_HW_SPC_CTR_SZ 16
+#define ICP_QAT_HW_CHACHAPOLY_ICV_SZ 16
+#define ICP_QAT_HW_CHACHAPOLY_AAD_MAX_LOG 14
 
 #define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
 
diff --git a/drivers/crypto/qat/qat_sym_capabilities.h b/drivers/crypto/qat/qat_sym_capabilities.h
index 028a56c..f919cf3 100644
--- a/drivers/crypto/qat/qat_sym_capabilities.h
+++ b/drivers/crypto/qat/qat_sym_capabilities.h
@@ -594,4 +594,36 @@
 		}, }							\
 	}
 
+#define QAT_EXTRA_GEN3_SYM_CAPABILITIES					\
+	{	/* Chacha20-Poly1305 */					\
+	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305, \
+				.block_size = 64,			\
+				.key_size = {				\
+					.min = 32,			\
+					.max = 32,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.aad_size = {				\
+					.min = 0,			\
+					.max = 240,			\
+					.increment = 1			\
+				},					\
+				.iv_size = {				\
+					.min = 12,			\
+					.max = 12,			\
+					.increment = 0			\
+				},					\
+			}, }						\
+		}, }							\
+	}
+
 #endif /* _QAT_SYM_CAPABILITIES_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index 666ede7..e0aa25c 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -27,6 +27,13 @@ static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
+static const struct rte_cryptodev_capabilities qat_gen3_sym_capabilities[] = {
+	QAT_BASE_GEN1_SYM_CAPABILITIES,
+	QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+	QAT_EXTRA_GEN3_SYM_CAPABILITIES,
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
 static int qat_sym_qp_release(struct rte_cryptodev *dev,
 	uint16_t queue_pair_id);
 
@@ -294,9 +301,11 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 		internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
 		break;
 	case QAT_GEN2:
-	case QAT_GEN3:
 		internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
 		break;
+	case QAT_GEN3:
+		internals->qat_dev_capabilities = qat_gen3_sym_capabilities;
+		break;
 	default:
 		internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
 		QAT_LOG(DEBUG,
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 4359f2f..1adef8f 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -576,69 +576,68 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 }
 
 static int
-qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
-		struct qat_sym_session *session,
+qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 		struct rte_crypto_aead_xform *aead_xform)
 {
-	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *) &session->fw_req.serv_specif_rqpars;
 
-	if (qat_dev_gen == QAT_GEN3 &&
-			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
-		/* Use faster Single-Pass GCM */
-		struct icp_qat_fw_la_cipher_req_params *cipher_param =
-				(void *) &session->fw_req.serv_specif_rqpars;
-
-		session->is_single_pass = 1;
-		session->min_qat_dev_gen = QAT_GEN3;
-		session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+	session->is_single_pass = 1;
+	session->min_qat_dev_gen = QAT_GEN3;
+	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-		session->cipher_iv.offset = aead_xform->iv.offset;
-		session->cipher_iv.length = aead_xform->iv.length;
-		if (qat_sym_session_aead_create_cd_cipher(session,
-				aead_xform->key.data, aead_xform->key.length))
-			return -EINVAL;
-		session->aad_len = aead_xform->aad_length;
-		session->digest_length = aead_xform->digest_length;
-		if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
-			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-			session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
-			ICP_QAT_FW_LA_RET_AUTH_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_RET_AUTH_RES);
-		} else {
-			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-			session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
-			ICP_QAT_FW_LA_CMP_AUTH_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_CMP_AUTH_RES);
-		}
-		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
-		ICP_QAT_FW_LA_PROTO_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
 		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-				session->fw_req.comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		session->fw_req.comn_hdr.service_cmd_id =
-				ICP_QAT_FW_LA_CMD_CIPHER;
-		session->cd.cipher.cipher_config.val =
-				ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-					ICP_QAT_HW_CIPHER_AEAD_MODE,
-					session->qat_cipher_alg,
-					ICP_QAT_HW_CIPHER_NO_CONVERT,
-					session->qat_dir);
-		QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
-				aead_xform->digest_length,
-				QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
-				QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
-		session->cd.cipher.cipher_config.reserved =
-				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
-					aead_xform->aad_length);
-		cipher_param->spc_aad_sz = aead_xform->aad_length;
-		cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+	} else {
+		/* Chacha-Poly is a special case that uses QAT CTR mode */
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+	}
+	session->cipher_iv.offset = aead_xform->iv.offset;
+	session->cipher_iv.length = aead_xform->iv.length;
+	if (qat_sym_session_aead_create_cd_cipher(session,
+			aead_xform->key.data, aead_xform->key.length))
+		return -EINVAL;
+	session->aad_len = aead_xform->aad_length;
+	session->digest_length = aead_xform->digest_length;
+	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+		ICP_QAT_FW_LA_RET_AUTH_SET(
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_RET_AUTH_RES);
+	} else {
+		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+		session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+		ICP_QAT_FW_LA_CMP_AUTH_SET(
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_CMP_AUTH_RES);
 	}
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			session->fw_req.comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+	session->fw_req.comn_hdr.service_cmd_id =
+			ICP_QAT_FW_LA_CMD_CIPHER;
+	session->cd.cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				session->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				session->qat_dir);
+	QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+			aead_xform->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	session->cd.cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+				aead_xform->aad_length);
+	cipher_param->spc_aad_sz = aead_xform->aad_length;
+	cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+
 	return 0;
 }
 
@@ -791,6 +790,10 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
 	enum rte_crypto_auth_operation crypto_operation;
+	struct qat_sym_dev_private *internals =
+			dev->data->dev_private;
+	enum qat_device_gen qat_dev_gen =
+			internals->qat_dev->qat_dev_gen;
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -799,6 +802,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 	session->cipher_iv.offset = xform->aead.iv.offset;
 	session->cipher_iv.length = xform->aead.iv.length;
 
+	session->is_single_pass = 0;
 	switch (aead_xform->algo) {
 	case RTE_CRYPTO_AEAD_AES_GCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -807,7 +811,13 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 			return -EINVAL;
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+		session->qat_hash_alg =
+				ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+		if (qat_dev_gen > QAT_GEN2 && aead_xform->iv.length ==
+				QAT_AES_GCM_SPC_IV_SIZE) {
+			return qat_sym_session_handle_single_pass(session,
+						aead_xform);
+		}
 		break;
 	case RTE_CRYPTO_AEAD_AES_CCM:
 		if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -818,23 +828,19 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
 		break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
+			return -EINVAL;
+		session->qat_cipher_alg =
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
+		return qat_sym_session_handle_single_pass(session,
+						aead_xform);
 	default:
 		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
 				aead_xform->algo);
 		return -EINVAL;
 	}
 
-	session->is_single_pass = 0;
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
-		/* Use faster Single-Pass GCM if possible */
-		int res = qat_sym_session_handle_single_pass(
-				dev->data->dev_private, session, aead_xform);
-		if (res < 0)
-			return res;
-		if (session->is_single_pass)
-			return 0;
-	}
-
 	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
-- 
2.1.0


Thread overview: 5+ messages
2020-01-15 17:55 [dpdk-dev] [PATCH v2 0/2] Add Chacha20-Poly1305 algorithm to QAT Arek Kusztal
2020-01-15 17:55 ` Arek Kusztal [this message]
2020-01-15 17:55 ` [dpdk-dev] [PATCH v2 2/2] test/cryptodev: add chacha poly test cases to cryptodev Arek Kusztal
2020-01-15 18:01 ` [dpdk-dev] [PATCH v2 0/2] Add Chacha20-Poly1305 algorithm to QAT Trahe, Fiona
2020-01-16 16:32   ` Akhil Goyal
