From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: declan.doherty@intel.com, fiona.trahe@intel.com,
	deepak.k.jain@intel.com, john.griffin@intel.com
Cc: dev@dpdk.org, Arek Kusztal <arkadiuszx.kusztal@intel.com>
Subject: [dpdk-dev] [PATCH v2 7/9] crypto/qat: add AES-CCM support
Date: Thu, 21 Sep 2017 14:11:20 +0100
Message-ID: <20170921131123.16513-8-pablo.de.lara.guarch@intel.com>
In-Reply-To: <20170921131123.16513-1-pablo.de.lara.guarch@intel.com>

From: Arek Kusztal <arkadiuszx.kusztal@intel.com>

This patch adds the AES-CCM AEAD cipher and hash algorithm to the
Intel QuickAssist Technology driver.

Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
---
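For reference, a minimal sketch of how an application could configure an
AES-CCM AEAD transform for this PMD, assuming the generic cryptodev AEAD
API and the CCM conventions clarified in patch 1/9 of this series (nonce
written at IV offset 1, AAD buffer reserving its first 18 bytes). The
helper name and all sizes below are illustrative only, not taken from
this patch:

#include <rte_crypto_sym.h>

/* Illustrative values only: the QAT capabilities added below allow a
 * 16-byte key (AES-128), a 4-16 byte digest, a 7-13 byte nonce and up
 * to 224 bytes of AAD.
 */
static void
setup_aes_ccm_xform(struct rte_crypto_sym_xform *xform,
		const uint8_t *key, uint16_t iv_offset)
{
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_CCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = 16;
	/* The nonce itself is written at iv.offset + 1 in the op;
	 * iv.length is the nonce length.
	 */
	xform->aead.iv.offset = iv_offset;
	xform->aead.iv.length = 12;
	xform->aead.digest_length = 16;
	/* True AAD length; the op's AAD buffer additionally reserves
	 * its first 18 bytes for the B0 block and the AAD length field.
	 */
	xform->aead.aad_length = 32;
}
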
 drivers/crypto/qat/qat_adf/icp_qat_hw.h          |  20 +++
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c |  28 ++++
 drivers/crypto/qat/qat_crypto.c                  | 169 +++++++++++++++++++++--
 drivers/crypto/qat/qat_crypto_capabilities.h     |  30 ++++
 4 files changed, 233 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index ebe245f..d03688c 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -301,6 +301,26 @@ enum icp_qat_hw_cipher_convert {
 
 #define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
 
+/* These defines describe the positions of the bit-fields
+ * in the flags byte of B0
+ */
+#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT      6
+#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT          3
+
+#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q)                  \
+	((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
+	| ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
+	| ((q) - 1))
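+/* Example: with AAD present, a 16-byte tag (t = 16) and a 13-byte
+ * nonce (q = 15 - 13 = 2), this evaluates to
+ * (1 << 6) | (7 << 3) | 1 = 0x79, the B0 flags byte of RFC 3610.
+ */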
+
+#define ICP_QAT_HW_CCM_NQ_CONST 15
+#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
+#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
+#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
+		ICP_QAT_HW_CCM_AAD_LEN_INFO)
+#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
+#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
+#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
+
 struct icp_qat_hw_cipher_algo_blk {
 	struct icp_qat_hw_cipher_config cipher_config;
 	uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 2d16c9e..db6c9a3 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -124,6 +124,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_NULL:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
 		/* return maximum state1 size in this case */
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -876,6 +879,31 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 				ICP_QAT_HW_AUTH_ALGO_NULL);
 		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
 		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
+		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
+				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
+
+		if (aad_length > 0) {
+			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			auth_param->u2.aad_sz =
+					RTE_ALIGN_CEIL(aad_length,
+					ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+		} else {
+			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
+		}
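+		/* Example: 20 bytes of AAD give 20 + 16 + 2 = 38 bytes,
+		 * rounded up to an aad_sz of 48.
+		 */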
+
+		cdesc->aad_len = aad_length;
+		hash->auth_counter.counter = 0;
+
+		hash_cd_ctrl->outer_prefix_sz = digestsize;
+		auth_param->hash_state_sz = digestsize;
+
+		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+		break;
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index a2b202f..ae73c78 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,6 +59,7 @@
 #include <rte_hexdump.h>
 #include <rte_crypto_sym.h>
 #include <rte_cryptodev_pci.h>
+#include <rte_byteorder.h>
 #include <openssl/evp.h>
 
 #include "qat_logs.h"
@@ -251,10 +252,21 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 
 	/* AEAD */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in different directions:
+		 * GCM first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts.
+		 * A similar relation applies to decryption.
+		 */
 		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
-			return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			else
+				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
 		else
-			return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			else
+				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 	}
 
 	if (xform->next == NULL)
@@ -734,6 +746,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
 				struct qat_session *session)
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+	enum rte_crypto_auth_operation crypto_operation;
 
 	/*
 	 * Store AEAD IV parameters as cipher IV,
@@ -753,21 +766,33 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
 		break;
 	case RTE_CRYPTO_AEAD_AES_CCM:
-		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
-				aead_xform->algo);
-		return -ENOTSUP;
+		if (qat_alg_validate_aes_key(aead_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid AES key size");
+			return -EINVAL;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
 				aead_xform->algo);
 		return -EINVAL;
 	}
 
-	if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
 		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
 		/*
 		 * It needs to create cipher desc content first,
 		 * then authentication
 		 */
+
+		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
 		if (qat_alg_aead_session_create_content_desc_cipher(session,
 					aead_xform->key.data,
 					aead_xform->key.length))
@@ -778,7 +803,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
 					aead_xform->key.length,
 					aead_xform->aad_length,
 					aead_xform->digest_length,
-					RTE_CRYPTO_AUTH_OP_GENERATE))
+					crypto_operation))
 			return -EINVAL;
 	} else {
 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
@@ -786,12 +811,16 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
 		 * It needs to create authentication desc content first,
 		 * then cipher
 		 */
+
+		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
 		if (qat_alg_aead_session_create_content_desc_auth(session,
 					aead_xform->key.data,
 					aead_xform->key.length,
 					aead_xform->aad_length,
 					aead_xform->digest_length,
-					RTE_CRYPTO_AUTH_OP_VERIFY))
+					crypto_operation))
 			return -EINVAL;
 
 		if (qat_alg_aead_session_create_content_desc_cipher(session,
@@ -1043,7 +1072,6 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
 		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
 			sizeof(struct icp_qat_fw_comn_resp));
-
 #endif
 		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -1153,6 +1181,29 @@ set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 	}
 }
 
+/** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
+ *  where q is the size of the length field left after the nonce in the
+ *  16-byte block (q = 15 - nonce length)
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+		struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+			ICP_QAT_HW_CCM_NONCE_OFFSET,
+			rte_crypto_op_ctod_offset(op, uint8_t *,
+				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			iv_length);
+	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
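+	/* Example: a 12-byte nonce gives q = 3, so byte 0 of the
+	 * counter block is set to 2.
+	 */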
+
+	if (aad_len_field_sz)
+		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+			rte_crypto_op_ctod_offset(op, uint8_t *,
+				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			iv_length);
+}
+
 static inline int
 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 		struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
@@ -1197,6 +1248,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
+
+
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -1205,9 +1258,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 
 	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
 			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
-		/* AES-GCM */
+		/* AES-GCM or AES-CCM */
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+				(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+				&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+				&& ctx->qat_hash_alg ==
+						ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
 			do_aead = 1;
 		} else {
 			do_auth = 1;
@@ -1314,6 +1371,11 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 	}
 
 	if (do_aead) {
+		/*
+		 * This address may later be replaced with the IV offset
+		 * within the op, when AES-CCM carries no actual AAD data
+		 */
+		phys_addr_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
 		if (ctx->qat_hash_alg ==
 				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 				ctx->qat_hash_alg ==
@@ -1327,6 +1389,87 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
 			}
 
+			set_cipher_iv(ctx->cipher_iv.length,
+					ctx->cipher_iv.offset,
+					cipher_param, op, qat_req);
+
+		} else if (ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or to the IV offset in the crypto_op
+			 */
+			uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include
+			 * the 18 bytes of preceding data (B0 block and AAD
+			 * length field)
+			 */
+			uint8_t aad_ccm_real_len = 0;
+
+			uint8_t aad_len_field_sz = 0;
+			uint32_t msg_len_be =
+					rte_bswap32(op->sym->aead.data.length);
+
+			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+				aad_ccm_real_len = ctx->aad_len -
+					ICP_QAT_HW_CCM_AAD_B0_LEN -
+					ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			} else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV area after
+				 * the op for the B0 block
+				 */
+				aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
+						ctx->cipher_iv.offset);
+				aad_phys_addr_aead =
+						rte_crypto_op_ctophys_offset(op,
+								ctx->cipher_iv.offset);
+			}
+
+			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+
+			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+							ctx->digest_length, q);
+
+			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+				memcpy(aad_data	+ ctx->cipher_iv.length +
+					ICP_QAT_HW_CCM_NONCE_OFFSET
+					+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+					(uint8_t *)&msg_len_be,
+					ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+			} else {
+				memcpy(aad_data	+ ctx->cipher_iv.length +
+					ICP_QAT_HW_CCM_NONCE_OFFSET,
+					(uint8_t *)&msg_len_be
+					+ (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+					- q), q);
+			}
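+			/* Example: with a 13-byte nonce (q = 2) and a
+			 * 1024-byte payload, bytes 14-15 of B0 carry
+			 * 0x04 0x00.
+			 */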
+
+			if (aad_len_field_sz > 0) {
+				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+						= rte_bswap16(aad_ccm_real_len);
+
+				if ((aad_ccm_real_len + aad_len_field_sz)
+						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+					uint8_t pad_len = 0;
+					uint8_t pad_idx = 0;
+
+					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+						((aad_ccm_real_len + aad_len_field_sz) %
+							ICP_QAT_HW_CCM_AAD_B0_LEN);
+					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+						aad_ccm_real_len + aad_len_field_sz;
+					memset(&aad_data[pad_idx],
+							0, pad_len);
+				}
+
+			}
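+			/* Resulting AAD buffer layout: bytes 0-15 hold B0
+			 * (flags, nonce, message length), bytes 16-17 the
+			 * big-endian AAD length, and the AAD itself follows
+			 * from byte 18, zero-padded to a 16-byte boundary.
+			 */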
+
+			set_cipher_iv_ccm(ctx->cipher_iv.length,
+					ctx->cipher_iv.offset,
+					cipher_param, op, q,
+					aad_len_field_sz);
+
 		}
 
 		cipher_len = op->sym->aead.data.length;
@@ -1334,10 +1477,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 		auth_len = op->sym->aead.data.length;
 		auth_ofs = op->sym->aead.data.offset;
 
-		auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+		auth_param->u1.aad_adr = aad_phys_addr_aead;
 		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
-		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
-				cipher_param, op, qat_req);
 		min_ofs = op->sym->aead.data.offset;
 	}
 
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 7012007..d8d3fa1 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -183,6 +183,36 @@
 			}, }						\
 		}, }							\
 	},								\
+	{	/* AES CCM */						\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,	\
+			{.aead = {					\
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 4,			\
+					.max = 16,			\
+					.increment = 2			\
+				},					\
+				.aad_size = {				\
+					.min = 0,			\
+					.max = 224,			\
+					.increment = 1			\
+				},					\
+				.iv_size = {				\
+					.min = 7,			\
+					.max = 13,			\
+					.increment = 1			\
+				},					\
+			}, }						\
+		}, }							\
+	},								\
 	{	/* AES GCM */						\
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
 		{.sym = {						\
-- 
2.9.4

Thread overview: 21+ messages
2017-08-18  8:07 [dpdk-dev] [PATCH 0/4] Add support for AES-CCM Pablo de Lara
2017-08-18  8:07 ` [dpdk-dev] [PATCH 1/4] crypto/openssl: fix AEAD parameters Pablo de Lara
2017-08-18  8:07 ` [dpdk-dev] [PATCH] test/crypto: rename GCM test code Pablo de Lara
2017-08-18 16:09   ` De Lara Guarch, Pablo
2017-08-18  8:07 ` [dpdk-dev] [PATCH 2/4] crypto/openssl: init GCM key at session creation Pablo de Lara
2017-08-18  8:07 ` [dpdk-dev] [PATCH 3/4] test/crypto: rename GCM test code Pablo de Lara
2017-08-18  8:07 ` [dpdk-dev] [PATCH 4/4] crypto/openssl: add AES-CCM support Pablo de Lara
2017-09-21 13:11 ` [dpdk-dev] [PATCH v2 0/9] Add support for AES-CCM Pablo de Lara
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 1/9] cryptodev: clarify API " Pablo de Lara
2017-10-09  9:57     ` Trahe, Fiona
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 2/9] examples/l2fwd-crypto: add AES-CCM support Pablo de Lara
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 3/9] app/crypto-perf: " Pablo de Lara
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 4/9] crypto/openssl: fix AEAD parameters Pablo de Lara
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 5/9] crypto/openssl: init GCM key at session creation Pablo de Lara
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 6/9] crypto/openssl: add AES-CCM support Pablo de Lara
2017-09-21 13:11   ` Pablo de Lara [this message]
2017-10-09  9:55     ` [dpdk-dev] [PATCH v2 7/9] crypto/qat: " Trahe, Fiona
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 8/9] test/crypto: rename GCM test code Pablo de Lara
2017-09-21 13:11   ` [dpdk-dev] [PATCH v2 9/9] test/crypto: add AES-CCM tests Pablo de Lara
2017-10-05  9:12   ` [dpdk-dev] [PATCH v2 0/9] Add support for AES-CCM Zhang, Roy Fan
2017-10-09 10:10   ` De Lara Guarch, Pablo
