DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
@ 2020-09-11 12:08 Pablo de Lara
  2020-09-11 12:08 ` [dpdk-dev] [PATCH 2/2] test/crypto: add SGL tests for AES-GMAC Pablo de Lara
  2020-09-22 10:01 ` [dpdk-dev] [PATCH v2 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  0 siblings, 2 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-09-11 12:08 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-gather list support for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
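
Note (illustrative only, not part of the diff below): with intel-ipsec-mb
versions newer than 0.54, which expose the direct GMAC API used here, the
PMD initialises the GMAC context once, calls gmac_update() once per mbuf
segment and finalises the tag at the end. A minimal sketch of that walk;
the gmac_hash_sgl() name and the ops/key/ctx/offset parameters are
stand-ins for the queue-pair and session state the real code uses:

#include <rte_common.h>
#include <rte_mbuf.h>
#include "aesni_gcm_ops.h" /* aesni_gcm_ops, gcm_key_data, gcm_context_data */

/*
 * Hash a chained mbuf with AES-GMAC, one gmac_update() per segment.
 * Assumes the authenticated region starts at "offset" within the first
 * segment and spans "data_length" bytes of the chain.
 */
static void
gmac_hash_sgl(const struct aesni_gcm_ops *ops,
		const struct gcm_key_data *key, struct gcm_context_data *ctx,
		struct rte_mbuf *m_src, uint32_t offset, uint32_t data_length,
		const uint8_t *iv, uint32_t iv_len,
		uint8_t *tag, uint32_t tag_len)
{
	const uint8_t *src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
	uint32_t part_len = RTE_MIN((uint32_t)m_src->data_len - offset, data_length);
	uint32_t total_len = data_length - part_len;

	ops->gmac_init(key, ctx, iv, iv_len);
	ops->gmac_update(key, ctx, src, part_len);

	while (total_len) { /* remaining segments of the chain */
		m_src = m_src->next;
		src = rte_pktmbuf_mtod(m_src, uint8_t *);
		part_len = RTE_MIN((uint32_t)m_src->data_len, total_len);
		ops->gmac_update(key, ctx, src, part_len);
		total_len -= part_len;
	}

	ops->gmac_finalize(key, ctx, tag, tag_len);
}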
 doc/guides/cryptodevs/aesni_gcm.rst      |  2 -
 doc/guides/rel_notes/release_20_11.rst   |  4 ++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 27 ++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88 +++++++++++++++++++++++-
 4 files changed, 118 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 74e0de63a..0e146486e 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -31,8 +31,6 @@ Limitations
 -----------
 
 * In out-of-place operations, chained destination mbufs are not supported.
-* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
-  not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.
 
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index df227a177..98fd4ae0f 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated the AESNI GCM crypto PMD.**
+
+  * Added SGL support for AES-GMAC.
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 74acac09c..8a0d074b6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -53,6 +53,23 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
+		const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *in,
+		const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		uint8_t *auth_tag,
+		const uint64_t auth_tag_len);
+#endif
+
 /** GCM library function pointer table */
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;        /**< GCM encode function pointer */
@@ -63,6 +80,11 @@ struct aesni_gcm_ops {
 	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize_enc;
 	aesni_gcm_finalize_t finalize_dec;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 /** GCM per-session operation handlers */
@@ -72,6 +94,11 @@ struct aesni_gcm_session_ops {
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update;
 	aesni_gcm_finalize_t finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..aea599ebf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -350,6 +350,76 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp->temp_digest;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	}
+#else
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
 		qp->ops[session->key].init(&session->gdata_key,
 				&qp->gdata_ctx,
@@ -381,6 +451,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				tag,
 				session->gen_digest_length);
 	}
+#endif
 
 	return 0;
 }
@@ -769,7 +840,7 @@ aesni_gcm_create(const char *name,
 		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
 		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
@@ -791,6 +862,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
 	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
 	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
 
 	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
 	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
@@ -800,6 +876,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
 	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
 	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
 
 	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
 	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
@@ -809,6 +890,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
 	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
 	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
-- 
2.25.1


* [dpdk-dev] [PATCH 2/2] test/crypto: add SGL tests for AES-GMAC
  2020-09-11 12:08 [dpdk-dev] [PATCH 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
@ 2020-09-11 12:08 ` Pablo de Lara
  2020-09-22 10:01 ` [dpdk-dev] [PATCH v2 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  1 sibling, 0 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-09-11 12:08 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-Gather List tests for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
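
Note (not part of the patch): the tests below build the chained input by
hand, setting buf->next and nb_segs directly. For reference, an equivalent
way for an application to build such an SGL input is rte_pktmbuf_chain();
this is a sketch only, with pool/plaintext/plen/fragsz/taglen as
placeholders and error handling omitted:

#include <string.h>
#include <rte_common.h>
#include <rte_mbuf.h>

static struct rte_mbuf *
build_sgl_input(struct rte_mempool *pool, const uint8_t *plaintext,
		uint32_t plen, uint32_t fragsz, uint16_t taglen,
		uint8_t **digest, rte_iova_t *digest_iova)
{
	struct rte_mbuf *head = NULL, *seg = NULL;
	uint32_t off = 0;

	while (off < plen) {
		uint32_t chunk = RTE_MIN(fragsz, plen - off);

		/* Fill a fresh segment, then chain it onto the head */
		seg = rte_pktmbuf_alloc(pool);
		memcpy(rte_pktmbuf_append(seg, chunk), plaintext + off, chunk);
		off += chunk;

		if (head == NULL)
			head = seg;
		else
			rte_pktmbuf_chain(head, seg); /* updates nb_segs/pkt_len */
	}

	/* Digest goes right after the plaintext, in the tail segment */
	*digest = (uint8_t *)rte_pktmbuf_append(head, taglen);
	seg = rte_pktmbuf_lastseg(head);
	*digest_iova = rte_pktmbuf_iova_offset(seg, seg->data_len - taglen);

	return head;
}

The crypto op then points auth.data.offset/length at the plaintext and
auth.digest.data/phys_addr at the returned digest location, as
create_gmac_operation_sgl() does below.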
 app/test/test_cryptodev.c | 220 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 218 insertions(+), 2 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 70bf6fe2c..1d61aaac7 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -52,6 +52,8 @@
 #define IN_PLACE 0
 #define OUT_OF_PLACE 1
 
+#define SGL_MAX_NO	16
+
 static int gbl_driver_id;
 
 static enum rte_security_session_action_type gbl_action_type =
@@ -9995,6 +9997,53 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	return 0;
 }
 
+static int
+create_gmac_operation_sgl(enum rte_crypto_auth_operation op,
+		const struct gmac_test_data *tdata,
+		void *digest_mem, uint64_t digest_phys)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_crypto_sym_op *sym_op;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = digest_mem;
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = digest_phys;
+
+	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
+				tdata->gmac_tag.len);
+		debug_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				tdata->gmac_tag.len);
+	}
+
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+			uint8_t *, IV_OFFSET);
+
+	rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+
+	debug_hexdump(stdout, "iv:", iv_ptr, tdata->iv.len);
+
+	sym_op->cipher.data.length = 0;
+	sym_op->cipher.data.offset = 0;
+
+	sym_op->auth.data.offset = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
+
+	return 0;
+}
+
 static int create_gmac_session(uint8_t dev_id,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
@@ -10251,6 +10300,167 @@ test_AES_GMAC_authentication_verify_test_case_4(void)
 	return test_AES_GMAC_authentication_verify(&gmac_test_case_4);
 }
 
+static int
+test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata,
+				uint32_t fragsz)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feature_flags;
+	unsigned int trn_data = 0;
+	void *digest_mem = NULL;
+	uint32_t segs = 1;
+	unsigned int to_trn = 0;
+	struct rte_mbuf *buf = NULL;
+	uint8_t *auth_tag, *plaintext;
+	int retval;
+
+	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
+			      "No GMAC length in the source data");
+
+	/* Verify the capabilities */
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = RTE_CRYPTO_AUTH_AES_GMAC;
+	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+			&cap_idx) == NULL)
+		return -ENOTSUP;
+
+	/* Check for any input SGL support */
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feature_flags = dev_info.feature_flags;
+
+	if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
+		return -ENOTSUP;
+
+	if (fragsz > tdata->plaintext.len)
+		fragsz = tdata->plaintext.len;
+
+	uint16_t plaintext_len = fragsz;
+
+	retval = create_gmac_session(ts_params->valid_devs[0],
+			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
+
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+			"Failed to allocate input buffer in mempool");
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+	trn_data += plaintext_len;
+
+	buf = ut_params->ibuf;
+
+	/*
+	 * Loop until no more fragments
+	 */
+
+	while (trn_data < tdata->plaintext.len) {
+		++segs;
+		to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+				(tdata->plaintext.len - trn_data) : fragsz;
+
+		buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+		buf = buf->next;
+
+		memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+				rte_pktmbuf_tailroom(buf));
+
+		plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+				to_trn);
+
+		memcpy(plaintext, tdata->plaintext.data + trn_data,
+				to_trn);
+		trn_data += to_trn;
+		if (trn_data  == tdata->plaintext.len)
+			digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+					tdata->gmac_tag.len);
+	}
+	ut_params->ibuf->nb_segs = segs;
+
+	/*
+	 * Place digest at the end of the last buffer
+	 */
+	uint64_t digest_phys = rte_pktmbuf_iova(buf) + to_trn;
+
+	if (!digest_mem) {
+		digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				+ tdata->gmac_tag.len);
+		digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
+				tdata->plaintext.len);
+	}
+
+	retval = create_gmac_operation_sgl(RTE_CRYPTO_AUTH_OP_GENERATE,
+			tdata, digest_mem, digest_phys);
+
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+		return -ENOTSUP;
+
+	TEST_ASSERT_NOT_NULL(
+		process_crypto_request(ts_params->valid_devs[0],
+		ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	auth_tag = digest_mem;
+	debug_hexdump(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->gmac_tag.data,
+			tdata->gmac_tag.len,
+			"GMAC Generated auth tag not as expected");
+
+	return 0;
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_40B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 40);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_80B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 80);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_400B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 400);
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_401B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 401);
+}
+
 struct test_crypto_vector {
 	enum rte_crypto_cipher_algorithm crypto_algo;
 	unsigned int cipher_offset;
@@ -11245,8 +11455,6 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op,
 	return 0;
 }
 
-#define SGL_MAX_NO	16
-
 static int
 test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 		const int oop, uint32_t fragsz, uint32_t fragsz_oop)
@@ -12164,6 +12372,14 @@ static struct unit_test_suite cryptodev_testsuite  = {
 			test_AES_GMAC_authentication_test_case_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GMAC_authentication_verify_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_40B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_80B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_400B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_401B),
 		/** Chacha20-Poly1305 */
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
-- 
2.25.1


* [dpdk-dev] [PATCH v2 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-09-11 12:08 [dpdk-dev] [PATCH 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2020-09-11 12:08 ` [dpdk-dev] [PATCH 2/2] test/crypto: add SGL tests for AES-GMAC Pablo de Lara
@ 2020-09-22 10:01 ` Pablo de Lara
  2020-09-22 10:01   ` [dpdk-dev] [PATCH v2 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
  2020-09-22 10:34   ` [dpdk-dev] [PATCH v3 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  1 sibling, 2 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-09-22 10:01 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-gather list support for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

Changes:
- v2: no change

---

 doc/guides/cryptodevs/aesni_gcm.rst      |  2 -
 doc/guides/rel_notes/release_20_11.rst   |  4 ++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 27 ++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88 +++++++++++++++++++++++-
 4 files changed, 118 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 74e0de63a..0e146486e 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -31,8 +31,6 @@ Limitations
 -----------
 
 * In out-of-place operations, chained destination mbufs are not supported.
-* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
-  not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.
 
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index df227a177..98fd4ae0f 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated the AESNI GCM crypto PMD.**
+
+  * Added SGL support for AES-GMAC.
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 74acac09c..8a0d074b6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -53,6 +53,23 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
+		const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *in,
+		const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		uint8_t *auth_tag,
+		const uint64_t auth_tag_len);
+#endif
+
 /** GCM library function pointer table */
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;        /**< GCM encode function pointer */
@@ -63,6 +80,11 @@ struct aesni_gcm_ops {
 	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize_enc;
 	aesni_gcm_finalize_t finalize_dec;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 /** GCM per-session operation handlers */
@@ -72,6 +94,11 @@ struct aesni_gcm_session_ops {
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update;
 	aesni_gcm_finalize_t finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..aea599ebf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -350,6 +350,76 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp->temp_digest;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	}
+#else
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
 		qp->ops[session->key].init(&session->gdata_key,
 				&qp->gdata_ctx,
@@ -381,6 +451,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				tag,
 				session->gen_digest_length);
 	}
+#endif
 
 	return 0;
 }
@@ -769,7 +840,7 @@ aesni_gcm_create(const char *name,
 		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
 		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
@@ -791,6 +862,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
 	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
 	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
 
 	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
 	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
@@ -800,6 +876,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
 	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
 	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
 
 	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
 	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
@@ -809,6 +890,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
 	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
 	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
-- 
2.25.1


* [dpdk-dev] [PATCH v2 2/2] test/crypto: add GMAC SGL tests
  2020-09-22 10:01 ` [dpdk-dev] [PATCH v2 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
@ 2020-09-22 10:01   ` Pablo de Lara
  2020-09-22 10:34   ` [dpdk-dev] [PATCH v3 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  1 sibling, 0 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-09-22 10:01 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

---

Changes:
v2:
  - Modified segment size to reduce the maximum number of segments needed
---
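
For scale (derived from the hunks below, not stated in the changelog): the
larger 4096/4097-byte fragments mean the large GMAC vector needs far fewer
mbuf segments, and the accompanying test_cryptodev.h change roughly
preserves the mempool data footprint, taking DIGEST_BYTE_LENGTH_SHA512 as
64 bytes and ignoring headroom and per-mbuf struct overhead:

  before: 8191 * (2048 + 64) B ~= 16.5 MiB
  after:  4095 * (4096 + 64) B ~= 16.2 MiB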

 app/test/test_cryptodev.c | 220 +++++++++++++++++++++++++++++++++++++-
 app/test/test_cryptodev.h |   4 +-
 2 files changed, 220 insertions(+), 4 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 70bf6fe2c..849823d8f 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -52,6 +52,8 @@
 #define IN_PLACE 0
 #define OUT_OF_PLACE 1
 
+#define SGL_MAX_NO	16
+
 static int gbl_driver_id;
 
 static enum rte_security_session_action_type gbl_action_type =
@@ -9995,6 +9997,53 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	return 0;
 }
 
+static int
+create_gmac_operation_sgl(enum rte_crypto_auth_operation op,
+		const struct gmac_test_data *tdata,
+		void *digest_mem, uint64_t digest_phys)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_crypto_sym_op *sym_op;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = digest_mem;
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = digest_phys;
+
+	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
+				tdata->gmac_tag.len);
+		debug_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				tdata->gmac_tag.len);
+	}
+
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+			uint8_t *, IV_OFFSET);
+
+	rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+
+	debug_hexdump(stdout, "iv:", iv_ptr, tdata->iv.len);
+
+	sym_op->cipher.data.length = 0;
+	sym_op->cipher.data.offset = 0;
+
+	sym_op->auth.data.offset = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
+
+	return 0;
+}
+
 static int create_gmac_session(uint8_t dev_id,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
@@ -10251,6 +10300,166 @@ test_AES_GMAC_authentication_verify_test_case_4(void)
 	return test_AES_GMAC_authentication_verify(&gmac_test_case_4);
 }
 
+static int
+test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata,
+                                 uint32_t fragsz)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feature_flags;
+	unsigned int trn_data = 0;
+	void *digest_mem = NULL;
+	uint32_t segs = 1;
+	unsigned int to_trn = 0;
+	struct rte_mbuf *buf = NULL;
+	uint8_t *auth_tag, *plaintext;
+	int retval;
+
+	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
+			      "No GMAC length in the source data");
+
+	/* Verify the capabilities */
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = RTE_CRYPTO_AUTH_AES_GMAC;
+	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+			&cap_idx) == NULL)
+		return -ENOTSUP;
+
+	/* Check for any input SGL support */
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feature_flags = dev_info.feature_flags;
+
+        if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
+		return -ENOTSUP;
+
+	if (fragsz > tdata->plaintext.len)
+		fragsz = tdata->plaintext.len;
+
+	uint16_t plaintext_len = fragsz;
+
+	retval = create_gmac_session(ts_params->valid_devs[0],
+			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
+
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+			"Failed to allocate input buffer in mempool");
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+	trn_data += plaintext_len;
+
+	buf = ut_params->ibuf;
+
+	/*
+	 * Loop until no more fragments
+	 */
+
+	while (trn_data < tdata->plaintext.len) {
+		++segs;
+		to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+				(tdata->plaintext.len - trn_data) : fragsz;
+
+		buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+		buf = buf->next;
+
+		memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+				rte_pktmbuf_tailroom(buf));
+
+		plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+				to_trn);
+
+		memcpy(plaintext, tdata->plaintext.data + trn_data,
+				to_trn);
+		trn_data += to_trn;
+		if (trn_data  == tdata->plaintext.len)
+			digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+					tdata->gmac_tag.len);
+	}
+	ut_params->ibuf->nb_segs = segs;
+
+	/*
+	 * Place digest at the end of the last buffer
+	 */
+	uint64_t digest_phys = rte_pktmbuf_iova(buf) + to_trn;
+
+	if (!digest_mem) {
+		digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				+ tdata->gmac_tag.len);
+		digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
+				tdata->plaintext.len);
+	}
+
+	retval = create_gmac_operation_sgl(RTE_CRYPTO_AUTH_OP_GENERATE,
+			tdata, digest_mem, digest_phys);
+
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+		return -ENOTSUP;
+	else
+		TEST_ASSERT_NOT_NULL(
+			process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	auth_tag = digest_mem;
+	debug_hexdump(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->gmac_tag.data,
+			tdata->gmac_tag.len,
+			"GMAC Generated auth tag not as expected");
+
+	return 0;
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_40B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 40);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_80B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 80);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_4096B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 4096);
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_4097B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 4097);
+}
+
 struct test_crypto_vector {
 	enum rte_crypto_cipher_algorithm crypto_algo;
 	unsigned int cipher_offset;
@@ -11245,8 +11454,6 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op,
 	return 0;
 }
 
-#define SGL_MAX_NO	16
-
 static int
 test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
 		const int oop, uint32_t fragsz, uint32_t fragsz_oop)
@@ -12164,6 +12371,15 @@ static struct unit_test_suite cryptodev_testsuite  = {
 			test_AES_GMAC_authentication_test_case_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GMAC_authentication_verify_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_40B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_80B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_4096B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_4097B),
+
 		/** Chacha20-Poly1305 */
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..57003733b 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -17,9 +17,9 @@
 #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
 #define DEFAULT_BURST_SIZE              (64)
 #define DEFAULT_NUM_XFORMS              (2)
-#define NUM_MBUFS                       (8191)
+#define NUM_MBUFS                       (4095)
 #define MBUF_CACHE_SIZE                 (256)
-#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
 #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
 		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
 
-- 
2.25.1


* [dpdk-dev] [PATCH v3 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-09-22 10:01 ` [dpdk-dev] [PATCH v2 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2020-09-22 10:01   ` [dpdk-dev] [PATCH v2 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
@ 2020-09-22 10:34   ` Pablo de Lara
  2020-09-22 10:34     ` [dpdk-dev] [PATCH v3 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
  2020-10-09 11:40     ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  1 sibling, 2 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-09-22 10:34 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-gather list support for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

Changes:
- v2/v3: no change

---

 doc/guides/cryptodevs/aesni_gcm.rst      |  2 -
 doc/guides/rel_notes/release_20_11.rst   |  4 ++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 27 ++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88 +++++++++++++++++++++++-
 4 files changed, 118 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 74e0de63a..0e146486e 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -31,8 +31,6 @@ Limitations
 -----------
 
 * In out-of-place operations, chained destination mbufs are not supported.
-* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
-  not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.
 
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index df227a177..98fd4ae0f 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated the AESNI GCM crypto PMD.**
+
+  * Added SGL support for AES-GMAC.
+
 
 Removed Items
 -------------
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 74acac09c..8a0d074b6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -53,6 +53,23 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
+		const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *in,
+		const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		uint8_t *auth_tag,
+		const uint64_t auth_tag_len);
+#endif
+
 /** GCM library function pointer table */
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;        /**< GCM encode function pointer */
@@ -63,6 +80,11 @@ struct aesni_gcm_ops {
 	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize_enc;
 	aesni_gcm_finalize_t finalize_dec;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 /** GCM per-session operation handlers */
@@ -72,6 +94,11 @@ struct aesni_gcm_session_ops {
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update;
 	aesni_gcm_finalize_t finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..aea599ebf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -350,6 +350,76 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp->temp_digest;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	}
+#else
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
 		qp->ops[session->key].init(&session->gdata_key,
 				&qp->gdata_ctx,
@@ -381,6 +451,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				tag,
 				session->gen_digest_length);
 	}
+#endif
 
 	return 0;
 }
@@ -769,7 +840,7 @@ aesni_gcm_create(const char *name,
 		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
 		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
@@ -791,6 +862,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
 	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
 	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
 
 	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
 	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
@@ -800,6 +876,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
 	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
 	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
 
 	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
 	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
@@ -809,6 +890,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
 	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
 	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
-- 
2.25.1


* [dpdk-dev] [PATCH v3 2/2] test/crypto: add GMAC SGL tests
  2020-09-22 10:34   ` [dpdk-dev] [PATCH v3 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
@ 2020-09-22 10:34     ` Pablo de Lara
  2020-10-09 11:40     ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  1 sibling, 0 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-09-22 10:34 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-Gather List tests for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

Changes:
v3:
  - Fixed checkpatch issues

v2:
  - Modified segment size to reduce the maximum number of segments needed
---

 app/test/test_cryptodev.c | 216 ++++++++++++++++++++++++++++++++++++++
 app/test/test_cryptodev.h |   4 +-
 2 files changed, 218 insertions(+), 2 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 70bf6fe2c..5d009e096 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -9995,6 +9995,53 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	return 0;
 }
 
+static int
+create_gmac_operation_sgl(enum rte_crypto_auth_operation op,
+		const struct gmac_test_data *tdata,
+		void *digest_mem, uint64_t digest_phys)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_crypto_sym_op *sym_op;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = digest_mem;
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = digest_phys;
+
+	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
+				tdata->gmac_tag.len);
+		debug_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				tdata->gmac_tag.len);
+	}
+
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+			uint8_t *, IV_OFFSET);
+
+	rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+
+	debug_hexdump(stdout, "iv:", iv_ptr, tdata->iv.len);
+
+	sym_op->cipher.data.length = 0;
+	sym_op->cipher.data.offset = 0;
+
+	sym_op->auth.data.offset = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
+
+	return 0;
+}
+
 static int create_gmac_session(uint8_t dev_id,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
@@ -10251,6 +10298,166 @@ test_AES_GMAC_authentication_verify_test_case_4(void)
 	return test_AES_GMAC_authentication_verify(&gmac_test_case_4);
 }
 
+static int
+test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata,
+				uint32_t fragsz)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feature_flags;
+	unsigned int trn_data = 0;
+	void *digest_mem = NULL;
+	uint32_t segs = 1;
+	unsigned int to_trn = 0;
+	struct rte_mbuf *buf = NULL;
+	uint8_t *auth_tag, *plaintext;
+	int retval;
+
+	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
+			      "No GMAC length in the source data");
+
+	/* Verify the capabilities */
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = RTE_CRYPTO_AUTH_AES_GMAC;
+	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+			&cap_idx) == NULL)
+		return -ENOTSUP;
+
+	/* Check for any input SGL support */
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feature_flags = dev_info.feature_flags;
+
+	if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
+		return -ENOTSUP;
+
+	if (fragsz > tdata->plaintext.len)
+		fragsz = tdata->plaintext.len;
+
+	uint16_t plaintext_len = fragsz;
+
+	retval = create_gmac_session(ts_params->valid_devs[0],
+			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
+
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+			"Failed to allocate input buffer in mempool");
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+	trn_data += plaintext_len;
+
+	buf = ut_params->ibuf;
+
+	/*
+	 * Loop until no more fragments
+	 */
+
+	while (trn_data < tdata->plaintext.len) {
+		++segs;
+		to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+				(tdata->plaintext.len - trn_data) : fragsz;
+
+		buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+		buf = buf->next;
+
+		memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+				rte_pktmbuf_tailroom(buf));
+
+		plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+				to_trn);
+
+		memcpy(plaintext, tdata->plaintext.data + trn_data,
+				to_trn);
+		trn_data += to_trn;
+		if (trn_data  == tdata->plaintext.len)
+			digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+					tdata->gmac_tag.len);
+	}
+	ut_params->ibuf->nb_segs = segs;
+
+	/*
+	 * Place digest at the end of the last buffer
+	 */
+	uint64_t digest_phys = rte_pktmbuf_iova(buf) + to_trn;
+
+	if (!digest_mem) {
+		digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				+ tdata->gmac_tag.len);
+		digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
+				tdata->plaintext.len);
+	}
+
+	retval = create_gmac_operation_sgl(RTE_CRYPTO_AUTH_OP_GENERATE,
+			tdata, digest_mem, digest_phys);
+
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+		return -ENOTSUP;
+
+	TEST_ASSERT_NOT_NULL(
+		process_crypto_request(ts_params->valid_devs[0],
+		ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	auth_tag = digest_mem;
+	debug_hexdump(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->gmac_tag.data,
+			tdata->gmac_tag.len,
+			"GMAC Generated auth tag not as expected");
+
+	return 0;
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_40B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 40);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_80B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 80);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_4096B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 4096);
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_4097B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 4097);
+}
+
 struct test_crypto_vector {
 	enum rte_crypto_cipher_algorithm crypto_algo;
 	unsigned int cipher_offset;
@@ -12164,6 +12371,15 @@ static struct unit_test_suite cryptodev_testsuite  = {
 			test_AES_GMAC_authentication_test_case_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GMAC_authentication_verify_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_40B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_80B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_4096B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_4097B),
+
 		/** Chacha20-Poly1305 */
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..57003733b 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -17,9 +17,9 @@
 #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
 #define DEFAULT_BURST_SIZE              (64)
 #define DEFAULT_NUM_XFORMS              (2)
-#define NUM_MBUFS                       (8191)
+#define NUM_MBUFS                       (4095)
 #define MBUF_CACHE_SIZE                 (256)
-#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
 #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
 		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
 
-- 
2.25.1


* [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-09-22 10:34   ` [dpdk-dev] [PATCH v3 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2020-09-22 10:34     ` [dpdk-dev] [PATCH v3 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
@ 2020-10-09 11:40     ` Pablo de Lara
  2020-10-09 11:40       ` [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
                         ` (2 more replies)
  1 sibling, 3 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-10-09 11:40 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-gather list support for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

This patchset depends on series http://patches.dpdk.org/project/dpdk/list/?series=12819

Changes:

- v4: Rebased on top of crypto subtree

- v2/v3: no change

---

 doc/guides/cryptodevs/aesni_gcm.rst      |  2 -
 doc/guides/rel_notes/release_20_11.rst   |  4 ++
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 27 ++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88 +++++++++++++++++++++++-
 4 files changed, 118 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 74e0de63a..0e146486e 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -31,8 +31,6 @@ Limitations
 -----------
 
 * In out-of-place operations, chained destination mbufs are not supported.
-* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
-  not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.
 
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index 015cea576..55f60ecfd 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -81,6 +81,10 @@ New Features
   * Added support for SNOW3G-UEA2/UIA2 algorithms.
   * Added support for KASUMI-F8/F9 algorithms.
 
+* **Updated the aesni_gcm crypto PMD.**
+
+  * Added SGL support for AES-GMAC.
+
 * **Added Intel ACC100 bbdev PMD.**
 
   Added a new ``acc100`` bbdev driver for the Intel\ |reg| ACC100 accelerator
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 74acac09c..8a0d074b6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -53,6 +53,23 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
+		const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *in,
+		const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		uint8_t *auth_tag,
+		const uint64_t auth_tag_len);
+#endif
+
 /** GCM library function pointer table */
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;        /**< GCM encode function pointer */
@@ -63,6 +80,11 @@ struct aesni_gcm_ops {
 	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize_enc;
 	aesni_gcm_finalize_t finalize_dec;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 /** GCM per-session operation handlers */
@@ -72,6 +94,11 @@ struct aesni_gcm_session_ops {
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update;
 	aesni_gcm_finalize_t finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..aea599ebf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -350,6 +350,76 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp->temp_digest;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	}
+#else
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
 		qp->ops[session->key].init(&session->gdata_key,
 				&qp->gdata_ctx,
@@ -381,6 +451,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				tag,
 				session->gen_digest_length);
 	}
+#endif
 
 	return 0;
 }
@@ -769,7 +840,7 @@ aesni_gcm_create(const char *name,
 		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
 		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
@@ -791,6 +862,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
 	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
 	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
 
 	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
 	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
@@ -800,6 +876,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
 	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
 	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
 
 	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
 	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
@@ -809,6 +890,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
 	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
 	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
  2020-10-09 11:40     ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
@ 2020-10-09 11:40       ` Pablo de Lara
  2020-10-09 14:09         ` Akhil Goyal
  2020-10-12  9:20       ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Zhang, Roy Fan
  2020-10-12 11:19       ` [dpdk-dev] [PATCH v5 " Pablo de Lara
  2 siblings, 1 reply; 19+ messages in thread
From: Pablo de Lara @ 2020-10-09 11:40 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-Gather List tests for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

This patchset depends on series http://patches.dpdk.org/project/dpdk/list/?series=12819

Changes:

- v4: Rebased on top of crypto subtree

- v3: Fixed checkpatch issues

- v2: Modified segment size to reduce the maximum number of segments needed
---

 app/test/test_cryptodev.c | 216 ++++++++++++++++++++++++++++++++++++++
 app/test/test_cryptodev.h |   4 +-
 2 files changed, 218 insertions(+), 2 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 61e1be072..a339895a9 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -9993,6 +9993,53 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	return 0;
 }
 
+static int
+create_gmac_operation_sgl(enum rte_crypto_auth_operation op,
+		const struct gmac_test_data *tdata,
+		void *digest_mem, uint64_t digest_phys)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_crypto_sym_op *sym_op;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = digest_mem;
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = digest_phys;
+
+	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
+				tdata->gmac_tag.len);
+		debug_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				tdata->gmac_tag.len);
+	}
+
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+			uint8_t *, IV_OFFSET);
+
+	rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+
+	debug_hexdump(stdout, "iv:", iv_ptr, tdata->iv.len);
+
+	sym_op->cipher.data.length = 0;
+	sym_op->cipher.data.offset = 0;
+
+	sym_op->auth.data.offset = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
+
+	return 0;
+}
+
 static int create_gmac_session(uint8_t dev_id,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
@@ -10249,6 +10296,166 @@ test_AES_GMAC_authentication_verify_test_case_4(void)
 	return test_AES_GMAC_authentication_verify(&gmac_test_case_4);
 }
 
+static int
+test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata,
+				uint32_t fragsz)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feature_flags;
+	unsigned int trn_data = 0;
+	void *digest_mem = NULL;
+	uint32_t segs = 1;
+	unsigned int to_trn = 0;
+	struct rte_mbuf *buf = NULL;
+	uint8_t *auth_tag, *plaintext;
+	int retval;
+
+	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
+			      "No GMAC length in the source data");
+
+	/* Verify the capabilities */
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = RTE_CRYPTO_AUTH_AES_GMAC;
+	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+			&cap_idx) == NULL)
+		return -ENOTSUP;
+
+	/* Check for any input SGL support */
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feature_flags = dev_info.feature_flags;
+
+	if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
+		return -ENOTSUP;
+
+	if (fragsz > tdata->plaintext.len)
+		fragsz = tdata->plaintext.len;
+
+	uint16_t plaintext_len = fragsz;
+
+	retval = create_gmac_session(ts_params->valid_devs[0],
+			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
+
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+			"Failed to allocate input buffer in mempool");
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+	trn_data += plaintext_len;
+
+	buf = ut_params->ibuf;
+
+	/*
+	 * Loop until no more fragments
+	 */
+
+	while (trn_data < tdata->plaintext.len) {
+		++segs;
+		to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+				(tdata->plaintext.len - trn_data) : fragsz;
+
+		buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+		buf = buf->next;
+
+		memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+				rte_pktmbuf_tailroom(buf));
+
+		plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+				to_trn);
+
+		memcpy(plaintext, tdata->plaintext.data + trn_data,
+				to_trn);
+		trn_data += to_trn;
+		if (trn_data  == tdata->plaintext.len)
+			digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+					tdata->gmac_tag.len);
+	}
+	ut_params->ibuf->nb_segs = segs;
+
+	/*
+	 * Place digest at the end of the last buffer
+	 */
+	uint64_t digest_phys = rte_pktmbuf_iova(buf) + to_trn;
+
+	if (!digest_mem) {
+		digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				+ tdata->gmac_tag.len);
+		digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
+				tdata->plaintext.len);
+	}
+
+	retval = create_gmac_operation_sgl(RTE_CRYPTO_AUTH_OP_GENERATE,
+			tdata, digest_mem, digest_phys);
+
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+		return -ENOTSUP;
+
+	TEST_ASSERT_NOT_NULL(
+		process_crypto_request(ts_params->valid_devs[0],
+		ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	auth_tag = digest_mem;
+	debug_hexdump(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->gmac_tag.data,
+			tdata->gmac_tag.len,
+			"GMAC Generated auth tag not as expected");
+
+	return 0;
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_40B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 40);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_80B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 80);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_4096B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 4096);
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_4097B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_4, 4097);
+}
+
 struct test_crypto_vector {
 	enum rte_crypto_cipher_algorithm crypto_algo;
 	unsigned int cipher_offset;
@@ -12162,6 +12369,15 @@ static struct unit_test_suite cryptodev_testsuite  = {
 			test_AES_GMAC_authentication_test_case_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GMAC_authentication_verify_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_40B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_80B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_4096B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_4097B),
+
 		/** Chacha20-Poly1305 */
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..57003733b 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -17,9 +17,9 @@
 #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
 #define DEFAULT_BURST_SIZE              (64)
 #define DEFAULT_NUM_XFORMS              (2)
-#define NUM_MBUFS                       (8191)
+#define NUM_MBUFS                       (4095)
 #define MBUF_CACHE_SIZE                 (256)
-#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
 #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
 		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
  2020-10-09 11:40       ` [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
@ 2020-10-09 14:09         ` Akhil Goyal
  2020-10-09 18:31           ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 19+ messages in thread
From: Akhil Goyal @ 2020-10-09 14:09 UTC (permalink / raw)
  To: Pablo de Lara, declan.doherty; +Cc: dev

Hi Pablo,

> diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
> index 41542e055..57003733b 100644
> --- a/app/test/test_cryptodev.h
> +++ b/app/test/test_cryptodev.h
> @@ -17,9 +17,9 @@
>  #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
>  #define DEFAULT_BURST_SIZE              (64)
>  #define DEFAULT_NUM_XFORMS              (2)
> -#define NUM_MBUFS                       (8191)
> +#define NUM_MBUFS                       (4095)
>  #define MBUF_CACHE_SIZE                 (256)
> -#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
> +#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
>  #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
>  		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
> 
Why are these changes done? Any specific requirement for GMAC? I believe 2048 is also good enough.

Regards,
Akhil

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
  2020-10-09 14:09         ` Akhil Goyal
@ 2020-10-09 18:31           ` De Lara Guarch, Pablo
  2020-10-09 18:36             ` Akhil Goyal
  0 siblings, 1 reply; 19+ messages in thread
From: De Lara Guarch, Pablo @ 2020-10-09 18:31 UTC (permalink / raw)
  To: Akhil Goyal, Doherty, Declan; +Cc: dev

Hi Akhil,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Akhil Goyal
> Sent: Friday, October 9, 2020 3:10 PM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
> 
> Hi Pablo,
> 
> > diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
> > index 41542e055..57003733b 100644
> > --- a/app/test/test_cryptodev.h
> > +++ b/app/test/test_cryptodev.h
> > @@ -17,9 +17,9 @@
> >  #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
> >  #define DEFAULT_BURST_SIZE              (64)
> >  #define DEFAULT_NUM_XFORMS              (2)
> > -#define NUM_MBUFS                       (8191)
> > +#define NUM_MBUFS                       (4095)
> >  #define MBUF_CACHE_SIZE                 (256)
> > -#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
> > +#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
> >  #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
> >  		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
> >
> Why are these changes done? Any specific requirement for GMAC? I believe
> 2048 is also good enough.

I needed to reduce the maximum number of segments for the SGL tests, since Intel QAT PMD does not support more than 16, and this way both PMDs can be tested with the new tests.
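
As a rough sanity check (assuming the large GMAC test vector is about 64KB and each segment carries close to a full mbuf data payload):

    64KB vector / 2KB segments = 32 segments  -> more than the 16 segments QAT supports
    64KB vector / 4KB segments = 16 segments  -> within the limit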

Thanks,
Pablo
> 
> Regards,
> Akhil

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
  2020-10-09 18:31           ` De Lara Guarch, Pablo
@ 2020-10-09 18:36             ` Akhil Goyal
  2020-10-12  9:38               ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 19+ messages in thread
From: Akhil Goyal @ 2020-10-09 18:36 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, Doherty, Declan; +Cc: dev

Hi Pablo,

> Hi Akhil,
> 
> > > diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
> > > index 41542e055..57003733b 100644
> > > --- a/app/test/test_cryptodev.h
> > > +++ b/app/test/test_cryptodev.h
> > > @@ -17,9 +17,9 @@
> > >  #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
> > >  #define DEFAULT_BURST_SIZE              (64)
> > >  #define DEFAULT_NUM_XFORMS              (2)
> > > -#define NUM_MBUFS                       (8191)
> > > +#define NUM_MBUFS                       (4095)
> > >  #define MBUF_CACHE_SIZE                 (256)
> > > -#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
> > > +#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
> > >  #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
> > >  		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
> > >
> > Why are these changes done? Any specific requirement for GMAC? I believe
> > 2048 is also good enough.
> 
> I needed to reduce the maximum number of segments for the SGL tests, since
> Intel QAT PMD does not support more than 16, and this way both PMDs can be
> tested with the new tests.
> 
What is your packet size? I believe 16 segments of 2K means 32KB of data, which
should be more than enough for testing, right? Can we reduce the length of the vector?

But changing MBUF_DATAPAYLOAD_SIZE to 4K would change the behavior of the
other cases, wouldn't it?

Regards,
Akhil



^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-10-09 11:40     ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2020-10-09 11:40       ` [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
@ 2020-10-12  9:20       ` Zhang, Roy Fan
  2020-10-12 11:19       ` [dpdk-dev] [PATCH v5 " Pablo de Lara
  2 siblings, 0 replies; 19+ messages in thread
From: Zhang, Roy Fan @ 2020-10-12  9:20 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, Doherty, Declan; +Cc: dev, De Lara Guarch, Pablo

Hi Pablo,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Pablo de Lara
> Sent: Friday, October 9, 2020 12:40 PM
> To: Doherty, Declan <declan.doherty@intel.com>
> Cc: dev@dpdk.org; De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-
> GMAC
> 
> Add Scatter-gather list support for AES-GMAC.
> 
> Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> ---

The patch has been verified with FIPS_validation GCM test cases. 
Tested-by: Fan Zhang <roy.fan.zhang@intel.com> 
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
  2020-10-09 18:36             ` Akhil Goyal
@ 2020-10-12  9:38               ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 19+ messages in thread
From: De Lara Guarch, Pablo @ 2020-10-12  9:38 UTC (permalink / raw)
  To: Akhil Goyal, Doherty, Declan; +Cc: dev

Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Friday, October 9, 2020 7:36 PM
> To: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Doherty, Declan
> <declan.doherty@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests
> 
> Hi Pablo,
> 
> > Hi Akhil,
> >
> > > > diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
> > > > index 41542e055..57003733b 100644
> > > > --- a/app/test/test_cryptodev.h
> > > > +++ b/app/test/test_cryptodev.h
> > > > @@ -17,9 +17,9 @@
> > > >  #define DEFAULT_NUM_QPS_PER_QAT_DEVICE  (2)
> > > >  #define DEFAULT_BURST_SIZE              (64)
> > > >  #define DEFAULT_NUM_XFORMS              (2)
> > > > -#define NUM_MBUFS                       (8191)
> > > > +#define NUM_MBUFS                       (4095)
> > > >  #define MBUF_CACHE_SIZE                 (256)
> > > > -#define MBUF_DATAPAYLOAD_SIZE		(2048 + DIGEST_BYTE_LENGTH_SHA512)
> > > > +#define MBUF_DATAPAYLOAD_SIZE		(4096 + DIGEST_BYTE_LENGTH_SHA512)
> > > >  #define MBUF_SIZE			(sizeof(struct rte_mbuf) + \
> > > >  		RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
> > > >
> > > Why are these changes done? Any specific requirement for GMAC? I believe
> > > 2048 is also good enough.
> >
> > I needed to reduce the maximum number of segments for the SGL tests,
> > since Intel QAT PMD does not support more than 16, and this way both
> > PMDs can be tested with the new tests.
> >
> What is your packet size? I believe 16 segments of 2K means 32KB of data, which
> should be more than enough for testing, right? Can we reduce the length of
> the vector?
> 
> But changing MBUF_DATAPAYLOAD_SIZE to 4K would change the
> behavior of the other cases, wouldn't it?

Right. I will send another version reducing the length of the vector (I'll add another test case with 32KB of data,
instead of 64KB, so we don't need to change the mbuf size).
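
A quick back-of-the-envelope check of that plan (assuming ~2KB of payload per segment):

    32KB vector / 2KB segments = 16 segments

which keeps the test within the 16-segment limit without touching MBUF_DATAPAYLOAD_SIZE.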

Thanks,
Pablo

> 
> Regards,
> Akhil
> 


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [dpdk-dev] [PATCH v5 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-10-09 11:40     ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2020-10-09 11:40       ` [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
  2020-10-12  9:20       ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Zhang, Roy Fan
@ 2020-10-12 11:19       ` Pablo de Lara
  2020-10-12 11:19         ` [dpdk-dev] [PATCH v5 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
  2020-10-12 11:29         ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2 siblings, 2 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-10-12 11:19 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara, Fan Zhang

Add Scatter-gather list support for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Tested-by: Fan Zhang <roy.fan.zhang@intel.com> 
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---

Changes:

- v4/v5: Rebased on top of crypto subtree

- v2/v3: no change

---
 doc/guides/cryptodevs/aesni_gcm.rst      |  2 -
 doc/guides/rel_notes/release_20_11.rst   |  1 +
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 27 ++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88 +++++++++++++++++++++++-
 4 files changed, 115 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 74e0de63a..0e146486e 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -31,8 +31,6 @@ Limitations
 -----------
 
 * In out-of-place operations, chained destination mbufs are not supported.
-* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
-  not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.
 
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index c34ab5493..497a8ed9c 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -80,6 +80,7 @@ New Features
   * Added support for ZUC-EEA3/EIA3 algorithms.
   * Added support for SNOW3G-UEA2/UIA2 algorithms.
   * Added support for KASUMI-F8/F9 algorithms.
+  * Added SGL support for AES-GMAC.
 
 * **Updated the OCTEON TX2 crypto PMD.**
 
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 74acac09c..8a0d074b6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -53,6 +53,23 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
+		const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *in,
+		const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		uint8_t *auth_tag,
+		const uint64_t auth_tag_len);
+#endif
+
 /** GCM library function pointer table */
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;        /**< GCM encode function pointer */
@@ -63,6 +80,11 @@ struct aesni_gcm_ops {
 	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize_enc;
 	aesni_gcm_finalize_t finalize_dec;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 /** GCM per-session operation handlers */
@@ -72,6 +94,11 @@ struct aesni_gcm_session_ops {
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update;
 	aesni_gcm_finalize_t finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..aea599ebf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -350,6 +350,76 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp->temp_digest;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	}
+#else
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
 		qp->ops[session->key].init(&session->gdata_key,
 				&qp->gdata_ctx,
@@ -381,6 +451,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				tag,
 				session->gen_digest_length);
 	}
+#endif
 
 	return 0;
 }
@@ -769,7 +840,7 @@ aesni_gcm_create(const char *name,
 		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
 		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
@@ -791,6 +862,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
 	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
 	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
 
 	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
 	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
@@ -800,6 +876,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
 	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
 	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
 
 	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
 	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
@@ -809,6 +890,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
 	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
 	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [dpdk-dev] [PATCH v5 2/2] test/crypto: add GMAC SGL tests
  2020-10-12 11:19       ` [dpdk-dev] [PATCH v5 " Pablo de Lara
@ 2020-10-12 11:19         ` Pablo de Lara
  2020-10-12 11:29         ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  1 sibling, 0 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-10-12 11:19 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-Gather List tests for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

Changes:

- v5: Modified GMAC test cases so the mbuf size remains the same (2KB),
      reducing the vector length to make it compatible with the QAT PMD.

- v4: Rebased on top of crypto subtree

- v3: Fixed checkpatch issues

- v2: Modified segment size to reduce the maximum number of segments needed

---

 app/test/test_cryptodev.c                   | 216 ++++++++++++++++++++
 app/test/test_cryptodev_aead_test_vectors.h |  30 +++
 2 files changed, 246 insertions(+)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index ac2a36bc2..6da199dfb 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -9993,6 +9993,53 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	return 0;
 }
 
+static int
+create_gmac_operation_sgl(enum rte_crypto_auth_operation op,
+		const struct gmac_test_data *tdata,
+		void *digest_mem, uint64_t digest_phys)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_crypto_sym_op *sym_op;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = digest_mem;
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = digest_phys;
+
+	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
+				tdata->gmac_tag.len);
+		debug_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				tdata->gmac_tag.len);
+	}
+
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+			uint8_t *, IV_OFFSET);
+
+	rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+
+	debug_hexdump(stdout, "iv:", iv_ptr, tdata->iv.len);
+
+	sym_op->cipher.data.length = 0;
+	sym_op->cipher.data.offset = 0;
+
+	sym_op->auth.data.offset = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
+
+	return 0;
+}
+
 static int create_gmac_session(uint8_t dev_id,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
@@ -10249,6 +10296,166 @@ test_AES_GMAC_authentication_verify_test_case_4(void)
 	return test_AES_GMAC_authentication_verify(&gmac_test_case_4);
 }
 
+static int
+test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata,
+				uint32_t fragsz)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feature_flags;
+	unsigned int trn_data = 0;
+	void *digest_mem = NULL;
+	uint32_t segs = 1;
+	unsigned int to_trn = 0;
+	struct rte_mbuf *buf = NULL;
+	uint8_t *auth_tag, *plaintext;
+	int retval;
+
+	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
+			      "No GMAC length in the source data");
+
+	/* Verify the capabilities */
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = RTE_CRYPTO_AUTH_AES_GMAC;
+	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+			&cap_idx) == NULL)
+		return -ENOTSUP;
+
+	/* Check for any input SGL support */
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feature_flags = dev_info.feature_flags;
+
+	if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
+		return -ENOTSUP;
+
+	if (fragsz > tdata->plaintext.len)
+		fragsz = tdata->plaintext.len;
+
+	uint16_t plaintext_len = fragsz;
+
+	retval = create_gmac_session(ts_params->valid_devs[0],
+			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
+
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+			"Failed to allocate input buffer in mempool");
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+	trn_data += plaintext_len;
+
+	buf = ut_params->ibuf;
+
+	/*
+	 * Loop until no more fragments
+	 */
+
+	while (trn_data < tdata->plaintext.len) {
+		++segs;
+		to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+				(tdata->plaintext.len - trn_data) : fragsz;
+
+		buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+		buf = buf->next;
+
+		memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+				rte_pktmbuf_tailroom(buf));
+
+		plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+				to_trn);
+
+		memcpy(plaintext, tdata->plaintext.data + trn_data,
+				to_trn);
+		trn_data += to_trn;
+		if (trn_data  == tdata->plaintext.len)
+			digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+					tdata->gmac_tag.len);
+	}
+	ut_params->ibuf->nb_segs = segs;
+
+	/*
+	 * Place digest at the end of the last buffer
+	 */
+	uint64_t digest_phys = rte_pktmbuf_iova(buf) + to_trn;
+
+	if (!digest_mem) {
+		digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				+ tdata->gmac_tag.len);
+		digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
+				tdata->plaintext.len);
+	}
+
+	retval = create_gmac_operation_sgl(RTE_CRYPTO_AUTH_OP_GENERATE,
+			tdata, digest_mem, digest_phys);
+
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+		return -ENOTSUP;
+
+	TEST_ASSERT_NOT_NULL(
+		process_crypto_request(ts_params->valid_devs[0],
+		ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	auth_tag = digest_mem;
+	debug_hexdump(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->gmac_tag.data,
+			tdata->gmac_tag.len,
+			"GMAC Generated auth tag not as expected");
+
+	return 0;
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_40B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 40);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_80B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 80);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_2048B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_5, 2048);
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_2047B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_5, 2047);
+}
+
 struct test_crypto_vector {
 	enum rte_crypto_cipher_algorithm crypto_algo;
 	unsigned int cipher_offset;
@@ -12162,6 +12369,15 @@ static struct unit_test_suite cryptodev_testsuite  = {
 			test_AES_GMAC_authentication_test_case_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GMAC_authentication_verify_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_40B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_80B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_2048B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_2047B),
+
 		/** Chacha20-Poly1305 */
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 140f25362..736c44358 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -2613,6 +2613,36 @@ static const struct gmac_test_data gmac_test_case_4 = {
 	}
 };
 
+/* Test vector used to test GMAC SGL with 16 segments
+   plaintext length = ~32KB / segment size = ~2 KB */
+static const struct gmac_test_data gmac_test_case_5 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = gmac_plaintext,
+		.len = GMAC_LARGE_PLAINTEXT_LENGTH/2
+	},
+	.gmac_tag = {
+		.data = {
+                        0xb1, 0xba, 0xe7, 0x28, 0xd3, 0x95, 0x80, 0xd7,
+                        0x2e, 0xf5, 0xd0, 0x20, 0x80, 0x95, 0x16, 0x97
+		},
+		.len = 16
+	}
+};
+
 static const struct aead_test_data gcm_test_case_SGL_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
 	.key = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-10-12 11:19       ` [dpdk-dev] [PATCH v5 " Pablo de Lara
  2020-10-12 11:19         ` [dpdk-dev] [PATCH v5 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
@ 2020-10-12 11:29         ` Pablo de Lara
  2020-10-12 11:30           ` [dpdk-dev] [PATCH v6 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
  2020-10-12 14:34           ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Akhil Goyal
  1 sibling, 2 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-10-12 11:29 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara, Fan Zhang

Add Scatter-gather list support for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Tested-by: Fan Zhang <roy.fan.zhang@intel.com> 
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---

Changes:

- v4/v5/v6: Rebased on top of crypto subtree

- v2/v3: no change

---
 doc/guides/cryptodevs/aesni_gcm.rst      |  2 -
 doc/guides/rel_notes/release_20_11.rst   |  1 +
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 27 ++++++++
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88 +++++++++++++++++++++++-
 4 files changed, 115 insertions(+), 3 deletions(-)

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 74e0de63a..0e146486e 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -31,8 +31,6 @@ Limitations
 -----------
 
 * In out-of-place operations, chained destination mbufs are not supported.
-* Chained mbufs are only supported by RTE_CRYPTO_AEAD_AES_GCM algorithm,
-  not RTE_CRYPTO_AUTH_AES_GMAC.
 * Cipher only is not supported.
 
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index c34ab5493..497a8ed9c 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -80,6 +80,7 @@ New Features
   * Added support for ZUC-EEA3/EIA3 algorithms.
   * Added support for SNOW3G-UEA2/UIA2 algorithms.
   * Added support for KASUMI-F8/F9 algorithms.
+  * Added SGL support for AES-GMAC.
 
 * **Updated the OCTEON TX2 crypto PMD.**
 
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 74acac09c..8a0d074b6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -53,6 +53,23 @@ typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
+		const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *in,
+		const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		uint8_t *auth_tag,
+		const uint64_t auth_tag_len);
+#endif
+
 /** GCM library function pointer table */
 struct aesni_gcm_ops {
 	aesni_gcm_t enc;        /**< GCM encode function pointer */
@@ -63,6 +80,11 @@ struct aesni_gcm_ops {
 	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize_enc;
 	aesni_gcm_finalize_t finalize_dec;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 /** GCM per-session operation handlers */
@@ -72,6 +94,11 @@ struct aesni_gcm_session_ops {
 	aesni_gcm_init_t init;
 	aesni_gcm_update_t update;
 	aesni_gcm_finalize_t finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+#endif
 };
 
 #endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1d2a0ce00..aea599ebf 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -350,6 +350,76 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				&qp->gdata_ctx,
 				tag,
 				session->gen_digest_length);
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		qp->ops[session->key].gmac_init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				session->iv.length);
+
+		qp->ops[session->key].gmac_update(&session->gdata_key,
+				&qp->gdata_ctx, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			qp->ops[session->key].gmac_update(&session->gdata_key,
+					&qp->gdata_ctx, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp->temp_digest;
+
+		qp->ops[session->key].gmac_finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				tag,
+				session->gen_digest_length);
+	}
+#else
 	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
 		qp->ops[session->key].init(&session->gdata_key,
 				&qp->gdata_ctx,
@@ -381,6 +451,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 				tag,
 				session->gen_digest_length);
 	}
+#endif
 
 	return 0;
 }
@@ -769,7 +840,7 @@ aesni_gcm_create(const char *name,
 		init_mb_mgr_avx2(mb_mgr);
 		break;
 	case RTE_AESNI_GCM_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
 		init_mb_mgr_avx512(mb_mgr);
 		break;
 	default:
@@ -791,6 +862,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
 	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
 	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+#endif
 
 	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
 	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
@@ -800,6 +876,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
 	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
 	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+#endif
 
 	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
 	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
@@ -809,6 +890,11 @@ aesni_gcm_create(const char *name,
 	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
 	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
 	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
+	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+#endif
 
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [dpdk-dev] [PATCH v6 2/2] test/crypto: add GMAC SGL tests
  2020-10-12 11:29         ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
@ 2020-10-12 11:30           ` Pablo de Lara
  2020-10-12 14:34           ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Akhil Goyal
  1 sibling, 0 replies; 19+ messages in thread
From: Pablo de Lara @ 2020-10-12 11:30 UTC (permalink / raw)
  To: declan.doherty; +Cc: dev, Pablo de Lara

Add Scatter-Gather List tests for AES-GMAC.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---

Changes:

- v6: Fixed coding style issues

- v5: Modified GMAC test cases so the mbuf size remains the same (2KB),
      reducing the vector length to make it compatible with the QAT PMD.

- v4: Rebased on top of crypto subtree

- v3: Fixed checkpatch issues

- v2: Modified segment size to reduce the maximum number of segments needed

---

 app/test/test_cryptodev.c                   | 216 ++++++++++++++++++++
 app/test/test_cryptodev_aead_test_vectors.h |  32 +++
 2 files changed, 248 insertions(+)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index ac2a36bc2..6da199dfb 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -9993,6 +9993,53 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	return 0;
 }
 
+static int
+create_gmac_operation_sgl(enum rte_crypto_auth_operation op,
+		const struct gmac_test_data *tdata,
+		void *digest_mem, uint64_t digest_phys)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_crypto_sym_op *sym_op;
+
+	/* Generate Crypto op data structure */
+	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+	TEST_ASSERT_NOT_NULL(ut_params->op,
+			"Failed to allocate symmetric crypto operation struct");
+
+	sym_op = ut_params->op->sym;
+
+	sym_op->auth.digest.data = digest_mem;
+	TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+			"no room to append digest");
+
+	sym_op->auth.digest.phys_addr = digest_phys;
+
+	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
+				tdata->gmac_tag.len);
+		debug_hexdump(stdout, "digest:",
+				sym_op->auth.digest.data,
+				tdata->gmac_tag.len);
+	}
+
+	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(ut_params->op,
+			uint8_t *, IV_OFFSET);
+
+	rte_memcpy(iv_ptr, tdata->iv.data, tdata->iv.len);
+
+	debug_hexdump(stdout, "iv:", iv_ptr, tdata->iv.len);
+
+	sym_op->cipher.data.length = 0;
+	sym_op->cipher.data.offset = 0;
+
+	sym_op->auth.data.offset = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
+
+	return 0;
+}
+
 static int create_gmac_session(uint8_t dev_id,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
@@ -10249,6 +10296,166 @@ test_AES_GMAC_authentication_verify_test_case_4(void)
 	return test_AES_GMAC_authentication_verify(&gmac_test_case_4);
 }
 
+static int
+test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata,
+				uint32_t fragsz)
+{
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct crypto_unittest_params *ut_params = &unittest_params;
+	struct rte_cryptodev_info dev_info;
+	uint64_t feature_flags;
+	unsigned int trn_data = 0;
+	void *digest_mem = NULL;
+	uint32_t segs = 1;
+	unsigned int to_trn = 0;
+	struct rte_mbuf *buf = NULL;
+	uint8_t *auth_tag, *plaintext;
+	int retval;
+
+	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
+			      "No GMAC length in the source data");
+
+	/* Verify the capabilities */
+	struct rte_cryptodev_sym_capability_idx cap_idx;
+	cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	cap_idx.algo.auth = RTE_CRYPTO_AUTH_AES_GMAC;
+	if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+			&cap_idx) == NULL)
+		return -ENOTSUP;
+
+	/* Check for any input SGL support */
+	rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+	feature_flags = dev_info.feature_flags;
+
+	if ((!(feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) &&
+			(!(feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)))
+		return -ENOTSUP;
+
+	if (fragsz > tdata->plaintext.len)
+		fragsz = tdata->plaintext.len;
+
+	uint16_t plaintext_len = fragsz;
+
+	retval = create_gmac_session(ts_params->valid_devs[0],
+			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
+
+	if (retval < 0)
+		return retval;
+
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+			"Failed to allocate input buffer in mempool");
+
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+			rte_pktmbuf_tailroom(ut_params->ibuf));
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+	trn_data += plaintext_len;
+
+	buf = ut_params->ibuf;
+
+	/*
+	 * Loop until no more fragments
+	 */
+
+	while (trn_data < tdata->plaintext.len) {
+		++segs;
+		to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+				(tdata->plaintext.len - trn_data) : fragsz;
+
+		buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+		buf = buf->next;
+
+		memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+				rte_pktmbuf_tailroom(buf));
+
+		plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+				to_trn);
+
+		memcpy(plaintext, tdata->plaintext.data + trn_data,
+				to_trn);
+		trn_data += to_trn;
+		if (trn_data  == tdata->plaintext.len)
+			digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+					tdata->gmac_tag.len);
+	}
+	ut_params->ibuf->nb_segs = segs;
+
+	/*
+	 * Place digest at the end of the last buffer
+	 */
+	uint64_t digest_phys = rte_pktmbuf_iova(buf) + to_trn;
+
+	if (!digest_mem) {
+		digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				+ tdata->gmac_tag.len);
+		digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
+				tdata->plaintext.len);
+	}
+
+	retval = create_gmac_operation_sgl(RTE_CRYPTO_AUTH_OP_GENERATE,
+			tdata, digest_mem, digest_phys);
+
+	if (retval < 0)
+		return retval;
+
+	rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+	ut_params->op->sym->m_src = ut_params->ibuf;
+
+	if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
+		return -ENOTSUP;
+
+	TEST_ASSERT_NOT_NULL(
+		process_crypto_request(ts_params->valid_devs[0],
+		ut_params->op), "failed to process sym crypto op");
+
+	TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+			"crypto op processing failed");
+
+	auth_tag = digest_mem;
+	debug_hexdump(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(
+			auth_tag,
+			tdata->gmac_tag.data,
+			tdata->gmac_tag.len,
+			"GMAC Generated auth tag not as expected");
+
+	return 0;
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_40B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 40);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_80B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_1, 80);
+}
+
+static int
+test_AES_GMAC_authentication_SGL_2048B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_5, 2048);
+}
+
+/* Segment size not multiple of block size (16B) */
+static int
+test_AES_GMAC_authentication_SGL_2047B(void)
+{
+	return test_AES_GMAC_authentication_SGL(&gmac_test_case_5, 2047);
+}
+
 struct test_crypto_vector {
 	enum rte_crypto_cipher_algorithm crypto_algo;
 	unsigned int cipher_offset;
@@ -12162,6 +12369,15 @@ static struct unit_test_suite cryptodev_testsuite  = {
 			test_AES_GMAC_authentication_test_case_4),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_AES_GMAC_authentication_verify_test_case_4),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_40B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_80B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_2048B),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_AES_GMAC_authentication_SGL_2047B),
+
 		/** Chacha20-Poly1305 */
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 140f25362..73cc143f1 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -2613,6 +2613,38 @@ static const struct gmac_test_data gmac_test_case_4 = {
 	}
 };
 
+/*
+ * Test vector used to test GMAC SGL with 16 segments
+ * plaintext length = ~32KB, segment size = ~2KB
+ */
+static const struct gmac_test_data gmac_test_case_5 = {
+	.key = {
+		.data = {
+			0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+			0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+		},
+		.len = 16
+	},
+	.iv = {
+		.data = {
+			0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+			0xde, 0xca, 0xf8, 0x88
+		},
+		.len = 12
+	},
+	.plaintext = {
+		.data = gmac_plaintext,
+		.len = GMAC_LARGE_PLAINTEXT_LENGTH/2
+	},
+	.gmac_tag = {
+		.data = {
+			0xb1, 0xba, 0xe7, 0x28, 0xd3, 0x95, 0x80, 0xd7,
+			0x2e, 0xf5, 0xd0, 0x20, 0x80, 0x95, 0x16, 0x97
+		},
+		.len = 16
+	}
+};
+
 static const struct aead_test_data gcm_test_case_SGL_1 = {
 	.algo = RTE_CRYPTO_AEAD_AES_GCM,
 	.key = {
-- 
2.25.1
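
To illustrate the fragmentation pattern exercised by the SGL test above, here is a
minimal, self-contained sketch of building a chained mbuf from a flat buffer. It is
an illustration only: the helper name build_sgl_mbuf and its parameters (pool, src,
len, fragsz) are hypothetical and do not appear in the patch.

#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_mbuf.h>

/*
 * Hypothetical helper (not part of the patch): copy "len" bytes of "src"
 * into a chain of mbufs, each carrying at most "fragsz" bytes, mirroring
 * the fragmentation loop in the SGL test.
 */
static struct rte_mbuf *
build_sgl_mbuf(struct rte_mempool *pool, const uint8_t *src,
		uint32_t len, uint32_t fragsz)
{
	struct rte_mbuf *head, *seg;
	uint32_t copied = 0;

	head = rte_pktmbuf_alloc(pool);
	if (head == NULL)
		return NULL;

	seg = head;
	while (copied < len) {
		uint32_t to_copy = RTE_MIN(len - copied, fragsz);
		uint8_t *dst;

		if (copied != 0) {
			/* Chain a new segment and account for it */
			seg->next = rte_pktmbuf_alloc(pool);
			if (seg->next == NULL)
				goto fail;
			seg = seg->next;
			head->nb_segs++;
		}

		/* Reserve room in this segment and copy the fragment */
		dst = (uint8_t *)rte_pktmbuf_append(seg, (uint16_t)to_copy);
		if (dst == NULL)
			goto fail;
		memcpy(dst, src + copied, to_copy);
		copied += to_copy;
	}

	/* Appends on later segments do not update the head's packet length */
	head->pkt_len = len;

	return head;

fail:
	rte_pktmbuf_free(head);
	return NULL;
}

A digest area can then be appended to the last segment with rte_pktmbuf_append(),
which is how the test reserves space for the GMAC tag.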



* Re: [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-10-12 11:29         ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
  2020-10-12 11:30           ` [dpdk-dev] [PATCH v6 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
@ 2020-10-12 14:34           ` Akhil Goyal
  2020-10-12 14:43             ` Akhil Goyal
  1 sibling, 1 reply; 19+ messages in thread
From: Akhil Goyal @ 2020-10-12 14:34 UTC (permalink / raw)
  To: Pablo de Lara, declan.doherty; +Cc: dev, Fan Zhang

> 
> Add Scatter-gather list support for AES-GMAC.
> 
> Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> Tested-by: Fan Zhang <roy.fan.zhang@intel.com>
> Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---
Series applied to dpdk-next-crypto

Thanks.



* Re: [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC
  2020-10-12 14:34           ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Akhil Goyal
@ 2020-10-12 14:43             ` Akhil Goyal
  0 siblings, 0 replies; 19+ messages in thread
From: Akhil Goyal @ 2020-10-12 14:43 UTC (permalink / raw)
  To: Pablo de Lara, declan.doherty; +Cc: dev, Fan Zhang


> >
> > Add Scatter-gather list support for AES-GMAC.
> >
> > Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> > Tested-by: Fan Zhang <roy.fan.zhang@intel.com>
> > Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
> > ---
> Series applied to dpdk-next-crypto
> 
> Thanks.

There was a change in the release notes of this patch from v3 to v6 which I believe was not intended.
While merging, I reverted to the release notes from v3, which refer to aesni-gcm instead of aesni-mb.


end of thread

Thread overview: 19+ messages
2020-09-11 12:08 [dpdk-dev] [PATCH 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
2020-09-11 12:08 ` [dpdk-dev] [PATCH 2/2] test/crypto: add SGL tests for AES-GMAC Pablo de Lara
2020-09-22 10:01 ` [dpdk-dev] [PATCH v2 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
2020-09-22 10:01   ` [dpdk-dev] [PATCH v2 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
2020-09-22 10:34   ` [dpdk-dev] [PATCH v3 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
2020-09-22 10:34     ` [dpdk-dev] [PATCH v3 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
2020-10-09 11:40     ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
2020-10-09 11:40       ` [dpdk-dev] [PATCH v4 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
2020-10-09 14:09         ` Akhil Goyal
2020-10-09 18:31           ` De Lara Guarch, Pablo
2020-10-09 18:36             ` Akhil Goyal
2020-10-12  9:38               ` De Lara Guarch, Pablo
2020-10-12  9:20       ` [dpdk-dev] [PATCH v4 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Zhang, Roy Fan
2020-10-12 11:19       ` [dpdk-dev] [PATCH v5 " Pablo de Lara
2020-10-12 11:19         ` [dpdk-dev] [PATCH v5 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
2020-10-12 11:29         ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Pablo de Lara
2020-10-12 11:30           ` [dpdk-dev] [PATCH v6 2/2] test/crypto: add GMAC SGL tests Pablo de Lara
2020-10-12 14:34           ` [dpdk-dev] [PATCH v6 1/2] crypto/aesni_gcm: support SGL on AES-GMAC Akhil Goyal
2020-10-12 14:43             ` Akhil Goyal
