DPDK patches and discussions
* [dpdk-dev] [dpdk-dev v1] crypto/snow3g: add support for digest appended ops
@ 2021-05-05 17:15 Kai Ji
  2021-05-08 12:57 ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-05-10  9:50 ` [dpdk-dev] [dpdk-dev v2] " Kai Ji
  0 siblings, 2 replies; 30+ messages in thread
From: Kai Ji @ 2021-05-05 17:15 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, Kai Ji, pablo.de.lara.guarch, Damian Nowak

This patch enables out-of-place auth-cipher operations where
the digest is encrypted along with the rest of the raw data.
It also adds support for a partially encrypted digest when using
auth-cipher operations.
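
As an illustration (a sketch only: the 64-byte payload and the offsets
below are assumptions for this example, not part of the patch), a
digest-appended auth-cipher operation would be laid out as follows, with
offsets and lengths expressed in bits as SNOW3G UEA2/UIA2 requires:

	/* Assumed layout: 64-byte payload in m_src with the 4-byte
	 * digest appended at byte 64; the cipher region covers the
	 * payload plus the digest.
	 */
	op->sym->auth.data.offset = 0;
	op->sym->auth.data.length = 64 << 3;	/* hash the payload only */
	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = 68 << 3;	/* encrypt payload + digest */

Here auth_size (64 bytes) is smaller than cipher_size (68 bytes), so the
new snow3g_digest_appended_in_src() helper resolves the digest to byte 64
of m_src: the digest is generated there first and is then encrypted
together with the rest of the data.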

Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
Cc: pablo.de.lara.guarch@intel.com

Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/snow3g/rte_snow3g_pmd.c | 131 ++++++++++++++++++++++---
 1 file changed, 118 insertions(+), 13 deletions(-)

diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 962868e1fc..95c4a36a45 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
+ * Copyright(c) 2016-2021 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -179,6 +179,24 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
+/** Check if conditions are met for digest-appended operations */
+static uint8_t *
+snow3g_digest_appended_in_src(struct rte_crypto_op *op)
+{
+	unsigned int auth_size, cipher_size;
+
+	auth_size = (op->sym->auth.data.offset >> 3) +
+		(op->sym->auth.data.length >> 3);
+	cipher_size = (op->sym->cipher.data.offset >> 3) +
+		(op->sym->cipher.data.length >> 3);
+
+	if (auth_size < cipher_size)
+		return rte_pktmbuf_mtod_offset(op->sym->m_src,
+				uint8_t *, auth_size);
+
+	return NULL;
+}
+
 /** Encrypt/decrypt mbufs with same cipher key. */
 static uint8_t
 process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
@@ -189,20 +207,50 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t processed_ops = 0;
 	const void *src[SNOW3G_MAX_BURST];
 	void *dst[SNOW3G_MAX_BURST];
+	uint8_t *digest_appended[SNOW3G_MAX_BURST];
 	const void *iv[SNOW3G_MAX_BURST];
 	uint32_t num_bytes[SNOW3G_MAX_BURST];
+	uint32_t cipher_off, cipher_len;
+	int unencrypted_bytes = 0;
 
 	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3) :
-			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
+		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
+		cipher_len = ops[i]->sym->cipher.data.length >> 3;
+		src[i] = rte_pktmbuf_mtod_offset(
+			ops[i]->sym->m_src,	uint8_t *, cipher_off);
+
+		/* If out-of-place operation */
+		if (ops[i]->sym->m_dst &&
+			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
+			dst[i] = rte_pktmbuf_mtod_offset(
+				ops[i]->sym->m_dst, uint8_t *, cipher_off);
+
+			/* In case of out-of-place, auth-cipher operation
+			 * with partial encryption of the digest, copy
+			 * the remaining, unencrypted part.
+			 */
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				unencrypted_bytes =
+					(ops[i]->sym->auth.data.offset >> 3) +
+					(ops[i]->sym->auth.data.length >> 3) +
+					(SNOW3G_DIGEST_LENGTH) -
+					cipher_off - cipher_len;
+			if (unencrypted_bytes > 0)
+				rte_memcpy(
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_dst, uint8_t *,
+						cipher_off + cipher_len),
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_src, uint8_t *,
+						cipher_off + cipher_len),
+					unencrypted_bytes);
+		} else
+			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
+						uint8_t *, cipher_off);
+
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
 				session->cipher_iv_offset);
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+		num_bytes[i] = cipher_len;
 
 		processed_ops++;
 	}
@@ -210,6 +258,20 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	IMB_SNOW3G_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, num_bytes, processed_ops);
 
+	/* Take care of the raw digest data in src buffer */
+	for (i = 0; i < num_ops; i++) {
+		if (session->op == SNOW3G_OP_AUTH_CIPHER &&
+				ops[i]->sym->m_dst != NULL) {
+			digest_appended[i] =
+				snow3g_digest_appended_in_src(ops[i]);
+			/* Clear unencrypted digest from
+			 * the src buffer
+			 */
+			if (digest_appended[i] != NULL)
+				memset(digest_appended[i],
+					0, SNOW3G_DIGEST_LENGTH);
+		}
+	}
 	return processed_ops;
 }
 
@@ -222,6 +284,7 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 	uint8_t *src, *dst;
 	uint8_t *iv;
 	uint32_t length_in_bits, offset_in_bits;
+	int unencrypted_bytes = 0;
 
 	offset_in_bits = op->sym->cipher.data.offset;
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
@@ -230,10 +293,30 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 		SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
 		return 0;
 	}
+	length_in_bits = op->sym->cipher.data.length;
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	/* In case of out-of-place, auth-cipher operation
+	 * with partial encryption of the digest, copy
+	 * the remaining, unencrypted part.
+	 */
+	if (session->op == SNOW3G_OP_AUTH_CIPHER)
+		unencrypted_bytes =
+			(op->sym->auth.data.offset >> 3) +
+			(op->sym->auth.data.length >> 3) +
+			(SNOW3G_DIGEST_LENGTH) -
+			(offset_in_bits >> 3) -
+			(length_in_bits >> 3);
+	if (unencrypted_bytes > 0)
+		rte_memcpy(
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_dst, uint8_t *,
+				(length_in_bits >> 3)),
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_src, uint8_t *,
+				(length_in_bits >> 3)),
+				unencrypted_bytes);
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->cipher_iv_offset);
-	length_in_bits = op->sym->cipher.data.length;
 
 	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, length_in_bits, offset_in_bits);
@@ -252,6 +335,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t *src, *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	uint8_t digest_appended = 0;
 
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
@@ -261,6 +345,8 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}
 
+		dst = NULL;
+
 		length_in_bits = ops[i]->sym->auth.data.length;
 
 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
@@ -271,6 +357,13 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = qp->temp_digest;
 
+			 /* Handle auth cipher verify OOP case */
+			if (session->op == SNOW3G_OP_CIPHER_AUTH &&
+				ops[i]->sym->m_dst != NULL)
+				src = rte_pktmbuf_mtod_offset(
+					ops[i]->sym->m_dst, uint8_t *,
+					ops[i]->sym->auth.data.offset >> 3);
+
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
@@ -278,12 +371,23 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			if (memcmp(dst, ops[i]->sym->auth.digest.data,
 					SNOW3G_DIGEST_LENGTH) != 0)
 				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else  {
-			dst = ops[i]->sym->auth.digest.data;
+		} else {
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				dst = snow3g_digest_appended_in_src(ops[i]);
+
+			if (dst != NULL)
+				digest_appended = 1;
+			else
+				dst = ops[i]->sym->auth.digest.data;
 
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
+
+			/* Copy back digest from src to auth.digest.data */
+			if (digest_appended)
+				rte_memcpy(ops[i]->sym->auth.digest.data,
+					dst, SNOW3G_DIGEST_LENGTH);
 		}
 		processed_ops++;
 	}
@@ -564,7 +668,8 @@ cryptodev_snow3g_create(const char *name,
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
 			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 
 	mgr = alloc_mb_mgr(0);
 	if (mgr == NULL)
-- 
2.17.1



* Re: [dpdk-dev] [EXT] [dpdk-dev v1] crypto/snow3g: add support for digest appended ops
  2021-05-05 17:15 [dpdk-dev] [dpdk-dev v1] crypto/snow3g: add support for digest appended ops Kai Ji
@ 2021-05-08 12:57 ` Akhil Goyal
  2021-05-10  9:50 ` [dpdk-dev] [dpdk-dev v2] " Kai Ji
  1 sibling, 0 replies; 30+ messages in thread
From: Akhil Goyal @ 2021-05-08 12:57 UTC (permalink / raw)
  To: Kai Ji, dev; +Cc: roy.fan.zhang, pablo.de.lara.guarch, Damian Nowak

> This patch enables out-of-place auth-cipher operations where
> the digest is encrypted along with the rest of the raw data.
> It also adds support for a partially encrypted digest when using
> auth-cipher operations.
> 
> Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
> Cc: pablo.de.lara.guarch@intel.com
> 
I believe this is a new feature to be supported,
hence it is not a fix.

And submitting a new feature after RC2 is a bit late.
I recommend deferring it to the next release.

The documentation update in the .ini file is also missing.
You may also update the release notes to cover
this feature.

> Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> ---
>  drivers/crypto/snow3g/rte_snow3g_pmd.c | 131 ++++++++++++++++++++++-
> --



* [dpdk-dev] [dpdk-dev v2] crypto/snow3g: add support for digest appended ops
  2021-05-05 17:15 [dpdk-dev] [dpdk-dev v1] crypto/snow3g: add support for digest appended ops Kai Ji
  2021-05-08 12:57 ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-05-10  9:50 ` Kai Ji
  2021-06-29 20:14   ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-07-21  9:22   ` [dpdk-dev] [dpdk-dev v3] " Kai Ji
  1 sibling, 2 replies; 30+ messages in thread
From: Kai Ji @ 2021-05-10  9:50 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, Kai Ji, pablo.de.lara.guarch, Damian Nowak

This patch enables out-of-place auth-cipher operations where
the digest is encrypted along with the rest of the raw data.
It also adds support for a partially encrypted digest when using
auth-cipher operations.

Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
Cc: pablo.de.lara.guarch@intel.com

Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
Signed-off-by: Kai Ji <kai.ji@intel.com>

---
v2:
	- Documentation update
---
 doc/guides/cryptodevs/features/snow3g.ini |   1 +
 doc/guides/cryptodevs/snow3g.rst          |   2 +-
 drivers/crypto/snow3g/rte_snow3g_pmd.c    | 131 +++++++++++++++++++---
 3 files changed, 120 insertions(+), 14 deletions(-)

diff --git a/doc/guides/cryptodevs/features/snow3g.ini b/doc/guides/cryptodevs/features/snow3g.ini
index 14ac7e4b6d..4d4c5b579b 100644
--- a/doc/guides/cryptodevs/features/snow3g.ini
+++ b/doc/guides/cryptodevs/features/snow3g.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 Symmetric sessionless  = Y
 Non-Byte aligned data  = Y
+Digest encrypted       = Y
 OOP LB  In LB  Out     = Y
 
 ;
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index e8f9b5ff04..49ec1661f2 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -77,7 +77,7 @@ and the external crypto libraries supported by them:
    DPDK version   Crypto library version
    =============  ================================
    16.04 - 19.11  LibSSO SNOW3G
-   20.02+         Multi-buffer library 0.53 - 0.54
+   20.02+         Multi-buffer library 0.53 - 1.0
    =============  ================================
 
 
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 962868e1fc..95c4a36a45 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
+ * Copyright(c) 2016-2021 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -179,6 +179,24 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
+/** Check if conditions are met for digest-appended operations */
+static uint8_t *
+snow3g_digest_appended_in_src(struct rte_crypto_op *op)
+{
+	unsigned int auth_size, cipher_size;
+
+	auth_size = (op->sym->auth.data.offset >> 3) +
+		(op->sym->auth.data.length >> 3);
+	cipher_size = (op->sym->cipher.data.offset >> 3) +
+		(op->sym->cipher.data.length >> 3);
+
+	if (auth_size < cipher_size)
+		return rte_pktmbuf_mtod_offset(op->sym->m_src,
+				uint8_t *, auth_size);
+
+	return NULL;
+}
+
 /** Encrypt/decrypt mbufs with same cipher key. */
 static uint8_t
 process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
@@ -189,20 +207,50 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t processed_ops = 0;
 	const void *src[SNOW3G_MAX_BURST];
 	void *dst[SNOW3G_MAX_BURST];
+	uint8_t *digest_appended[SNOW3G_MAX_BURST];
 	const void *iv[SNOW3G_MAX_BURST];
 	uint32_t num_bytes[SNOW3G_MAX_BURST];
+	uint32_t cipher_off, cipher_len;
+	int unencrypted_bytes = 0;
 
 	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3) :
-			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
+		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
+		cipher_len = ops[i]->sym->cipher.data.length >> 3;
+		src[i] = rte_pktmbuf_mtod_offset(
+			ops[i]->sym->m_src,	uint8_t *, cipher_off);
+
+		/* If out-of-place operation */
+		if (ops[i]->sym->m_dst &&
+			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
+			dst[i] = rte_pktmbuf_mtod_offset(
+				ops[i]->sym->m_dst, uint8_t *, cipher_off);
+
+			/* In case of out-of-place, auth-cipher operation
+			 * with partial encryption of the digest, copy
+			 * the remaining, unencrypted part.
+			 */
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				unencrypted_bytes =
+					(ops[i]->sym->auth.data.offset >> 3) +
+					(ops[i]->sym->auth.data.length >> 3) +
+					(SNOW3G_DIGEST_LENGTH) -
+					cipher_off - cipher_len;
+			if (unencrypted_bytes > 0)
+				rte_memcpy(
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_dst, uint8_t *,
+						cipher_off + cipher_len),
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_src, uint8_t *,
+						cipher_off + cipher_len),
+					unencrypted_bytes);
+		} else
+			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
+						uint8_t *, cipher_off);
+
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
 				session->cipher_iv_offset);
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+		num_bytes[i] = cipher_len;
 
 		processed_ops++;
 	}
@@ -210,6 +258,20 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	IMB_SNOW3G_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, num_bytes, processed_ops);
 
+	/* Take care of the raw digest data in src buffer */
+	for (i = 0; i < num_ops; i++) {
+		if (session->op == SNOW3G_OP_AUTH_CIPHER &&
+				ops[i]->sym->m_dst != NULL) {
+			digest_appended[i] =
+				snow3g_digest_appended_in_src(ops[i]);
+			/* Clear unencrypted digest from
+			 * the src buffer
+			 */
+			if (digest_appended[i] != NULL)
+				memset(digest_appended[i],
+					0, SNOW3G_DIGEST_LENGTH);
+		}
+	}
 	return processed_ops;
 }
 
@@ -222,6 +284,7 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 	uint8_t *src, *dst;
 	uint8_t *iv;
 	uint32_t length_in_bits, offset_in_bits;
+	int unencrypted_bytes = 0;
 
 	offset_in_bits = op->sym->cipher.data.offset;
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
@@ -230,10 +293,30 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 		SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
 		return 0;
 	}
+	length_in_bits = op->sym->cipher.data.length;
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	/* In case of out-of-place, auth-cipher operation
+	 * with partial encryption of the digest, copy
+	 * the remaining, unencrypted part.
+	 */
+	if (session->op == SNOW3G_OP_AUTH_CIPHER)
+		unencrypted_bytes =
+			(op->sym->auth.data.offset >> 3) +
+			(op->sym->auth.data.length >> 3) +
+			(SNOW3G_DIGEST_LENGTH) -
+			(offset_in_bits >> 3) -
+			(length_in_bits >> 3);
+	if (unencrypted_bytes > 0)
+		rte_memcpy(
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_dst, uint8_t *,
+				(length_in_bits >> 3)),
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_src, uint8_t *,
+				(length_in_bits >> 3)),
+				unencrypted_bytes);
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->cipher_iv_offset);
-	length_in_bits = op->sym->cipher.data.length;
 
 	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, length_in_bits, offset_in_bits);
@@ -252,6 +335,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t *src, *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	uint8_t digest_appended = 0;
 
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
@@ -261,6 +345,8 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}
 
+		dst = NULL;
+
 		length_in_bits = ops[i]->sym->auth.data.length;
 
 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
@@ -271,6 +357,13 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = qp->temp_digest;
 
+			 /* Handle auth cipher verify OOP case */
+			if (session->op == SNOW3G_OP_CIPHER_AUTH &&
+				ops[i]->sym->m_dst != NULL)
+				src = rte_pktmbuf_mtod_offset(
+					ops[i]->sym->m_dst, uint8_t *,
+					ops[i]->sym->auth.data.offset >> 3);
+
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
@@ -278,12 +371,23 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			if (memcmp(dst, ops[i]->sym->auth.digest.data,
 					SNOW3G_DIGEST_LENGTH) != 0)
 				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else  {
-			dst = ops[i]->sym->auth.digest.data;
+		} else {
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				dst = snow3g_digest_appended_in_src(ops[i]);
+
+			if (dst != NULL)
+				digest_appended = 1;
+			else
+				dst = ops[i]->sym->auth.digest.data;
 
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
+
+			/* Copy back digest from src to auth.digest.data */
+			if (digest_appended)
+				rte_memcpy(ops[i]->sym->auth.digest.data,
+					dst, SNOW3G_DIGEST_LENGTH);
 		}
 		processed_ops++;
 	}
@@ -564,7 +668,8 @@ cryptodev_snow3g_create(const char *name,
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
 			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 
 	mgr = alloc_mb_mgr(0);
 	if (mgr == NULL)
-- 
2.17.1



* Re: [dpdk-dev] [EXT] [dpdk-dev v2] crypto/snow3g: add support for digest appended ops
  2021-05-10  9:50 ` [dpdk-dev] [dpdk-dev v2] " Kai Ji
@ 2021-06-29 20:14   ` Akhil Goyal
  2021-06-30 12:08     ` Zhang, Roy Fan
  2021-07-21  9:22   ` [dpdk-dev] [dpdk-dev v3] " Kai Ji
  1 sibling, 1 reply; 30+ messages in thread
From: Akhil Goyal @ 2021-06-29 20:14 UTC (permalink / raw)
  To: Kai Ji, dev; +Cc: roy.fan.zhang, pablo.de.lara.guarch, Damian Nowak

> This patch enables out-of-place auth-cipher operations where
> the digest is encrypted along with the rest of the raw data.
> It also adds support for a partially encrypted digest when using
> auth-cipher operations.
> 
> Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
> Cc: pablo.de.lara.guarch@intel.com
> 
This patch is a feature addition and not a fix, so there is no need for the Fixes tag.

> Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> 
Is this patch really required now, as I see that you plan to remove this PMD?


* Re: [dpdk-dev] [EXT] [dpdk-dev v2] crypto/snow3g: add support for digest appended ops
  2021-06-29 20:14   ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-06-30 12:08     ` Zhang, Roy Fan
  2021-07-06 19:48       ` Akhil Goyal
  0 siblings, 1 reply; 30+ messages in thread
From: Zhang, Roy Fan @ 2021-06-30 12:08 UTC (permalink / raw)
  To: Akhil Goyal, Ji, Kai, dev; +Cc: De Lara Guarch, Pablo, Damian Nowak

Hi Akhil,

This is a required feature from our customer, so could you help us merge it?

And we do not plan to remove this PMD, just to move it into a new folder with common code shared by all SW intel-ipsec-mb based PMDs.
From the user's point of view everything will be the same, including the EAL arguments and the way of accessing the driver.
And of course the changes in this patch will be moved to the new place then.

Regards,
Fan

> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Tuesday, June 29, 2021 9:15 PM
> To: Ji, Kai <kai.ji@intel.com>; dev@dpdk.org
> Cc: Zhang, Roy Fan <roy.fan.zhang@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Damian Nowak
> <damianx.nowak@intel.com>
> Subject: RE: [EXT] [dpdk-dev] [dpdk-dev v2] crypto/snow3g: add support for
> digest appended ops
> 
> > This patch enables out-of-place auth-cipher operations where
> > the digest is encrypted along with the rest of the raw data.
> > It also adds support for a partially encrypted digest when using
> > auth-cipher operations.
> >
> > Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
> > Cc: pablo.de.lara.guarch@intel.com
> >
> This patch is a feature addition and not a fix, so there is no need for the Fixes tag.
> 
> > Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
> > Signed-off-by: Kai Ji <kai.ji@intel.com>
> >
> Is this patch really required now, as I see that you plan to remove this PMD?


* Re: [dpdk-dev] [EXT] [dpdk-dev v2] crypto/snow3g: add support for digest appended ops
  2021-06-30 12:08     ` Zhang, Roy Fan
@ 2021-07-06 19:48       ` Akhil Goyal
  0 siblings, 0 replies; 30+ messages in thread
From: Akhil Goyal @ 2021-07-06 19:48 UTC (permalink / raw)
  To: Zhang, Roy Fan, Ji, Kai, dev; +Cc: De Lara Guarch, Pablo, Damian Nowak

> Hi Akhil,
> 
> This is a required feature from our customer, so could you help us merge
> it?
> 
> And we do not plan to remove this PMD, just to move it into a new folder
> with common code shared by all SW intel-ipsec-mb based PMDs.
> From the user's point of view everything will be the same, including the
> EAL arguments and the way of accessing the driver.
> And of course the changes in this patch will be moved to the new place
> then.
> 
This patch has a conflict with Pablo's patch. Can you rebase?


* [dpdk-dev] [dpdk-dev v3] crypto/snow3g: add support for digest appended ops
  2021-05-10  9:50 ` [dpdk-dev] [dpdk-dev v2] " Kai Ji
  2021-06-29 20:14   ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-07-21  9:22   ` Kai Ji
  2021-07-27  8:38     ` [dpdk-dev] [dpdk-dev v4] " Fan Zhang
  1 sibling, 1 reply; 30+ messages in thread
From: Kai Ji @ 2021-07-21  9:22 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, Kai Ji, pablo.de.lara.guarch, Damian Nowak

This patch enables out-of-place auth-cipher operations where
the digest is encrypted along with the rest of the raw data.
It also adds support for a partially encrypted digest when using
auth-cipher operations.

Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
Cc: pablo.de.lara.guarch@intel.com

Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
Signed-off-by: Kai Ji <kai.ji@intel.com>
---
v3:
	- Code rebase
	- Documentation update

---
 doc/guides/cryptodevs/features/snow3g.ini |   1 +
 drivers/crypto/snow3g/rte_snow3g_pmd.c    | 131 +++++++++++++++++++---
 2 files changed, 119 insertions(+), 13 deletions(-)

diff --git a/doc/guides/cryptodevs/features/snow3g.ini b/doc/guides/cryptodevs/features/snow3g.ini
index 14ac7e4b6d..4d4c5b579b 100644
--- a/doc/guides/cryptodevs/features/snow3g.ini
+++ b/doc/guides/cryptodevs/features/snow3g.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 Symmetric sessionless  = Y
 Non-Byte aligned data  = Y
+Digest encrypted       = Y
 OOP LB  In LB  Out     = Y
 
 ;
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 9aab357846..a7c012be92 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
+ * Copyright(c) 2016-2021 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -179,6 +179,24 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
+/** Check if conditions are met for digest-appended operations */
+static uint8_t *
+snow3g_digest_appended_in_src(struct rte_crypto_op *op)
+{
+	unsigned int auth_size, cipher_size;
+
+	auth_size = (op->sym->auth.data.offset >> 3) +
+		(op->sym->auth.data.length >> 3);
+	cipher_size = (op->sym->cipher.data.offset >> 3) +
+		(op->sym->cipher.data.length >> 3);
+
+	if (auth_size < cipher_size)
+		return rte_pktmbuf_mtod_offset(op->sym->m_src,
+				uint8_t *, auth_size);
+
+	return NULL;
+}
+
 /** Encrypt/decrypt mbufs with same cipher key. */
 static uint8_t
 process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
@@ -189,20 +207,50 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t processed_ops = 0;
 	const void *src[SNOW3G_MAX_BURST];
 	void *dst[SNOW3G_MAX_BURST];
+	uint8_t *digest_appended[SNOW3G_MAX_BURST];
 	const void *iv[SNOW3G_MAX_BURST];
 	uint32_t num_bytes[SNOW3G_MAX_BURST];
+	uint32_t cipher_off, cipher_len;
+	int unencrypted_bytes = 0;
 
 	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3) :
-			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
+		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
+		cipher_len = ops[i]->sym->cipher.data.length >> 3;
+		src[i] = rte_pktmbuf_mtod_offset(
+			ops[i]->sym->m_src,	uint8_t *, cipher_off);
+
+		/* If out-of-place operation */
+		if (ops[i]->sym->m_dst &&
+			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
+			dst[i] = rte_pktmbuf_mtod_offset(
+				ops[i]->sym->m_dst, uint8_t *, cipher_off);
+
+			/* In case of out-of-place, auth-cipher operation
+			 * with partial encryption of the digest, copy
+			 * the remaining, unencrypted part.
+			 */
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				unencrypted_bytes =
+					(ops[i]->sym->auth.data.offset >> 3) +
+					(ops[i]->sym->auth.data.length >> 3) +
+					(SNOW3G_DIGEST_LENGTH) -
+					cipher_off - cipher_len;
+			if (unencrypted_bytes > 0)
+				rte_memcpy(
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_dst, uint8_t *,
+						cipher_off + cipher_len),
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_src, uint8_t *,
+						cipher_off + cipher_len),
+					unencrypted_bytes);
+		} else
+			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
+						uint8_t *, cipher_off);
+
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
 				session->cipher_iv_offset);
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+		num_bytes[i] = cipher_len;
 
 		processed_ops++;
 	}
@@ -210,6 +258,20 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	IMB_SNOW3G_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, num_bytes, processed_ops);
 
+	/* Take care of the raw digest data in src buffer */
+	for (i = 0; i < num_ops; i++) {
+		if (session->op == SNOW3G_OP_AUTH_CIPHER &&
+				ops[i]->sym->m_dst != NULL) {
+			digest_appended[i] =
+				snow3g_digest_appended_in_src(ops[i]);
+			/* Clear unencrypted digest from
+			 * the src buffer
+			 */
+			if (digest_appended[i] != NULL)
+				memset(digest_appended[i],
+					0, SNOW3G_DIGEST_LENGTH);
+		}
+	}
 	return processed_ops;
 }
 
@@ -222,6 +284,7 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 	uint8_t *src, *dst;
 	uint8_t *iv;
 	uint32_t length_in_bits, offset_in_bits;
+	int unencrypted_bytes = 0;
 
 	offset_in_bits = op->sym->cipher.data.offset;
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
@@ -230,10 +293,30 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 		SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
 		return 0;
 	}
+	length_in_bits = op->sym->cipher.data.length;
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	/* In case of out-of-place, auth-cipher operation
+	 * with partial encryption of the digest, copy
+	 * the remaining, unencrypted part.
+	 */
+	if (session->op == SNOW3G_OP_AUTH_CIPHER)
+		unencrypted_bytes =
+			(op->sym->auth.data.offset >> 3) +
+			(op->sym->auth.data.length >> 3) +
+			(SNOW3G_DIGEST_LENGTH) -
+			(offset_in_bits >> 3) -
+			(length_in_bits >> 3);
+	if (unencrypted_bytes > 0)
+		rte_memcpy(
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_dst, uint8_t *,
+				(length_in_bits >> 3)),
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_src, uint8_t *,
+				(length_in_bits >> 3)),
+				unencrypted_bytes);
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->cipher_iv_offset);
-	length_in_bits = op->sym->cipher.data.length;
 
 	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, length_in_bits, offset_in_bits);
@@ -252,6 +335,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t *src, *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	uint8_t digest_appended = 0;
 
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
@@ -261,6 +345,8 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}
 
+		dst = NULL;
+
 		length_in_bits = ops[i]->sym->auth.data.length;
 
 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
@@ -271,6 +357,13 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = qp->temp_digest;
 
+			 /* Handle auth cipher verify OOP case */
+			if (session->op == SNOW3G_OP_CIPHER_AUTH &&
+				ops[i]->sym->m_dst != NULL)
+				src = rte_pktmbuf_mtod_offset(
+					ops[i]->sym->m_dst, uint8_t *,
+					ops[i]->sym->auth.data.offset >> 3);
+
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
@@ -278,12 +371,23 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			if (memcmp(dst, ops[i]->sym->auth.digest.data,
 					SNOW3G_DIGEST_LENGTH) != 0)
 				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else  {
-			dst = ops[i]->sym->auth.digest.data;
+		} else {
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				dst = snow3g_digest_appended_in_src(ops[i]);
+
+			if (dst != NULL)
+				digest_appended = 1;
+			else
+				dst = ops[i]->sym->auth.digest.data;
 
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
+
+			/* Copy back digest from src to auth.digest.data */
+			if (digest_appended)
+				rte_memcpy(ops[i]->sym->auth.digest.data,
+					dst, SNOW3G_DIGEST_LENGTH);
 		}
 		processed_ops++;
 	}
@@ -564,7 +668,8 @@ cryptodev_snow3g_create(const char *name,
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
 			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 
 	mgr = alloc_mb_mgr(0);
 	if (mgr == NULL)
-- 
2.17.1



* [dpdk-dev] [dpdk-dev v4] crypto/snow3g: add support for digest appended ops
  2021-07-21  9:22   ` [dpdk-dev] [dpdk-dev v3] " Kai Ji
@ 2021-07-27  8:38     ` Fan Zhang
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
  0 siblings, 1 reply; 30+ messages in thread
From: Fan Zhang @ 2021-07-27  8:38 UTC (permalink / raw)
  To: dev; +Cc: gakhil, Kai Ji, pablo.de.lara.guarch, Damian Nowak

From: Kai Ji <kai.ji@intel.com>

This patch enables out-of-place auth-cipher operations where
the digest is encrypted along with the rest of the raw data.
It also adds support for a partially encrypted digest when using
auth-cipher operations.

Fixes: 7c87e2d7b359 ("crypto/snow3g: use IPsec library")
Cc: pablo.de.lara.guarch@intel.com

Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
Signed-off-by: Kai Ji <kai.ji@intel.com>
---
v4:
	- Fixed compile issue.


 doc/guides/cryptodevs/features/snow3g.ini |   1 +
 drivers/crypto/snow3g/rte_snow3g_pmd.c    | 139 +++++++++++++++++++---
 2 files changed, 123 insertions(+), 17 deletions(-)

diff --git a/doc/guides/cryptodevs/features/snow3g.ini b/doc/guides/cryptodevs/features/snow3g.ini
index 14ac7e4b6d..4d4c5b579b 100644
--- a/doc/guides/cryptodevs/features/snow3g.ini
+++ b/doc/guides/cryptodevs/features/snow3g.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 Symmetric sessionless  = Y
 Non-Byte aligned data  = Y
+Digest encrypted       = Y
 OOP LB  In LB  Out     = Y
 
 ;
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 9aab357846..2c99ed02db 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
+ * Copyright(c) 2016-2021 Intel Corporation
  */
 
 #include <rte_common.h>
@@ -179,6 +179,24 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
+/** Check if conditions are met for digest-appended operations */
+static uint8_t *
+snow3g_digest_appended_in_src(struct rte_crypto_op *op)
+{
+	unsigned int auth_size, cipher_size;
+
+	auth_size = (op->sym->auth.data.offset >> 3) +
+		(op->sym->auth.data.length >> 3);
+	cipher_size = (op->sym->cipher.data.offset >> 3) +
+		(op->sym->cipher.data.length >> 3);
+
+	if (auth_size < cipher_size)
+		return rte_pktmbuf_mtod_offset(op->sym->m_src,
+				uint8_t *, auth_size);
+
+	return NULL;
+}
+
 /** Encrypt/decrypt mbufs with same cipher key. */
 static uint8_t
 process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
@@ -187,22 +205,52 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 {
 	unsigned i;
 	uint8_t processed_ops = 0;
-	const void *src[SNOW3G_MAX_BURST];
-	void *dst[SNOW3G_MAX_BURST];
-	const void *iv[SNOW3G_MAX_BURST];
-	uint32_t num_bytes[SNOW3G_MAX_BURST];
+	const void *src[SNOW3G_MAX_BURST] = {NULL};
+	void *dst[SNOW3G_MAX_BURST] = {NULL};
+	uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL };
+	const void *iv[SNOW3G_MAX_BURST] = {NULL};
+	uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
+	uint32_t cipher_off, cipher_len;
+	int unencrypted_bytes = 0;
 
 	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3) :
-			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
+		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
+		cipher_len = ops[i]->sym->cipher.data.length >> 3;
+		src[i] = rte_pktmbuf_mtod_offset(
+			ops[i]->sym->m_src,	uint8_t *, cipher_off);
+
+		/* If out-of-place operation */
+		if (ops[i]->sym->m_dst &&
+			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
+			dst[i] = rte_pktmbuf_mtod_offset(
+				ops[i]->sym->m_dst, uint8_t *, cipher_off);
+
+			/* In case of out-of-place, auth-cipher operation
+			 * with partial encryption of the digest, copy
+			 * the remaining, unencrypted part.
+			 */
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				unencrypted_bytes =
+					(ops[i]->sym->auth.data.offset >> 3) +
+					(ops[i]->sym->auth.data.length >> 3) +
+					(SNOW3G_DIGEST_LENGTH) -
+					cipher_off - cipher_len;
+			if (unencrypted_bytes > 0)
+				rte_memcpy(
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_dst, uint8_t *,
+						cipher_off + cipher_len),
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_src, uint8_t *,
+						cipher_off + cipher_len),
+					unencrypted_bytes);
+		} else
+			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
+						uint8_t *, cipher_off);
+
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
 				session->cipher_iv_offset);
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+		num_bytes[i] = cipher_len;
 
 		processed_ops++;
 	}
@@ -210,6 +258,20 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	IMB_SNOW3G_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, num_bytes, processed_ops);
 
+	/* Take care of the raw digest data in src buffer */
+	for (i = 0; i < num_ops; i++) {
+		if (session->op == SNOW3G_OP_AUTH_CIPHER &&
+				ops[i]->sym->m_dst != NULL) {
+			digest_appended[i] =
+				snow3g_digest_appended_in_src(ops[i]);
+			/* Clear unencrypted digest from
+			 * the src buffer
+			 */
+			if (digest_appended[i] != NULL)
+				memset(digest_appended[i],
+					0, SNOW3G_DIGEST_LENGTH);
+		}
+	}
 	return processed_ops;
 }
 
@@ -222,6 +284,7 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 	uint8_t *src, *dst;
 	uint8_t *iv;
 	uint32_t length_in_bits, offset_in_bits;
+	int unencrypted_bytes = 0;
 
 	offset_in_bits = op->sym->cipher.data.offset;
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
@@ -230,10 +293,30 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 		SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
 		return 0;
 	}
+	length_in_bits = op->sym->cipher.data.length;
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	/* In case of out-of-place, auth-cipher operation
+	 * with partial encryption of the digest, copy
+	 * the remaining, unencrypted part.
+	 */
+	if (session->op == SNOW3G_OP_AUTH_CIPHER)
+		unencrypted_bytes =
+			(op->sym->auth.data.offset >> 3) +
+			(op->sym->auth.data.length >> 3) +
+			(SNOW3G_DIGEST_LENGTH) -
+			(offset_in_bits >> 3) -
+			(length_in_bits >> 3);
+	if (unencrypted_bytes > 0)
+		rte_memcpy(
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_dst, uint8_t *,
+				(length_in_bits >> 3)),
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_src, uint8_t *,
+				(length_in_bits >> 3)),
+				unencrypted_bytes);
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->cipher_iv_offset);
-	length_in_bits = op->sym->cipher.data.length;
 
 	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
 			src, dst, length_in_bits, offset_in_bits);
@@ -252,6 +335,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 	uint8_t *src, *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	uint8_t digest_appended = 0;
 
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
@@ -261,6 +345,8 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}
 
+		dst = NULL;
+
 		length_in_bits = ops[i]->sym->auth.data.length;
 
 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
@@ -271,6 +357,13 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = qp->temp_digest;
 
+			 /* Handle auth cipher verify OOP case */
+			if (session->op == SNOW3G_OP_CIPHER_AUTH &&
+				ops[i]->sym->m_dst != NULL)
+				src = rte_pktmbuf_mtod_offset(
+					ops[i]->sym->m_dst, uint8_t *,
+					ops[i]->sym->auth.data.offset >> 3);
+
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
@@ -278,12 +371,23 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 			if (memcmp(dst, ops[i]->sym->auth.digest.data,
 					SNOW3G_DIGEST_LENGTH) != 0)
 				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else  {
-			dst = ops[i]->sym->auth.digest.data;
+		} else {
+			if (session->op == SNOW3G_OP_AUTH_CIPHER)
+				dst = snow3g_digest_appended_in_src(ops[i]);
+
+			if (dst != NULL)
+				digest_appended = 1;
+			else
+				dst = ops[i]->sym->auth.digest.data;
 
 			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
+
+			/* Copy back digest from src to auth.digest.data */
+			if (digest_appended)
+				rte_memcpy(ops[i]->sym->auth.digest.data,
+					dst, SNOW3G_DIGEST_LENGTH);
 		}
 		processed_ops++;
 	}
@@ -564,7 +668,8 @@ cryptodev_snow3g_create(const char *name,
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
 			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 
 	mgr = alloc_mb_mgr(0);
 	if (mgr == NULL)
-- 
2.25.1



* [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework
  2021-07-27  8:38     ` [dpdk-dev] [dpdk-dev v4] " Fan Zhang
@ 2021-09-29 16:30       ` Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework Ciara Power
                           ` (9 more replies)
  0 siblings, 10 replies; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power

This set of patches introduces a new framework that makes all common code
of the SW crypto PMD implementations built on top of the intel-ipsec-mb
library shareable. This helps to reduce future effort on code maintenance
and feature updates. It also moves all SW PMD implementation-specific
details into single files located in the crypto/ipsec_mb folder.
A chacha20-poly1305 SW PMD is added based on this framework.

Multi-process support for the PMDs is added for intel-ipsec-mb v1.1.
The minimum intel-ipsec-mb version required is bumped to 1.0.
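
As a rough sketch of how an individual PMD plugs into this framework
(illustrative only: the SNOW3G names and the IPSEC_MB_PMD_TYPE_SNOW3G
enum value below are assumptions, the real definitions live in
rte_ipsec_mb_pmd_private.h), each per-PMD file fills in its slot of the
shared ipsec_mb_pmds[] table, which the common device-create code in
patch 1 then consumes:

	/* Illustrative sketch only; the SNOW3G names are assumed. */
	struct ipsec_mb_pmd_data *pmd_data =
			&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];

	pmd_data->ops = &snow3g_pmd_ops;	/* cryptodev ops table */
	pmd_data->dequeue_burst = snow3g_pmd_dequeue_burst;
	pmd_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
	pmd_data->internals_priv_size = sizeof(struct snow3g_private);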

v3:
  - Added multiprocess support for v1.1 of intel-ipsec-mb.
  - Fixed some formatting and comments.
  - Added detail to some commit logs.
  - Updated macros to use intel-ipsec-mb v1.0 supported macros.
  - Added use of intel-ipsec-mb API to auto initialise IMB_MGR
    based on CPU vector support.

v2:
  - Added qp NULL check in get stats function.
  - Updated maintainers file.
  - Replaced use of strlcpy with rte_strlcpy.
  - Fixed enum for GCM key length.
  - Updated release notes.
  - Added Chacha20_poly1305 PMD documentation.

Ciara Power (2):
  crypto/ipsec_mb: add multiprocess support
  doc/rel_notes: added note for SW Crypto PMD change

Fan Zhang (1):
  drivers/crypto: introduce IPsec-mb framework

Kai Ji (1):
  crypto/ipsec_mb: add chacha20-poly1305 PMD to framework

Piotr Bronowski (6):
  drivers/crypto: move aesni-mb PMD to IPsec-mb framework
  drivers/crypto: move aesni-gcm PMD to IPsec-mb framework
  drivers/crypto: move kasumi PMD to IPsec-mb framework
  drivers/crypto: move snow3g PMD to IPsec-mb framework
  crypto/ipsec_mb: add snow3g digest appended ops support
  drivers/crypto: move zuc PMD to IPsec-mb framework

 MAINTAINERS                                   |   39 +-
 app/test/test_cryptodev.c                     |   23 +
 app/test/test_cryptodev.h                     |    1 +
 app/test/test_cryptodev_aead_test_vectors.h   |  114 +
 doc/guides/cryptodevs/aesni_gcm.rst           |    4 +-
 doc/guides/cryptodevs/aesni_mb.rst            |    4 +-
 doc/guides/cryptodevs/chacha20_poly1305.rst   |   99 +
 .../cryptodevs/features/chacha20_poly1305.ini |   35 +
 doc/guides/cryptodevs/features/snow3g.ini     |    1 +
 doc/guides/cryptodevs/index.rst               |    1 +
 doc/guides/cryptodevs/kasumi.rst              |    3 +-
 doc/guides/cryptodevs/snow3g.rst              |    3 +-
 doc/guides/cryptodevs/zuc.rst                 |    3 +-
 doc/guides/rel_notes/release_21_11.rst        |   34 +
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h      |  104 -
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c      |  984 ------
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  |  333 --
 .../crypto/aesni_gcm/aesni_gcm_pmd_private.h  |  123 -
 drivers/crypto/aesni_gcm/meson.build          |   24 -
 .../crypto/aesni_mb/aesni_mb_pmd_private.h    |  337 --
 drivers/crypto/aesni_mb/meson.build           |   25 -
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c    | 2232 ------------
 .../crypto/aesni_mb/rte_aesni_mb_pmd_ops.c    | 1126 -------
 drivers/crypto/aesni_mb/version.map           |    3 -
 drivers/crypto/ipsec_mb/meson.build           |   33 +
 drivers/crypto/ipsec_mb/pmd_aesni_gcm.c       | 1003 ++++++
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c        | 2977 +++++++++++++++++
 drivers/crypto/ipsec_mb/pmd_chacha_poly.c     |  482 +++
 drivers/crypto/ipsec_mb/pmd_kasumi.c          |  556 +++
 drivers/crypto/ipsec_mb/pmd_snow3g.c          |  697 ++++
 .../{zuc/rte_zuc_pmd.c => ipsec_mb/pmd_zuc.c} |  459 +--
 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    |  172 +
 .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    |  369 ++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |  456 +++
 .../{aesni_gcm => ipsec_mb}/version.map       |    2 +-
 drivers/crypto/kasumi/kasumi_pmd_private.h    |   81 -
 drivers/crypto/kasumi/meson.build             |   24 -
 drivers/crypto/kasumi/rte_kasumi_pmd.c        |  642 ----
 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c    |  316 --
 drivers/crypto/kasumi/version.map             |    3 -
 drivers/crypto/meson.build                    |    6 +-
 drivers/crypto/snow3g/meson.build             |   24 -
 drivers/crypto/snow3g/rte_snow3g_pmd.c        |  656 ----
 drivers/crypto/snow3g/rte_snow3g_pmd_ops.c    |  323 --
 drivers/crypto/snow3g/snow3g_pmd_private.h    |   84 -
 drivers/crypto/snow3g/version.map             |    3 -
 drivers/crypto/zuc/meson.build                |   24 -
 drivers/crypto/zuc/rte_zuc_pmd_ops.c          |  322 --
 drivers/crypto/zuc/version.map                |    3 -
 drivers/crypto/zuc/zuc_pmd_private.h          |   83 -
 50 files changed, 7264 insertions(+), 8191 deletions(-)
 create mode 100644 doc/guides/cryptodevs/chacha20_poly1305.rst
 create mode 100644 doc/guides/cryptodevs/features/chacha20_poly1305.ini
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 delete mode 100644 drivers/crypto/aesni_gcm/meson.build
 delete mode 100644 drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
 delete mode 100644 drivers/crypto/aesni_mb/meson.build
 delete mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
 delete mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
 delete mode 100644 drivers/crypto/aesni_mb/version.map
 create mode 100644 drivers/crypto/ipsec_mb/meson.build
 create mode 100644 drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
 create mode 100644 drivers/crypto/ipsec_mb/pmd_aesni_mb.c
 create mode 100644 drivers/crypto/ipsec_mb/pmd_chacha_poly.c
 create mode 100644 drivers/crypto/ipsec_mb/pmd_kasumi.c
 create mode 100644 drivers/crypto/ipsec_mb/pmd_snow3g.c
 rename drivers/crypto/{zuc/rte_zuc_pmd.c => ipsec_mb/pmd_zuc.c} (50%)
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
 rename drivers/crypto/{aesni_gcm => ipsec_mb}/version.map (58%)
 delete mode 100644 drivers/crypto/kasumi/kasumi_pmd_private.h
 delete mode 100644 drivers/crypto/kasumi/meson.build
 delete mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd.c
 delete mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
 delete mode 100644 drivers/crypto/kasumi/version.map
 delete mode 100644 drivers/crypto/snow3g/meson.build
 delete mode 100644 drivers/crypto/snow3g/rte_snow3g_pmd.c
 delete mode 100644 drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
 delete mode 100644 drivers/crypto/snow3g/snow3g_pmd_private.h
 delete mode 100644 drivers/crypto/snow3g/version.map
 delete mode 100644 drivers/crypto/zuc/meson.build
 delete mode 100644 drivers/crypto/zuc/rte_zuc_pmd_ops.c
 delete mode 100644 drivers/crypto/zuc/version.map
 delete mode 100644 drivers/crypto/zuc/zuc_pmd_private.h

-- 
2.25.1



* [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-09-30  9:51           ` Kinsella, Ray
  2021-10-06 13:50           ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 02/10] crypto/ipsec_mb: add multiprocess support Ciara Power
                           ` (8 subsequent siblings)
  9 siblings, 2 replies; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Thomas Monjalon, Pablo de Lara, Ray Kinsella

From: Fan Zhang <roy.fan.zhang@intel.com>

This patch introduces the new framework to share common code between
the SW crypto PMDs that depend on the intel-ipsec-mb library.
This change helps to reduce future effort on code maintenance and
feature updates.

The PMDs that will be added to this framework in subsequent patches are:
  - AESNI MB
  - AESNI GCM
  - KASUMI
  - SNOW3G
  - ZUC

The use of these PMDs will not change; they will still be supported for
x86 and will use the same EAL args as before.

The minimum required version for the intel-ipsec-mb library is now v1.0.
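
As an illustration of the per-lcore IMB_MGR pattern the framework relies
on (a sketch only: it assumes the init_mb_mgr_auto() API from
intel-ipsec-mb v1.0 and omits error handling), a data-path function can
lazily allocate and auto-initialise its multi-buffer manager:

	IMB_MGR *mgr = RTE_PER_LCORE(mb_mgr);

	if (mgr == NULL) {
		IMB_ARCH arch;

		/* may return NULL on allocation failure */
		mgr = alloc_mb_mgr(0);
		/* auto-detects SSE/AVX/AVX2/AVX512 at run time */
		init_mb_mgr_auto(mgr, &arch);
		RTE_PER_LCORE(mb_mgr) = mgr;
	}

The manager is freed again in cryptodev_ipsec_mb_remove() below.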

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v3:
  - Updated intel-ipsec-mb macros.
  - Added use of auto init function for IMB_MGR.
  - Added detail to commit log.
v2:
  - Added qp NULL check in get stats function.
  - Added maintainers file entry.
  - Replaced strlcpy with rte_strlcpy.
---
 MAINTAINERS                                   |   4 +
 drivers/crypto/ipsec_mb/meson.build           |  27 ++
 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    | 169 ++++++++++
 .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 291 ++++++++++++++++++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       | 275 +++++++++++++++++
 drivers/crypto/ipsec_mb/version.map           |   3 +
 drivers/crypto/meson.build                    |   1 +
 7 files changed, 770 insertions(+)
 create mode 100644 drivers/crypto/ipsec_mb/meson.build
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
 create mode 100644 drivers/crypto/ipsec_mb/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 1e0d303394..f1aaf7d408 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1065,6 +1065,10 @@ F: drivers/common/qat/
 F: doc/guides/cryptodevs/qat.rst
 F: doc/guides/cryptodevs/features/qat.ini
 
+IPsec MB
+M: Fan Zhang <roy.fan.zhang@intel.com>
+F: drivers/crypto/ipsec_mb/
+
 KASUMI
 M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: drivers/crypto/kasumi/
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
new file mode 100644
index 0000000000..3d48da60ed
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 - 2021 Intel Corporation
+
+IMB_required_ver = '1.0.0'
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+	build = false
+	reason = 'missing dependency, "libIPSec_MB"'
+else
+	ext_deps += lib
+
+	# version comes with quotes, so we split based on " and take the middle
+	imb_ver = cc.get_define('IMB_VERSION_STR',
+		prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
+
+	if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
+		reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
+				IMB_required_ver, imb_ver)
+		build = false
+	endif
+
+endif
+
+sources = files('rte_ipsec_mb_pmd.c',
+		'rte_ipsec_mb_pmd_ops.c',
+		)
+deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
new file mode 100644
index 0000000000..3f2cefed52
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
+
+struct ipsec_mb_pmd_data ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
+int ipsec_mb_logtype_driver;
+enum ipsec_mb_vector_mode vector_mode;
+
+/**
+ * Generic burst enqueue, place crypto operations on ingress queue for
+ * processing.
+ *
+ * @param __qp         Queue Pair to process
+ * @param ops          Crypto operations for processing
+ * @param nb_ops       Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+ipsec_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct ipsec_mb_qp *qp = __qp;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
+
+	return nb_enqueued;
+}
+
+int
+cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
+	enum ipsec_mb_pmd_types pmd_type)
+{
+	struct rte_cryptodev *dev;
+	struct ipsec_mb_private *internals;
+	struct ipsec_mb_pmd_data *pmd_data = &ipsec_mb_pmds[pmd_type];
+	struct rte_cryptodev_pmd_init_params init_params = {};
+	const char *name, *args;
+	int retval;
+
+	if (vector_mode == IPSEC_MB_NOT_SUPPORTED) {
+		/* Check CPU for supported vector instruction set */
+		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+			vector_mode = IPSEC_MB_AVX512;
+		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+			vector_mode = IPSEC_MB_AVX2;
+		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+			vector_mode = IPSEC_MB_AVX;
+		else
+			vector_mode = IPSEC_MB_SSE;
+	}
+
+	init_params.private_data_size = sizeof(struct ipsec_mb_private) +
+		pmd_data->internals_priv_size;
+	init_params.max_nb_queue_pairs =
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS;
+	init_params.socket_id = rte_socket_id();
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	args = rte_vdev_device_args(vdev);
+
+	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+	if (retval) {
+		IPSEC_MB_LOG(
+		    ERR, "Failed to parse initialisation arguments[%s]", args);
+		return -EINVAL;
+	}
+
+	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
+	if (dev == NULL) {
+		IPSEC_MB_LOG(ERR, "driver %s: create failed",
+			     init_params.name);
+		return -ENODEV;
+	}
+
+	/* Set the device's private data */
+	internals = dev->data->dev_private;
+	internals->pmd_type = pmd_type;
+	internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;
+
+	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
+	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
+	if (dev->driver_id == UINT8_MAX) {
+		IPSEC_MB_LOG(ERR, "driver %s: no driver id registered",
+			     init_params.name);
+		rte_cryptodev_pmd_destroy(dev);
+		return -ENODEV;
+	}
+	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
+	dev->enqueue_burst = ipsec_mb_pmd_enqueue_burst;
+	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
+
+	if (pmd_data->dev_config) {
+		retval = (*pmd_data->dev_config)(dev);
+		if (retval < 0) {
+			IPSEC_MB_LOG(ERR,
+				"Failed to configure device %s", name);
+			rte_cryptodev_pmd_destroy(dev);
+			return retval;
+		}
+	}
+
+	dev->feature_flags = pmd_data->feature_flags;
+
+	switch (vector_mode) {
+	case IPSEC_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		break;
+	case IPSEC_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		break;
+	case IPSEC_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		break;
+	case IPSEC_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		break;
+	default:
+		break;
+	}
+
+	IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s",
+		     imb_get_version_str());
+
+	return 0;
+}
+
+int
+cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	if (RTE_PER_LCORE(mb_mgr)) {
+		free_mb_mgr(RTE_PER_LCORE(mb_mgr));
+		RTE_PER_LCORE(mb_mgr) = NULL;
+	}
+
+	if (cryptodev->security_ctx) {
+		rte_free(cryptodev->security_ctx);
+		cryptodev->security_ctx = NULL;
+	}
+
+	return rte_cryptodev_pmd_destroy(cryptodev);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
new file mode 100644
index 0000000000..1146297216
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+/** Configure device */
+int
+ipsec_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		    __rte_unused struct rte_cryptodev_config *config)
+{
+	return 0;
+}
+
+/** Start device */
+int
+ipsec_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+void
+ipsec_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+int
+ipsec_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Get device statistics */
+void
+ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
+		if (qp == NULL) {
+			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
+			continue;
+		}
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+void
+ipsec_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		if (qp == NULL)
+			continue;
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+}
+
+/** Get device info */
+void
+ipsec_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_info =
+		&ipsec_mb_pmds[internals->pmd_type];
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = pmd_info->caps;
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit of number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+	}
+}
+
+/** Release queue pair */
+int
+ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
+	struct rte_ring *r = NULL;
+
+	if (qp != NULL) {
+		r = rte_ring_lookup(qp->name);
+		if (r)
+			rte_ring_free(r);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair */
+int
+ipsec_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+					   struct ipsec_mb_qp *qp)
+{
+	uint32_t n =
+	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
+		     dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+ipsec_mb_pmd_qp_create_processed_ops_ring(
+	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			IPSEC_MB_LOG(
+			    INFO, "Reusing existing ring %s for processed ops",
+			    ring_name);
+			return r;
+		}
+		IPSEC_MB_LOG(
+		    ERR, "Unable to reuse existing ring %s for processed ops",
+		    ring_name);
+		return NULL;
+	}
+
+	return rte_ring_create(ring_name, ring_size, socket_id,
+			       RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+int
+ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+				const struct rte_cryptodev_qp_conf *qp_conf,
+				int socket_id)
+{
+	struct ipsec_mb_qp *qp = NULL;
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_data =
+		&ipsec_mb_pmds[internals->pmd_type];
+	uint32_t qp_size;
+	int ret = -1;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ipsec_mb_pmd_qp_release(dev, qp_id);
+
+	qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+	if (ipsec_mb_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->pmd_type = internals->pmd_type;
+	qp->sess_mp = qp_conf->mp_session;
+	qp->sess_mp_priv = qp_conf->mp_session_private;
+
+	qp->ingress_queue = ipsec_mb_pmd_qp_create_processed_ops_ring(qp,
+		qp_conf->nb_descriptors, socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	qp->mb_mgr = alloc_init_mb_mgr();
+	if (!qp->mb_mgr) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	if (pmd_data->queue_pair_configure) {
+		ret = pmd_data->queue_pair_configure(qp);
+		if (ret < 0)
+			goto qp_setup_cleanup;
+	}
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp->mb_mgr)
+		free_mb_mgr(qp->mb_mgr);
+	if (qp)
+		rte_free(qp);
+	dev->data->queue_pairs[qp_id] = NULL;
+	return ret;
+}
+
+/** Return the size of the PMD-specific session structure */
+unsigned
+ipsec_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev)
+{
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_data =
+		&ipsec_mb_pmds[internals->pmd_type];
+
+	return pmd_data->session_priv_size;
+}
+
+/** Configure pmd specific multi-buffer session from a crypto xform chain */
+int
+ipsec_mb_pmd_sym_session_configure(
+	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
+	struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_data =
+		&ipsec_mb_pmds[internals->pmd_type];
+	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
+	int ret = 0;
+
+	if (!mb_mgr)
+		return -ENOMEM;
+
+	if (unlikely(sess == NULL)) {
+		IPSEC_MB_LOG(ERR, "invalid session struct");
+		free_mb_mgr(mb_mgr);
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
+		free_mb_mgr(mb_mgr);
+		return -ENOMEM;
+	}
+
+	ret = (*pmd_data->session_configure)(mb_mgr, sess_private_data, xform);
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR, "failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		free_mb_mgr(mb_mgr);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+	return 0;
+}
+
+/** Clear the session memory */
+void
+ipsec_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+			       struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		memset(sess_priv, 0, ipsec_mb_pmd_sym_session_get_size(dev));
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
new file mode 100644
index 0000000000..754259aa59
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#ifndef _IPSEC_MB_PMD_PRIVATE_H_
+#define _IPSEC_MB_PMD_PRIVATE_H_
+
+#include <intel-ipsec-mb.h>
+#include <cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+
+#if defined(RTE_LIB_SECURITY)
+#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#endif
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+enum ipsec_mb_vector_mode {
+	IPSEC_MB_NOT_SUPPORTED = 0,
+	IPSEC_MB_SSE,
+	IPSEC_MB_AVX,
+	IPSEC_MB_AVX2,
+	IPSEC_MB_AVX512
+};
+
+extern enum ipsec_mb_vector_mode vector_mode;
+
+/** IMB_MGR instances, one per thread */
+extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
+
+/** PMD LOGTYPE DRIVER, common to all PMDs */
+extern int ipsec_mb_logtype_driver;
+#define IPSEC_MB_LOG(level, fmt, ...)                                         \
+	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
+		"%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/** All supported device types */
+enum ipsec_mb_pmd_types {
+	IPSEC_MB_N_PMD_TYPES
+};
+
+/** Crypto operations */
+enum ipsec_mb_operation {
+	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
+	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
+	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
+	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
+	IPSEC_MB_OP_ENCRYPT_ONLY,
+	IPSEC_MB_OP_DECRYPT_ONLY,
+	IPSEC_MB_OP_HASH_GEN_ONLY,
+	IPSEC_MB_OP_HASH_VERIFY_ONLY,
+	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
+	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
+	IPSEC_MB_OP_NOT_SUPPORTED
+};
+
+/** Helper function. Gets driver ID based on PMD type */
+static __rte_always_inline uint8_t
+ipsec_mb_get_driver_id(__rte_unused enum ipsec_mb_pmd_types pmd_type)
+{
+	return UINT8_MAX;
+}
+
+/** Common private data structure for each PMD */
+struct ipsec_mb_private {
+	enum ipsec_mb_pmd_types pmd_type;
+	/**< PMD  type */
+	uint32_t max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	__extension__ uint8_t priv[0];
+};
+
+/** IPSEC Multi buffer queue pair common queue pair data for all PMDs */
+struct ipsec_mb_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/**< Unique Queue Pair Name */
+	struct rte_ring *ingress_queue;
+	/**< Ring for placing operations ready for processing */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *sess_mp_priv;
+	/**< Session Private Data Mempool */
+	struct rte_cryptodev_stats stats;
+	/**< Queue pair statistics */
+	enum ipsec_mb_pmd_types pmd_type;
+	/**< pmd type */
+	uint8_t digest_idx;
+	/**< Index of the next slot to be used in temp_digests,
+	 * to store the digest for a given operation
+	 */
+	IMB_MGR *mb_mgr;
+	/* Multi buffer manager */
+	__extension__ uint8_t additional_data[0];
+	/**< Storing PMD specific additional data */
+};
+
+static __rte_always_inline void *
+ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
+{
+	return (void *)qp->additional_data;
+}
+
+/** Helper function. Allocates job manager */
+static __rte_always_inline IMB_MGR *
+alloc_init_mb_mgr(void)
+{
+	IMB_MGR *mb_mgr = alloc_mb_mgr(0);
+
+	if (unlikely(mb_mgr == NULL)) {
+		IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data");
+		return NULL;
+	}
+
+	init_mb_mgr_auto(mb_mgr, NULL);
+
+	return mb_mgr;
+}
+
+/** Helper function. Gets per thread job manager */
+static __rte_always_inline IMB_MGR *
+get_per_thread_mb_mgr(void)
+{
+	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
+		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
+
+	return RTE_PER_LCORE(mb_mgr);
+}
+
+/** Device creation function */
+int
+cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
+	enum ipsec_mb_pmd_types pmd_type);
+
+/** Device remove function */
+int
+cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev);
+
+/** Configure queue pair PMD type specific data */
+typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);
+
+/** Configure session PMD type specific data */
+typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mb_mgr,
+		void *session_private,
+		const struct rte_crypto_sym_xform *xform);
+
+/** Configure internals PMD type specific data */
+typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
+
+/** Per PMD type operation and data */
+struct ipsec_mb_pmd_data {
+	uint8_t is_configured;
+	dequeue_pkt_burst_t dequeue_burst;
+	ipsec_mb_dev_configure_t dev_config;
+	ipsec_mb_queue_pair_configure_t queue_pair_configure;
+	ipsec_mb_session_configure_t session_configure;
+	const struct rte_cryptodev_capabilities *caps;
+	struct rte_cryptodev_ops *ops;
+	struct rte_security_ops *security_ops;
+	uint64_t feature_flags;
+	uint32_t session_priv_size;
+	uint32_t qp_priv_size;
+	uint32_t internals_priv_size;
+};
+
+/** Global PMD type specific data */
+extern struct ipsec_mb_pmd_data ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
+
+int
+ipsec_mb_pmd_config(struct rte_cryptodev *dev,
+	struct rte_cryptodev_config *config);
+
+int
+ipsec_mb_pmd_start(struct rte_cryptodev *dev);
+
+void
+ipsec_mb_pmd_stop(struct rte_cryptodev *dev);
+
+int
+ipsec_mb_pmd_close(struct rte_cryptodev *dev);
+
+void
+ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats);
+
+void
+ipsec_mb_pmd_stats_reset(struct rte_cryptodev *dev);
+
+void
+ipsec_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info);
+
+int
+ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);
+
+int
+ipsec_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+					   struct ipsec_mb_qp *qp);
+
+int
+ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+				 const struct rte_cryptodev_qp_conf *qp_conf,
+				 int socket_id);
+
+/** Return the size of the PMD-specific session structure */
+unsigned
+ipsec_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev);
+
+/** Configure a PMD-specific session from a crypto xform chain */
+int ipsec_mb_pmd_sym_session_configure(
+	struct rte_cryptodev *dev,
+	struct rte_crypto_sym_xform *xform,
+	struct rte_cryptodev_sym_session *sess,
+	struct rte_mempool *mempool);
+
+/** Clear the memory of session so it does not leave key material behind */
+void
+ipsec_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+				struct rte_cryptodev_sym_session *sess);
+
+/** Get session from op. If sessionless create a session */
+static __rte_always_inline void *
+ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
+{
+	void *sess = NULL;
+	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t sess_type = op->sess_type;
+	void *_sess;
+	void *_sess_private_data = NULL;
+	struct ipsec_mb_pmd_data *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
+
+	switch (sess_type) {
+	case RTE_CRYPTO_OP_WITH_SESSION:
+		if (likely(sym_op->session != NULL))
+			sess = get_sym_session_private_data(sym_op->session,
+							    driver_id);
+		break;
+	case RTE_CRYPTO_OP_SESSIONLESS:
+		if (!qp->sess_mp ||
+		    rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (!qp->sess_mp_priv ||
+		    rte_mempool_get(qp->sess_mp_priv,
+					(void **)&_sess_private_data)) {
+			/* Return the session object before bailing out */
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = _sess_private_data;
+		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
+				sess, sym_op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
+			sess = NULL;
+			break;
+		}
+
+		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(sym_op->session, driver_id,
+					     _sess_private_data);
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+#endif /* _IPSEC_MB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ipsec_mb/version.map b/drivers/crypto/ipsec_mb/version.map
new file mode 100644
index 0000000000..4a76d1d52d
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/version.map
@@ -0,0 +1,3 @@
+DPDK_21 {
+	local: *;
+};
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index ea239f4c56..e40b18b17b 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -6,6 +6,7 @@ if is_windows
 endif
 
 drivers = [
+        'ipsec_mb',
         'aesni_gcm',
         'aesni_mb',
         'armv8',
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread
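
As a usage sketch of the framework introduced above (the foo_* names and
the IPSEC_MB_PMD_TYPE_FOO enum value are illustrative only; this patch
deliberately leaves enum ipsec_mb_pmd_types empty until later patches move
the PMDs across), a per-algorithm PMD fills its ipsec_mb_pmds[] slot and
forwards its vdev probe to the framework core:

	#include <rte_bus_vdev.h>
	#include <rte_cryptodev.h>
	#include "rte_ipsec_mb_pmd_private.h"

	struct foo_session { uint8_t dummy; };

	/* Stubs standing in for a real algorithm implementation. */
	static uint16_t
	foo_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		(void)qp; (void)ops; (void)nb_ops;
		return 0; /* a real PMD drains qp->ingress_queue here */
	}

	static int
	foo_session_configure(IMB_MGR *mb_mgr, void *sess,
			const struct rte_crypto_sym_xform *xform)
	{
		(void)mb_mgr; (void)sess; (void)xform;
		return 0; /* a real PMD expands keys into the session here */
	}

	static const struct rte_cryptodev_capabilities foo_caps[] = {
		RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
	};

	RTE_INIT(ipsec_mb_register_foo)
	{
		struct ipsec_mb_pmd_data *d = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_FOO];

		d->caps = foo_caps;
		d->dequeue_burst = foo_dequeue_burst;
		d->session_configure = foo_session_configure;
		d->session_priv_size = sizeof(struct foo_session);
		d->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
		/* d->ops would point at a rte_cryptodev_ops table wired to
		 * the ipsec_mb_pmd_* callbacks added in this patch.
		 */
	}

	static int
	foo_probe(struct rte_vdev_device *vdev)
	{
		/* the whole probe reduces to a call into the framework */
		return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_FOO);
	}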

* [dpdk-dev] [PATCH v3 02/10] crypto/ipsec_mb: add multiprocess support
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-10-06 14:01           ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
                           ` (7 subsequent siblings)
  9 siblings, 1 reply; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Pablo de Lara, Anatoly Burakov

The ipsec_mb SW PMD now has multi-process support.
When intel-ipsec-mb v1.1 is used, the queue-pair IMB_MGR is stored in
a memzone shared between processes, instead of being allocated
externally by the Intel IPSec MB library.
If v1.0 is used, multi-process is not supported and allocation is done
as before.
The secondary process needs to reconfigure the queue-pair so that the
IMB_MGR function pointers are updated for its own address space.

Intel IPsec MB library version 1.1 is required for this support.
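
In application terms only the queue pair setup call has to be repeated
in the secondary; a minimal sketch, assuming the public rte_cryptodev
API (the device id, descriptor count and omitted session pools are
illustrative only):

	#include <rte_cryptodev.h>
	#include <rte_lcore.h>

	static int
	attach_qp(uint8_t dev_id, uint16_t qp_id)
	{
		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = 2048,
			/* session mempools omitted for brevity */
		};

		/* In the primary this reserves the per-qp IMB_MGR memzone;
		 * in a secondary it looks the memzone up and refreshes the
		 * IMB_MGR function pointers for the local address space.
		 */
		return rte_cryptodev_queue_pair_setup(dev_id, qp_id,
				&qp_conf, rte_socket_id());
	}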

Signed-off-by: Ciara Power <ciara.power@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst        |   7 ++
 .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 110 +++++++++++++++---
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   5 +
 3 files changed, 106 insertions(+), 16 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 43d367bcad..3c9d7e19cb 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -62,6 +62,13 @@ New Features
   * Added bus-level parsing of the devargs syntax.
   * Kept compatibility with the legacy syntax as parsing fallback.
 
+* **Added multi-process support for IPsec-mb PMD.**
+
+  Added multi-process support to the IPsec-mb framework, which extends
+  to all PMDs that are moved onto this shared framework.
+  This feature relies on an intel-ipsec-mb API introduced in v1.1,
+  which is therefore the minimum version required for multi-process
+  support.
+
 * **Updated Marvell cnxk crypto PMD.**
 
   * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
index 1146297216..c7bcfd3dce 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
@@ -9,6 +9,8 @@
 
 #include "rte_ipsec_mb_pmd_private.h"
 
+#define IMB_MP_REQ_VER_STR "1.1.0"
+
 /** Configure device */
 int
 ipsec_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
@@ -98,10 +100,20 @@ ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
 	struct rte_ring *r = NULL;
 
-	if (qp != NULL) {
+	if (qp != NULL && rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		r = rte_ring_lookup(qp->name);
 		if (r)
 			rte_ring_free(r);
+
+#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+#else
+		if (qp->mb_mgr_mz) {
+			rte_memzone_free(qp->mb_mgr_mz);
+			qp->mb_mgr = NULL;
+		}
+#endif
 		rte_free(qp);
 		dev->data->queue_pairs[qp_id] = NULL;
 	}
@@ -154,6 +166,42 @@ static struct rte_ring
 			       RING_F_SP_ENQ | RING_F_SC_DEQ);
 }
 
+#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
+static IMB_MGR *
+ipsec_mb_pmd_alloc_mb_from_memzone(const struct rte_memzone **mb_mgr_mz,
+		const char *mb_mgr_mz_name)
+{
+	IMB_MGR *mb_mgr;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
+		if (*mb_mgr_mz == NULL) {
+			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
+					imb_get_mb_mgr_size(),
+					rte_socket_id(), 0);
+		}
+		}
+		if (*mb_mgr_mz == NULL) {
+			IPSEC_MB_LOG(ERR, "Error allocating memzone for %s",
+					mb_mgr_mz_name);
+			return NULL;
+		}
+		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
+		init_mb_mgr_auto(mb_mgr, NULL);
+	} else {
+		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
+		if (*mb_mgr_mz == NULL) {
+			IPSEC_MB_LOG(ERR,
+				"Secondary can't find %s mz, did primary create it?",
+				mb_mgr_mz_name);
+			return NULL;
+		}
+		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
+		init_mb_mgr_auto(mb_mgr, NULL);
+	}
+	return mb_mgr;
+}
+#endif
+
 /** Setup a queue pair */
 int
 ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
@@ -167,16 +215,44 @@ ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	uint32_t qp_size;
 	int ret = -1;
 
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		ipsec_mb_pmd_qp_release(dev, qp_id);
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
+		IPSEC_MB_LOG(ERR,
+				"The intel-ipsec-mb version (%s) does not support multi-process, "
+				"the minimum version required for this feature is %s.",
+				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
+		return -EINVAL;
+#endif
+		/* The queue pair must already exist in the primary. */
+		qp = dev->data->queue_pairs[qp_id];
+		if (qp == NULL) {
+			IPSEC_MB_LOG(ERR,
+				"Queue pair %u was not set up by the primary process",
+				qp_id);
+			return -EINVAL;
+		}
+	} else {
+		/* Free memory prior to re-allocation if needed. */
+		if (dev->data->queue_pairs[qp_id] != NULL)
+			ipsec_mb_pmd_qp_release(dev, qp_id);
+
+		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
+		/* Allocate the queue pair data structure. */
+		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
+					RTE_CACHE_LINE_SIZE, socket_id);
+		if (qp == NULL)
+			return -ENOMEM;
+	}
+
+#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
+	qp->mb_mgr = alloc_init_mb_mgr();
+#else
+	char mz_name[IPSEC_MB_MAX_MZ_NAME];
+	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
+			dev->data->dev_id, qp_id);
+	qp->mb_mgr = ipsec_mb_pmd_alloc_mb_from_memzone(&(qp->mb_mgr_mz),
+			mz_name);
+#endif
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
 
-	qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
-				RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return -ENOMEM;
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return 0;
 
 	qp->id = qp_id;
 	dev->data->queue_pairs[qp_id] = qp;
@@ -194,12 +270,6 @@ ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		goto qp_setup_cleanup;
 	}
 
-	qp->mb_mgr = alloc_init_mb_mgr();
-	if (!qp->mb_mgr) {
-		ret = -ENOMEM;
-		goto qp_setup_cleanup;
-	}
-
 	memset(&qp->stats, 0, sizeof(qp->stats));
 
 	if (pmd_data->queue_pair_configure) {
@@ -211,8 +281,15 @@ ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 
 qp_setup_cleanup:
+#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
 	if (qp->mb_mgr)
 		free_mb_mgr(qp->mb_mgr);
+#else
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return ret;
+	if (qp->mb_mgr_mz)
+		rte_memzone_free(qp->mb_mgr_mz);
+#endif
 	if (qp)
 		rte_free(qp);
 	return ret;
@@ -269,6 +346,7 @@ ipsec_mb_pmd_sym_session_configure(
 
 	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
 
+	free_mb_mgr(mb_mgr);
 	return 0;
 }
 
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 754259aa59..35860b1b10 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -18,6 +18,9 @@
 /* Maximum length for digest */
 #define DIGEST_LENGTH_MAX 64
 
+/* Maximum length for memzone name */
+#define IPSEC_MB_MAX_MZ_NAME 32
+
 enum ipsec_mb_vector_mode {
 	IPSEC_MB_NOT_SUPPORTED = 0,
 	IPSEC_MB_SSE,
@@ -95,6 +98,8 @@ struct ipsec_mb_qp {
 	 */
 	IMB_MGR *mb_mgr;
 	/* Multi buffer manager */
+	const struct rte_memzone *mb_mgr_mz;
+	/* Shared memzone for storing mb_mgr */
 	__extension__ uint8_t additional_data[0];
 	/**< Storing PMD specific additional data */
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread
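
A note on the compile-time gating used in the patch above: intel-ipsec-mb
packs its version into a single integer, so the preprocessor checks reduce
to plain comparisons (macro paraphrased from intel-ipsec-mb.h; worth
verifying against the installed header):

	/* #define IMB_VERSION(a, b, c)  (((a) << 16) + ((b) << 8) + (c)) */
	/*
	 * With library v1.0.0, IMB_VERSION_NUM == 0x010000, so
	 *   IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM   (0x010100 > 0x010000)
	 * holds and the legacy alloc_init_mb_mgr() path is compiled in;
	 * from v1.1.0 onwards the shared-memzone path is used instead.
	 */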

* [dpdk-dev] [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 02/10] crypto/ipsec_mb: add multiprocess support Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-10-11 11:09           ` De Lara Guarch, Pablo
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 04/10] drivers/crypto: move aesni-gcm " Ciara Power
                           ` (6 subsequent siblings)
  9 siblings, 1 reply; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Thomas Monjalon, Pablo de Lara, Ray Kinsella

From: Piotr Bronowski <piotrx.bronowski@intel.com>

This patch removes the crypto/aesni_mb folder and gathers all
aesni-mb PMD implementation-specific details into a single file,
pmd_aesni_mb.c, in crypto/ipsec_mb.

Now that intel-ipsec-mb v1.0 is the minimum supported version, old
macros can be replaced with the newer macros supported by this version.
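
A few representative renames, abridged and assumed from the
intel-ipsec-mb v1.0 header (the right-hand names are the ones now used
in pmd_aesni_mb.c):

	/* pre-1.0 name        ->  intel-ipsec-mb >= 1.0 name */
	/* MB_MGR              ->  IMB_MGR                    */
	/* JOB_AES_HMAC        ->  IMB_JOB                    */
	/* JOB_CHAIN_ORDER     ->  IMB_CHAIN_ORDER            */
	/* CBC                 ->  IMB_CIPHER_CBC             */
	/* AES_XCBC            ->  IMB_AUTH_AES_XCBC          */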

Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v3:
  - Updated intel-ipsec-mb library macros.
  - Fixed some formatting.
v2: Updated maintainers file.
---
 MAINTAINERS                                   |   10 +-
 doc/guides/cryptodevs/aesni_mb.rst            |    4 +-
 .../crypto/aesni_mb/aesni_mb_pmd_private.h    |  337 --
 drivers/crypto/aesni_mb/meson.build           |   25 -
 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c    | 2232 ------------
 .../crypto/aesni_mb/rte_aesni_mb_pmd_ops.c    | 1126 -------
 drivers/crypto/aesni_mb/version.map           |    3 -
 drivers/crypto/ipsec_mb/meson.build           |    1 +
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c        | 2977 +++++++++++++++++
 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    |    7 +-
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |  143 +-
 drivers/crypto/meson.build                    |    1 -
 12 files changed, 3131 insertions(+), 3735 deletions(-)
 delete mode 100644 drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
 delete mode 100644 drivers/crypto/aesni_mb/meson.build
 delete mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
 delete mode 100644 drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
 delete mode 100644 drivers/crypto/aesni_mb/version.map
 create mode 100644 drivers/crypto/ipsec_mb/pmd_aesni_mb.c

diff --git a/MAINTAINERS b/MAINTAINERS
index f1aaf7d408..7b00cd8791 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1049,13 +1049,6 @@ F: drivers/crypto/aesni_gcm/
 F: doc/guides/cryptodevs/aesni_gcm.rst
 F: doc/guides/cryptodevs/features/aesni_gcm.ini
 
-Intel AES-NI Multi-Buffer
-M: Declan Doherty <declan.doherty@intel.com>
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: drivers/crypto/aesni_mb/
-F: doc/guides/cryptodevs/aesni_mb.rst
-F: doc/guides/cryptodevs/features/aesni_mb.ini
-
 Intel QuickAssist
 M: John Griffin <john.griffin@intel.com>
 M: Fiona Trahe <fiona.trahe@intel.com>
@@ -1067,7 +1060,10 @@ F: doc/guides/cryptodevs/features/qat.ini
 
 IPsec MB
 M: Fan Zhang <roy.fan.zhang@intel.com>
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: drivers/crypto/ipsec_mb/
+F: doc/guides/cryptodevs/aesni_mb.rst
+F: doc/guides/cryptodevs/features/aesni_mb.ini
 
 KASUMI
 M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index a466d0ab48..3551a0dbd7 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -130,7 +130,9 @@ and the Multi-Buffer library version supported by them:
    18.02           0.48
    18.05 - 19.02   0.49 - 0.52
    19.05 - 19.08   0.52
-   19.11+          0.52 - 1.0*
+   19.11 - 20.08   0.52 - 0.55
+   20.11 - 21.08   0.53 - 1.0*
+   21.11+          1.0*
    ==============  ============================
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
deleted file mode 100644
index 11e7bf5d18..0000000000
--- a/drivers/crypto/aesni_mb/aesni_mb_pmd_private.h
+++ /dev/null
@@ -1,337 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2016 Intel Corporation
- */
-
-#ifndef _AESNI_MB_PMD_PRIVATE_H_
-#define _AESNI_MB_PMD_PRIVATE_H_
-
-#include <intel-ipsec-mb.h>
-
-#if defined(RTE_LIB_SECURITY) && (IMB_VERSION_NUM) >= IMB_VERSION(0, 54, 0)
-#define AESNI_MB_DOCSIS_SEC_ENABLED 1
-#include <rte_security.h>
-#include <rte_security_driver.h>
-#endif
-
-enum aesni_mb_vector_mode {
-	RTE_AESNI_MB_NOT_SUPPORTED = 0,
-	RTE_AESNI_MB_SSE,
-	RTE_AESNI_MB_AVX,
-	RTE_AESNI_MB_AVX2,
-	RTE_AESNI_MB_AVX512
-};
-
-#define CRYPTODEV_NAME_AESNI_MB_PMD	crypto_aesni_mb
-/**< AES-NI Multi buffer PMD device name */
-
-/** AESNI_MB PMD LOGTYPE DRIVER */
-extern int aesni_mb_logtype_driver;
-
-#define AESNI_MB_LOG(level, fmt, ...)  \
-	rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver,  \
-			"%s() line %u: " fmt "\n", __func__, __LINE__,  \
-					## __VA_ARGS__)
-
-
-#define HMAC_IPAD_VALUE			(0x36)
-#define HMAC_OPAD_VALUE			(0x5C)
-
-/* Maximum length for digest */
-#define DIGEST_LENGTH_MAX 64
-static const unsigned auth_blocksize[] = {
-		[NULL_HASH]			= 0,
-		[MD5]				= 64,
-		[SHA1]				= 64,
-		[SHA_224]			= 64,
-		[SHA_256]			= 64,
-		[SHA_384]			= 128,
-		[SHA_512]			= 128,
-		[AES_XCBC]			= 16,
-		[AES_CCM]			= 16,
-		[AES_CMAC]			= 16,
-		[AES_GMAC]			= 16,
-		[PLAIN_SHA1]			= 64,
-		[PLAIN_SHA_224]			= 64,
-		[PLAIN_SHA_256]			= 64,
-		[PLAIN_SHA_384]			= 128,
-		[PLAIN_SHA_512]			= 128,
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-		[IMB_AUTH_ZUC_EIA3_BITLEN]	= 16,
-		[IMB_AUTH_SNOW3G_UIA2_BITLEN]	= 16,
-		[IMB_AUTH_KASUMI_UIA1]		= 16
-#endif
-};
-
-/**
- * Get the blocksize in bytes for a specified authentication algorithm
- *
- * @Note: this function will not return a valid value for a non-valid
- * authentication algorithm
- */
-static inline unsigned
-get_auth_algo_blocksize(JOB_HASH_ALG algo)
-{
-	return auth_blocksize[algo];
-}
-
-static const unsigned auth_truncated_digest_byte_lengths[] = {
-		[MD5]				= 12,
-		[SHA1]				= 12,
-		[SHA_224]			= 14,
-		[SHA_256]			= 16,
-		[SHA_384]			= 24,
-		[SHA_512]			= 32,
-		[AES_XCBC]			= 12,
-		[AES_CMAC]			= 12,
-		[AES_CCM]			= 8,
-		[NULL_HASH]			= 0,
-		[AES_GMAC]			= 12,
-		[PLAIN_SHA1]			= 20,
-		[PLAIN_SHA_224]			= 28,
-		[PLAIN_SHA_256]			= 32,
-		[PLAIN_SHA_384]			= 48,
-		[PLAIN_SHA_512]			= 64,
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-		[IMB_AUTH_ZUC_EIA3_BITLEN]	= 4,
-		[IMB_AUTH_SNOW3G_UIA2_BITLEN]	= 4,
-		[IMB_AUTH_KASUMI_UIA1]		= 4
-#endif
-};
-
-/**
- * Get the IPsec specified truncated length in bytes of the HMAC digest for a
- * specified authentication algorithm
- *
- * @Note: this function will not return a valid value for a non-valid
- * authentication algorithm
- */
-static inline unsigned
-get_truncated_digest_byte_length(JOB_HASH_ALG algo)
-{
-	return auth_truncated_digest_byte_lengths[algo];
-}
-
-static const unsigned auth_digest_byte_lengths[] = {
-		[MD5]				= 16,
-		[SHA1]				= 20,
-		[SHA_224]			= 28,
-		[SHA_256]			= 32,
-		[SHA_384]			= 48,
-		[SHA_512]			= 64,
-		[AES_XCBC]			= 16,
-		[AES_CMAC]			= 16,
-		[AES_CCM]			= 16,
-		[AES_GMAC]			= 16,
-		[NULL_HASH]			= 0,
-		[PLAIN_SHA1]			= 20,
-		[PLAIN_SHA_224]			= 28,
-		[PLAIN_SHA_256]			= 32,
-		[PLAIN_SHA_384]			= 48,
-		[PLAIN_SHA_512]			= 64,
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-		[IMB_AUTH_ZUC_EIA3_BITLEN]	= 4,
-		[IMB_AUTH_SNOW3G_UIA2_BITLEN]	= 4,
-		[IMB_AUTH_KASUMI_UIA1]		= 4
-#endif
-	/**< Vector mode dependent pointer table of the multi-buffer APIs */
-
-};
-
-/**
- * Get the full digest size in bytes for a specified authentication algorithm
- * (if available in the Multi-buffer library)
- *
- * @Note: this function will not return a valid value for a non-valid
- * authentication algorithm
- */
-static inline unsigned
-get_digest_byte_length(JOB_HASH_ALG algo)
-{
-	return auth_digest_byte_lengths[algo];
-}
-
-enum aesni_mb_operation {
-	AESNI_MB_OP_HASH_CIPHER,
-	AESNI_MB_OP_CIPHER_HASH,
-	AESNI_MB_OP_HASH_ONLY,
-	AESNI_MB_OP_CIPHER_ONLY,
-	AESNI_MB_OP_AEAD_HASH_CIPHER,
-	AESNI_MB_OP_AEAD_CIPHER_HASH,
-	AESNI_MB_OP_NOT_SUPPORTED
-};
-
-/** private data structure for each virtual AESNI device */
-struct aesni_mb_private {
-	enum aesni_mb_vector_mode vector_mode;
-	/**< CPU vector instruction set mode */
-	unsigned max_nb_queue_pairs;
-	/**< Max number of queue pairs supported by device */
-	MB_MGR *mb_mgr;
-	/**< Multi-buffer instance */
-};
-
-/** AESNI Multi buffer queue pair */
-struct aesni_mb_qp {
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	/**< Unique Queue Pair Name */
-	MB_MGR *mb_mgr;
-	/**< Multi-buffer instance */
-	struct rte_ring *ingress_queue;
-	/**< Ring for placing operations ready for processing */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session Private Data Mempool */
-	struct rte_cryptodev_stats stats;
-	/**< Queue pair statistics */
-	uint8_t digest_idx;
-	/**< Index of the next slot to be used in temp_digests,
-	 * to store the digest for a given operation
-	 */
-	uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
-	/**< Buffers used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-} __rte_cache_aligned;
-
-/** AES-NI multi-buffer private session structure */
-struct aesni_mb_session {
-	JOB_CHAIN_ORDER chain_order;
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} auth_iv;
-	/**< IV parameters */
-
-	/** Cipher Parameters */const struct aesni_mb_op_fns *op_fns;
-	/**< Vector mode dependent pointer table of the multi-buffer APIs */
-
-	struct {
-		/** Cipher direction - encrypt / decrypt */
-		JOB_CIPHER_DIRECTION direction;
-		/** Cipher mode - CBC / Counter */
-		JOB_CIPHER_MODE mode;
-
-		uint64_t key_length_in_bytes;
-
-		union {
-			struct {
-				uint32_t encode[60] __rte_aligned(16);
-				/**< encode key */
-				uint32_t decode[60] __rte_aligned(16);
-				/**< decode key */
-			} expanded_aes_keys;
-			/**< Expanded AES keys - Allocating space to
-			 * contain the maximum expanded key size which
-			 * is 240 bytes for 256 bit AES, calculate by:
-			 * ((key size (bytes)) *
-			 * ((number of rounds) + 1))
-			 */
-			struct {
-				const void *ks_ptr[3];
-				uint64_t key[3][16];
-			} exp_3des_keys;
-			/**< Expanded 3DES keys */
-
-			struct gcm_key_data gcm_key;
-			/**< Expanded GCM key */
-			uint8_t zuc_cipher_key[16];
-			/**< ZUC cipher key */
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-			snow3g_key_schedule_t pKeySched_snow3g_cipher;
-			/**< SNOW3G scheduled cipher key */
-			kasumi_key_sched_t pKeySched_kasumi_cipher;
-			/**< KASUMI scheduled cipher key */
-#endif
-		};
-	} cipher;
-
-	/** Authentication Parameters */
-	struct {
-		JOB_HASH_ALG algo; /**< Authentication Algorithm */
-		enum rte_crypto_auth_operation operation;
-		/**< auth operation generate or verify */
-		union {
-			struct {
-				uint8_t inner[128] __rte_aligned(16);
-				/**< inner pad */
-				uint8_t outer[128] __rte_aligned(16);
-				/**< outer pad */
-			} pads;
-			/**< HMAC Authentication pads -
-			 * allocating space for the maximum pad
-			 * size supported which is 128 bytes for
-			 * SHA512
-			 */
-
-			struct {
-			    uint32_t k1_expanded[44] __rte_aligned(16);
-			    /**< k1 (expanded key). */
-			    uint8_t k2[16] __rte_aligned(16);
-			    /**< k2. */
-			    uint8_t k3[16] __rte_aligned(16);
-			    /**< k3. */
-			} xcbc;
-
-			struct {
-				uint32_t expkey[60] __rte_aligned(16);
-						    /**< k1 (expanded key). */
-				uint32_t skey1[4] __rte_aligned(16);
-						    /**< k2. */
-				uint32_t skey2[4] __rte_aligned(16);
-						    /**< k3. */
-			} cmac;
-			/**< Expanded XCBC authentication keys */
-			uint8_t zuc_auth_key[16];
-			/**< ZUC authentication key */
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-			snow3g_key_schedule_t pKeySched_snow3g_auth;
-			/**< SNOW3G scheduled authentication key */
-			kasumi_key_sched_t pKeySched_kasumi_auth;
-			/**< KASUMI scheduled authentication key */
-#endif
-		};
-	/** Generated digest size by the Multi-buffer library */
-	uint16_t gen_digest_len;
-	/** Requested digest size from Cryptodev */
-	uint16_t req_digest_len;
-
-	} auth;
-	struct {
-		/** AAD data length */
-		uint16_t aad_len;
-	} aead;
-} __rte_cache_aligned;
-
-extern int
-aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
-		struct aesni_mb_session *sess,
-		const struct rte_crypto_sym_xform *xform);
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-extern int
-aesni_mb_set_docsis_sec_session_parameters(
-		__rte_unused struct rte_cryptodev *dev,
-		struct rte_security_session_conf *conf,
-		void *sess);
-#endif
-
-/** device specific operations function pointer structures */
-extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-extern struct rte_security_ops *rte_aesni_mb_pmd_sec_ops;
-#endif
-
-extern uint32_t
-aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
-	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
-	struct rte_crypto_sym_vec *vec);
-
-#endif /* _AESNI_MB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
deleted file mode 100644
index ed6b9f53e4..0000000000
--- a/drivers/crypto/aesni_mb/meson.build
+++ /dev/null
@@ -1,25 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Intel Corporation
-
-IMB_required_ver = '0.52.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
-    build = false
-    reason = 'missing dependency, "libIPSec_MB"'
-else
-    ext_deps += lib
-
-    # version comes with quotes, so we split based on " and take the middle
-    imb_ver = cc.get_define('IMB_VERSION_STR',
-        prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
-        reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
-                IMB_required_ver, imb_ver)
-        build = false
-    endif
-
-endif
-
-sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
-deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
deleted file mode 100644
index 60963a8208..0000000000
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ /dev/null
@@ -1,2232 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
- */
-
-#include <intel-ipsec-mb.h>
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
-#include <rte_per_lcore.h>
-#include <rte_ether.h>
-
-#include "aesni_mb_pmd_private.h"
-
-#define AES_CCM_DIGEST_MIN_LEN 4
-#define AES_CCM_DIGEST_MAX_LEN 16
-#define HMAC_MAX_BLOCK_SIZE 128
-static uint8_t cryptodev_driver_id;
-
-/*
- * Needed to support CPU-CRYPTO API (rte_cryptodev_sym_cpu_crypto_process),
- * as we still use JOB based API even for synchronous processing.
- */
-static RTE_DEFINE_PER_LCORE(MB_MGR *, sync_mb_mgr);
-
-typedef void (*hash_one_block_t)(const void *data, void *digest);
-typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-
-/**
- * Calculate the authentication pre-computes
- *
- * @param one_block_hash	Function pointer to calculate digest on ipad/opad
- * @param ipad			Inner pad output byte array
- * @param opad			Outer pad output byte array
- * @param hkey			Authentication key
- * @param hkey_len		Authentication key length
- * @param blocksize		Block size of selected hash algo
- */
-static void
-calculate_auth_precomputes(hash_one_block_t one_block_hash,
-		uint8_t *ipad, uint8_t *opad,
-		const uint8_t *hkey, uint16_t hkey_len,
-		uint16_t blocksize)
-{
-	unsigned i, length;
-
-	uint8_t ipad_buf[blocksize] __rte_aligned(16);
-	uint8_t opad_buf[blocksize] __rte_aligned(16);
-
-	/* Setup inner and outer pads */
-	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
-	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
-
-	/* XOR hash key with inner and outer pads */
-	length = hkey_len > blocksize ? blocksize : hkey_len;
-
-	for (i = 0; i < length; i++) {
-		ipad_buf[i] ^= hkey[i];
-		opad_buf[i] ^= hkey[i];
-	}
-
-	/* Compute partial hashes */
-	(*one_block_hash)(ipad_buf, ipad);
-	(*one_block_hash)(opad_buf, opad);
-
-	/* Clean up stack */
-	memset(ipad_buf, 0, blocksize);
-	memset(opad_buf, 0, blocksize);
-}
-
-/** Get xform chain order */
-static enum aesni_mb_operation
-aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
-{
-	if (xform == NULL)
-		return AESNI_MB_OP_NOT_SUPPORTED;
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		if (xform->next == NULL)
-			return AESNI_MB_OP_CIPHER_ONLY;
-		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-			return AESNI_MB_OP_CIPHER_HASH;
-	}
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		if (xform->next == NULL)
-			return AESNI_MB_OP_HASH_ONLY;
-		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return AESNI_MB_OP_HASH_CIPHER;
-	}
-#if IMB_VERSION_NUM > IMB_VERSION(0, 52, 0)
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
-			/*
-			 * CCM requires to hash first and cipher later
-			 * when encrypting
-			 */
-			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
-				return AESNI_MB_OP_AEAD_HASH_CIPHER;
-			else
-				return AESNI_MB_OP_AEAD_CIPHER_HASH;
-		} else {
-			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
-				return AESNI_MB_OP_AEAD_CIPHER_HASH;
-			else
-				return AESNI_MB_OP_AEAD_HASH_CIPHER;
-		}
-	}
-#else
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
-				xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
-			if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
-				return AESNI_MB_OP_AEAD_CIPHER_HASH;
-			else
-				return AESNI_MB_OP_AEAD_HASH_CIPHER;
-		}
-	}
-#endif
-
-	return AESNI_MB_OP_NOT_SUPPORTED;
-}
-
-static inline int
-is_aead_algo(JOB_HASH_ALG hash_alg, JOB_CIPHER_MODE cipher_mode)
-{
-#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
-	return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 || hash_alg == AES_CCM ||
-		(hash_alg == AES_GMAC && cipher_mode == GCM));
-#else
-	return ((hash_alg == AES_GMAC && cipher_mode == GCM) ||
-		hash_alg == AES_CCM);
-#endif
-}
-
-/** Set session authentication parameters */
-static int
-aesni_mb_set_session_auth_parameters(const MB_MGR *mb_mgr,
-		struct aesni_mb_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	hash_one_block_t hash_oneblock_fn = NULL;
-	unsigned int key_larger_block_size = 0;
-	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
-	uint32_t auth_precompute = 1;
-
-	if (xform == NULL) {
-		sess->auth.algo = NULL_HASH;
-		return 0;
-	}
-
-	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
-		AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
-		return -1;
-	}
-
-	/* Set IV parameters */
-	sess->auth_iv.offset = xform->auth.iv.offset;
-	sess->auth_iv.length = xform->auth.iv.length;
-
-	/* Set the request digest size */
-	sess->auth.req_digest_len = xform->auth.digest_length;
-
-	/* Select auth generate/verify */
-	sess->auth.operation = xform->auth.op;
-
-	/* Set Authentication Parameters */
-	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
-		sess->auth.algo = AES_XCBC;
-
-		uint16_t xcbc_mac_digest_len =
-			get_truncated_digest_byte_length(AES_XCBC);
-		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
-				sess->auth.xcbc.k1_expanded,
-				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
-		return 0;
-	}
-
-	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
-		uint32_t dust[4*15];
-
-		sess->auth.algo = AES_CMAC;
-
-		uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
-
-		if (sess->auth.req_digest_len > cmac_digest_len) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		/*
-		 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
-		 * in version 0.50 and sizes of 12 and 16 bytes,
-		 * in version 0.49.
-		 * If size requested is different, generate the full digest
-		 * (16 bytes) in a temporary location and then memcpy
-		 * the requested number of bytes.
-		 */
-		if (sess->auth.req_digest_len < 4)
-			sess->auth.gen_digest_len = cmac_digest_len;
-		else
-			sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
-				sess->auth.cmac.expkey, dust);
-		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
-				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
-		return 0;
-	}
-
-	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
-			sess->cipher.direction = ENCRYPT;
-			sess->chain_order = CIPHER_HASH;
-		} else
-			sess->cipher.direction = DECRYPT;
-
-		sess->auth.algo = AES_GMAC;
-		if (sess->auth.req_digest_len > get_digest_byte_length(AES_GMAC)) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-		sess->iv.length = xform->auth.iv.length;
-		sess->iv.offset = xform->auth.iv.offset;
-
-		switch (xform->auth.key.length) {
-		case AES_128_BYTES:
-			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
-				&sess->cipher.gcm_key);
-			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			break;
-		case AES_192_BYTES:
-			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
-				&sess->cipher.gcm_key);
-			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			break;
-		case AES_256_BYTES:
-			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
-				&sess->cipher.gcm_key);
-			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			break;
-		default:
-			RTE_LOG(ERR, PMD, "failed to parse test type\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
-		sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
-		uint16_t zuc_eia3_digest_len =
-			get_truncated_digest_byte_length(IMB_AUTH_ZUC_EIA3_BITLEN);
-		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
-		return 0;
-	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
-		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
-		uint16_t snow3g_uia2_digest_len =
-			get_truncated_digest_byte_length(IMB_AUTH_SNOW3G_UIA2_BITLEN);
-		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
-					&sess->auth.pKeySched_snow3g_auth);
-		return 0;
-	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
-		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
-		uint16_t kasumi_f9_digest_len =
-			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
-		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
-					&sess->auth.pKeySched_kasumi_auth);
-		return 0;
-	}
-#endif
-
-	switch (xform->auth.algo) {
-	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		sess->auth.algo = MD5;
-		hash_oneblock_fn = mb_mgr->md5_one_block;
-		break;
-	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-		sess->auth.algo = SHA1;
-		hash_oneblock_fn = mb_mgr->sha1_one_block;
-		if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
-			IMB_SHA1(mb_mgr,
-				xform->auth.key.data,
-				xform->auth.key.length,
-				hashed_key);
-			key_larger_block_size = 1;
-		}
-		break;
-	case RTE_CRYPTO_AUTH_SHA1:
-		sess->auth.algo = PLAIN_SHA1;
-		auth_precompute = 0;
-		break;
-	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-		sess->auth.algo = SHA_224;
-		hash_oneblock_fn = mb_mgr->sha224_one_block;
-		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
-			IMB_SHA224(mb_mgr,
-				xform->auth.key.data,
-				xform->auth.key.length,
-				hashed_key);
-			key_larger_block_size = 1;
-		}
-		break;
-	case RTE_CRYPTO_AUTH_SHA224:
-		sess->auth.algo = PLAIN_SHA_224;
-		auth_precompute = 0;
-		break;
-	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		sess->auth.algo = SHA_256;
-		hash_oneblock_fn = mb_mgr->sha256_one_block;
-		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
-			IMB_SHA256(mb_mgr,
-				xform->auth.key.data,
-				xform->auth.key.length,
-				hashed_key);
-			key_larger_block_size = 1;
-		}
-		break;
-	case RTE_CRYPTO_AUTH_SHA256:
-		sess->auth.algo = PLAIN_SHA_256;
-		auth_precompute = 0;
-		break;
-	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		sess->auth.algo = SHA_384;
-		hash_oneblock_fn = mb_mgr->sha384_one_block;
-		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
-			IMB_SHA384(mb_mgr,
-				xform->auth.key.data,
-				xform->auth.key.length,
-				hashed_key);
-			key_larger_block_size = 1;
-		}
-		break;
-	case RTE_CRYPTO_AUTH_SHA384:
-		sess->auth.algo = PLAIN_SHA_384;
-		auth_precompute = 0;
-		break;
-	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-		sess->auth.algo = SHA_512;
-		hash_oneblock_fn = mb_mgr->sha512_one_block;
-		if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
-			IMB_SHA512(mb_mgr,
-				xform->auth.key.data,
-				xform->auth.key.length,
-				hashed_key);
-			key_larger_block_size = 1;
-		}
-		break;
-	case RTE_CRYPTO_AUTH_SHA512:
-		sess->auth.algo = PLAIN_SHA_512;
-		auth_precompute = 0;
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
-		return -ENOTSUP;
-	}
-	uint16_t trunc_digest_size =
-			get_truncated_digest_byte_length(sess->auth.algo);
-	uint16_t full_digest_size =
-			get_digest_byte_length(sess->auth.algo);
-
-	if (sess->auth.req_digest_len > full_digest_size ||
-			sess->auth.req_digest_len == 0) {
-		AESNI_MB_LOG(ERR, "Invalid digest size\n");
-		return -EINVAL;
-	}
-
-	if (sess->auth.req_digest_len != trunc_digest_size &&
-			sess->auth.req_digest_len != full_digest_size)
-		sess->auth.gen_digest_len = full_digest_size;
-	else
-		sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-	/* Plain SHA does not require precompute key */
-	if (auth_precompute == 0)
-		return 0;
-
-	/* Calculate Authentication precomputes */
-	if (key_larger_block_size) {
-		calculate_auth_precomputes(hash_oneblock_fn,
-			sess->auth.pads.inner, sess->auth.pads.outer,
-			hashed_key,
-			xform->auth.key.length,
-			get_auth_algo_blocksize(sess->auth.algo));
-	} else {
-		calculate_auth_precomputes(hash_oneblock_fn,
-			sess->auth.pads.inner, sess->auth.pads.outer,
-			xform->auth.key.data,
-			xform->auth.key.length,
-			get_auth_algo_blocksize(sess->auth.algo));
-	}
-
-	return 0;
-}
-
-/** Set session cipher parameters */
-static int
-aesni_mb_set_session_cipher_parameters(const MB_MGR *mb_mgr,
-		struct aesni_mb_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	uint8_t is_aes = 0;
-	uint8_t is_3DES = 0;
-	uint8_t is_docsis = 0;
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	uint8_t is_zuc = 0;
-	uint8_t is_snow3g = 0;
-	uint8_t is_kasumi = 0;
-#endif
-
-	if (xform == NULL) {
-		sess->cipher.mode = NULL_CIPHER;
-		return 0;
-	}
-
-	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
-		return -EINVAL;
-	}
-
-	/* Select cipher direction */
-	switch (xform->cipher.op) {
-	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
-		sess->cipher.direction = ENCRYPT;
-		break;
-	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
-		sess->cipher.direction = DECRYPT;
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
-		return -EINVAL;
-	}
-
-	/* Select cipher mode */
-	switch (xform->cipher.algo) {
-	case RTE_CRYPTO_CIPHER_AES_CBC:
-		sess->cipher.mode = CBC;
-		is_aes = 1;
-		break;
-	case RTE_CRYPTO_CIPHER_AES_CTR:
-		sess->cipher.mode = CNTR;
-		is_aes = 1;
-		break;
-	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
-		sess->cipher.mode = DOCSIS_SEC_BPI;
-		is_docsis = 1;
-		break;
-	case RTE_CRYPTO_CIPHER_DES_CBC:
-		sess->cipher.mode = DES;
-		break;
-	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
-		sess->cipher.mode = DOCSIS_DES;
-		break;
-	case RTE_CRYPTO_CIPHER_3DES_CBC:
-		sess->cipher.mode = DES3;
-		is_3DES = 1;
-		break;
-#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
-	case RTE_CRYPTO_CIPHER_AES_ECB:
-		sess->cipher.mode = ECB;
-		is_aes = 1;
-		break;
-#endif
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
-		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
-		is_zuc = 1;
-		break;
-	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
-		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
-		is_snow3g = 1;
-		break;
-	case RTE_CRYPTO_CIPHER_KASUMI_F8:
-		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
-		is_kasumi = 1;
-		break;
-#endif
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
-		return -ENOTSUP;
-	}
-
-	/* Set IV parameters */
-	sess->iv.offset = xform->cipher.iv.offset;
-	sess->iv.length = xform->cipher.iv.length;
-
-	/* Check key length and choose key expansion function for AES */
-	if (is_aes) {
-		switch (xform->cipher.key.length) {
-		case AES_128_BYTES:
-			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-		case AES_192_BYTES:
-			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-		case AES_256_BYTES:
-			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-		default:
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-	} else if (is_docsis) {
-		switch (xform->cipher.key.length) {
-		case AES_128_BYTES:
-			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-		case AES_256_BYTES:
-			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-#endif
-		default:
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-	} else if (is_3DES) {
-		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
-				sess->cipher.exp_3des_keys.key[1],
-				sess->cipher.exp_3des_keys.key[2]};
-
-		switch (xform->cipher.key.length) {
-		case  24:
-			IMB_DES_KEYSCHED(mb_mgr, keys[0],
-					xform->cipher.key.data);
-			IMB_DES_KEYSCHED(mb_mgr, keys[1],
-					xform->cipher.key.data + 8);
-			IMB_DES_KEYSCHED(mb_mgr, keys[2],
-					xform->cipher.key.data + 16);
-
-			/* Initialize keys - 24 bytes: [K1-K2-K3] */
-			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
-			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
-			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
-			break;
-		case 16:
-			IMB_DES_KEYSCHED(mb_mgr, keys[0],
-					xform->cipher.key.data);
-			IMB_DES_KEYSCHED(mb_mgr, keys[1],
-					xform->cipher.key.data + 8);
-			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
-			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
-			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
-			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
-			break;
-		case 8:
-			IMB_DES_KEYSCHED(mb_mgr, keys[0],
-					xform->cipher.key.data);
-
-			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
-			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
-			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
-			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
-			break;
-		default:
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-
-		sess->cipher.key_length_in_bytes = 24;
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	} else if (is_zuc) {
-		if (xform->cipher.key.length != 16) {
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-		sess->cipher.key_length_in_bytes = 16;
-		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
-			16);
-	} else if (is_snow3g) {
-		if (xform->cipher.key.length != 16) {
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-		sess->cipher.key_length_in_bytes = 16;
-		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
-					&sess->cipher.pKeySched_snow3g_cipher);
-	} else if (is_kasumi) {
-		if (xform->cipher.key.length != 16) {
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-		sess->cipher.key_length_in_bytes = 16;
-		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
-					&sess->cipher.pKeySched_kasumi_cipher);
-#endif
-	} else {
-		if (xform->cipher.key.length != 8) {
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-		sess->cipher.key_length_in_bytes = 8;
-
-		IMB_DES_KEYSCHED(mb_mgr,
-			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
-				xform->cipher.key.data);
-		IMB_DES_KEYSCHED(mb_mgr,
-			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
-				xform->cipher.key.data);
-	}
-
-	return 0;
-}
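-
-/*
- * Example (illustrative): a caller-side AES-128-CBC transform that the
- * parser above accepts. key_data and IV_OFFSET stand in for
- * application-chosen values.
- *
- *	struct rte_crypto_sym_xform cipher_xform = {
- *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- *		.next = NULL,
- *		.cipher = {
- *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
- *			.key = { .data = key_data, .length = 16 },
- *			.iv = { .offset = IV_OFFSET, .length = 16 },
- *		},
- *	};
- */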
-
-static int
-aesni_mb_set_session_aead_parameters(const MB_MGR *mb_mgr,
-		struct aesni_mb_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	switch (xform->aead.op) {
-	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
-		sess->cipher.direction = ENCRYPT;
-		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
-		break;
-	case RTE_CRYPTO_AEAD_OP_DECRYPT:
-		sess->cipher.direction = DECRYPT;
-		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
-		return -EINVAL;
-	}
-
-	/* Set IV parameters */
-	sess->iv.offset = xform->aead.iv.offset;
-	sess->iv.length = xform->aead.iv.length;
-
-	/* Set digest sizes */
-	sess->auth.req_digest_len = xform->aead.digest_length;
-	sess->auth.gen_digest_len = sess->auth.req_digest_len;
-
-	switch (xform->aead.algo) {
-	case RTE_CRYPTO_AEAD_AES_CCM:
-		sess->cipher.mode = CCM;
-		sess->auth.algo = AES_CCM;
-
-		/* Check key length and choose key expansion function for AES */
-		switch (xform->aead.key.length) {
-		case AES_128_BYTES:
-			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-		case AES_256_BYTES:
-			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
-					sess->cipher.expanded_aes_keys.encode,
-					sess->cipher.expanded_aes_keys.decode);
-			break;
-		default:
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-
-		/* CCM digests must be between 4 and 16 and an even number */
-		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
-				sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
-				(sess->auth.req_digest_len & 1) == 1) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		break;
-
-	case RTE_CRYPTO_AEAD_AES_GCM:
-		sess->cipher.mode = GCM;
-		sess->auth.algo = AES_GMAC;
-
-		switch (xform->aead.key.length) {
-		case AES_128_BYTES:
-			sess->cipher.key_length_in_bytes = AES_128_BYTES;
-			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
-				&sess->cipher.gcm_key);
-			break;
-		case AES_192_BYTES:
-			sess->cipher.key_length_in_bytes = AES_192_BYTES;
-			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
-				&sess->cipher.gcm_key);
-			break;
-		case AES_256_BYTES:
-			sess->cipher.key_length_in_bytes = AES_256_BYTES;
-			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
-				&sess->cipher.gcm_key);
-			break;
-		default:
-			AESNI_MB_LOG(ERR, "Invalid cipher key length");
-			return -EINVAL;
-		}
-
-		/* GCM digest size must be between 1 and 16 */
-		if (sess->auth.req_digest_len == 0 ||
-				sess->auth.req_digest_len > 16) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		break;
-
-#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
-	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
-		sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
-		sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;
-
-		if (xform->aead.key.length != 32) {
-			AESNI_MB_LOG(ERR, "Invalid key length");
-			return -EINVAL;
-		}
-		sess->cipher.key_length_in_bytes = 32;
-		memcpy(sess->cipher.expanded_aes_keys.encode,
-			xform->aead.key.data, 32);
-		if (sess->auth.req_digest_len != 16) {
-			AESNI_MB_LOG(ERR, "Invalid digest size\n");
-			return -EINVAL;
-		}
-		break;
-#endif
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
-		return -ENOTSUP;
-	}
-
-	return 0;
-}
-
-/** Parse crypto xform chain and set private session parameters */
-int
-aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
-		struct aesni_mb_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	const struct rte_crypto_sym_xform *aead_xform = NULL;
-	int ret;
-
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	switch (aesni_mb_get_chain_order(xform)) {
-	case AESNI_MB_OP_HASH_CIPHER:
-		sess->chain_order = HASH_CIPHER;
-		auth_xform = xform;
-		cipher_xform = xform->next;
-		break;
-	case AESNI_MB_OP_CIPHER_HASH:
-		sess->chain_order = CIPHER_HASH;
-		auth_xform = xform->next;
-		cipher_xform = xform;
-		break;
-	case AESNI_MB_OP_HASH_ONLY:
-		sess->chain_order = HASH_CIPHER;
-		auth_xform = xform;
-		cipher_xform = NULL;
-		break;
-	case AESNI_MB_OP_CIPHER_ONLY:
-		/*
-		 * The multi-buffer library operates in only two modes,
-		 * CIPHER_HASH and HASH_CIPHER. When doing cipher only, the
-		 * chain order depends on the cipher operation: encryption is
-		 * always the first operation and decryption the last one.
-		 */
-		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
-			sess->chain_order = CIPHER_HASH;
-		else
-			sess->chain_order = HASH_CIPHER;
-		auth_xform = NULL;
-		cipher_xform = xform;
-		break;
-	case AESNI_MB_OP_AEAD_CIPHER_HASH:
-		sess->chain_order = CIPHER_HASH;
-		sess->aead.aad_len = xform->aead.aad_length;
-		aead_xform = xform;
-		break;
-	case AESNI_MB_OP_AEAD_HASH_CIPHER:
-		sess->chain_order = HASH_CIPHER;
-		sess->aead.aad_len = xform->aead.aad_length;
-		aead_xform = xform;
-		break;
-	case AESNI_MB_OP_NOT_SUPPORTED:
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
-		return -ENOTSUP;
-	}
-
-	/* Default IV length = 0 */
-	sess->iv.length = 0;
-	sess->auth_iv.length = 0;
-
-	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
-	if (ret != 0) {
-		AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
-		return ret;
-	}
-
-	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
-			cipher_xform);
-	if (ret != 0) {
-		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
-		return ret;
-	}
-
-	if (aead_xform) {
-		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
-				aead_xform);
-		if (ret != 0) {
-			AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-/** Check DOCSIS security session configuration is valid */
-static int
-check_docsis_sec_session(struct rte_security_session_conf *conf)
-{
-	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
-	struct rte_security_docsis_xform *docsis = &conf->docsis;
-
-	/* Downlink: CRC generate -> Cipher encrypt */
-	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
-
-		if (crypto_sym != NULL &&
-		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
-		    crypto_sym->cipher.algo ==
-					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
-		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
-		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
-		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
-		    crypto_sym->next == NULL) {
-			return 0;
-		}
-	/* Uplink: Cipher decrypt -> CRC verify */
-	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
-
-		if (crypto_sym != NULL &&
-		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
-		    crypto_sym->cipher.algo ==
-					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
-		    (crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES ||
-		     crypto_sym->cipher.key.length == IMB_KEY_AES_256_BYTES) &&
-		    crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
-		    crypto_sym->next == NULL) {
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-/** Set DOCSIS security session auth (CRC) parameters */
-static int
-aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
-		struct rte_security_docsis_xform *xform)
-{
-	if (xform == NULL) {
-		AESNI_MB_LOG(ERR, "Invalid DOCSIS xform");
-		return -EINVAL;
-	}
-
-	/* Select CRC generate/verify */
-	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
-		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
-		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
-	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
-		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
-		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
-	} else {
-		AESNI_MB_LOG(ERR, "Unsupported DOCSIS direction");
-		return -ENOTSUP;
-	}
-
-	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
-	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
-
-	return 0;
-}
-
-/**
- * Parse DOCSIS security session configuration and set private session
- * parameters
- */
-int
-aesni_mb_set_docsis_sec_session_parameters(
-		__rte_unused struct rte_cryptodev *dev,
-		struct rte_security_session_conf *conf,
-		void *sess)
-{
-	struct rte_security_docsis_xform *docsis_xform;
-	struct rte_crypto_sym_xform *cipher_xform;
-	struct aesni_mb_session *aesni_sess = sess;
-	struct aesni_mb_private *internals = dev->data->dev_private;
-	int ret;
-
-	ret = check_docsis_sec_session(conf);
-	if (ret) {
-		AESNI_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
-		return ret;
-	}
-
-	switch (conf->docsis.direction) {
-	case RTE_SECURITY_DOCSIS_UPLINK:
-		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
-		docsis_xform = &conf->docsis;
-		cipher_xform = conf->crypto_xform;
-		break;
-	case RTE_SECURITY_DOCSIS_DOWNLINK:
-		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
-		cipher_xform = conf->crypto_xform;
-		docsis_xform = &conf->docsis;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Default IV length = 0 */
-	aesni_sess->iv.length = 0;
-
-	ret = aesni_mb_set_docsis_sec_session_auth_parameters(aesni_sess,
-			docsis_xform);
-	if (ret != 0) {
-		AESNI_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
-		return -EINVAL;
-	}
-
-	ret = aesni_mb_set_session_cipher_parameters(internals->mb_mgr,
-			aesni_sess, cipher_xform);
-
-	if (ret != 0) {
-		AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-#endif
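-
-/*
- * Example (illustrative): a downlink DOCSIS session configuration that
- * passes check_docsis_sec_session() above. key_data and IV_OFFSET stand
- * in for application-chosen values.
- *
- *	struct rte_crypto_sym_xform cipher = {
- *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- *		.cipher = {
- *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- *			.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
- *			.key = { .data = key_data, .length = 16 },
- *			.iv = { .offset = IV_OFFSET, .length = 16 },
- *		},
- *	};
- *	struct rte_security_session_conf conf = {
- *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
- *		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
- *		.docsis = { .direction = RTE_SECURITY_DOCSIS_DOWNLINK },
- *		.crypto_xform = &cipher,
- *	};
- */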
-
-/**
- * Burst enqueue: place crypto operations on the ingress queue for processing.
- *
- * @param __qp         Queue Pair to process
- * @param ops          Crypto operations for processing
- * @param nb_ops       Number of crypto operations for processing
- *
- * @return
- * - Number of crypto operations enqueued
- */
-static uint16_t
-aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct aesni_mb_qp *qp = __qp;
-
-	unsigned int nb_enqueued;
-
-	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
-			(void **)ops, nb_ops, NULL);
-
-	qp->stats.enqueued_count += nb_enqueued;
-
-	return nb_enqueued;
-}
-
-/** Get multi buffer session */
-static inline struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
-{
-	struct aesni_mb_session *sess = NULL;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (likely(op->sym->session != NULL))
-			sess = (struct aesni_mb_session *)
-					get_sym_session_private_data(
-					op->sym->session,
-					cryptodev_driver_id);
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		if (likely(op->sym->sec_session != NULL))
-			sess = (struct aesni_mb_session *)
-					get_sec_session_private_data(
-						op->sym->sec_session);
-#endif
-	} else {
-		void *_sess = rte_cryptodev_sym_session_create(qp->sess_mp);
-		void *_sess_private_data = NULL;
-
-		if (_sess == NULL)
-			return NULL;
-
-		if (rte_mempool_get(qp->sess_mp_priv,
-				(void **)&_sess_private_data))
-			return NULL;
-
-		sess = (struct aesni_mb_session *)_sess_private_data;
-
-		if (unlikely(aesni_mb_set_session_parameters(qp->mb_mgr,
-				sess, op->sym->xform) != 0)) {
-			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
-			sess = NULL;
-		}
-		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_sym_session_private_data(op->sym->session,
-				cryptodev_driver_id, _sess_private_data);
-	}
-
-	if (unlikely(sess == NULL))
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
-	return sess;
-}
-
-static inline uint64_t
-auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
-		uint32_t oop)
-{
-	struct rte_mbuf *m_src, *m_dst;
-	uint8_t *p_src, *p_dst;
-	uintptr_t u_src, u_dst;
-	uint32_t cipher_end, auth_end;
-
-	/* Only cipher then hash needs special calculation. */
-	if (!oop || session->chain_order != CIPHER_HASH)
-		return op->sym->auth.data.offset;
-
-	m_src = op->sym->m_src;
-	m_dst = op->sym->m_dst;
-
-	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
-	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
-	u_src = (uintptr_t)p_src;
-	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
-
-	/*
-	 * Copy the content between cipher offset and auth offset for
-	 * generating the correct digest.
-	 */
-	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
-		memcpy(p_dst + op->sym->auth.data.offset,
-				p_src + op->sym->auth.data.offset,
-				op->sym->cipher.data.offset -
-				op->sym->auth.data.offset);
-
-	/*
-	 * Copy the content between (cipher offset + length) and (auth offset +
-	 * length) for generating the correct digest.
-	 */
-	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
-	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
-	if (cipher_end < auth_end)
-		memcpy(p_dst + cipher_end, p_src + cipher_end,
-				auth_end - cipher_end);
-
-	/*
-	 * Since intel-ipsec-mb only supports positive offsets, compute the
-	 * src-to-dst offset modulo 2^64 so that adding it to the src address
-	 * always lands on the dst address.
-	 */
-
-	return u_src < u_dst ? (u_dst - u_src) :
-			(UINT64_MAX - u_src + u_dst + 1);
-}
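-
-/*
- * Worked example for the offset above (illustrative addresses): with
- * u_src = 0x2000 and u_dst = 0x1800, the function returns
- * UINT64_MAX - 0x2000 + 0x1800 + 1 = 2^64 - 0x800, i.e. -0x800 modulo
- * 2^64. Adding that value to the src address inside the library wraps
- * around to the dst address, so only "positive" offsets are passed down.
- */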
-
-static inline void
-set_cpu_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_session *session,
-		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
-		struct rte_crypto_va_iova_ptr *iv,
-		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
-{
-	/* Set crypto operation */
-	job->chain_order = session->chain_order;
-
-	/* Set cipher parameters */
-	job->cipher_direction = session->cipher.direction;
-	job->cipher_mode = session->cipher.mode;
-
-	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
-
-	/* Set authentication parameters */
-	job->hash_alg = session->auth.algo;
-	job->iv = iv->va;
-
-	switch (job->hash_alg) {
-	case AES_XCBC:
-		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
-		job->u.XCBC._k2 = session->auth.xcbc.k2;
-		job->u.XCBC._k3 = session->auth.xcbc.k3;
-
-		job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		break;
-
-	case AES_CCM:
-		job->u.CCM.aad = (uint8_t *)aad->va + 18;
-		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
-		job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		job->iv++;
-		break;
-
-	case AES_CMAC:
-		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
-		job->u.CMAC._skey1 = session->auth.cmac.skey1;
-		job->u.CMAC._skey2 = session->auth.cmac.skey2;
-		job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		break;
-
-	case AES_GMAC:
-		if (session->cipher.mode == GCM) {
-			job->u.GCM.aad = aad->va;
-			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
-		} else {
-			/* For GMAC */
-			job->u.GCM.aad = buf;
-			job->u.GCM.aad_len_in_bytes = len;
-			job->cipher_mode = GCM;
-		}
-		job->aes_enc_key_expanded = &session->cipher.gcm_key;
-		job->aes_dec_key_expanded = &session->cipher.gcm_key;
-		break;
-
-#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
-	case IMB_AUTH_CHACHA20_POLY1305:
-		job->u.CHACHA20_POLY1305.aad = aad->va;
-		job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
-		job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
-		break;
-#endif
-	default:
-		job->u.HMAC._hashed_auth_key_xor_ipad =
-				session->auth.pads.inner;
-		job->u.HMAC._hashed_auth_key_xor_opad =
-				session->auth.pads.outer;
-
-		if (job->cipher_mode == DES3) {
-			job->aes_enc_key_expanded =
-				session->cipher.exp_3des_keys.ks_ptr;
-			job->aes_dec_key_expanded =
-				session->cipher.exp_3des_keys.ks_ptr;
-		} else {
-			job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-			job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		}
-	}
-
-	/*
-	 * The multi-buffer library currently only supports returning a
-	 * truncated digest length, as specified in the relevant IPsec RFCs.
-	 */
-
-	/* Set digest location and length */
-	job->auth_tag_output = digest;
-	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
-
-	/* Set IV parameters */
-	job->iv_len_in_bytes = session->iv.length;
-
-	/* Data Parameters */
-	job->src = buf;
-	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
-	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
-	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
-	if (job->hash_alg == AES_GMAC && session->cipher.mode != GCM) {
-		job->msg_len_to_hash_in_bytes = 0;
-		job->msg_len_to_cipher_in_bytes = 0;
-	} else {
-		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
-			sofs.ofs.auth.tail;
-		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
-			sofs.ofs.cipher.tail;
-	}
-
-	job->user_data = udata;
-}
-
-/**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
- *
- * @param	job		JOB_AES_HMAC structure to fill
- * @param	qp		queue pair
- * @param	op		crypto operation to process
- * @param	digest_idx	index into the queue pair's temporary digests
- *
- * @return
- * - 0 on success, with the job structure completed
- * - -1 if the job structure could not be completed
- */
-static inline int
-set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
-		struct rte_crypto_op *op, uint8_t *digest_idx)
-{
-	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
-	struct aesni_mb_session *session;
-	uint32_t m_offset, oop;
-
-	session = get_session(qp, op);
-	if (session == NULL) {
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -1;
-	}
-
-	/* Set crypto operation */
-	job->chain_order = session->chain_order;
-
-	/* Set cipher parameters */
-	job->cipher_direction = session->cipher.direction;
-	job->cipher_mode = session->cipher.mode;
-
-	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
-
-	/* Set authentication parameters */
-	job->hash_alg = session->auth.algo;
-
-	const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);
-
-	switch (job->hash_alg) {
-	case AES_XCBC:
-		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
-		job->u.XCBC._k2 = session->auth.xcbc.k2;
-		job->u.XCBC._k3 = session->auth.xcbc.k3;
-
-		job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		break;
-
-	case AES_CCM:
-		job->u.CCM.aad = op->sym->aead.aad.data + 18;
-		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
-		job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		break;
-
-	case AES_CMAC:
-		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
-		job->u.CMAC._skey1 = session->auth.cmac.skey1;
-		job->u.CMAC._skey2 = session->auth.cmac.skey2;
-		job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		break;
-
-	case AES_GMAC:
-		if (session->cipher.mode == GCM) {
-			job->u.GCM.aad = op->sym->aead.aad.data;
-			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
-		} else {
-			/* For GMAC */
-			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
-					uint8_t *, op->sym->auth.data.offset);
-			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
-			job->cipher_mode = GCM;
-		}
-		job->aes_enc_key_expanded = &session->cipher.gcm_key;
-		job->aes_dec_key_expanded = &session->cipher.gcm_key;
-		break;
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	case IMB_AUTH_ZUC_EIA3_BITLEN:
-		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
-		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-						session->auth_iv.offset);
-		break;
-	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
-		job->u.SNOW3G_UIA2._key = (void *) &session->auth.pKeySched_snow3g_auth;
-		job->u.SNOW3G_UIA2._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-						session->auth_iv.offset);
-		break;
-	case IMB_AUTH_KASUMI_UIA1:
-		job->u.KASUMI_UIA1._key = (void *) &session->auth.pKeySched_kasumi_auth;
-		break;
-#endif
-#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
-	case IMB_AUTH_CHACHA20_POLY1305:
-		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
-		job->u.CHACHA20_POLY1305.aad_len_in_bytes = session->aead.aad_len;
-		job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
-		job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.encode;
-		break;
-#endif
-	default:
-		job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
-		job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
-
-		if (job->cipher_mode == DES3) {
-			job->aes_enc_key_expanded =
-				session->cipher.exp_3des_keys.ks_ptr;
-			job->aes_dec_key_expanded =
-				session->cipher.exp_3des_keys.ks_ptr;
-		} else {
-			job->aes_enc_key_expanded =
-				session->cipher.expanded_aes_keys.encode;
-			job->aes_dec_key_expanded =
-				session->cipher.expanded_aes_keys.decode;
-		}
-	}
-
-	if (aead)
-		m_offset = op->sym->aead.data.offset;
-	else
-		m_offset = op->sym->cipher.data.offset;
-
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
-		job->aes_enc_key_expanded = session->cipher.zuc_cipher_key;
-		job->aes_dec_key_expanded = session->cipher.zuc_cipher_key;
-	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
-		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
-		m_offset = 0;
-	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
-		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
-		m_offset = 0;
-	}
-#endif
-
-	if (!op->sym->m_dst) {
-		/* in-place operation */
-		m_dst = m_src;
-		oop = 0;
-	} else if (op->sym->m_dst == op->sym->m_src) {
-		/* in-place operation */
-		m_dst = m_src;
-		oop = 0;
-	} else {
-		/* out-of-place operation */
-		m_dst = op->sym->m_dst;
-		oop = 1;
-	}
-
-	/* Set digest output location */
-	if (job->hash_alg != NULL_HASH &&
-			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
-		job->auth_tag_output = qp->temp_digests[*digest_idx];
-		*digest_idx = (*digest_idx + 1) % MAX_JOBS;
-	} else {
-		if (aead)
-			job->auth_tag_output = op->sym->aead.digest.data;
-		else
-			job->auth_tag_output = op->sym->auth.digest.data;
-
-		if (session->auth.req_digest_len != session->auth.gen_digest_len) {
-			job->auth_tag_output = qp->temp_digests[*digest_idx];
-			*digest_idx = (*digest_idx + 1) % MAX_JOBS;
-		}
-	}
-	/*
-	 * The multi-buffer library currently only supports returning a
-	 * truncated digest length, as specified in the relevant IPsec RFCs.
-	 */
-
-	/* Set digest length */
-	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
-
-	/* Set IV parameters */
-	job->iv_len_in_bytes = session->iv.length;
-
-	/* Data Parameters */
-	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
-	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
-
-	switch (job->hash_alg) {
-	case AES_CCM:
-		job->cipher_start_src_offset_in_bytes =
-				op->sym->aead.data.offset;
-		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
-		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
-		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
-
-		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-			session->iv.offset + 1);
-		break;
-
-	case AES_GMAC:
-		if (session->cipher.mode == GCM) {
-			job->cipher_start_src_offset_in_bytes =
-					op->sym->aead.data.offset;
-			job->hash_start_src_offset_in_bytes =
-					op->sym->aead.data.offset;
-			job->msg_len_to_cipher_in_bytes =
-					op->sym->aead.data.length;
-			job->msg_len_to_hash_in_bytes =
-					op->sym->aead.data.length;
-		} else {
-			job->cipher_start_src_offset_in_bytes =
-					op->sym->auth.data.offset;
-			job->hash_start_src_offset_in_bytes =
-					op->sym->auth.data.offset;
-			job->msg_len_to_cipher_in_bytes = 0;
-			job->msg_len_to_hash_in_bytes = 0;
-		}
-
-		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-				session->iv.offset);
-		break;
-
-#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
-	case IMB_AUTH_CHACHA20_POLY1305:
-		job->cipher_start_src_offset_in_bytes = op->sym->aead.data.offset;
-		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
-		job->msg_len_to_cipher_in_bytes =
-				op->sym->aead.data.length;
-		job->msg_len_to_hash_in_bytes =
-					op->sym->aead.data.length;
-
-		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-				session->iv.offset);
-		break;
-#endif
-	default:
-		/* For SNOW3G, length and offsets are already in bits */
-		job->cipher_start_src_offset_in_bytes =
-				op->sym->cipher.data.offset;
-		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
-
-		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
-				session, oop);
-		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
-
-		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
-			session->iv.offset);
-	}
-
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
-		job->msg_len_to_cipher_in_bytes >>= 3;
-	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
-		job->msg_len_to_hash_in_bytes >>= 3;
-#endif
-
-	/* Set user data to be crypto operation data struct */
-	job->user_data = op;
-
-	return 0;
-}
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-/**
- * Process a crypto operation containing a security op and complete a
- * JOB_AES_HMAC job structure for submission to the multi buffer library for
- * processing.
- */
-static inline int
-set_sec_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
-		struct rte_crypto_op *op, uint8_t *digest_idx)
-{
-	struct rte_mbuf *m_src, *m_dst;
-	struct rte_crypto_sym_op *sym;
-	struct aesni_mb_session *session;
-
-	session = get_session(qp, op);
-	if (unlikely(session == NULL)) {
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -1;
-	}
-
-	/* Only DOCSIS protocol operations are supported for now */
-	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
-			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return -1;
-	}
-
-	sym = op->sym;
-	m_src = sym->m_src;
-
-	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
-		/* in-place operation */
-		m_dst = m_src;
-	} else {
-		/* out-of-place operation not supported */
-		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		return -ENOTSUP;
-	}
-
-	/* Set crypto operation */
-	job->chain_order = session->chain_order;
-
-	/* Set cipher parameters */
-	job->cipher_direction = session->cipher.direction;
-	job->cipher_mode = session->cipher.mode;
-
-	job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
-	job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
-	job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
-
-	/* Set IV parameters */
-	job->iv_len_in_bytes = session->iv.length;
-	job->iv = (uint8_t *)op + session->iv.offset;
-
-	/* Set authentication parameters */
-	job->hash_alg = session->auth.algo;
-
-	/* Set digest output location */
-	job->auth_tag_output = qp->temp_digests[*digest_idx];
-	*digest_idx = (*digest_idx + 1) % MAX_JOBS;
-
-	/* Set digest length */
-	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
-
-	/* Set data parameters */
-	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
-	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
-						sym->cipher.data.offset);
-
-	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
-	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;
-
-	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
-	job->msg_len_to_hash_in_bytes = sym->auth.data.length;
-
-	job->user_data = op;
-
-	return 0;
-}
-
-static inline void
-verify_docsis_sec_crc(JOB_AES_HMAC *job, uint8_t *status)
-{
-	uint16_t crc_offset;
-	uint8_t *crc;
-
-	if (!job->msg_len_to_hash_in_bytes)
-		return;
-
-	crc_offset = job->hash_start_src_offset_in_bytes +
-			job->msg_len_to_hash_in_bytes -
-			job->cipher_start_src_offset_in_bytes;
-	crc = job->dst + crc_offset;
-
-	/* Verify CRC (at the end of the message) */
-	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
-		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-}
-#endif
-
-static inline void
-verify_digest(JOB_AES_HMAC *job, void *digest, uint16_t len, uint8_t *status)
-{
-	/* Verify digest if required */
-	if (memcmp(job->auth_tag_output, digest, len) != 0)
-		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-}
-
-static inline void
-generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
-		struct aesni_mb_session *sess)
-{
-	/* No extra copy needed */
-	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
-		return;
-
-	/*
-	 * This can only happen for HMAC, so only the truncated digest for
-	 * authentication algorithms needs to be copied out.
-	 */
-	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
-			sess->auth.req_digest_len);
-}
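-
-/*
- * Example (illustrative): for HMAC-SHA1 with a requested 12-byte tag,
- * the common IPsec truncation, the library writes the full tag into a
- * temporary buffer (gen_digest_len bytes) and the memcpy above hands
- * back only the first req_digest_len = 12 bytes to the application.
- */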
-
-/**
- * Process a completed job and return the crypto operation it carried.
- *
- * @param qp	Queue Pair to process
- * @param job	JOB_AES_HMAC job to process
- *
- * @return
- * - Returns processed crypto operation.
- * - Returns NULL on invalid job
- */
-static inline struct rte_crypto_op *
-post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
-{
-	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
-	struct aesni_mb_session *sess = NULL;
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-	uint8_t is_docsis_sec = 0;
-
-	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
-		/*
-		 * At this point, assume that any security-type op is a
-		 * DOCSIS op.
-		 */
-		is_docsis_sec = 1;
-		sess = get_sec_session_private_data(op->sym->sec_session);
-	} else
-#endif
-	{
-		sess = get_sym_session_private_data(op->sym->session,
-						cryptodev_driver_id);
-	}
-
-	if (unlikely(sess == NULL)) {
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return op;
-	}
-
-	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
-		switch (job->status) {
-		case STS_COMPLETED:
-			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-			if (job->hash_alg == NULL_HASH)
-				break;
-
-			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
-				if (is_aead_algo(job->hash_alg, sess->cipher.mode))
-					verify_digest(job,
-						op->sym->aead.digest.data,
-						sess->auth.req_digest_len,
-						&op->status);
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-				else if (is_docsis_sec)
-					verify_docsis_sec_crc(job,
-						&op->status);
-#endif
-				else
-					verify_digest(job,
-						op->sym->auth.digest.data,
-						sess->auth.req_digest_len,
-						&op->status);
-			} else
-				generate_digest(job, op, sess);
-			break;
-		default:
-			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-		}
-	}
-
-	/* Free session if a session-less crypto op */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(sess, 0, sizeof(struct aesni_mb_session));
-		memset(op->sym->session, 0,
-			rte_cryptodev_sym_get_existing_header_session_size(
-				op->sym->session));
-		rte_mempool_put(qp->sess_mp_priv, sess);
-		rte_mempool_put(qp->sess_mp, op->sym->session);
-		op->sym->session = NULL;
-	}
-
-	return op;
-}
-
-static inline void
-post_process_mb_sync_job(JOB_AES_HMAC *job)
-{
-	uint32_t *st;
-
-	st = job->user_data;
-	st[0] = (job->status == STS_COMPLETED) ? 0 : EBADMSG;
-}
-
-/**
- * Process a completed JOB_AES_HMAC job and keep processing jobs until
- * get_completed_job returns NULL.
- *
- * @param qp		Queue Pair to process
- * @param job		JOB_AES_HMAC job
- * @param ops		array to store the processed crypto operations
- * @param nb_ops	maximum number of operations to return
- *
- * @return
- * - Number of processed jobs
- */
-static unsigned
-handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct rte_crypto_op *op = NULL;
-	unsigned processed_jobs = 0;
-
-	while (job != NULL) {
-		op = post_process_mb_job(qp, job);
-
-		if (op) {
-			ops[processed_jobs++] = op;
-			qp->stats.dequeued_count++;
-		} else {
-			qp->stats.dequeue_err_count++;
-			break;
-		}
-		if (processed_jobs == nb_ops)
-			break;
-
-		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
-	}
-
-	return processed_jobs;
-}
-
-static inline uint32_t
-handle_completed_sync_jobs(JOB_AES_HMAC *job, MB_MGR *mb_mgr)
-{
-	uint32_t i;
-
-	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
-		post_process_mb_sync_job(job);
-
-	return i;
-}
-
-static inline uint32_t
-flush_mb_sync_mgr(MB_MGR *mb_mgr)
-{
-	JOB_AES_HMAC *job;
-
-	job = IMB_FLUSH_JOB(mb_mgr);
-	return handle_completed_sync_jobs(job, mb_mgr);
-}
-
-static inline uint16_t
-flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	int processed_ops = 0;
-
-	/* Flush the remaining jobs */
-	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
-
-	if (job)
-		processed_ops += handle_completed_jobs(qp, job,
-				&ops[processed_ops], nb_ops - processed_ops);
-
-	return processed_ops;
-}
-
-static inline JOB_AES_HMAC *
-set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
-{
-	job->chain_order = HASH_CIPHER;
-	job->cipher_mode = NULL_CIPHER;
-	job->hash_alg = NULL_HASH;
-	job->cipher_direction = DECRYPT;
-
-	/* Set user data to be crypto operation data struct */
-	job->user_data = op;
-
-	return job;
-}
-
-static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct aesni_mb_qp *qp = queue_pair;
-
-	struct rte_crypto_op *op;
-	JOB_AES_HMAC *job;
-
-	int retval, processed_jobs = 0;
-
-	if (unlikely(nb_ops == 0))
-		return 0;
-
-	uint8_t digest_idx = qp->digest_idx;
-	do {
-		/* Get next free mb job struct from mb manager */
-		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
-		if (unlikely(job == NULL)) {
-			/* if no free mb job structs we need to flush mb_mgr */
-			processed_jobs += flush_mb_mgr(qp,
-					&ops[processed_jobs],
-					nb_ops - processed_jobs);
-
-			if (nb_ops == processed_jobs)
-				break;
-
-			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
-		}
-
-		/*
-		 * Get the next operation to process from the ingress queue.
-		 * There is no need to return the job to the MB_MGR if there
-		 * are no more operations to process, since the MB_MGR can use
-		 * that pointer again in subsequent get_next calls.
-		 */
-		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
-		if (retval < 0)
-			break;
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
-			retval = set_sec_mb_job_params(job, qp, op,
-						&digest_idx);
-		else
-#endif
-			retval = set_mb_job_params(job, qp, op, &digest_idx);
-
-		if (unlikely(retval != 0)) {
-			qp->stats.dequeue_err_count++;
-			set_job_null_op(job, op);
-		}
-
-		/* Submit job to multi-buffer for processing */
-#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
-		job = IMB_SUBMIT_JOB(qp->mb_mgr);
-#else
-		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
-#endif
-		/*
-		 * If submit returns a processed job then handle it,
-		 * before submitting subsequent jobs
-		 */
-		if (job)
-			processed_jobs += handle_completed_jobs(qp, job,
-					&ops[processed_jobs],
-					nb_ops - processed_jobs);
-
-	} while (processed_jobs < nb_ops);
-
-	qp->digest_idx = digest_idx;
-
-	if (processed_jobs < 1)
-		processed_jobs += flush_mb_mgr(qp,
-				&ops[processed_jobs],
-				nb_ops - processed_jobs);
-
-	return processed_jobs;
-}
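-
-/*
- * Typical datapath usage (illustrative sketch; dev_id, qp_id and the ops
- * arrays are application-provided):
- *
- *	uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
- *			ops, nb_ops);
- *	uint16_t n_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id,
- *			deq_ops, nb_ops);
- *
- * Enqueue only places ops on the ingress ring; the multi-buffer jobs are
- * actually submitted to and flushed from the manager on the dequeue path
- * above.
- */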
-
-static MB_MGR *
-alloc_init_mb_mgr(enum aesni_mb_vector_mode vector_mode)
-{
-	MB_MGR *mb_mgr = alloc_mb_mgr(0);
-	if (mb_mgr == NULL)
-		return NULL;
-
-	switch (vector_mode) {
-	case RTE_AESNI_MB_SSE:
-		init_mb_mgr_sse(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX:
-		init_mb_mgr_avx(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX2:
-		init_mb_mgr_avx2(mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX512:
-		init_mb_mgr_avx512(mb_mgr);
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
-		free_mb_mgr(mb_mgr);
-		return NULL;
-	}
-
-	return mb_mgr;
-}
-
-static inline void
-aesni_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
-{
-	uint32_t i;
-
-	for (i = 0; i != vec->num; ++i)
-		vec->status[i] = err;
-}
-
-static inline int
-check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
-{
-	/* no multi-seg support with current AESNI-MB PMD */
-	if (sgl->num != 1)
-		return ENOTSUP;
-	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
-		return EINVAL;
-	return 0;
-}
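-
-/*
- * The sofs value checked above packs the cipher and auth head/tail
- * offsets into one 64-bit word. Example (illustrative): cipher all but
- * an 8-byte header while authenticating the whole buffer:
- *
- *	union rte_crypto_sym_ofs so = { .ofs = {
- *		.cipher = { .head = 8, .tail = 0 },
- *		.auth = { .head = 0, .tail = 0 },
- *	} };
- */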
-
-static inline JOB_AES_HMAC *
-submit_sync_job(MB_MGR *mb_mgr)
-{
-#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
-	return IMB_SUBMIT_JOB(mb_mgr);
-#else
-	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
-#endif
-}
-
-static inline uint32_t
-generate_sync_dgst(struct rte_crypto_sym_vec *vec,
-	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
-{
-	uint32_t i, k;
-
-	for (i = 0, k = 0; i != vec->num; i++) {
-		if (vec->status[i] == 0) {
-			memcpy(vec->digest[i].va, dgst[i], len);
-			k++;
-		}
-	}
-
-	return k;
-}
-
-static inline uint32_t
-verify_sync_dgst(struct rte_crypto_sym_vec *vec,
-	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
-{
-	uint32_t i, k;
-
-	for (i = 0, k = 0; i != vec->num; i++) {
-		if (vec->status[i] == 0) {
-			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
-				vec->status[i] = EBADMSG;
-			else
-				k++;
-		}
-	}
-
-	return k;
-}
-
-uint32_t
-aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
-	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
-	struct rte_crypto_sym_vec *vec)
-{
-	int32_t ret;
-	uint32_t i, j, k, len;
-	void *buf;
-	JOB_AES_HMAC *job;
-	MB_MGR *mb_mgr;
-	struct aesni_mb_private *priv;
-	struct aesni_mb_session *s;
-	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
-
-	s = get_sym_session_private_data(sess, dev->driver_id);
-	if (s == NULL) {
-		aesni_mb_fill_error_code(vec, EINVAL);
-		return 0;
-	}
-
-	/* get per-thread MB MGR, create one if needed */
-	mb_mgr = RTE_PER_LCORE(sync_mb_mgr);
-	if (mb_mgr == NULL) {
-
-		priv = dev->data->dev_private;
-		mb_mgr = alloc_init_mb_mgr(priv->vector_mode);
-		if (mb_mgr == NULL) {
-			aesni_mb_fill_error_code(vec, ENOMEM);
-			return 0;
-		}
-		RTE_PER_LCORE(sync_mb_mgr) = mb_mgr;
-	}
-
-	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
-
-		ret = check_crypto_sgl(sofs, vec->sgl + i);
-		if (ret != 0) {
-			vec->status[i] = ret;
-			continue;
-		}
-
-		buf = vec->sgl[i].vec[0].base;
-		len = vec->sgl[i].vec[0].len;
-
-		job = IMB_GET_NEXT_JOB(mb_mgr);
-		if (job == NULL) {
-			k += flush_mb_sync_mgr(mb_mgr);
-			job = IMB_GET_NEXT_JOB(mb_mgr);
-			RTE_ASSERT(job != NULL);
-		}
-
-		/* Submit job for processing */
-		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
-			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
-		job = submit_sync_job(mb_mgr);
-		j++;
-
-		/* handle completed jobs */
-		k += handle_completed_sync_jobs(job, mb_mgr);
-	}
-
-	/* flush remaining jobs */
-	while (k != j)
-		k += flush_mb_sync_mgr(mb_mgr);
-
-	/* finish processing for successful jobs: check/update digest */
-	if (k != 0) {
-		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
-			k = verify_sync_dgst(vec,
-				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
-				s->auth.req_digest_len);
-		else
-			k = generate_sync_dgst(vec,
-				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
-				s->auth.req_digest_len);
-	}
-
-	return k;
-}
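-
-/*
- * Applications reach this path through the CPU crypto API rather than
- * the enqueue/dequeue burst calls. A minimal sketch (dev_id, sess, ofs
- * and vec are application-provided):
- *
- *	uint32_t done = rte_cryptodev_sym_cpu_crypto_process(dev_id,
- *			sess, ofs, &vec);
- *
- * done equals vec.num when every buffer was processed (and, for verify
- * sessions, authenticated); per-element errors land in vec.status[].
- */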
-
-static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
-
-static uint64_t
-vec_mode_to_flags(enum aesni_mb_vector_mode mode)
-{
-	switch (mode) {
-	case RTE_AESNI_MB_SSE:
-		return RTE_CRYPTODEV_FF_CPU_SSE;
-	case RTE_AESNI_MB_AVX:
-		return RTE_CRYPTODEV_FF_CPU_AVX;
-	case RTE_AESNI_MB_AVX2:
-		return RTE_CRYPTODEV_FF_CPU_AVX2;
-	case RTE_AESNI_MB_AVX512:
-		return RTE_CRYPTODEV_FF_CPU_AVX512;
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n", mode);
-		return 0;
-	}
-}
-
-static int
-cryptodev_aesni_mb_create(const char *name,
-			struct rte_vdev_device *vdev,
-			struct rte_cryptodev_pmd_init_params *init_params)
-{
-	struct rte_cryptodev *dev;
-	struct aesni_mb_private *internals;
-	enum aesni_mb_vector_mode vector_mode;
-	MB_MGR *mb_mgr;
-
-	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
-	if (dev == NULL) {
-		AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
-		return -ENODEV;
-	}
-
-	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
-		vector_mode = RTE_AESNI_MB_AVX512;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
-		vector_mode = RTE_AESNI_MB_AVX2;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
-		vector_mode = RTE_AESNI_MB_AVX;
-	else
-		vector_mode = RTE_AESNI_MB_SSE;
-
-	dev->driver_id = cryptodev_driver_id;
-	dev->dev_ops = rte_aesni_mb_pmd_ops;
-
-	/* register enqueue/dequeue burst functions for the data path */
-	dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
-	dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
-
-	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
-			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-	struct rte_security_ctx *security_instance;
-	security_instance = rte_malloc("aesni_mb_sec",
-				sizeof(struct rte_security_ctx),
-				RTE_CACHE_LINE_SIZE);
-	if (security_instance == NULL) {
-		AESNI_MB_LOG(ERR, "rte_security_ctx memory alloc failed");
-		rte_cryptodev_pmd_destroy(dev);
-		return -ENOMEM;
-	}
-
-	security_instance->device = (void *)dev;
-	security_instance->ops = rte_aesni_mb_pmd_sec_ops;
-	security_instance->sess_cnt = 0;
-	dev->security_ctx = security_instance;
-	dev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
-#endif
-
-	/* Check CPU for AES instruction set support */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
-	else
-		AESNI_MB_LOG(WARNING, "AES instructions not supported by CPU");
-
-	dev->feature_flags |= vec_mode_to_flags(vector_mode);
-
-	mb_mgr = alloc_init_mb_mgr(vector_mode);
-	if (mb_mgr == NULL) {
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-		rte_free(dev->security_ctx);
-		dev->security_ctx = NULL;
-#endif
-		rte_cryptodev_pmd_destroy(dev);
-		return -ENOMEM;
-	}
-
-	/* Set vector instructions mode supported */
-	internals = dev->data->dev_private;
-
-	internals->vector_mode = vector_mode;
-	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
-	internals->mb_mgr = mb_mgr;
-
-	AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
-			imb_get_version_str());
-	return 0;
-}
-
-static int
-cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev_pmd_init_params init_params = {
-		"",
-		sizeof(struct aesni_mb_private),
-		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
-	};
-	const char *name, *args;
-	int retval;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	args = rte_vdev_device_args(vdev);
-
-	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
-	if (retval) {
-		AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments [%s]",
-				args);
-		return -EINVAL;
-	}
-
-	return cryptodev_aesni_mb_create(name, vdev, &init_params);
-}
-
-static int
-cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev *cryptodev;
-	struct aesni_mb_private *internals;
-	const char *name;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	internals = cryptodev->data->dev_private;
-
-	free_mb_mgr(internals->mb_mgr);
-	if (RTE_PER_LCORE(sync_mb_mgr)) {
-		free_mb_mgr(RTE_PER_LCORE(sync_mb_mgr));
-		RTE_PER_LCORE(sync_mb_mgr) = NULL;
-	}
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-	rte_free(cryptodev->security_ctx);
-	cryptodev->security_ctx = NULL;
-#endif
-
-	return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
-	.probe = cryptodev_aesni_mb_probe,
-	.remove = cryptodev_aesni_mb_remove
-};
-
-static struct cryptodev_driver aesni_mb_crypto_drv;
-
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
-RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
-	"max_nb_queue_pairs=<int> "
-	"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
-		cryptodev_aesni_mb_pmd_drv.driver,
-		cryptodev_driver_id);
-RTE_LOG_REGISTER_DEFAULT(aesni_mb_logtype_driver, NOTICE);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
deleted file mode 100644
index 48a8f91868..0000000000
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ /dev/null
@@ -1,1126 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_string_fns.h>
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_ether.h>
-#include <cryptodev_pmd.h>
-
-#include "aesni_mb_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
-	{	/* MD5 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 1,
-					.max = 64,
-					.increment = 1
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA1 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 20,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA1 */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA1,
-				.block_size = 64,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 20,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA224 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 28,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA224 */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA224,
-				.block_size = 64,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 28,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA256 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 32,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA256 */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA256,
-				.block_size = 64,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 32,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA384 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
-				.block_size = 128,
-				.key_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 48,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA384 */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA384,
-				.block_size = 128,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 48,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA512 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
-				.block_size = 128,
-				.key_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 64,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA512 */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA512,
-				.block_size = 128,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 64,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES XCBC HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES CTR */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 16,
-					.increment = 4
-				}
-			}, }
-		}, }
-	},
-	{	/* AES DOCSIS BPI */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
-					.max = 32,
-					.increment = 16
-#else
-					.max = 16,
-					.increment = 0
-#endif
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* DES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
-				.block_size = 8,
-				.key_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/*  3DES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
-				.block_size = 8,
-				.key_size = {
-					.min = 8,
-					.max = 24,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* DES DOCSIS BPI */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
-				.block_size = 8,
-				.key_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES CCM */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
-			{.aead = {
-				.algo = RTE_CRYPTO_AEAD_AES_CCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-#if IMB_VERSION(0, 54, 2) <= IMB_VERSION_NUM
-					.max = 32,
-					.increment = 16
-#else
-					.max = 16,
-					.increment = 0
-#endif
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 16,
-					.increment = 2
-				},
-				.aad_size = {
-					.min = 0,
-					.max = 46,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 7,
-					.max = 13,
-					.increment = 1
-				},
-			}, }
-		}, }
-	},
-	{	/* AES CMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES GCM */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
-			{.aead = {
-				.algo = RTE_CRYPTO_AEAD_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.aad_size = {
-					.min = 0,
-					.max = 65535,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GMAC (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-#if IMB_VERSION(0, 53, 0) <= IMB_VERSION_NUM
-	{	/* AES ECB */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-#endif
-#if IMB_VERSION(0, 53, 3) <= IMB_VERSION_NUM
-	{	/* ZUC (EIA3) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* ZUC (EEA3) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UIA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UEA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* KASUMI (F9) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* KASUMI (F8) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-#endif
-#if IMB_VERSION(0, 54, 3) <= IMB_VERSION_NUM
-	{	/* CHACHA20-POLY1305 */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
-			{.aead = {
-				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
-				.block_size = 64,
-				.key_size = {
-					.min = 32,
-					.max = 32,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.aad_size = {
-					.min = 0,
-					.max = 240,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				},
-			}, }
-		}, }
-	},
-#endif
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-static const struct rte_cryptodev_capabilities
-					aesni_mb_pmd_security_crypto_cap[] = {
-	{	/* AES DOCSIS BPI */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 16
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static const struct rte_security_capability aesni_mb_pmd_security_cap[] = {
-	{	/* DOCSIS Uplink */
-		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
-		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
-		.docsis = {
-			.direction = RTE_SECURITY_DOCSIS_UPLINK
-		},
-		.crypto_capabilities = aesni_mb_pmd_security_crypto_cap
-	},
-	{	/* DOCSIS Downlink */
-		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
-		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
-		.docsis = {
-			.direction = RTE_SECURITY_DOCSIS_DOWNLINK
-		},
-		.crypto_capabilities = aesni_mb_pmd_security_crypto_cap
-	},
-	{
-		.action = RTE_SECURITY_ACTION_TYPE_NONE
-	}
-};
-#endif
-
-/** Configure device */
-static int
-aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-/** Start device */
-static int
-aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-/** Stop device */
-static void
-aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-
-/** Get device statistics */
-static void
-aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
-
-		stats->enqueued_count += qp->stats.enqueued_count;
-		stats->dequeued_count += qp->stats.dequeued_count;
-
-		stats->enqueue_err_count += qp->stats.enqueue_err_count;
-		stats->dequeue_err_count += qp->stats.dequeue_err_count;
-	}
-}
-
-/** Reset device statistics */
-static void
-aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
-
-		memset(&qp->stats, 0, sizeof(qp->stats));
-	}
-}
-
-
-/** Get device info */
-static void
-aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *dev_info)
-{
-	struct aesni_mb_private *internals = dev->data->dev_private;
-
-	if (dev_info != NULL) {
-		dev_info->driver_id = dev->driver_id;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = aesni_mb_pmd_capabilities;
-		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		/* No limit of number of sessions */
-		dev_info->sym.max_nb_sessions = 0;
-	}
-}
-
-/** Release queue pair */
-static int
-aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
-	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
-	struct rte_ring *r = NULL;
-
-	if (qp != NULL) {
-		r = rte_ring_lookup(qp->name);
-		if (r)
-			rte_ring_free(r);
-		if (qp->mb_mgr)
-			free_mb_mgr(qp->mb_mgr);
-		rte_free(qp);
-		dev->data->queue_pairs[qp_id] = NULL;
-	}
-	return 0;
-}
-
-/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
-static int
-aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
-		struct aesni_mb_qp *qp)
-{
-	unsigned n = snprintf(qp->name, sizeof(qp->name),
-			"aesni_mb_pmd_%u_qp_%u",
-			dev->data->dev_id, qp->id);
-
-	if (n >= sizeof(qp->name))
-		return -1;
-
-	return 0;
-}
-
-/** Create a ring to place processed operations on */
-static struct rte_ring *
-aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
-		unsigned int ring_size, int socket_id)
-{
-	struct rte_ring *r;
-	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
-	unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));
-
-	if (n >= sizeof(ring_name))
-		return NULL;
-
-	r = rte_ring_lookup(ring_name);
-	if (r) {
-		if (rte_ring_get_size(r) >= ring_size) {
-			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
-			ring_name);
-			return r;
-		}
-
-		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
-			ring_name);
-		return NULL;
-	}
-
-	return rte_ring_create(ring_name, ring_size, socket_id,
-			RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
-{
-	struct aesni_mb_qp *qp = NULL;
-	struct aesni_mb_private *internals = dev->data->dev_private;
-	int ret = -1;
-
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		aesni_mb_pmd_qp_release(dev, qp_id);
-
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
-					RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return -ENOMEM;
-
-	qp->id = qp_id;
-	dev->data->queue_pairs[qp_id] = qp;
-
-	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
-		goto qp_setup_cleanup;
-
-
-	qp->mb_mgr = alloc_mb_mgr(0);
-	if (qp->mb_mgr == NULL) {
-		ret = -ENOMEM;
-		goto qp_setup_cleanup;
-	}
-
-	switch (internals->vector_mode) {
-	case RTE_AESNI_MB_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(qp->mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(qp->mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(qp->mb_mgr);
-		break;
-	case RTE_AESNI_MB_AVX512:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
-		init_mb_mgr_avx512(qp->mb_mgr);
-		break;
-	default:
-		AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
-				internals->vector_mode);
-		goto qp_setup_cleanup;
-	}
-
-	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->ingress_queue == NULL) {
-		ret = -1;
-		goto qp_setup_cleanup;
-	}
-
-	qp->sess_mp = qp_conf->mp_session;
-	qp->sess_mp_priv = qp_conf->mp_session_private;
-
-	memset(&qp->stats, 0, sizeof(qp->stats));
-
-	char mp_name[RTE_MEMPOOL_NAMESIZE];
-
-	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
-				"digest_mp_%u_%u", dev->data->dev_id, qp_id);
-	return 0;
-
-qp_setup_cleanup:
-	if (qp) {
-		if (qp->mb_mgr)
-			free_mb_mgr(qp->mb_mgr);
-		rte_free(qp);
-	}
-
-	return ret;
-}
-
-/** Returns the size of the aesni multi-buffer session structure */
-static unsigned
-aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct aesni_mb_session);
-}
-
-/** Configure a aesni multi-buffer session from a crypto xform chain */
-static int
-aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_sym_xform *xform,
-		struct rte_cryptodev_sym_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	struct aesni_mb_private *internals = dev->data->dev_private;
-	int ret;
-
-	if (unlikely(sess == NULL)) {
-		AESNI_MB_LOG(ERR, "invalid session struct");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		AESNI_MB_LOG(ERR,
-				"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	ret = aesni_mb_set_session_parameters(internals->mb_mgr,
-			sess_private_data, xform);
-	if (ret != 0) {
-		AESNI_MB_LOG(ERR, "failed configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sym_session_private_data(sess, dev->driver_id,
-			sess_private_data);
-
-	return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_sym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_sym_session_private_data(sess, index);
-
-	/* Zero out the whole structure */
-	if (sess_priv) {
-		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_sym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-struct rte_cryptodev_ops aesni_mb_pmd_ops = {
-		.dev_configure		= aesni_mb_pmd_config,
-		.dev_start		= aesni_mb_pmd_start,
-		.dev_stop		= aesni_mb_pmd_stop,
-		.dev_close		= aesni_mb_pmd_close,
-
-		.stats_get		= aesni_mb_pmd_stats_get,
-		.stats_reset		= aesni_mb_pmd_stats_reset,
-
-		.dev_infos_get		= aesni_mb_pmd_info_get,
-
-		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
-		.queue_pair_release	= aesni_mb_pmd_qp_release,
-
-		.sym_cpu_process	= aesni_mb_cpu_crypto_process_bulk,
-
-		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
-		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
-		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
-
-#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
-/**
- * Configure a aesni multi-buffer session from a security session
- * configuration
- */
-static int
-aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
-		struct rte_security_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
-	int ret;
-
-	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
-			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
-		AESNI_MB_LOG(ERR, "Invalid security protocol");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		AESNI_MB_LOG(ERR, "Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
-			sess_private_data);
-
-	if (ret != 0) {
-		AESNI_MB_LOG(ERR, "Failed to configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sec_session_private_data(sess, sess_private_data);
-
-	return ret;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static int
-aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
-		struct rte_security_session *sess)
-{
-	void *sess_priv = get_sec_session_private_data(sess);
-
-	if (sess_priv) {
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
-		set_sec_session_private_data(sess, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-	return 0;
-}
-
-/** Get security capabilities for aesni multi-buffer */
-static const struct rte_security_capability *
-aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
-{
-	return aesni_mb_pmd_security_cap;
-}
-
-static struct rte_security_ops aesni_mb_pmd_sec_ops = {
-		.session_create = aesni_mb_pmd_sec_sess_create,
-		.session_update = NULL,
-		.session_stats_get = NULL,
-		.session_destroy = aesni_mb_pmd_sec_sess_destroy,
-		.set_pkt_metadata = NULL,
-		.capabilities_get = aesni_mb_pmd_sec_capa_get
-};
-
-struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;
-#endif
diff --git a/drivers/crypto/aesni_mb/version.map b/drivers/crypto/aesni_mb/version.map
deleted file mode 100644
index c2e0723b4c..0000000000
--- a/drivers/crypto/aesni_mb/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_22 {
-	local: *;
-};
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index 3d48da60ed..bac5d85e26 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -23,5 +23,6 @@ endif
 
 sources = files('rte_ipsec_mb_pmd.c',
 		'rte_ipsec_mb_pmd_ops.c',
+		'pmd_aesni_mb.c'
 		)
 deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
new file mode 100644
index 0000000000..3c377ab753
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -0,0 +1,2977 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#if defined(RTE_LIB_SECURITY)
+#define AESNI_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#include <rte_ether.h>
+#endif
+
+#include "rte_ipsec_mb_pmd_private.h"
+
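+/* Minimum and maximum digest lengths (in bytes) accepted for AES-CCM */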
+#define AES_CCM_DIGEST_MIN_LEN 4
+#define AES_CCM_DIGEST_MAX_LEN 16
+#define HMAC_MAX_BLOCK_SIZE 128
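+/* HMAC inner and outer pad bytes, as defined in RFC 2104 */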
+#define HMAC_IPAD_VALUE			(0x36)
+#define HMAC_OPAD_VALUE			(0x5C)
+
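+/* Table of symmetric crypto capabilities advertised by this PMD */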
+static const struct rte_cryptodev_capabilities aesni_mb_capabilities[] = {
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 20,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA1 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1,
+				.block_size = 64,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 20,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 28,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA224 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224,
+				.block_size = 64,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 28,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 32,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256,
+				.block_size = 64,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 32,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 48,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384,
+				.block_size = 128,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 48,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 65535,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512,
+				.block_size = 128,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/*  3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 24,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* DES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 16,
+					.increment = 2
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 46,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 7,
+					.max = 13,
+					.increment = 1
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES ECB */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_ECB,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* ZUC (EIA3) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* ZUC (EEA3) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* SNOW 3G (UIA2) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* SNOW 3G (UEA2) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* KASUMI (F9) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* KASUMI (F8) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* CHACHA20-POLY1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 64,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
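+/* Driver ID, assigned when the PMD is registered with the cryptodev library */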
+uint8_t pmd_driver_id_aesni_mb;
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+struct aesni_mb_qp_data {
+	uint8_t temp_digests[IMB_MAX_JOBS][DIGEST_LENGTH_MAX];
+	/**< Buffers used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+};
+
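+/* Block size (in bytes) for each supported authentication algorithm */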
+static const unsigned int auth_blocksize[] = {
+		[IMB_AUTH_NULL]			= 0,
+		[IMB_AUTH_MD5]			= 64,
+		[IMB_AUTH_HMAC_SHA_1]		= 64,
+		[IMB_AUTH_HMAC_SHA_224]		= 64,
+		[IMB_AUTH_HMAC_SHA_256]		= 64,
+		[IMB_AUTH_HMAC_SHA_384]		= 128,
+		[IMB_AUTH_HMAC_SHA_512]		= 128,
+		[IMB_AUTH_AES_XCBC]		= 16,
+		[IMB_AUTH_AES_CCM]		= 16,
+		[IMB_AUTH_AES_CMAC]		= 16,
+		[IMB_AUTH_AES_GMAC]		= 16,
+		[IMB_AUTH_SHA_1]		= 64,
+		[IMB_AUTH_SHA_224]		= 64,
+		[IMB_AUTH_SHA_256]		= 64,
+		[IMB_AUTH_SHA_384]		= 128,
+		[IMB_AUTH_SHA_512]		= 128,
+		[IMB_AUTH_ZUC_EIA3_BITLEN]	= 16,
+		[IMB_AUTH_SNOW3G_UIA2_BITLEN]	= 16,
+		[IMB_AUTH_KASUMI_UIA1]		= 16
+};
+
+/**
+ * Get the block size in bytes for a specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned int
+get_auth_algo_blocksize(IMB_HASH_ALG algo)
+{
+	return auth_blocksize[algo];
+}
+
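+/* Truncated digest length (in bytes) used by IPsec for each algorithm */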
+static const unsigned int auth_truncated_digest_byte_lengths[] = {
+		[IMB_AUTH_MD5]			= 12,
+		[IMB_AUTH_HMAC_SHA_1]		= 12,
+		[IMB_AUTH_HMAC_SHA_224]		= 14,
+		[IMB_AUTH_HMAC_SHA_256]		= 16,
+		[IMB_AUTH_HMAC_SHA_384]		= 24,
+		[IMB_AUTH_HMAC_SHA_512]		= 32,
+		[IMB_AUTH_AES_XCBC]		= 12,
+		[IMB_AUTH_AES_CMAC]		= 12,
+		[IMB_AUTH_AES_CCM]		= 8,
+		[IMB_AUTH_NULL]			= 0,
+		[IMB_AUTH_AES_GMAC]		= 12,
+		[IMB_AUTH_SHA_1]		= 20,
+		[IMB_AUTH_SHA_224]		= 28,
+		[IMB_AUTH_SHA_256]		= 32,
+		[IMB_AUTH_SHA_384]		= 48,
+		[IMB_AUTH_SHA_512]		= 64,
+		[IMB_AUTH_ZUC_EIA3_BITLEN]	= 4,
+		[IMB_AUTH_SNOW3G_UIA2_BITLEN]	= 4,
+		[IMB_AUTH_KASUMI_UIA1]		= 4
+};
+
+/**
+ * Get the IPsec-specified truncated length in bytes of the HMAC digest for a
+ * specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned int
+get_truncated_digest_byte_length(IMB_HASH_ALG algo)
+{
+	return auth_truncated_digest_byte_lengths[algo];
+}
+
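+/* Full digest length (in bytes) produced for each algorithm */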
+static const unsigned int auth_digest_byte_lengths[] = {
+		[IMB_AUTH_MD5]			= 16,
+		[IMB_AUTH_HMAC_SHA_1]		= 20,
+		[IMB_AUTH_HMAC_SHA_224]		= 28,
+		[IMB_AUTH_HMAC_SHA_256]		= 32,
+		[IMB_AUTH_HMAC_SHA_384]		= 48,
+		[IMB_AUTH_HMAC_SHA_512]		= 64,
+		[IMB_AUTH_AES_XCBC]		= 16,
+		[IMB_AUTH_AES_CMAC]		= 16,
+		[IMB_AUTH_AES_CCM]		= 16,
+		[IMB_AUTH_AES_GMAC]		= 16,
+		[IMB_AUTH_NULL]			= 0,
+		[IMB_AUTH_SHA_1]		= 20,
+		[IMB_AUTH_SHA_224]		= 28,
+		[IMB_AUTH_SHA_256]		= 32,
+		[IMB_AUTH_SHA_384]		= 48,
+		[IMB_AUTH_SHA_512]		= 64,
+		[IMB_AUTH_ZUC_EIA3_BITLEN]	= 4,
+		[IMB_AUTH_SNOW3G_UIA2_BITLEN]	= 4,
+		[IMB_AUTH_KASUMI_UIA1]		= 4
+};
+
+/**
+ * Get the full digest size in bytes for a specified authentication algorithm
+ * (if available in the Multi-buffer library)
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned int
+get_digest_byte_length(IMB_HASH_ALG algo)
+{
+	return auth_digest_byte_lengths[algo];
+}
+
+/** AES-NI multi-buffer private session structure */
+struct aesni_mb_session {
+	IMB_CIPHER_MODE cipher_mode;
+	IMB_CIPHER_DIRECTION cipher_direction;
+	IMB_HASH_ALG hash_alg;
+	IMB_CHAIN_ORDER chain_order;
+	/* Common job fields */
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} auth_iv;
+	/**< IV parameters */
+
+	/** Cipher Parameters */
+	struct {
+		/** Cipher direction - encrypt / decrypt */
+		IMB_CIPHER_DIRECTION direction;
+		/** Cipher mode - CBC / Counter */
+		IMB_CIPHER_MODE mode;
+
+		uint64_t key_length_in_bytes;
+
+		union {
+			struct {
+				uint32_t encode[60] __rte_aligned(16);
+				/**< encode key */
+				uint32_t decode[60] __rte_aligned(16);
+				/**< decode key */
+			} expanded_aes_keys;
+			/**< Expanded AES keys - Allocating space to
+			 * contain the maximum expanded key size which
+			 * is 240 bytes for 256 bit AES, calculated by:
+			 * ((key size (bytes)) *
+			 * ((number of rounds) + 1))
+			 */
+			struct {
+				const void *ks_ptr[3];
+				uint64_t key[3][16];
+			} exp_3des_keys;
+			/**< Expanded 3DES keys */
+
+			struct gcm_key_data gcm_key;
+			/**< Expanded GCM key */
+			uint8_t zuc_cipher_key[16];
+			/**< ZUC cipher key */
+			snow3g_key_schedule_t pKeySched_snow3g_cipher;
+			/**< SNOW3G scheduled cipher key */
+			kasumi_key_sched_t pKeySched_kasumi_cipher;
+			/**< KASUMI scheduled cipher key */
+		};
+	} cipher;
+
+	/**< Authentication Parameters */
+	struct {
+		IMB_HASH_ALG algo; /**< Authentication Algorithm */
+		enum rte_crypto_auth_operation operation;
+		/**< auth operation generate or verify */
+		union {
+			struct {
+				uint8_t inner[128] __rte_aligned(16);
+				/**< inner pad */
+				uint8_t outer[128] __rte_aligned(16);
+				/**< outer pad */
+			} pads;
+			/**< HMAC Authentication pads -
+			 * allocating space for the maximum pad
+			 * size supported which is 128 bytes for
+			 * SHA512
+			 */
+
+			struct {
+				uint32_t k1_expanded[44] __rte_aligned(16);
+				/**< k1 (expanded key). */
+				uint8_t k2[16] __rte_aligned(16);
+				/**< k2. */
+				uint8_t k3[16] __rte_aligned(16);
+				/**< k3. */
+			} xcbc;
+			/**< Expanded XCBC authentication keys */
+
+			struct {
+				uint32_t expkey[60] __rte_aligned(16);
+				/**< Expanded CMAC key. */
+				uint32_t skey1[4] __rte_aligned(16);
+				/**< CMAC subkey 1. */
+				uint32_t skey2[4] __rte_aligned(16);
+				/**< CMAC subkey 2. */
+			} cmac;
+			uint8_t zuc_auth_key[16];
+			/**< ZUC authentication key */
+			snow3g_key_schedule_t pKeySched_snow3g_auth;
+			/**< SNOW3G scheduled authentication key */
+			kasumi_key_sched_t pKeySched_kasumi_auth;
+			/**< KASUMI scheduled authentication key */
+		};
+		/** Digest size generated by the Multi-buffer library */
+		uint16_t gen_digest_len;
+		/** Digest size requested through the Cryptodev API */
+		uint16_t req_digest_len;
+
+	} auth;
+	struct {
+		/** AAD data length */
+		uint16_t aad_len;
+	} aead;
+} __rte_cache_aligned;
+
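+/* Function pointer types for the IMB one-block hash and AES key expansion helpers */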
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys,
+			void *dec_exp_keys);
+
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash	Function pointer
+ *				to calculate digest on ipad/opad
+ * @param ipad			Inner pad output byte array
+ * @param opad			Outer pad output byte array
+ * @param hkey			Authentication key
+ * @param hkey_len		Authentication key length
+ * @param blocksize		Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+		uint8_t *ipad, uint8_t *opad,
+		const uint8_t *hkey, uint16_t hkey_len,
+		uint16_t blocksize)
+{
+	uint32_t i, length;
+
+	uint8_t ipad_buf[blocksize] __rte_aligned(16);
+	uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+	/* Setup inner and outer pads */
+	memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+	memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+	/* XOR hash key with inner and outer pads */
+	length = hkey_len > blocksize ? blocksize : hkey_len;
+
+	for (i = 0; i < length; i++) {
+		ipad_buf[i] ^= hkey[i];
+		opad_buf[i] ^= hkey[i];
+	}
+
+	/* Compute partial hashes */
+	(*one_block_hash)(ipad_buf, ipad);
+	(*one_block_hash)(opad_buf, opad);
+
+	/* Clean up stack */
+	memset(ipad_buf, 0, blocksize);
+	memset(opad_buf, 0, blocksize);
+}
+
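+/** Check if the hash and cipher mode combination selects an AEAD algorithm */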
+static inline int
+is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
+{
+	return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 ||
+		hash_alg == IMB_AUTH_AES_CCM ||
+		(hash_alg == IMB_AUTH_AES_GMAC &&
+		cipher_mode == IMB_CIPHER_GCM));
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	hash_one_block_t hash_oneblock_fn = NULL;
+	unsigned int key_larger_block_size = 0;
+	uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
+	uint32_t auth_precompute = 1;
+
+	if (xform == NULL) {
+		sess->auth.algo = IMB_AUTH_NULL;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth");
+		return -1;
+	}
+
+	/* Set IV parameters */
+	sess->auth_iv.offset = xform->auth.iv.offset;
+	sess->auth_iv.length = xform->auth.iv.length;
+
+	/* Set the request digest size */
+	sess->auth.req_digest_len = xform->auth.digest_length;
+
+	/* Select auth generate/verify */
+	sess->auth.operation = xform->auth.op;
+
+	/* Set Authentication Parameters */
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+		sess->auth.algo = IMB_AUTH_AES_XCBC;
+
+		uint16_t xcbc_mac_digest_len =
+			get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC);
+		if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
+				sess->auth.xcbc.k1_expanded,
+				sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+		uint32_t dust[4*15];
+
+		sess->auth.algo = IMB_AUTH_AES_CMAC;
+
+		uint16_t cmac_digest_len =
+			get_digest_byte_length(IMB_AUTH_AES_CMAC);
+
+		if (sess->auth.req_digest_len > cmac_digest_len) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		/*
+		 * The Multi-buffer library supports digest sizes from 4 to
+		 * 16 bytes in version 0.50, and only 12 and 16 bytes in
+		 * version 0.49.
+		 * If the requested size is different, generate the full
+		 * digest (16 bytes) in a temporary location and then memcpy
+		 * the requested number of bytes.
+		 */
+		if (sess->auth.req_digest_len < 4)
+			sess->auth.gen_digest_len = cmac_digest_len;
+		else
+			sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
+				sess->auth.cmac.expkey, dust);
+		IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
+				sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+			sess->cipher.direction = IMB_DIR_ENCRYPT;
+			sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		} else
+			sess->cipher.direction = IMB_DIR_DECRYPT;
+
+		sess->auth.algo = IMB_AUTH_AES_GMAC;
+		if (sess->auth.req_digest_len >
+			get_digest_byte_length(IMB_AUTH_AES_GMAC)) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+		sess->iv.length = xform->auth.iv.length;
+		sess->iv.offset = xform->auth.iv.offset;
+
+		switch (xform->auth.key.length) {
+		case IMB_KEY_128_BYTES:
+			IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
+				&sess->cipher.gcm_key);
+			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
+			break;
+		case IMB_KEY_192_BYTES:
+			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
+				&sess->cipher.gcm_key);
+			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
+			break;
+		case IMB_KEY_256_BYTES:
+			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
+				&sess->cipher.gcm_key);
+			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+		sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
+		uint16_t zuc_eia3_digest_len =
+			get_truncated_digest_byte_length(
+						IMB_AUTH_ZUC_EIA3_BITLEN);
+		if (sess->auth.req_digest_len != zuc_eia3_digest_len) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		memcpy(sess->auth.zuc_auth_key, xform->auth.key.data, 16);
+		return 0;
+	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+		sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
+		uint16_t snow3g_uia2_digest_len =
+			get_truncated_digest_byte_length(
+						IMB_AUTH_SNOW3G_UIA2_BITLEN);
+		if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
+					&sess->auth.pKeySched_snow3g_auth);
+		return 0;
+	} else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
+		sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
+		uint16_t kasumi_f9_digest_len =
+			get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
+		if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+		IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
+					&sess->auth.pKeySched_kasumi_auth);
+		return 0;
+	}
+
+	switch (xform->auth.algo) {
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		sess->auth.algo = IMB_AUTH_MD5;
+		hash_oneblock_fn = mb_mgr->md5_one_block;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		sess->auth.algo = IMB_AUTH_HMAC_SHA_1;
+		hash_oneblock_fn = mb_mgr->sha1_one_block;
+		if (xform->auth.key.length >
+				get_auth_algo_blocksize(IMB_AUTH_HMAC_SHA_1)) {
+			IMB_SHA1(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA1:
+		sess->auth.algo = IMB_AUTH_SHA_1;
+		auth_precompute = 0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		sess->auth.algo = IMB_AUTH_HMAC_SHA_224;
+		hash_oneblock_fn = mb_mgr->sha224_one_block;
+		if (xform->auth.key.length >
+				get_auth_algo_blocksize(IMB_AUTH_HMAC_SHA_224)) {
+			IMB_SHA224(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		sess->auth.algo = IMB_AUTH_SHA_224;
+		auth_precompute = 0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		sess->auth.algo = IMB_AUTH_HMAC_SHA_256;
+		hash_oneblock_fn = mb_mgr->sha256_one_block;
+		if (xform->auth.key.length >
+				get_auth_algo_blocksize(IMB_AUTH_HMAC_SHA_256)) {
+			IMB_SHA256(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		sess->auth.algo = IMB_AUTH_SHA_256;
+		auth_precompute = 0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		sess->auth.algo = IMB_AUTH_HMAC_SHA_384;
+		hash_oneblock_fn = mb_mgr->sha384_one_block;
+		if (xform->auth.key.length >
+				get_auth_algo_blocksize(IMB_AUTH_HMAC_SHA_384)) {
+			IMB_SHA384(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		sess->auth.algo = IMB_AUTH_SHA_384;
+		auth_precompute = 0;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		sess->auth.algo = IMB_AUTH_HMAC_SHA_512;
+		hash_oneblock_fn = mb_mgr->sha512_one_block;
+		if (xform->auth.key.length >
+				get_auth_algo_blocksize(IMB_AUTH_HMAC_SHA_512)) {
+			IMB_SHA512(mb_mgr,
+				xform->auth.key.data,
+				xform->auth.key.length,
+				hashed_key);
+			key_larger_block_size = 1;
+		}
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		sess->auth.algo = IMB_AUTH_SHA_512;
+		auth_precompute = 0;
+		break;
+	default:
+		IPSEC_MB_LOG(ERR,
+			"Unsupported authentication algorithm selection");
+		return -ENOTSUP;
+	}
+	uint16_t trunc_digest_size =
+			get_truncated_digest_byte_length(sess->auth.algo);
+	uint16_t full_digest_size =
+			get_digest_byte_length(sess->auth.algo);
+
+	if (sess->auth.req_digest_len > full_digest_size ||
+			sess->auth.req_digest_len == 0) {
+		IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+		return -EINVAL;
+	}
+
+	if (sess->auth.req_digest_len != trunc_digest_size &&
+			sess->auth.req_digest_len != full_digest_size)
+		sess->auth.gen_digest_len = full_digest_size;
+	else
+		sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	/* Plain SHA does not require precompute key */
+	if (auth_precompute == 0)
+		return 0;
+
+	/* Calculate Authentication precomputes */
+	if (key_larger_block_size) {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			hashed_key,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	} else {
+		calculate_auth_precomputes(hash_oneblock_fn,
+			sess->auth.pads.inner, sess->auth.pads.outer,
+			xform->auth.key.data,
+			xform->auth.key.length,
+			get_auth_algo_blocksize(sess->auth.algo));
+	}
+
+	return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	uint8_t is_aes = 0;
+	uint8_t is_3DES = 0;
+	uint8_t is_docsis = 0;
+	uint8_t is_zuc = 0;
+	uint8_t is_snow3g = 0;
+	uint8_t is_kasumi = 0;
+
+	if (xform == NULL) {
+		sess->cipher.mode = IMB_CIPHER_NULL;
+		return 0;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher");
+		return -EINVAL;
+	}
+
+	/* Select cipher direction */
+	switch (xform->cipher.op) {
+	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+		sess->cipher.direction = IMB_DIR_ENCRYPT;
+		break;
+	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+		sess->cipher.direction = IMB_DIR_DECRYPT;
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter");
+		return -EINVAL;
+	}
+
+	/* Select cipher mode */
+	switch (xform->cipher.algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		sess->cipher.mode = IMB_CIPHER_CBC;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		sess->cipher.mode = IMB_CIPHER_CNTR;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+		sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
+		is_docsis = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		sess->cipher.mode = IMB_CIPHER_DES;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+		sess->cipher.mode = IMB_CIPHER_DOCSIS_DES;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		sess->cipher.mode = IMB_CIPHER_DES3;
+		is_3DES = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		sess->cipher.mode = IMB_CIPHER_ECB;
+		is_aes = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
+		is_zuc = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
+		is_snow3g = 1;
+		break;
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
+		is_kasumi = 1;
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->cipher.iv.offset;
+	sess->iv.length = xform->cipher.iv.length;
+
+	/* Check key length and choose key expansion function for AES */
+	if (is_aes) {
+		switch (xform->cipher.key.length) {
+		case IMB_KEY_128_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case IMB_KEY_192_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
+			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case IMB_KEY_256_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+	} else if (is_docsis) {
+		switch (xform->cipher.key.length) {
+		case IMB_KEY_128_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case IMB_KEY_256_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+	} else if (is_3DES) {
+		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+				sess->cipher.exp_3des_keys.key[1],
+				sess->cipher.exp_3des_keys.key[2]};
+
+		switch (xform->cipher.key.length) {
+		case 24:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			IMB_DES_KEYSCHED(mb_mgr, keys[2],
+					xform->cipher.key.data + 16);
+
+			/* Initialize keys - 24 bytes: [K1-K2-K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+			break;
+		case 16:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+			IMB_DES_KEYSCHED(mb_mgr, keys[1],
+					xform->cipher.key.data + 8);
+			/* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		case 8:
+			IMB_DES_KEYSCHED(mb_mgr, keys[0],
+					xform->cipher.key.data);
+
+			/* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		sess->cipher.key_length_in_bytes = 24;
+	} else if (is_zuc) {
+		if (xform->cipher.key.length != 16) {
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 16;
+		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
+			16);
+	} else if (is_snow3g) {
+		if (xform->cipher.key.length != 16) {
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 16;
+		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
+					&sess->cipher.pKeySched_snow3g_cipher);
+	} else if (is_kasumi) {
+		if (xform->cipher.key.length != 16) {
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 16;
+		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
+					&sess->cipher.pKeySched_kasumi_cipher);
+	} else {
+		if (xform->cipher.key.length != 8) {
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 8;
+
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
+				xform->cipher.key.data);
+		IMB_DES_KEYSCHED(mb_mgr,
+			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
+				xform->cipher.key.data);
+	}
+
+	return 0;
+}
+
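+/** Set session AEAD parameters */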
+static int
+aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
+		struct aesni_mb_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	switch (xform->aead.op) {
+	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
+		sess->cipher.direction = IMB_DIR_ENCRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+		break;
+	case RTE_CRYPTO_AEAD_OP_DECRYPT:
+		sess->cipher.direction = IMB_DIR_DECRYPT;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Invalid aead operation parameter");
+		return -EINVAL;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->aead.iv.offset;
+	sess->iv.length = xform->aead.iv.length;
+
+	/* Set digest sizes */
+	sess->auth.req_digest_len = xform->aead.digest_length;
+	sess->auth.gen_digest_len = sess->auth.req_digest_len;
+
+	switch (xform->aead.algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		sess->cipher.mode = IMB_CIPHER_CCM;
+		sess->auth.algo = IMB_AUTH_AES_CCM;
+
+		/* Check key length and choose key expansion function for AES */
+		switch (xform->aead.key.length) {
+		case IMB_KEY_128_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
+			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		case IMB_KEY_256_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
+			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
+					sess->cipher.expanded_aes_keys.encode,
+					sess->cipher.expanded_aes_keys.decode);
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		/* CCM digests must be between 4 and 16 and an even number */
+		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
+			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
+			(sess->auth.req_digest_len & 1) == 1) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		sess->cipher.mode = IMB_CIPHER_GCM;
+		sess->auth.algo = IMB_AUTH_AES_GMAC;
+
+		switch (xform->aead.key.length) {
+		case IMB_KEY_128_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
+			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case IMB_KEY_192_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
+			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		case IMB_KEY_256_BYTES:
+			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
+			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
+				&sess->cipher.gcm_key);
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
+			return -EINVAL;
+		}
+
+		/* GCM digest size must be between 1 and 16 */
+		if (sess->auth.req_digest_len == 0 ||
+				sess->auth.req_digest_len > 16) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size");
+			return -EINVAL;
+		}
+		break;
+
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
+		sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;
+
+		if (xform->aead.key.length != 32) {
+			IPSEC_MB_LOG(ERR, "Invalid key length");
+			return -EINVAL;
+		}
+		sess->cipher.key_length_in_bytes = 32;
+		memcpy(sess->cipher.expanded_aes_keys.encode,
+			xform->aead.key.data, 32);
+		if (sess->auth.req_digest_len != 16) {
+			IPSEC_MB_LOG(ERR, "Invalid digest size");
+			return -EINVAL;
+		}
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter");
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
+/** Configure an AESNI multi-buffer session from a crypto xform chain */
+static int
+aesni_mb_session_configure(IMB_MGR *mb_mgr,
+		void *priv_sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	const struct rte_crypto_sym_xform *aead_xform = NULL;
+	enum ipsec_mb_operation mode;
+	struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess;
+	int ret;
+
+	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, &aead_xform);
+	if (ret)
+		return ret;
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (mode) {
+	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+		sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		break;
+	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
+		sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		break;
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
+		sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		break;
+	/*
+	 * The multi-buffer library operates in only two modes,
+	 * IMB_ORDER_CIPHER_HASH and IMB_ORDER_HASH_CIPHER.
+	 * When doing ciphering only, the chain order depends
+	 * on the cipher operation: encryption is always
+	 * the first operation and decryption the last one.
+	 */
+	case IPSEC_MB_OP_ENCRYPT_ONLY:
+		sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		break;
+	case IPSEC_MB_OP_DECRYPT_ONLY:
+		sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		break;
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+		sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		sess->aead.aad_len = xform->aead.aad_length;
+		break;
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+		sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		sess->aead.aad_len = xform->aead.aad_length;
+		break;
+	case IPSEC_MB_OP_NOT_SUPPORTED:
+	default:
+		IPSEC_MB_LOG(ERR,
+			"Unsupported operation chain order parameter");
+		return -ENOTSUP;
+	}
+
+	/* Default IV length = 0 */
+	sess->iv.length = 0;
+	sess->auth_iv.length = 0;
+
+	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR,
+			"Invalid/unsupported authentication parameters");
+		return ret;
+	}
+
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
+			cipher_xform);
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+		return ret;
+	}
+
+	if (aead_xform) {
+		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
+				aead_xform);
+		if (ret != 0) {
+			IPSEC_MB_LOG(ERR,
+				"Invalid/unsupported aead parameters");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+/** Check that a DOCSIS security session configuration is valid */
+static int
+check_docsis_sec_session(struct rte_security_session_conf *conf)
+{
+	struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
+	struct rte_security_docsis_xform *docsis = &conf->docsis;
+
+	/* Downlink: CRC generate -> Cipher encrypt */
+	if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
+
+		if (crypto_sym != NULL &&
+		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+		    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+		    (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
+		     crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
+		    crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
+		    crypto_sym->next == NULL) {
+			return 0;
+		}
+	/* Uplink: Cipher decrypt -> CRC verify */
+	} else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
+
+		if (crypto_sym != NULL &&
+		    crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		    crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+		    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+		    (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
+		     crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
+		    crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
+		    crypto_sym->next == NULL) {
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/** Set DOCSIS security session auth (CRC) parameters */
+static int
+aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
+		struct rte_security_docsis_xform *xform)
+{
+	if (xform == NULL) {
+		IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform");
+		return -EINVAL;
+	}
+
+	/* Select CRC generate/verify */
+	if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
+		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+	} else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
+		sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
+		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+	} else {
+		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction");
+		return -ENOTSUP;
+	}
+
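+	/* The DOCSIS digest is the 4-byte Ethernet CRC32 */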
+	sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
+	sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
+
+	return 0;
+}
+
+/**
+ * Parse DOCSIS security session configuration and set private session
+ * parameters
+ */
+static int
+aesni_mb_set_docsis_sec_session_parameters(
+		__rte_unused struct rte_cryptodev *dev,
+		struct rte_security_session_conf *conf,
+		void *sess)
+{
+	IMB_MGR  *mb_mgr = alloc_init_mb_mgr();
+	struct rte_security_docsis_xform *docsis_xform;
+	struct rte_crypto_sym_xform *cipher_xform;
+	struct aesni_mb_session *ipsec_sess = sess;
+	int ret = 0;
+
+	if (!mb_mgr)
+		return -ENOMEM;
+
+	ret = check_docsis_sec_session(conf);
+	if (ret) {
+		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
+		goto error_exit;
+	}
+
+	switch (conf->docsis.direction) {
+	case RTE_SECURITY_DOCSIS_UPLINK:
+		ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		docsis_xform = &conf->docsis;
+		cipher_xform = conf->crypto_xform;
+		break;
+	case RTE_SECURITY_DOCSIS_DOWNLINK:
+		ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		cipher_xform = conf->crypto_xform;
+		docsis_xform = &conf->docsis;
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+
+	/* Default IV length = 0 */
+	ipsec_sess->iv.length = 0;
+
+	ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess,
+			docsis_xform);
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
+		goto error_exit;
+	}
+
+	ret = aesni_mb_set_session_cipher_parameters(mb_mgr,
+			ipsec_sess, cipher_xform);
+
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
+		goto error_exit;
+	}
+
+error_exit:
+	free_mb_mgr(mb_mgr);
+	return ret;
+}
+#endif
+
+static inline uint64_t
+auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
+		uint32_t oop)
+{
+	struct rte_mbuf *m_src, *m_dst;
+	uint8_t *p_src, *p_dst;
+	uintptr_t u_src, u_dst;
+	uint32_t cipher_end, auth_end;
+
+	/* Only cipher then hash needs special calculation. */
+	if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH)
+		return op->sym->auth.data.offset;
+
+	m_src = op->sym->m_src;
+	m_dst = op->sym->m_dst;
+
+	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
+	u_src = (uintptr_t)p_src;
+	u_dst = (uintptr_t)p_dst + op->sym->auth.data.offset;
+
+	/*
+	 * Copy the content between the cipher offset and the auth offset
+	 * so the digest is generated over the correct data.
+	 */
+	if (op->sym->cipher.data.offset > op->sym->auth.data.offset)
+		memcpy(p_dst + op->sym->auth.data.offset,
+				p_src + op->sym->auth.data.offset,
+				op->sym->cipher.data.offset -
+				op->sym->auth.data.offset);
+
+	/*
+	 * Copy the content between (cipher offset + length) and
+	 * (auth offset + length) for the same reason.
+	 */
+	cipher_end = op->sym->cipher.data.offset + op->sym->cipher.data.length;
+	auth_end = op->sym->auth.data.offset + op->sym->auth.data.length;
+	if (cipher_end < auth_end)
+		memcpy(p_dst + cipher_end, p_src + cipher_end,
+				auth_end - cipher_end);
+
+	/*
+	 * intel-ipsec-mb only supports positive offsets, so when dst sits
+	 * below src the difference is expressed as its two's complement.
+	 */
+
+	return u_src < u_dst ? (u_dst - u_src) :
+			(UINT64_MAX - u_src + u_dst + 1);
+}
+
+static inline void
+set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
+		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
+		struct rte_crypto_va_iova_ptr *iv,
+		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
+{
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+	job->iv = iv->va;
+
+	switch (job->hash_alg) {
+	case IMB_AUTH_AES_XCBC:
+		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+		job->u.XCBC._k2 = session->auth.xcbc.k2;
+		job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case IMB_AUTH_AES_CCM:
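+		/* Per the DPDK API, the CCM AAD is written 18 bytes into
+		 * the buffer: block B0 (16 bytes) and the 2-byte AAD
+		 * length encoding precede it.
+		 */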
+		job->u.CCM.aad = (uint8_t *)aad->va + 18;
+		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.decode;
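+		/* The CCM nonce starts one byte into the IV field, so
+		 * skip the first byte.
+		 */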
+		job->iv++;
+		break;
+
+	case IMB_AUTH_AES_CMAC:
+		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+		job->u.CMAC._skey1 = session->auth.cmac.skey1;
+		job->u.CMAC._skey2 = session->auth.cmac.skey2;
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case IMB_AUTH_AES_GMAC:
+		if (session->cipher.mode == IMB_CIPHER_GCM) {
+			job->u.GCM.aad = aad->va;
+			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		} else {
+			/* For GMAC */
+			job->u.GCM.aad = buf;
+			job->u.GCM.aad_len_in_bytes = len;
+			job->cipher_mode = IMB_CIPHER_GCM;
+		}
+		job->enc_keys = &session->cipher.gcm_key;
+		job->dec_keys = &session->cipher.gcm_key;
+		break;
+
+	case IMB_AUTH_CHACHA20_POLY1305:
+		job->u.CHACHA20_POLY1305.aad = aad->va;
+		job->u.CHACHA20_POLY1305.aad_len_in_bytes =
+			session->aead.aad_len;
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.encode;
+		break;
+	default:
+		job->u.HMAC._hashed_auth_key_xor_ipad =
+				session->auth.pads.inner;
+		job->u.HMAC._hashed_auth_key_xor_opad =
+				session->auth.pads.outer;
+
+		if (job->cipher_mode == IMB_CIPHER_DES3) {
+			job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
+			job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
+		} else {
+			job->enc_keys = session->cipher.expanded_aes_keys.encode;
+			job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		}
+	}
+
+	/*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs.
+	 */
+
+	/* Set digest location and length */
+	job->auth_tag_output = digest;
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+
+	/* Data Parameters */
+	job->src = buf;
+	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
+	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
+	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
+	if (job->hash_alg == IMB_AUTH_AES_GMAC && session->cipher.mode != IMB_CIPHER_GCM) {
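+		/* Plain GMAC: the data was already passed as AAD above,
+		 * so nothing is ciphered or hashed here.
+		 */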
+		job->msg_len_to_hash_in_bytes = 0;
+		job->msg_len_to_cipher_in_bytes = 0;
+	} else {
+		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
+			sofs.ofs.auth.tail;
+		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
+			sofs.ofs.cipher.tail;
+	}
+
+	job->user_data = udata;
+}
+
+/**
+ * Process a crypto operation and complete an IMB_JOB job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param	qp		queue pair
+ * @param	job		IMB_JOB structure to fill
+ * @param	op		crypto op to process
+ * @param	digest_idx	ID for digest to use
+ *
+ * @return
+ * - 0 on success, the IMB_JOB will be filled
+ * - -1 if invalid session, IMB_JOB will not be filled
+ */
+static inline int
+set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+	struct aesni_mb_session *session;
+	uint32_t m_offset, oop;
+
+	session = ipsec_mb_get_session_private(qp, op);
+	if (session == NULL) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+
+	const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);
+
+	switch (job->hash_alg) {
+	case IMB_AUTH_AES_XCBC:
+		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+		job->u.XCBC._k2 = session->auth.xcbc.k2;
+		job->u.XCBC._k3 = session->auth.xcbc.k3;
+
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case IMB_AUTH_AES_CCM:
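+		/* The actual AAD starts 18 bytes into the AAD buffer,
+		 * after block B0 and the 2-byte AAD length encoding.
+		 */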
+		job->u.CCM.aad = op->sym->aead.aad.data + 18;
+		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case IMB_AUTH_AES_CMAC:
+		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+		job->u.CMAC._skey1 = session->auth.cmac.skey1;
+		job->u.CMAC._skey2 = session->auth.cmac.skey2;
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		break;
+
+	case IMB_AUTH_AES_GMAC:
+		if (session->cipher.mode == IMB_CIPHER_GCM) {
+			job->u.GCM.aad = op->sym->aead.aad.data;
+			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		} else {
+			/* For GMAC */
+			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
+					uint8_t *, op->sym->auth.data.offset);
+			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
+			job->cipher_mode = IMB_CIPHER_GCM;
+		}
+		job->enc_keys = &session->cipher.gcm_key;
+		job->dec_keys = &session->cipher.gcm_key;
+		break;
+	case IMB_AUTH_ZUC_EIA3_BITLEN:
+		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
+		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+						session->auth_iv.offset);
+		break;
+	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
+		job->u.SNOW3G_UIA2._key = (void *)
+			&session->auth.pKeySched_snow3g_auth;
+		job->u.SNOW3G_UIA2._iv =
+			rte_crypto_op_ctod_offset(op, uint8_t *,
+						session->auth_iv.offset);
+		break;
+	case IMB_AUTH_KASUMI_UIA1:
+		job->u.KASUMI_UIA1._key = (void *)
+			&session->auth.pKeySched_kasumi_auth;
+		break;
+	case IMB_AUTH_CHACHA20_POLY1305:
+		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
+		job->u.CHACHA20_POLY1305.aad_len_in_bytes =
+			session->aead.aad_len;
+		job->enc_keys = session->cipher.expanded_aes_keys.encode;
+		job->dec_keys = session->cipher.expanded_aes_keys.encode;
+		break;
+	default:
+		job->u.HMAC._hashed_auth_key_xor_ipad =
+			session->auth.pads.inner;
+		job->u.HMAC._hashed_auth_key_xor_opad =
+			session->auth.pads.outer;
+
+		if (job->cipher_mode == IMB_CIPHER_DES3) {
+			job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
+			job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
+		} else {
+			job->enc_keys = session->cipher.expanded_aes_keys.encode;
+			job->dec_keys = session->cipher.expanded_aes_keys.decode;
+		}
+	}
+
+	if (aead)
+		m_offset = op->sym->aead.data.offset;
+	else
+		m_offset = op->sym->cipher.data.offset;
+
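+	/* Wireless algorithms take their key schedules here. SNOW3G and
+	 * KASUMI clear m_offset so dst is not offset; their data offset
+	 * is applied through the job's cipher offset field instead.
+	 */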
+	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
+		job->enc_keys = session->cipher.zuc_cipher_key;
+		job->dec_keys = session->cipher.zuc_cipher_key;
+	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
+		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
+		m_offset = 0;
+	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
+		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
+		m_offset = 0;
+	}
+
+	if (op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src) {
+		/* in-place operation */
+		m_dst = m_src;
+		oop = 0;
+	} else {
+		/* out-of-place operation */
+		m_dst = op->sym->m_dst;
+		oop = 1;
+	}
+
+	/* Set digest output location */
+	if (job->hash_alg != IMB_AUTH_NULL &&
+			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
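+		/* For verify, compute the digest into a scratch buffer in
+		 * the queue pair; it is compared against the op's digest
+		 * during post-processing instead of overwriting it.
+		 */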
+		job->auth_tag_output = qp_data->temp_digests[*digest_idx];
+		*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
+	} else {
+		if (aead)
+			job->auth_tag_output = op->sym->aead.digest.data;
+		else
+			job->auth_tag_output = op->sym->auth.digest.data;
+
+		if (session->auth.req_digest_len !=
+				session->auth.gen_digest_len) {
+			job->auth_tag_output =
+				qp_data->temp_digests[*digest_idx];
+			*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
+		}
+	}
+	/*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs.
+	 */
+
+	/* Set digest length */
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+
+	/* Data Parameters */
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+
+	switch (job->hash_alg) {
+	case IMB_AUTH_AES_CCM:
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;
+
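+		/* Skip the first IV byte: the CCM nonce starts at offset 1 */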
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset + 1);
+		break;
+
+	case IMB_AUTH_AES_GMAC:
+		if (session->cipher.mode == IMB_CIPHER_GCM) {
+			job->cipher_start_src_offset_in_bytes =
+					op->sym->aead.data.offset;
+			job->hash_start_src_offset_in_bytes =
+					op->sym->aead.data.offset;
+			job->msg_len_to_cipher_in_bytes =
+					op->sym->aead.data.length;
+			job->msg_len_to_hash_in_bytes =
+					op->sym->aead.data.length;
+		} else {
+			job->cipher_start_src_offset_in_bytes =
+					op->sym->auth.data.offset;
+			job->hash_start_src_offset_in_bytes =
+					op->sym->auth.data.offset;
+			job->msg_len_to_cipher_in_bytes = 0;
+			job->msg_len_to_hash_in_bytes = 0;
+		}
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+		break;
+
+	case IMB_AUTH_CHACHA20_POLY1305:
+		job->cipher_start_src_offset_in_bytes =
+			op->sym->aead.data.offset;
+		job->hash_start_src_offset_in_bytes =
+			op->sym->aead.data.offset;
+		job->msg_len_to_cipher_in_bytes =
+				op->sym->aead.data.length;
+		job->msg_len_to_hash_in_bytes =
+					op->sym->aead.data.length;
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+		break;
+	default:
+		/* For SNOW3G, length and offsets are already in bits */
+		job->cipher_start_src_offset_in_bytes =
+				op->sym->cipher.data.offset;
+		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
+				session, oop);
+		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+			session->iv.offset);
+	}
+
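+	/* ZUC cipher lengths and KASUMI hash lengths arrive in bits from
+	 * the crypto op; convert them to bytes for the IPsec MB library.
+	 */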
+	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3)
+		job->msg_len_to_cipher_in_bytes >>= 3;
+	else if (job->hash_alg == IMB_AUTH_KASUMI_UIA1)
+		job->msg_len_to_hash_in_bytes >>= 3;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return 0;
+}
+
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+/**
+ * Process a crypto operation containing a security op and complete an
+ * IMB_JOB job structure for submission to the multi-buffer library for
+ * processing.
+ */
+static inline int
+set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
+			struct rte_crypto_op *op, uint8_t *digest_idx)
+{
+	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+	struct rte_mbuf *m_src, *m_dst;
+	struct rte_crypto_sym_op *sym;
+	struct aesni_mb_session *session = NULL;
+
+	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+	session = (struct aesni_mb_session *)
+		get_sec_session_private_data(op->sym->sec_session);
+
+	if (unlikely(session == NULL)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return -1;
+	}
+	/* Only DOCSIS protocol operations supported now */
+	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
+			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -1;
+	}
+
+	sym = op->sym;
+	m_src = sym->m_src;
+
+	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
+		/* in-place operation */
+		m_dst = m_src;
+	} else {
+		/* out-of-place operation not supported */
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return -ENOTSUP;
+	}
+
+	/* Set crypto operation */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
+	job->enc_keys = session->cipher.expanded_aes_keys.encode;
+	job->dec_keys = session->cipher.expanded_aes_keys.decode;
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+	job->iv = (uint8_t *)op + session->iv.offset;
+
+	/* Set authentication parameters */
+	job->hash_alg = session->auth.algo;
+
+	/* Set digest output location */
+	job->auth_tag_output = qp_data->temp_digests[*digest_idx];
+	*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
+
+	/* Set digest length */
+	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
+
+	/* Set data parameters */
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
+						sym->cipher.data.offset);
+
+	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
+	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;
+
+	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
+	job->msg_len_to_hash_in_bytes = sym->auth.data.length;
+
+	job->user_data = op;
+
+	return 0;
+}
+
+static inline void
+verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status)
+{
+	uint16_t crc_offset;
+	uint8_t *crc;
+
+	if (!job->msg_len_to_hash_in_bytes)
+		return;
+
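+	/* job->dst points at the cipher offset, so the CRC sits at the
+	 * end of the hashed region relative to that point.
+	 */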
+	crc_offset = job->hash_start_src_offset_in_bytes +
+			job->msg_len_to_hash_in_bytes -
+			job->cipher_start_src_offset_in_bytes;
+	crc = job->dst + crc_offset;
+
+	/* Verify CRC (at the end of the message) */
+	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
+		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+}
+#endif
+
+static inline void
+verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status)
+{
+	/* Verify digest if required */
+	if (memcmp(job->auth_tag_output, digest, len) != 0)
+		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+}
+
+static inline void
+generate_digest(IMB_JOB *job, struct rte_crypto_op *op,
+		struct aesni_mb_session *sess)
+{
+	/* No extra copy needed */
+	if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
+		return;
+
+	/*
+	 * Sizes can differ only for HMAC, where the full digest is
+	 * generated and then truncated to the requested length.
+	 */
+	memcpy(op->sym->auth.digest.data, job->auth_tag_output,
+			sess->auth.req_digest_len);
+}
+
+/**
+ * Process a completed job and return the crypto operation it processed
+ *
+ * @param qp	Queue Pair to process
+ * @param job	IMB_JOB job to process
+ *
+ * @return
+ * - Returns processed crypto operation.
+ * - Returns NULL on invalid job
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
+{
+	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+	struct aesni_mb_session *sess = NULL;
+	uint32_t driver_id = ipsec_mb_get_driver_id(
+						IPSEC_MB_PMD_TYPE_AESNI_MB);
+
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+	uint8_t is_docsis_sec = 0;
+
+	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		/*
+		 * At this point, a security-type op is assumed to be a
+		 * DOCSIS operation.
+		 */
+		is_docsis_sec = 1;
+		sess = get_sec_session_private_data(op->sym->sec_session);
+	} else
+#endif
+	{
+		sess = get_sym_session_private_data(op->sym->session,
+						driver_id);
+	}
+
+	if (unlikely(sess == NULL)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		return op;
+	}
+
+	if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
+		switch (job->status) {
+		case IMB_STATUS_COMPLETED:
+			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+			if (job->hash_alg == IMB_AUTH_NULL)
+				break;
+
+			if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+				if (is_aead_algo(job->hash_alg,
+						sess->cipher.mode))
+					verify_digest(job,
+						op->sym->aead.digest.data,
+						sess->auth.req_digest_len,
+						&op->status);
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+				else if (is_docsis_sec)
+					verify_docsis_sec_crc(job,
+						&op->status);
+#endif
+				else
+					verify_digest(job,
+						op->sym->auth.digest.data,
+						sess->auth.req_digest_len,
+						&op->status);
+			} else
+				generate_digest(job, op, sess);
+			break;
+		default:
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+	}
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_mb_session));
+		memset(op->sym->session, 0,
+			rte_cryptodev_sym_get_existing_header_session_size(
+				op->sym->session));
+		rte_mempool_put(qp->sess_mp_priv, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	return op;
+}
+
+static inline void
+post_process_mb_sync_job(IMB_JOB *job)
+{
+	uint32_t *st;
+
+	st = job->user_data;
+	st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG;
+}
+
+/**
+ * Process a completed IMB_JOB job and keep processing jobs until
+ * get_completed_job returns NULL
+ *
+ * @param qp		Queue Pair to process
+ * @param mb_mgr	IMB_MGR to use
+ * @param job		IMB_JOB job
+ * @param ops		crypto ops to fill
+ * @param nb_ops	number of crypto ops
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
+		IMB_JOB *job, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct rte_crypto_op *op = NULL;
+	uint16_t processed_jobs = 0;
+
+	while (job != NULL) {
+		op = post_process_mb_job(qp, job);
+
+		if (op) {
+			ops[processed_jobs++] = op;
+			qp->stats.dequeued_count++;
+		} else {
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+		if (processed_jobs == nb_ops)
+			break;
+
+		job = IMB_GET_COMPLETED_JOB(mb_mgr);
+	}
+
+	return processed_jobs;
+}
+
+static inline uint32_t
+handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
+{
+	uint32_t i;
+
+	for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
+		post_process_mb_sync_job(job);
+
+	return i;
+}
+
+static inline uint32_t
+flush_mb_sync_mgr(IMB_MGR *mb_mgr)
+{
+	IMB_JOB *job;
+
+	job = IMB_FLUSH_JOB(mb_mgr);
+	return handle_completed_sync_jobs(job, mb_mgr);
+}
+
+static inline uint16_t
+flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	int processed_ops = 0;
+
+	/* Flush the remaining jobs */
+	IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
+
+	if (job)
+		processed_ops += handle_completed_jobs(qp, mb_mgr, job,
+				&ops[processed_ops], nb_ops - processed_ops);
+
+	return processed_ops;
+}
+
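+/**
+ * Turn a failed op into a no-op job so the multi-buffer manager
+ * pipeline stays consistent and the op can still be returned.
+ */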
+static inline IMB_JOB *
+set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
+{
+	job->chain_order = IMB_ORDER_HASH_CIPHER;
+	job->cipher_mode = IMB_CIPHER_NULL;
+	job->hash_alg = IMB_AUTH_NULL;
+	job->cipher_direction = IMB_DIR_DECRYPT;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return job;
+}
+
+static uint16_t
+aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct ipsec_mb_qp *qp = queue_pair;
+	IMB_MGR *mb_mgr = qp->mb_mgr;
+	struct rte_crypto_op *op;
+	IMB_JOB *job;
+	int retval, processed_jobs = 0;
+
+	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
+		return 0;
+
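+	/* Work on a local copy of the digest index; it is written back
+	 * to the queue pair once the burst has been processed.
+	 */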
+	uint8_t digest_idx = qp->digest_idx;
+
+	do {
+		/* Get next free mb job struct from mb manager */
+		job = IMB_GET_NEXT_JOB(mb_mgr);
+		if (unlikely(job == NULL)) {
+			/* if no free mb job structs we need to flush mb_mgr */
+			processed_jobs += flush_mb_mgr(qp, mb_mgr,
+					&ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+			if (nb_ops == processed_jobs)
+				break;
+
+			job = IMB_GET_NEXT_JOB(mb_mgr);
+		}
+
+		/*
+		 * Get next operation to process from ingress queue.
+		 * There is no need to return the job to the IMB_MGR
+		 * if there are no more operations to process, since the IMB_MGR
+		 * can use that pointer again in next get_next calls.
+		 */
+		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+		if (retval < 0)
+			break;
+
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+			retval = set_sec_mb_job_params(job, qp, op,
+						&digest_idx);
+		else
+#endif
+			retval = set_mb_job_params(job, qp, op,
+				&digest_idx);
+
+		if (unlikely(retval != 0)) {
+			qp->stats.dequeue_err_count++;
+			set_job_null_op(job, op);
+		}
+
+		/* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+		job = IMB_SUBMIT_JOB(mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
+#endif
+		/*
+		 * If submit returns a processed job then handle it,
+		 * before submitting subsequent jobs
+		 */
+		if (job)
+			processed_jobs += handle_completed_jobs(qp, mb_mgr,
+					job, &ops[processed_jobs],
+					nb_ops - processed_jobs);
+
+	} while (processed_jobs < nb_ops);
+
+	qp->digest_idx = digest_idx;
+
+	if (processed_jobs < 1)
+		processed_jobs += flush_mb_mgr(qp, mb_mgr,
+				&ops[processed_jobs],
+				nb_ops - processed_jobs);
+
+	return processed_jobs;
+}
+
+static inline void
+ipsec_mb_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t err)
+{
+	uint32_t i;
+
+	for (i = 0; i != vec->num; ++i)
+		vec->status[i] = err;
+}
+
+static inline int
+check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
+{
+	/* no multi-seg support with current AESNI-MB PMD */
+	if (sgl->num != 1)
+		return -ENOTSUP;
+	else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
+		return -EINVAL;
+	return 0;
+}
+
+static inline IMB_JOB *
+submit_sync_job(IMB_MGR *mb_mgr)
+{
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+	return IMB_SUBMIT_JOB(mb_mgr);
+#else
+	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
+#endif
+}
+
+static inline uint32_t
+generate_sync_dgst(struct rte_crypto_sym_vec *vec,
+	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+{
+	uint32_t i, k;
+
+	for (i = 0, k = 0; i != vec->num; i++) {
+		if (vec->status[i] == 0) {
+			memcpy(vec->digest[i].va, dgst[i], len);
+			k++;
+		}
+	}
+
+	return k;
+}
+
+static inline uint32_t
+verify_sync_dgst(struct rte_crypto_sym_vec *vec,
+	const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
+{
+	uint32_t i, k;
+
+	for (i = 0, k = 0; i != vec->num; i++) {
+		if (vec->status[i] == 0) {
+			if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
+				vec->status[i] = EBADMSG;
+			else
+				k++;
+		}
+	}
+
+	return k;
+}
+
+static uint32_t
+aesni_mb_process_bulk(struct rte_cryptodev *dev,
+	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
+	struct rte_crypto_sym_vec *vec)
+{
+	int32_t ret;
+	uint32_t i, j, k, len;
+	void *buf;
+	IMB_JOB *job;
+	IMB_MGR *mb_mgr;
+	struct aesni_mb_session *s;
+	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];
+
+	s = get_sym_session_private_data(sess, dev->driver_id);
+	if (s == NULL) {
+		ipsec_mb_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+
+	/* get per-thread MB MGR, create one if needed */
+	mb_mgr = get_per_thread_mb_mgr();
+	if (unlikely(mb_mgr == NULL))
+		return 0;
+
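+	/* i walks the input vectors, j counts submitted jobs and
+	 * k counts completed ones.
+	 */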
+	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
+		ret = check_crypto_sgl(sofs, vec->sgl + i);
+		if (ret != 0) {
+			vec->status[i] = ret;
+			continue;
+		}
+
+		buf = vec->sgl[i].vec[0].base;
+		len = vec->sgl[i].vec[0].len;
+
+		job = IMB_GET_NEXT_JOB(mb_mgr);
+		if (job == NULL) {
+			k += flush_mb_sync_mgr(mb_mgr);
+			job = IMB_GET_NEXT_JOB(mb_mgr);
+			RTE_ASSERT(job != NULL);
+		}
+
+		/* Submit job for processing */
+		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
+			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
+		job = submit_sync_job(mb_mgr);
+		j++;
+
+		/* handle completed jobs */
+		k += handle_completed_sync_jobs(job, mb_mgr);
+	}
+
+	/* flush remaining jobs */
+	while (k != j)
+		k += flush_mb_sync_mgr(mb_mgr);
+
+	/* finish processing for successful jobs: check/update digest */
+	if (k != 0) {
+		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+			k = verify_sync_dgst(vec,
+				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+				s->auth.req_digest_len);
+		else
+			k = generate_sync_dgst(vec,
+				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
+				s->auth.req_digest_len);
+	}
+
+	return k;
+}
+
+struct rte_cryptodev_ops aes_mb_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
+
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
+
+	.dev_infos_get = ipsec_mb_pmd_info_get,
+
+	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
+
+	.sym_cpu_process = aesni_mb_process_bulk,
+
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
+
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+/**
+ * Configure an AESNI multi-buffer session from a security session
+ * configuration
+ */
+static int
+aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
+		struct rte_security_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	int ret;
+
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
+			conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
+		IPSEC_MB_LOG(ERR, "Invalid security protocol");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
+			sess_private_data);
+
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR, "Failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sec_session_private_data(sess, sess_private_data);
+
+	return ret;
+}
+
+/** Clear the memory of session so it does not leave key material behind */
+static int
+aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	void *sess_priv = get_sec_session_private_data(sess);
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
+		set_sec_session_private_data(sess, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+	return 0;
+}
+
+static const struct rte_cryptodev_capabilities
+					aesni_mb_pmd_security_crypto_cap[] = {
+	{	/* AES DOCSIS BPI */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability aesni_mb_pmd_security_cap[] = {
+	{	/* DOCSIS Uplink */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
+		.docsis = {
+			.direction = RTE_SECURITY_DOCSIS_UPLINK
+		},
+		.crypto_capabilities = aesni_mb_pmd_security_crypto_cap
+	},
+	{	/* DOCSIS Downlink */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,
+		.docsis = {
+			.direction = RTE_SECURITY_DOCSIS_DOWNLINK
+		},
+		.crypto_capabilities = aesni_mb_pmd_security_crypto_cap
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+/** Get security capabilities for aesni multi-buffer */
+static const struct rte_security_capability *
+aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
+{
+	return aesni_mb_pmd_security_cap;
+}
+
+static struct rte_security_ops aesni_mb_pmd_sec_ops = {
+		.session_create = aesni_mb_pmd_sec_sess_create,
+		.session_update = NULL,
+		.session_stats_get = NULL,
+		.session_destroy = aesni_mb_pmd_sec_sess_destroy,
+		.set_pkt_metadata = NULL,
+		.capabilities_get = aesni_mb_pmd_sec_capa_get
+};
+
+struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;
+
+static int
+aesni_mb_configure_dev(struct rte_cryptodev *dev)
+{
+	struct rte_security_ctx *security_instance;
+
+	security_instance = rte_malloc("aesni_mb_sec",
+				sizeof(struct rte_security_ctx),
+				RTE_CACHE_LINE_SIZE);
+	if (security_instance != NULL) {
+		security_instance->device = (void *)dev;
+		security_instance->ops = rte_aesni_mb_pmd_sec_ops;
+		security_instance->sess_cnt = 0;
+		dev->security_ctx = security_instance;
+
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+#endif
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+	return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+	.probe = cryptodev_aesni_mb_probe,
+	.remove = cryptodev_ipsec_mb_remove
+};
+
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
+	cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+			"max_nb_queue_pairs=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(
+	aesni_mb_crypto_drv,
+	cryptodev_aesni_mb_pmd_drv.driver,
+	pmd_driver_id_aesni_mb);
+
+/* Constructor function to register aesni-mb PMD */
+RTE_INIT(ipsec_mb_register_aesni_mb)
+{
+	struct ipsec_mb_pmd_data *aesni_mb_data =
+		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];
+
+	aesni_mb_data->caps = aesni_mb_capabilities;
+	aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst;
+	aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
+			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
+			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+
+	aesni_mb_data->internals_priv_size = 0;
+	aesni_mb_data->ops = &aes_mb_pmd_ops;
+	aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
+	aesni_mb_data->queue_pair_configure = NULL;
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+	aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops;
+	aesni_mb_data->dev_config = aesni_mb_configure_dev;
+	aesni_mb_data->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
+#endif
+	aesni_mb_data->session_configure = aesni_mb_session_configure;
+	aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
index 3f2cefed52..e71037f345 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
@@ -105,6 +105,7 @@ cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
 	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
 	dev->enqueue_burst = ipsec_mb_pmd_enqueue_burst;
 	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
+	dev->feature_flags = pmd_data->feature_flags;
 
 	if (pmd_data->dev_config) {
 		retval = (*pmd_data->dev_config)(dev);
@@ -116,8 +117,6 @@ cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
 		}
 	}
 
-	dev->feature_flags = pmd_data->feature_flags;
-
 	switch (vector_mode) {
 	case IPSEC_MB_AVX512:
 		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
@@ -164,6 +163,10 @@ cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev)
 		rte_free(cryptodev->security_ctx);
 		cryptodev->security_ctx = NULL;
 	}
+#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
+	rte_free(cryptodev->security_ctx);
+	cryptodev->security_ctx = NULL;
+#endif
 
 	return rte_cryptodev_pmd_destroy(cryptodev);
 }
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 35860b1b10..2b589eee47 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -34,6 +34,9 @@ extern enum ipsec_mb_vector_mode vector_mode;
 /** IMB_MGR instances, one per thread */
 extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
 
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+/**< IPSEC Multi buffer aesni_mb PMD device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -42,6 +45,7 @@ extern int ipsec_mb_logtype_driver;
 
 /** All supported device types */
 enum ipsec_mb_pmd_types {
+	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -60,10 +64,18 @@ enum ipsec_mb_operation {
 	IPSEC_MB_OP_NOT_SUPPORTED
 };
 
+extern uint8_t pmd_driver_id_aesni_mb;
+
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
-ipsec_mb_get_driver_id(__rte_unused enum ipsec_mb_pmd_types pmd_type)
+ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 {
+	switch (pmd_type) {
+	case IPSEC_MB_PMD_TYPE_AESNI_MB:
+		return pmd_driver_id_aesni_mb;
+	default:
+		break;
+	}
 	return UINT8_MAX;
 }
 
@@ -136,6 +148,135 @@ get_per_thread_mb_mgr(void)
 	return RTE_PER_LCORE(mb_mgr);
 }
 
+/** Helper function. Gets mode and chained xforms from the xform */
+static __rte_always_inline int
+ipsec_mb_parse_xform(const struct rte_crypto_sym_xform *xform,
+			enum ipsec_mb_operation *mode,
+			const struct rte_crypto_sym_xform **auth_xform,
+			const struct rte_crypto_sym_xform **cipher_xform,
+			const struct rte_crypto_sym_xform **aead_xform)
+{
+	const struct rte_crypto_sym_xform *next;
+
+	if (xform == NULL) {
+		*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+		return -ENOTSUP;
+	}
+
+	next = xform->next;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (next == NULL) {
+			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+				*mode = IPSEC_MB_OP_ENCRYPT_ONLY;
+				*cipher_xform = xform;
+				*auth_xform = NULL;
+				return 0;
+			}
+			*mode = IPSEC_MB_OP_DECRYPT_ONLY;
+			*cipher_xform = xform;
+			*auth_xform = NULL;
+			return 0;
+		}
+
+		if (next->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+			return -ENOTSUP;
+		}
+
+		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+			if (next->auth.op != RTE_CRYPTO_AUTH_OP_GENERATE) {
+				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+				return -ENOTSUP;
+			}
+
+			*mode = IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN;
+			*cipher_xform = xform;
+			*auth_xform = xform->next;
+			return 0;
+		}
+		if (next->auth.op != RTE_CRYPTO_AUTH_OP_VERIFY) {
+			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+			return -ENOTSUP;
+		}
+
+		*mode = IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY;
+		*cipher_xform = xform;
+		*auth_xform = xform->next;
+		return 0;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (next == NULL) {
+			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+				*mode = IPSEC_MB_OP_HASH_GEN_ONLY;
+				*auth_xform = xform;
+				*cipher_xform = NULL;
+				return 0;
+			}
+			*mode = IPSEC_MB_OP_HASH_VERIFY_ONLY;
+			*auth_xform = xform;
+			*cipher_xform = NULL;
+			return 0;
+		}
+
+		if (next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+			return -ENOTSUP;
+		}
+
+		if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+			if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+				*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+				return -ENOTSUP;
+			}
+
+			*mode = IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT;
+			*auth_xform = xform;
+			*cipher_xform = xform->next;
+			return 0;
+		}
+		if (next->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+			*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+			return -ENOTSUP;
+		}
+
+		*mode = IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT;
+		*auth_xform = xform;
+		*cipher_xform = xform->next;
+		return 0;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/*
+		 * CCM requires hashing first and ciphering later when
+		 * encrypting, so the encrypt and decrypt modes are
+		 * swapped for it to obtain the right chain order.
+		 */
+		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
+			else
+				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
+		} else {
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT;
+			else
+				*mode = IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT;
+		}
+		*aead_xform = xform;
+		return 0;
+	}
+
+	*mode = IPSEC_MB_OP_NOT_SUPPORTED;
+	return -ENOTSUP;
+}
+
 /** Device creation function */
 int
 cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index e40b18b17b..b2ccea6f94 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -8,7 +8,6 @@ endif
 drivers = [
         'ipsec_mb',
         'aesni_gcm',
-        'aesni_mb',
         'armv8',
         'bcmfs',
         'caam_jr',
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 04/10] drivers/crypto: move aesni-gcm PMD to IPsec-mb framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (2 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-10-06 14:31           ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 05/10] drivers/crypto: move kasumi " Ciara Power
                           ` (5 subsequent siblings)
  9 siblings, 1 reply; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Thomas Monjalon, Pablo de Lara, Ray Kinsella

From: Piotr Bronowski <piotrx.bronowski@intel.com>

This patch removes the crypto/aesni_gcm folder and gathers all
aesni-gcm PMD implementation-specific details into a single file,
pmd_aesni_gcm.c in the crypto/ipsec_mb folder.
A redundant check for IV length is removed.

GCM ops are stored in the queue pair for multi-process support; they
are updated during queue pair setup for both primary and secondary
processes.

GCM ops are also set per lcore for the CPU crypto mode.

Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v3:
  - Moved session GCM ops to queue pair.
  - Added GCM ops per lcore.
  - Fixed some formatting.
v2:
  - Fixed enum for GCM key length.
  - Updated maintainers file.
---
 MAINTAINERS                                   |    9 +-
 doc/guides/cryptodevs/aesni_gcm.rst           |    4 +-
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h      |  104 --
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c      |  984 ----------------
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  |  333 ------
 .../crypto/aesni_gcm/aesni_gcm_pmd_private.h  |  123 --
 drivers/crypto/aesni_gcm/meson.build          |   24 -
 drivers/crypto/aesni_gcm/version.map          |    3 -
 drivers/crypto/ipsec_mb/meson.build           |    3 +-
 drivers/crypto/ipsec_mb/pmd_aesni_gcm.c       | 1003 +++++++++++++++++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |    7 +
 drivers/crypto/meson.build                    |    1 -
 12 files changed, 1017 insertions(+), 1581 deletions(-)
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 delete mode 100644 drivers/crypto/aesni_gcm/meson.build
 delete mode 100644 drivers/crypto/aesni_gcm/version.map
 create mode 100644 drivers/crypto/ipsec_mb/pmd_aesni_gcm.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 7b00cd8791..6247e50687 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1042,13 +1042,6 @@ M: Fan Zhang <roy.fan.zhang@intel.com>
 F: drivers/crypto/scheduler/
 F: doc/guides/cryptodevs/scheduler.rst
 
-Intel AES-NI GCM
-M: Declan Doherty <declan.doherty@intel.com>
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: drivers/crypto/aesni_gcm/
-F: doc/guides/cryptodevs/aesni_gcm.rst
-F: doc/guides/cryptodevs/features/aesni_gcm.ini
-
 Intel QuickAssist
 M: John Griffin <john.griffin@intel.com>
 M: Fiona Trahe <fiona.trahe@intel.com>
@@ -1062,7 +1055,9 @@ IPsec MB
 M: Fan Zhang <roy.fan.zhang@intel.com>
 M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: drivers/crypto/ipsec_mb/
+F: doc/guides/cryptodevs/aesni_gcm.rst
 F: doc/guides/cryptodevs/aesni_mb.rst
+F: doc/guides/cryptodevs/features/aesni_gcm.ini
 F: doc/guides/cryptodevs/features/aesni_mb.ini
 
 KASUMI
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 11b23958d5..bbe9d99840 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -83,7 +83,9 @@ and the external crypto libraries supported by them:
    17.02 - 17.05  ISA-L Crypto v2.18
    17.08 - 18.02  Multi-buffer library 0.46 - 0.48
    18.05 - 19.02  Multi-buffer library 0.49 - 0.52
-   19.05+         Multi-buffer library 0.52 - 1.0*
+   19.05 - 20.08  Multi-buffer library 0.52 - 0.55
+   20.11 - 21.08  Multi-buffer library 0.53 - 1.0*
+   21.11+         Multi-buffer library 1.0*
    =============  ================================
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
deleted file mode 100644
index 8a0d074b6e..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _AESNI_GCM_OPS_H_
-#define _AESNI_GCM_OPS_H_
-
-#ifndef LINUX
-#define LINUX
-#endif
-
-#include <intel-ipsec-mb.h>
-
-/** Supported vector modes */
-enum aesni_gcm_vector_mode {
-	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
-	RTE_AESNI_GCM_SSE,
-	RTE_AESNI_GCM_AVX,
-	RTE_AESNI_GCM_AVX2,
-	RTE_AESNI_GCM_AVX512,
-	RTE_AESNI_GCM_VECTOR_NUM
-};
-
-enum aesni_gcm_key {
-	GCM_KEY_128 = 0,
-	GCM_KEY_192,
-	GCM_KEY_256,
-	GCM_KEY_NUM
-};
-
-typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data, uint8_t *out,
-		const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
-		const uint8_t *aad, uint64_t aad_len,
-		uint8_t *auth_tag, uint64_t auth_tag_len);
-
-typedef void (*aesni_gcm_pre_t)(const void *key, struct gcm_key_data *gcm_data);
-
-typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		const uint8_t *iv,
-		uint8_t const *aad,
-		uint64_t aad_len);
-
-typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		uint8_t *out,
-		const uint8_t *in,
-		uint64_t plaintext_len);
-
-typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		uint8_t *auth_tag,
-		uint64_t auth_tag_len);
-
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		const uint8_t *iv,
-		const uint64_t iv_len);
-
-typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		const uint8_t *in,
-		const uint64_t plaintext_len);
-
-typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		uint8_t *auth_tag,
-		const uint64_t auth_tag_len);
-#endif
-
-/** GCM library function pointer table */
-struct aesni_gcm_ops {
-	aesni_gcm_t enc;        /**< GCM encode function pointer */
-	aesni_gcm_t dec;        /**< GCM decode function pointer */
-	aesni_gcm_pre_t pre;    /**< GCM pre-compute */
-	aesni_gcm_init_t init;
-	aesni_gcm_update_t update_enc;
-	aesni_gcm_update_t update_dec;
-	aesni_gcm_finalize_t finalize_enc;
-	aesni_gcm_finalize_t finalize_dec;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	aesni_gmac_init_t gmac_init;
-	aesni_gmac_update_t gmac_update;
-	aesni_gmac_finalize_t gmac_finalize;
-#endif
-};
-
-/** GCM per-session operation handlers */
-struct aesni_gcm_session_ops {
-	aesni_gcm_t cipher;
-	aesni_gcm_pre_t pre;
-	aesni_gcm_init_t init;
-	aesni_gcm_update_t update;
-	aesni_gcm_finalize_t finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	aesni_gmac_init_t gmac_init;
-	aesni_gmac_update_t gmac_update;
-	aesni_gmac_finalize_t gmac_finalize;
-#endif
-};
-
-#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
deleted file mode 100644
index 330aad8157..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ /dev/null
@@ -1,984 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
-#include <rte_byteorder.h>
-
-#include "aesni_gcm_pmd_private.h"
-
-static uint8_t cryptodev_driver_id;
-
-/* setup session handlers */
-static void
-set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
-{
-	s->ops.pre = gcm_ops->pre;
-	s->ops.init = gcm_ops->init;
-
-	switch (s->op) {
-	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
-		s->ops.cipher = gcm_ops->enc;
-		s->ops.update = gcm_ops->update_enc;
-		s->ops.finalize = gcm_ops->finalize_enc;
-		break;
-	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
-		s->ops.cipher = gcm_ops->dec;
-		s->ops.update = gcm_ops->update_dec;
-		s->ops.finalize = gcm_ops->finalize_dec;
-		break;
-	case AESNI_GMAC_OP_GENERATE:
-	case AESNI_GMAC_OP_VERIFY:
-		s->ops.finalize = gcm_ops->finalize_enc;
-		break;
-	}
-}
-
-/** Parse crypto xform chain and set private session parameters */
-int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
-		struct aesni_gcm_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	const struct rte_crypto_sym_xform *auth_xform;
-	const struct rte_crypto_sym_xform *aead_xform;
-	uint8_t key_length;
-	const uint8_t *key;
-
-	/* AES-GMAC */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = xform;
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
-			AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
-				"authentication only algorithm");
-			return -ENOTSUP;
-		}
-		/* Set IV parameters */
-		sess->iv.offset = auth_xform->auth.iv.offset;
-		sess->iv.length = auth_xform->auth.iv.length;
-
-		/* Select Crypto operation */
-		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
-			sess->op = AESNI_GMAC_OP_GENERATE;
-		else
-			sess->op = AESNI_GMAC_OP_VERIFY;
-
-		key_length = auth_xform->auth.key.length;
-		key = auth_xform->auth.key.data;
-		sess->req_digest_length = auth_xform->auth.digest_length;
-
-	/* AES-GCM */
-	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		aead_xform = xform;
-
-		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
-			AESNI_GCM_LOG(ERR, "The only combined operation "
-						"supported is AES GCM");
-			return -ENOTSUP;
-		}
-
-		/* Set IV parameters */
-		sess->iv.offset = aead_xform->aead.iv.offset;
-		sess->iv.length = aead_xform->aead.iv.length;
-
-		/* Select Crypto operation */
-		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
-			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-		/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
-		else
-			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
-		key_length = aead_xform->aead.key.length;
-		key = aead_xform->aead.key.data;
-
-		sess->aad_length = aead_xform->aead.aad_length;
-		sess->req_digest_length = aead_xform->aead.digest_length;
-	} else {
-		AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
-		return -ENOTSUP;
-	}
-
-	/* IV check */
-	if (sess->iv.length != 16 && sess->iv.length != 12 &&
-			sess->iv.length != 0) {
-		AESNI_GCM_LOG(ERR, "Wrong IV length");
-		return -EINVAL;
-	}
-
-	/* Check key length and calculate GCM pre-compute. */
-	switch (key_length) {
-	case 16:
-		sess->key = GCM_KEY_128;
-		break;
-	case 24:
-		sess->key = GCM_KEY_192;
-		break;
-	case 32:
-		sess->key = GCM_KEY_256;
-		break;
-	default:
-		AESNI_GCM_LOG(ERR, "Invalid key length");
-		return -EINVAL;
-	}
-
-	/* setup session handlers */
-	set_func_ops(sess, &gcm_ops[sess->key]);
-
-	/* pre-generate key */
-	gcm_ops[sess->key].pre(key, &sess->gdata_key);
-
-	/* Digest check */
-	if (sess->req_digest_length > 16) {
-		AESNI_GCM_LOG(ERR, "Invalid digest length");
-		return -EINVAL;
-	}
-	/*
-	 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
-	 * in version 0.50 and sizes of 8, 12 and 16 bytes,
-	 * in version 0.49.
-	 * If size requested is different, generate the full digest
-	 * (16 bytes) in a temporary location and then memcpy
-	 * the requested number of bytes.
-	 */
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-	if (sess->req_digest_length < 4)
-#else
-	if (sess->req_digest_length != 16 &&
-			sess->req_digest_length != 12 &&
-			sess->req_digest_length != 8)
-#endif
-		sess->gen_digest_length = 16;
-	else
-		sess->gen_digest_length = sess->req_digest_length;
-
-	return 0;
-}
-
-/** Get gcm session */
-static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
-{
-	struct aesni_gcm_session *sess = NULL;
-	struct rte_crypto_sym_op *sym_op = op->sym;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (likely(sym_op->session != NULL))
-			sess = (struct aesni_gcm_session *)
-					get_sym_session_private_data(
-					sym_op->session,
-					cryptodev_driver_id);
-	} else  {
-		void *_sess;
-		void *_sess_private_data = NULL;
-
-		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
-			return NULL;
-
-		if (rte_mempool_get(qp->sess_mp_priv,
-				(void **)&_sess_private_data))
-			return NULL;
-
-		sess = (struct aesni_gcm_session *)_sess_private_data;
-
-		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
-				sess, sym_op->xform) != 0)) {
-			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
-			sess = NULL;
-		}
-		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_sym_session_private_data(sym_op->session,
-				cryptodev_driver_id, _sess_private_data);
-	}
-
-	if (unlikely(sess == NULL))
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
-	return sess;
-}
-
-/**
- * Process a crypto operation, calling
- * the GCM API from the multi buffer library.
- *
- * @param	qp		queue pair
- * @param	op		symmetric crypto operation
- * @param	session		GCM session
- *
- * @return
- *  0 on success
- */
-static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
-		struct aesni_gcm_session *session)
-{
-	uint8_t *src, *dst;
-	uint8_t *iv_ptr;
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	struct rte_mbuf *m_src = sym_op->m_src;
-	uint32_t offset, data_offset, data_length;
-	uint32_t part_len, total_len, data_len;
-	uint8_t *tag;
-	unsigned int oop = 0;
-
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
-			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		offset = sym_op->aead.data.offset;
-		data_offset = offset;
-		data_length = sym_op->aead.data.length;
-	} else {
-		offset = sym_op->auth.data.offset;
-		data_offset = offset;
-		data_length = sym_op->auth.data.length;
-	}
-
-	RTE_ASSERT(m_src != NULL);
-
-	while (offset >= m_src->data_len && data_length != 0) {
-		offset -= m_src->data_len;
-		m_src = m_src->next;
-
-		RTE_ASSERT(m_src != NULL);
-	}
-
-	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
-	data_len = m_src->data_len - offset;
-	part_len = (data_len < data_length) ? data_len :
-			data_length;
-
-	RTE_ASSERT((sym_op->m_dst == NULL) ||
-			((sym_op->m_dst != NULL) &&
-					rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
-	/* In-place */
-	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
-		dst = src;
-	/* Out-of-place */
-	else {
-		oop = 1;
-		/* Segmented destination buffer is not supported if operation is
-		 * Out-of-place */
-		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
-		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
-					data_offset);
-	}
-
-	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
-				session->iv.offset);
-
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				sym_op->aead.aad.data,
-				(uint64_t)session->aad_length);
-
-		qp->ops[session->key].update_enc(&session->gdata_key,
-				&qp->gdata_ctx, dst, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			if (oop)
-				dst += part_len;
-			else
-				dst = src;
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].update_enc(&session->gdata_key,
-					&qp->gdata_ctx, dst, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		if (session->req_digest_length != session->gen_digest_length)
-			tag = qp->temp_digest;
-		else
-			tag = sym_op->aead.digest.data;
-
-		qp->ops[session->key].finalize_enc(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				sym_op->aead.aad.data,
-				(uint64_t)session->aad_length);
-
-		qp->ops[session->key].update_dec(&session->gdata_key,
-				&qp->gdata_ctx, dst, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			if (oop)
-				dst += part_len;
-			else
-				dst = src;
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].update_dec(&session->gdata_key,
-					&qp->gdata_ctx,
-					dst, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		tag = qp->temp_digest;
-		qp->ops[session->key].finalize_dec(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
-		qp->ops[session->key].gmac_init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				session->iv.length);
-
-		qp->ops[session->key].gmac_update(&session->gdata_key,
-				&qp->gdata_ctx, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].gmac_update(&session->gdata_key,
-					&qp->gdata_ctx, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		if (session->req_digest_length != session->gen_digest_length)
-			tag = qp->temp_digest;
-		else
-			tag = sym_op->auth.digest.data;
-
-		qp->ops[session->key].gmac_finalize(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	} else { /* AESNI_GMAC_OP_VERIFY */
-		qp->ops[session->key].gmac_init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				session->iv.length);
-
-		qp->ops[session->key].gmac_update(&session->gdata_key,
-				&qp->gdata_ctx, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].gmac_update(&session->gdata_key,
-					&qp->gdata_ctx, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		tag = qp->temp_digest;
-
-		qp->ops[session->key].gmac_finalize(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	}
-#else
-	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				src,
-				(uint64_t)data_length);
-		if (session->req_digest_length != session->gen_digest_length)
-			tag = qp->temp_digest;
-		else
-			tag = sym_op->auth.digest.data;
-		qp->ops[session->key].finalize_enc(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	} else { /* AESNI_GMAC_OP_VERIFY */
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				src,
-				(uint64_t)data_length);
-
-		/*
-		 * Generate always 16 bytes and later compare only
-		 * the bytes passed.
-		 */
-		tag = qp->temp_digest;
-		qp->ops[session->key].finalize_enc(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	}
-#endif
-
-	return 0;
-}
-
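
The mbuf walk in process_gcm_crypto_op() above is the heart of the segmented-buffer support: it first skips whole segments until the data offset lands inside one, then feeds each update() call with min(segment remainder, bytes left). A simplified, standalone version of the same walk, a sketch over a hypothetical segment list rather than the driver's exact code:

	#include <stdint.h>

	struct seg {
		uint32_t len;
		struct seg *next;
	};

	static uint32_t
	walk(struct seg *s, uint32_t off, uint32_t total)
	{
		uint32_t done = 0, part;

		while (off >= s->len) {		/* skip to first segment */
			off -= s->len;
			s = s->next;
		}
		while (done < total) {		/* per-segment chunks */
			part = s->len - off;
			if (part > total - done)
				part = total - done;
			/* a real caller would process 'part' bytes here */
			done += part;
			off = 0;
			s = s->next;
		}
		return done;
	}
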
-static inline void
-aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
-{
-	uint32_t i;
-
-	for (i = 0; i < vec->num; i++)
-		vec->status[i] = errnum;
-}
-
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, uint8_t *digest)
-{
-	if (s->req_digest_length != s->gen_digest_length) {
-		uint8_t tmpdigest[s->gen_digest_length];
-
-		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
-			s->gen_digest_length);
-		memcpy(digest, tmpdigest, s->req_digest_length);
-	} else {
-		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
-			s->gen_digest_length);
-	}
-
-	return 0;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, uint8_t *digest)
-{
-	uint8_t tmpdigest[s->gen_digest_length];
-
-	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
-		s->gen_digest_length);
-
-	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
-		EBADMSG;
-}
-
-static inline void
-aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
-	void *iv, void *aad)
-{
-	uint32_t i;
-
-	/* init crypto operation */
-	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
-		(uint64_t)s->aad_length);
-
-	/* update with sgl data */
-	for (i = 0; i < sgl->num; i++) {
-		struct rte_crypto_vec *vec = &sgl->vec[i];
-
-		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
-			vec->len);
-	}
-}
-
-static inline void
-aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
-	void *iv)
-{
-	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
-		sgl->vec[0].len);
-}
-
-static inline uint32_t
-aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
-			vec->aad[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-static inline uint32_t
-aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
-			vec->aad[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
-			vec->status[i] = ENOTSUP;
-			continue;
-		}
-
-		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
-			vec->status[i] = ENOTSUP;
-			continue;
-		}
-
-		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-/** Process CPU crypto bulk operations */
-uint32_t
-aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
-	struct rte_cryptodev_sym_session *sess,
-	__rte_unused union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_sym_vec *vec)
-{
-	void *sess_priv;
-	struct aesni_gcm_session *s;
-	struct gcm_context_data gdata_ctx;
-
-	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
-	if (unlikely(sess_priv == NULL)) {
-		aesni_gcm_fill_error_code(vec, EINVAL);
-		return 0;
-	}
-
-	s = sess_priv;
-	switch (s->op) {
-	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
-		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
-	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
-		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
-	case AESNI_GMAC_OP_GENERATE:
-		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
-	case AESNI_GMAC_OP_VERIFY:
-		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
-	default:
-		aesni_gcm_fill_error_code(vec, EINVAL);
-		return 0;
-	}
-}
-
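
aesni_gcm_pmd_cpu_crypto_process() above backs the RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO feature: the application calls into the PMD synchronously on its own lcore, with no ring or burst machinery involved. A hedged sketch of the application-side call, assuming dev_id, sess and vec are already set up and omitting error handling:

	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	uint32_t ok;

	/* runs the whole operation on the calling lcore */
	ok = rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs, &vec);
	/* ok == vec.num on full success; per-element results land
	 * in vec.status[] */
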
-/**
- * Post-process a completed GCM crypto operation: compare the computed
- * digest against the one supplied for decrypt/verify operations, and
- * copy out a truncated digest for encrypt/generate operations when the
- * requested length differs from the generated one.
- *
- * @param qp		queue pair
- * @param op		symmetric crypto operation
- * @param session	GCM session
- */
-static void
-post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
-		struct rte_crypto_op *op,
-		struct aesni_gcm_session *session)
-{
-	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-	/* Verify digest if required */
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
-			session->op == AESNI_GMAC_OP_VERIFY) {
-		uint8_t *digest;
-
-		uint8_t *tag = qp->temp_digest;
-
-		if (session->op == AESNI_GMAC_OP_VERIFY)
-			digest = op->sym->auth.digest.data;
-		else
-			digest = op->sym->aead.digest.data;
-
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
-		rte_hexdump(stdout, "auth tag (orig):",
-				digest, session->req_digest_length);
-		rte_hexdump(stdout, "auth tag (calc):",
-				tag, session->req_digest_length);
-#endif
-
-		if (memcmp(tag, digest,	session->req_digest_length) != 0)
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	} else {
-		if (session->req_digest_length != session->gen_digest_length) {
-			if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
-				memcpy(op->sym->aead.digest.data, qp->temp_digest,
-						session->req_digest_length);
-			else
-				memcpy(op->sym->auth.digest.data, qp->temp_digest,
-						session->req_digest_length);
-		}
-	}
-}
-
-/**
- * Process a completed GCM request
- *
- * @param qp		Queue Pair to process
- * @param op		Crypto operation
- * @param sess		GCM session
- */
-static void
-handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
-		struct rte_crypto_op *op,
-		struct aesni_gcm_session *sess)
-{
-	post_process_gcm_crypto_op(qp, op, sess);
-
-	/* Free session if a session-less crypto op */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(sess, 0, sizeof(struct aesni_gcm_session));
-		memset(op->sym->session, 0,
-			rte_cryptodev_sym_get_existing_header_session_size(
-				op->sym->session));
-		rte_mempool_put(qp->sess_mp_priv, sess);
-		rte_mempool_put(qp->sess_mp, op->sym->session);
-		op->sym->session = NULL;
-	}
-}
-
-static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct aesni_gcm_session *sess;
-	struct aesni_gcm_qp *qp = queue_pair;
-
-	int retval = 0;
-	unsigned int i, nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
-			(void **)ops, nb_ops, NULL);
-
-	for (i = 0; i < nb_dequeued; i++) {
-
-		sess = aesni_gcm_get_session(qp, ops[i]);
-		if (unlikely(sess == NULL)) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.dequeue_err_count++;
-			break;
-		}
-
-		retval = process_gcm_crypto_op(qp, ops[i], sess);
-		if (retval < 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.dequeue_err_count++;
-			break;
-		}
-
-		handle_completed_gcm_crypto_op(qp, ops[i], sess);
-	}
-
-	qp->qp_stats.dequeued_count += i;
-
-	return i;
-}
-
-static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct aesni_gcm_qp *qp = queue_pair;
-
-	unsigned int nb_enqueued;
-
-	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
-			(void **)ops, nb_ops, NULL);
-	qp->qp_stats.enqueued_count += nb_enqueued;
-
-	return nb_enqueued;
-}
-
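
Note the inverted processing model in the two burst functions above: enqueue only parks ops on the ring, and all crypto work happens inside dequeue. Since GCM completes synchronously there is no hardware completion to wait for, and deferring the work to dequeue time preserves the standard burst API semantics. Caller-side usage is unchanged; a sketch, with dev_id, qp_id and ops assumed set up:

	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
			ops, nb_ops);
	/* ... */
	uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
			ops, nb_ops);	/* processing happens here */
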
-static int aesni_gcm_remove(struct rte_vdev_device *vdev);
-
-static int
-aesni_gcm_create(const char *name,
-		struct rte_vdev_device *vdev,
-		struct rte_cryptodev_pmd_init_params *init_params)
-{
-	struct rte_cryptodev *dev;
-	struct aesni_gcm_private *internals;
-	enum aesni_gcm_vector_mode vector_mode;
-	MB_MGR *mb_mgr;
-
-	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
-	if (dev == NULL) {
-		AESNI_GCM_LOG(ERR, "driver %s: create failed",
-			init_params->name);
-		return -ENODEV;
-	}
-
-	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
-		vector_mode = RTE_AESNI_GCM_AVX512;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
-		vector_mode = RTE_AESNI_GCM_AVX2;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
-		vector_mode = RTE_AESNI_GCM_AVX;
-	else
-		vector_mode = RTE_AESNI_GCM_SSE;
-
-	dev->driver_id = cryptodev_driver_id;
-	dev->dev_ops = rte_aesni_gcm_pmd_ops;
-
-	/* register rx/tx burst functions for data path */
-	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
-	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
-
-	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
-			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
-
-	/* Check CPU for support for AES instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
-	else
-		AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");
-
-	mb_mgr = alloc_mb_mgr(0);
-	if (mb_mgr == NULL)
-		return -ENOMEM;
-
-	switch (vector_mode) {
-	case RTE_AESNI_GCM_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mb_mgr);
-		break;
-	case RTE_AESNI_GCM_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mb_mgr);
-		break;
-	case RTE_AESNI_GCM_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(mb_mgr);
-		break;
-	case RTE_AESNI_GCM_AVX512:
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_VAES)) {
-			dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
-			init_mb_mgr_avx512(mb_mgr);
-		} else {
-			dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-			init_mb_mgr_avx2(mb_mgr);
-			vector_mode = RTE_AESNI_GCM_AVX2;
-		}
-		break;
-	default:
-		AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
-		goto error_exit;
-	}
-
-	internals = dev->data->dev_private;
-
-	internals->vector_mode = vector_mode;
-	internals->mb_mgr = mb_mgr;
-
-	/* Set arch independent function pointers, based on key size */
-	internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
-	internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
-	internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
-	internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
-	internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
-	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
-	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
-	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
-	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
-	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
-#endif
-
-	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
-	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
-	internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
-	internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
-	internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
-	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
-	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
-	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
-	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
-	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
-#endif
-
-	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
-	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
-	internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
-	internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
-	internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
-	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
-	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
-	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
-	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
-	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
-#endif
-
-	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
-
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
-			imb_get_version_str());
-#else
-	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
-#endif
-
-	return 0;
-
-error_exit:
-	if (mb_mgr)
-		free_mb_mgr(mb_mgr);
-
-	rte_cryptodev_pmd_destroy(dev);
-
-	return -1;
-}
-
-static int
-aesni_gcm_probe(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev_pmd_init_params init_params = {
-		"",
-		sizeof(struct aesni_gcm_private),
-		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
-	};
-	const char *name;
-	const char *input_args;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-	input_args = rte_vdev_device_args(vdev);
-	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
-
-	return aesni_gcm_create(name, vdev, &init_params);
-}
-
-static int
-aesni_gcm_remove(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev *cryptodev;
-	struct aesni_gcm_private *internals;
-	const char *name;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	internals = cryptodev->data->dev_private;
-
-	free_mb_mgr(internals->mb_mgr);
-
-	return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_vdev_driver aesni_gcm_pmd_drv = {
-	.probe = aesni_gcm_probe,
-	.remove = aesni_gcm_remove
-};
-
-static struct cryptodev_driver aesni_gcm_crypto_drv;
-
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
-RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
-	"max_nb_queue_pairs=<int> "
-	"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
-		cryptodev_driver_id);
-RTE_LOG_REGISTER_DEFAULT(aesni_gcm_logtype_driver, NOTICE);
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
deleted file mode 100644
index edb7275e76..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ /dev/null
@@ -1,333 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <cryptodev_pmd.h>
-
-#include "aesni_gcm_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
-	{	/* AES GMAC (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GCM */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
-			{.aead = {
-				.algo = RTE_CRYPTO_AEAD_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.aad_size = {
-					.min = 0,
-					.max = 65535,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-/** Configure device */
-static int
-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-/** Start device */
-static int
-aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-/** Stop device */
-static void
-aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-
-/** Get device statistics */
-static void
-aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
-
-		stats->enqueued_count += qp->qp_stats.enqueued_count;
-		stats->dequeued_count += qp->qp_stats.dequeued_count;
-
-		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
-		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
-	}
-}
-
-/** Reset device statistics */
-static void
-aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
-
-		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-	}
-}
-
-
-/** Get device info */
-static void
-aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *dev_info)
-{
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
-	if (dev_info != NULL) {
-		dev_info->driver_id = dev->driver_id;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = aesni_gcm_pmd_capabilities;
-
-		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		/* No limit of number of sessions */
-		dev_info->sym.max_nb_sessions = 0;
-	}
-}
-
-/** Release queue pair */
-static int
-aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
-	if (dev->data->queue_pairs[qp_id] != NULL) {
-		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
-
-		if (qp->processed_pkts)
-			rte_ring_free(qp->processed_pkts);
-
-		rte_free(dev->data->queue_pairs[qp_id]);
-		dev->data->queue_pairs[qp_id] = NULL;
-	}
-	return 0;
-}
-
-/** Set a unique name for the queue pair based on the dev_id and qp_id */
-static int
-aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
-		struct aesni_gcm_qp *qp)
-{
-	unsigned n = snprintf(qp->name, sizeof(qp->name),
-			"aesni_gcm_pmd_%u_qp_%u",
-			dev->data->dev_id, qp->id);
-
-	if (n >= sizeof(qp->name))
-		return -1;
-
-	return 0;
-}
-
-/** Create a ring to place process packets on */
-static struct rte_ring *
-aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
-		unsigned ring_size, int socket_id)
-{
-	struct rte_ring *r;
-
-	r = rte_ring_lookup(qp->name);
-	if (r) {
-		if (rte_ring_get_size(r) >= ring_size) {
-			AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
-				" packets", qp->name);
-			return r;
-		}
-		AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
-				" packets", qp->name);
-		return NULL;
-	}
-
-	return rte_ring_create(qp->name, ring_size, socket_id,
-			RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
-{
-	struct aesni_gcm_qp *qp = NULL;
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		aesni_gcm_pmd_qp_release(dev, qp_id);
-
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
-					RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return (-ENOMEM);
-
-	qp->id = qp_id;
-	dev->data->queue_pairs[qp_id] = qp;
-
-	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
-		goto qp_setup_cleanup;
-
-	qp->ops = (const struct aesni_gcm_ops *)internals->ops;
-
-	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_pkts == NULL)
-		goto qp_setup_cleanup;
-
-	qp->sess_mp = qp_conf->mp_session;
-	qp->sess_mp_priv = qp_conf->mp_session_private;
-
-	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-
-	return 0;
-
-qp_setup_cleanup:
-	if (qp)
-		rte_free(qp);
-
-	return -1;
-}
-
-/** Returns the size of the aesni gcm session structure */
-static unsigned
-aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct aesni_gcm_session);
-}
-
-/** Configure an aesni gcm session from a crypto xform chain */
-static int
-aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
-		struct rte_crypto_sym_xform *xform,
-		struct rte_cryptodev_sym_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	int ret;
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
-	if (unlikely(sess == NULL)) {
-		AESNI_GCM_LOG(ERR, "invalid session struct");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		AESNI_GCM_LOG(ERR,
-				"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-	ret = aesni_gcm_set_session_parameters(internals->ops,
-				sess_private_data, xform);
-	if (ret != 0) {
-		AESNI_GCM_LOG(ERR, "failed configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sym_session_private_data(sess, dev->driver_id,
-			sess_private_data);
-
-	return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_sym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_sym_session_private_data(sess, index);
-
-	/* Zero out the whole structure */
-	if (sess_priv) {
-		memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_sym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
-		.dev_configure		= aesni_gcm_pmd_config,
-		.dev_start		= aesni_gcm_pmd_start,
-		.dev_stop		= aesni_gcm_pmd_stop,
-		.dev_close		= aesni_gcm_pmd_close,
-
-		.stats_get		= aesni_gcm_pmd_stats_get,
-		.stats_reset		= aesni_gcm_pmd_stats_reset,
-
-		.dev_infos_get		= aesni_gcm_pmd_info_get,
-
-		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
-		.queue_pair_release	= aesni_gcm_pmd_qp_release,
-
-		.sym_cpu_process        = aesni_gcm_pmd_cpu_crypto_process,
-
-		.sym_session_get_size	= aesni_gcm_pmd_sym_session_get_size,
-		.sym_session_configure	= aesni_gcm_pmd_sym_session_configure,
-		.sym_session_clear	= aesni_gcm_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
deleted file mode 100644
index 2763d1c492..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _AESNI_GCM_PMD_PRIVATE_H_
-#define _AESNI_GCM_PMD_PRIVATE_H_
-
-#include "aesni_gcm_ops.h"
-
-/*
- * IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
- * so if macro is not defined, it means that the version is 0.49.
- */
-#if !defined(IMB_VERSION_NUM)
-#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
-#endif
-
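
The packing above makes version checks plain integer comparisons: major in the high bits, minor in the middle byte, patch in the low byte. For example:

	/* IMB_VERSION(0, 54, 0) = (0 << 16) + (54 << 8) + 0 = 13824
	 * IMB_VERSION(0, 49, 0) = (0 << 16) + (49 << 8) + 0 = 12544
	 * so the 0.49 fallback always compares lower than 0.54 */
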
-#define CRYPTODEV_NAME_AESNI_GCM_PMD	crypto_aesni_gcm
-/**< AES-NI GCM PMD device name */
-
-/** AES-NI GCM PMD  LOGTYPE DRIVER */
-extern int aesni_gcm_logtype_driver;
-#define AESNI_GCM_LOG(level, fmt, ...) \
-	rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver,	\
-			"%s() line %u: "fmt "\n", __func__, __LINE__,	\
-					## __VA_ARGS__)
-
-/* Maximum length for digest */
-#define DIGEST_LENGTH_MAX 16
-
-/** private data structure for each virtual AESNI GCM device */
-struct aesni_gcm_private {
-	enum aesni_gcm_vector_mode vector_mode;
-	/**< Vector mode */
-	unsigned max_nb_queue_pairs;
-	/**< Max number of queue pairs supported by device */
-	MB_MGR *mb_mgr;
-	/**< Multi-buffer instance */
-	struct aesni_gcm_ops ops[GCM_KEY_NUM];
-	/**< Function pointer table of the gcm APIs */
-};
-
-struct aesni_gcm_qp {
-	const struct aesni_gcm_ops *ops;
-	/**< Function pointer table of the gcm APIs */
-	struct rte_ring *processed_pkts;
-	/**< Ring for placing process packets */
-	struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
-	/**< GCM parameters */
-	struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
-	/**< Queue pair statistics */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session Private Data Mempool */
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	/**< Unique Queue Pair Name */
-	uint8_t temp_digest[DIGEST_LENGTH_MAX];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-} __rte_cache_aligned;
-
-
-enum aesni_gcm_operation {
-	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
-	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
-	AESNI_GMAC_OP_GENERATE,
-	AESNI_GMAC_OP_VERIFY
-};
-
-/** AESNI GCM private session structure */
-struct aesni_gcm_session {
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;
-	/**< IV parameters */
-	uint16_t aad_length;
-	/**< AAD length */
-	uint16_t req_digest_length;
-	/**< Requested digest length */
-	uint16_t gen_digest_length;
-	/**< Generated digest length */
-	enum aesni_gcm_operation op;
-	/**< GCM operation type */
-	enum aesni_gcm_key key;
-	/**< GCM key type */
-	struct gcm_key_data gdata_key;
-	/**< GCM parameters */
-	struct aesni_gcm_session_ops ops;
-	/**< Session handlers */
-};
-
-
-/**
- * Setup GCM session parameters
- * @param	sess	aesni gcm session structure
- * @param	xform	crypto transform chain
- *
- * @return
- * - On success returns 0
- * - On failure returns error code < 0
- */
-extern int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
-		struct aesni_gcm_session *sess,
-		const struct rte_crypto_sym_xform *xform);
-
-/* Device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
-
-/** CPU crypto bulk process handler */
-uint32_t
-aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
-	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_sym_vec *vec);
-
-#endif /* _AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/meson.build b/drivers/crypto/aesni_gcm/meson.build
deleted file mode 100644
index 0fcac2a8eb..0000000000
--- a/drivers/crypto/aesni_gcm/meson.build
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Intel Corporation
-
-IMB_required_ver = '0.52.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
-    build = false
-    reason = 'missing dependency, "libIPSec_MB"'
-else
-    ext_deps += lib
-
-    # version comes with quotes, so we split based on " and take the middle
-    imb_ver = cc.get_define('IMB_VERSION_STR',
-        prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
-        reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
-                IMB_required_ver, imb_ver)
-        build = false
-    endif
-endif
-
-sources = files('aesni_gcm_pmd.c', 'aesni_gcm_pmd_ops.c')
-deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_gcm/version.map b/drivers/crypto/aesni_gcm/version.map
deleted file mode 100644
index c2e0723b4c..0000000000
--- a/drivers/crypto/aesni_gcm/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_22 {
-	local: *;
-};
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index bac5d85e26..8550eaee9a 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -23,6 +23,7 @@ endif
 
 sources = files('rte_ipsec_mb_pmd.c',
 		'rte_ipsec_mb_pmd_ops.c',
-		'pmd_aesni_mb.c'
+		'pmd_aesni_mb.c',
+		'pmd_aesni_gcm.c'
 		)
 deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
new file mode 100644
index 0000000000..2fcfa97a63
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
@@ -0,0 +1,1003 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#if defined(RTE_LIB_SECURITY)
+#define AESNI_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_ether.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#endif
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+#define AESNI_GCM_IV_LENGTH 12
+
+static const struct rte_cryptodev_capabilities aesni_gcm_capabilities[] = {
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = AESNI_GCM_IV_LENGTH,
+					.max = AESNI_GCM_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = AESNI_GCM_IV_LENGTH,
+					.max = AESNI_GCM_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t pmd_driver_id_aesni_gcm;
+
+enum aesni_gcm_key_length {
+	GCM_KEY_128 = 0,
+	GCM_KEY_192,
+	GCM_KEY_256,
+	GCM_NUM_KEY_TYPES
+};
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+			    struct gcm_context_data *gcm_ctx_data,
+			    uint8_t *out, const uint8_t *in,
+			    uint64_t plaintext_len, const uint8_t *iv,
+			    const uint8_t *aad, uint64_t aad_len,
+			    uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_pre_t)(const void *key,
+				struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+				 struct gcm_context_data *gcm_ctx_data,
+				 const uint8_t *iv, uint8_t const *aad,
+				 uint64_t aad_len);
+
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+				   struct gcm_context_data *gcm_ctx_data,
+				   uint8_t *out, const uint8_t *in,
+				   uint64_t plaintext_len);
+
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+				     struct gcm_context_data *gcm_ctx_data,
+				     uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+				  struct gcm_context_data *gcm_ctx_data,
+				  const uint8_t *iv, const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+				    struct gcm_context_data *gcm_ctx_data,
+				    const uint8_t *in,
+				    const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+				      struct gcm_context_data *gcm_ctx_data,
+				      uint8_t *auth_tag,
+				      const uint64_t auth_tag_len);
+
+/** GCM operation handlers */
+struct aesni_gcm_ops {
+	aesni_gcm_t enc;
+	aesni_gcm_t dec;
+	aesni_gcm_pre_t pre;
+	aesni_gcm_init_t init;
+	aesni_gcm_update_t update_enc;
+	aesni_gcm_update_t update_dec;
+	aesni_gcm_finalize_t finalize_enc;
+	aesni_gcm_finalize_t finalize_dec;
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+};
+
+RTE_DEFINE_PER_LCORE(struct aesni_gcm_ops[GCM_NUM_KEY_TYPES], gcm_ops);
+
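
Unlike the removed driver, which kept the ops table in the per-device private data, the table here is defined per lcore, so code running on any lcore (for instance the CPU crypto path) can reach its function pointers without a device lookup. A short access sketch, where key_data, ctx, iv, aad and aad_len are placeholders:

	struct aesni_gcm_ops *o = &RTE_PER_LCORE(gcm_ops)[GCM_KEY_128];

	o->init(&key_data, &ctx, iv, aad, aad_len);
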
+struct aesni_gcm_qp_data {
+	struct gcm_context_data gcm_ctx_data;
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+	struct aesni_gcm_ops ops[GCM_NUM_KEY_TYPES];
+	/**< Operation Handlers */
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	uint16_t aad_length;
+	/**< AAD length */
+	uint16_t req_digest_length;
+	/**< Requested digest length */
+	uint16_t gen_digest_length;
+	/**< Generated digest length */
+	enum ipsec_mb_operation op;
+	/**< GCM operation type */
+	struct gcm_key_data gdata_key;
+	/**< GCM parameters */
+	enum aesni_gcm_key_length key_length;
+	/** Key Length */
+};
+
+static void
+aesni_gcm_set_ops(struct aesni_gcm_ops *ops, IMB_MGR *mb_mgr)
+{
+	/* Set 128 bit function pointers. */
+	ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
+	ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
+
+	ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
+	ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
+	ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
+
+	ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
+	ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
+	ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
+
+	ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
+	ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
+	ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
+
+	/* Set 192 bit function pointers. */
+	ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
+	ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
+
+	ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
+	ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
+	ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
+
+	ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
+	ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
+	ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
+
+	ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
+	ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
+	ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
+
+	/* Set 256 bit function pointers. */
+	ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
+	ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
+
+	ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
+	ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
+	ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
+
+	ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
+	ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
+	ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
+
+	ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
+	ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
+	ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
+}
+
+static int
+aesni_gcm_session_configure(IMB_MGR *mb_mgr, void *session,
+			    const struct rte_crypto_sym_xform *xform)
+{
+	struct aesni_gcm_session *sess = session;
+	const struct rte_crypto_sym_xform *auth_xform;
+	const struct rte_crypto_sym_xform *cipher_xform;
+	const struct rte_crypto_sym_xform *aead_xform;
+
+	uint8_t key_length;
+	const uint8_t *key;
+	enum ipsec_mb_operation mode;
+	int ret = 0;
+
+	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, &aead_xform);
+	if (ret)
+		return ret;
+
+	sess->op = mode;
+
+	switch (sess->op) {
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+		/* AES-GMAC
+		 * auth_xform = xform;
+		 */
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+			IPSEC_MB_LOG(ERR,
+	"Only AES GMAC is supported as an authentication only algorithm");
+			ret = -ENOTSUP;
+			goto error_exit;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = auth_xform->auth.iv.offset;
+		sess->iv.length = auth_xform->auth.iv.length;
+		key_length = auth_xform->auth.key.length;
+		key = auth_xform->auth.key.data;
+		sess->req_digest_length = auth_xform->auth.digest_length;
+		break;
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+		/* AES-GCM
+		 * aead_xform = xform;
+		 */
+
+		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+			IPSEC_MB_LOG(ERR,
+			"The only combined operation supported is AES GCM");
+			ret = -ENOTSUP;
+			goto error_exit;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = aead_xform->aead.iv.offset;
+		sess->iv.length = aead_xform->aead.iv.length;
+		key_length = aead_xform->aead.key.length;
+		key = aead_xform->aead.key.data;
+		sess->aad_length = aead_xform->aead.aad_length;
+		sess->req_digest_length = aead_xform->aead.digest_length;
+		break;
+	default:
+		IPSEC_MB_LOG(
+		    ERR, "Wrong xform type, has to be AEAD or authentication");
+		ret = -ENOTSUP;
+		goto error_exit;
+	}
+
+	/* Check key length, and calculate GCM pre-compute. */
+	switch (key_length) {
+	case 16:
+		sess->key_length = GCM_KEY_128;
+		mb_mgr->gcm128_pre(key, &sess->gdata_key);
+		break;
+	case 24:
+		sess->key_length = GCM_KEY_192;
+		mb_mgr->gcm192_pre(key, &sess->gdata_key);
+		break;
+	case 32:
+		sess->key_length = GCM_KEY_256;
+		mb_mgr->gcm256_pre(key, &sess->gdata_key);
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Invalid key length");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+
+	/* Digest check */
+	if (sess->req_digest_length > 16) {
+		IPSEC_MB_LOG(ERR, "Invalid digest length");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+	/*
+	 * If size requested is different, generate the full digest
+	 * (16 bytes) in a temporary location and then memcpy
+	 * the requested number of bytes.
+	 */
+	if (sess->req_digest_length < 4)
+		sess->gen_digest_length = 16;
+	else
+		sess->gen_digest_length = sess->req_digest_length;
+
+error_exit:
+	return ret;
+}
+
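
The tail of aesni_gcm_session_configure() records two digest lengths: what the caller asked for (req_digest_length) and what the library will actually produce (gen_digest_length, padded up to 16 bytes for requests under 4). Completion code later generates the full tag into scratch space and hands back only the requested prefix. A standalone sketch of that copy-out step, with simplified names:

	#include <stdint.h>
	#include <string.h>

	static void
	copy_truncated_digest(uint8_t *user_digest, uint16_t req_len,
			const uint8_t full_tag[16])
	{
		/* library produced 16 bytes; return only the prefix */
		memcpy(user_digest, full_tag, req_len);
	}
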
+/**
+ * Post-process a completed GCM crypto operation: compare the computed
+ * digest against the one supplied for decrypt/verify operations, and
+ * copy out a truncated digest for encrypt/generate operations when the
+ * requested length differs from the generated one.
+ *
+ * @param qp		queue pair
+ * @param op		symmetric crypto operation
+ * @param session	GCM session
+ */
+static void
+post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *session)
+{
+	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	/* Verify digest if required */
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
+			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
+		uint8_t *digest;
+
+		uint8_t *tag = qp_data->temp_digest;
+
+		if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
+			digest = op->sym->auth.digest.data;
+		else
+			digest = op->sym->aead.digest.data;
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				digest, session->req_digest_length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, session->req_digest_length);
+#endif
+
+		if (memcmp(tag, digest,	session->req_digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		if (session->req_digest_length != session->gen_digest_length) {
+			if (session->op ==
+				IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
+				memcpy(op->sym->aead.digest.data,
+					qp_data->temp_digest,
+					session->req_digest_length);
+			else
+				memcpy(op->sym->auth.digest.data,
+					qp_data->temp_digest,
+					session->req_digest_length);
+		}
+	}
+}
+
+/**
+ * Process a completed GCM request
+ *
+ * @param qp		Queue Pair to process
+ * @param op		Crypto operation
+ * @param sess		AESNI-GCM session
+ *
+ */
+static void
+handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *sess)
+{
+	post_process_gcm_crypto_op(qp, op, sess);
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+		memset(op->sym->session, 0,
+			rte_cryptodev_sym_get_existing_header_session_size(
+				op->sym->session));
+		rte_mempool_put(qp->sess_mp_priv, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+}
+
+/**
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		GCM session
+ *
+ * @return
+ *  0 on success
+ */
+static int
+process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
+		struct aesni_gcm_session *session)
+{
+	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+	uint8_t *src, *dst;
+	uint8_t *iv_ptr;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct rte_mbuf *m_src = sym_op->m_src;
+	uint32_t offset, data_offset, data_length;
+	uint32_t part_len, total_len, data_len;
+	uint8_t *tag;
+	unsigned int oop = 0;
+	struct aesni_gcm_ops *ops = &qp_data->ops[session->key_length];
+
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
+			session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
+		offset = sym_op->aead.data.offset;
+		data_offset = offset;
+		data_length = sym_op->aead.data.length;
+	} else {
+		offset = sym_op->auth.data.offset;
+		data_offset = offset;
+		data_length = sym_op->auth.data.length;
+	}
+
+	RTE_ASSERT(m_src != NULL);
+
+	while (offset >= m_src->data_len && data_length != 0) {
+		offset -= m_src->data_len;
+		m_src = m_src->next;
+
+		RTE_ASSERT(m_src != NULL);
+	}
+
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
+	data_len = m_src->data_len - offset;
+	part_len = (data_len < data_length) ? data_len :
+			data_length;
+
+	RTE_ASSERT((sym_op->m_dst == NULL) ||
+			((sym_op->m_dst != NULL) &&
+				rte_pktmbuf_is_contiguous(sym_op->m_dst)));
+
+	/* In-place */
+	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
+		dst = src;
+	/* Out-of-place */
+	else {
+		oop = 1;
+		/* Segmented destination buffer is not supported
+		 * if operation is Out-of-place
+		 */
+		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
+		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+					data_offset);
+	}
+
+	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
+		ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
+				sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
+
+		ops->update_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
+				dst, src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			ops->update_enc(&session->gdata_key,
+					&qp_data->gcm_ctx_data,
+					dst, src, (uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp_data->temp_digest;
+		else
+			tag = sym_op->aead.digest.data;
+
+		ops->finalize_enc(&session->gdata_key, &qp_data->gcm_ctx_data,
+				tag, session->gen_digest_length);
+	} else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
+		ops->init(&session->gdata_key, &qp_data->gcm_ctx_data, iv_ptr,
+				sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
+
+		ops->update_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
+				dst, src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			ops->update_dec(&session->gdata_key,
+					&qp_data->gcm_ctx_data,
+					dst, src, (uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp_data->temp_digest;
+		ops->finalize_dec(&session->gdata_key, &qp_data->gcm_ctx_data,
+				tag, session->gen_digest_length);
+	} else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
+		ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
+				iv_ptr, session->iv.length);
+
+		ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
+				src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			ops->gmac_update(&session->gdata_key,
+					&qp_data->gcm_ctx_data, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp_data->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
+				tag, session->gen_digest_length);
+	} else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
+		ops->gmac_init(&session->gdata_key, &qp_data->gcm_ctx_data,
+				iv_ptr, session->iv.length);
+
+		ops->gmac_update(&session->gdata_key, &qp_data->gcm_ctx_data,
+				src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			ops->gmac_update(&session->gdata_key,
+					&qp_data->gcm_ctx_data, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp_data->temp_digest;
+
+		ops->gmac_finalize(&session->gdata_key, &qp_data->gcm_ctx_data,
+				tag, session->gen_digest_length);
+	}
+	return 0;
+}
+
+/** Get gcm session */
+static inline struct aesni_gcm_session *
+aesni_gcm_get_session(struct ipsec_mb_qp *qp,
+	     struct rte_crypto_op *op)
+{
+	struct aesni_gcm_session *sess = NULL;
+	uint32_t driver_id =
+	    ipsec_mb_get_driver_id(IPSEC_MB_PMD_TYPE_AESNI_GCM);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(sym_op->session != NULL))
+			sess = (struct aesni_gcm_session *)
+			    get_sym_session_private_data(sym_op->session,
+							 driver_id);
+	} else {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (rte_mempool_get(qp->sess_mp_priv,
+				(void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct aesni_gcm_session *)_sess_private_data;
+
+		if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
+				 _sess_private_data, sym_op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
+			sess = NULL;
+		}
+		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(sym_op->session, driver_id,
+					     _sess_private_data);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_session *sess;
+	struct ipsec_mb_qp *qp = queue_pair;
+
+	int retval = 0;
+	unsigned int i, nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	for (i = 0; i < nb_dequeued; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, ops[i], sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i], sess);
+	}
+
+	qp->stats.dequeued_count += i;
+
+	return i;
+}
+
+static inline void
+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
+			  int32_t errnum)
+{
+	uint32_t i;
+
+	for (i = 0; i < vec->num; i++)
+		vec->status[i] = errnum;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
+				     struct gcm_context_data *gdata_ctx,
+				     uint8_t *digest, struct aesni_gcm_ops ops)
+{
+	if (s->req_digest_length != s->gen_digest_length) {
+		uint8_t tmpdigest[s->gen_digest_length];
+
+		ops.finalize_enc(&s->gdata_key, gdata_ctx, tmpdigest,
+				s->gen_digest_length);
+		memcpy(digest, tmpdigest, s->req_digest_length);
+	} else {
+		ops.finalize_enc(&s->gdata_key, gdata_ctx, digest,
+				s->gen_digest_length);
+	}
+
+	return 0;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
+				     struct gcm_context_data *gdata_ctx,
+				     uint8_t *digest, struct aesni_gcm_ops ops)
+{
+	uint8_t tmpdigest[s->gen_digest_length];
+
+	ops.finalize_dec(&s->gdata_key, gdata_ctx, tmpdigest,
+			s->gen_digest_length);
+
+	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
+								    : EBADMSG;
+}
+
+static inline void
+aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
+			     struct gcm_context_data *gdata_ctx,
+			     struct rte_crypto_sgl *sgl, void *iv, void *aad,
+			     struct aesni_gcm_ops ops)
+{
+	uint32_t i;
+
+	/* init crypto operation */
+	ops.init(&s->gdata_key, gdata_ctx, iv, aad,
+		    (uint64_t)s->aad_length);
+
+	/* update with sgl data */
+	for (i = 0; i < sgl->num; i++) {
+		struct rte_crypto_vec *vec = &sgl->vec[i];
+
+		switch (s->op) {
+		case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+			ops.update_enc(&s->gdata_key, gdata_ctx,
+			      vec->base, vec->base, vec->len);
+			break;
+		case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+			ops.update_dec(&s->gdata_key, gdata_ctx,
+			      vec->base, vec->base, vec->len);
+			break;
+		default:
+			IPSEC_MB_LOG(ERR, "Invalid session op");
+			break;
+		}
+
+	}
+}
+
+static inline void
+aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
+			      struct gcm_context_data *gdata_ctx,
+			      struct rte_crypto_sgl *sgl, void *iv,
+			      struct aesni_gcm_ops ops)
+{
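+	/* GMAC over a single buffer: authenticate the data by passing it
+	 * as AAD; the tag is produced by the finalize call.
+	 */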
+	ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
+		    sgl->vec[0].len);
+}
+
+static inline uint32_t
+aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
+		      struct gcm_context_data *gdata_ctx,
+		      struct rte_crypto_sym_vec *vec,
+		      struct aesni_gcm_ops ops)
+{
+	uint32_t i, processed;
+
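+	/* Count only the ops that finalize successfully. */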
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					     vec->iv[i].va, vec->aad[i].va,
+					     ops);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
+		    s, gdata_ctx, vec->digest[i].va, ops);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
+		      struct gcm_context_data *gdata_ctx,
+		      struct rte_crypto_sym_vec *vec,
+		      struct aesni_gcm_ops ops)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					     vec->iv[i].va, vec->aad[i].va,
+					     ops);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
+		    s, gdata_ctx, vec->digest[i].va, ops);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
+			struct gcm_context_data *gdata_ctx,
+			struct rte_crypto_sym_vec *vec,
+			struct aesni_gcm_ops ops)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
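+		/* The GMAC path only handles single-segment SGLs. */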
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					      vec->iv[i].va, ops);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
+		    s, gdata_ctx, vec->digest[i].va, ops);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+static inline uint32_t
+aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
+		      struct gcm_context_data *gdata_ctx,
+		      struct rte_crypto_sym_vec *vec,
+		      struct aesni_gcm_ops ops)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					      vec->iv[i].va, ops);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
+		    s, gdata_ctx, vec->digest[i].va, ops);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+/** Process CPU crypto bulk operations */
+static uint32_t
+aesni_gcm_process_bulk(struct rte_cryptodev *dev,
+			struct rte_cryptodev_sym_session *sess,
+			__rte_unused union rte_crypto_sym_ofs ofs,
+			struct rte_crypto_sym_vec *vec)
+{
+	void *sess_priv;
+	struct aesni_gcm_session *s;
+	struct gcm_context_data gdata_ctx;
+	IMB_MGR *mb_mgr;
+
+	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+	if (unlikely(sess_priv == NULL)) {
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+
+	s = sess_priv;
+
+	/* get per-thread MB MGR, create one if needed */
+	mb_mgr = get_per_thread_mb_mgr();
+	if (unlikely(mb_mgr == NULL))
+		return 0;
+
+	/* Check if the per-thread ops function pointers have been set. */
+	if (unlikely(RTE_PER_LCORE(gcm_ops)[s->key_length].init == NULL))
+		aesni_gcm_set_ops(RTE_PER_LCORE(gcm_ops), mb_mgr);
+
+	switch (s->op) {
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec,
+				RTE_PER_LCORE(gcm_ops)[s->key_length]);
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec,
+				RTE_PER_LCORE(gcm_ops)[s->key_length]);
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec,
+				RTE_PER_LCORE(gcm_ops)[s->key_length]);
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec,
+				RTE_PER_LCORE(gcm_ops)[s->key_length]);
+	default:
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+}
+
+static int
+aesni_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+				const struct rte_cryptodev_qp_conf *qp_conf,
+				int socket_id)
+{
+	struct ipsec_mb_qp *qp;
+	struct aesni_gcm_qp_data *qp_data;
+	int ret;
+
+	ret = ipsec_mb_pmd_qp_setup(dev, qp_id, qp_conf, socket_id);
+	if (ret < 0)
+		return ret;
+
+	qp = dev->data->queue_pairs[qp_id];
+	qp_data = ipsec_mb_get_qp_private_data(qp);
+	aesni_gcm_set_ops(qp_data->ops, qp->mb_mgr);
+	return 0;
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
+
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
+
+	.dev_infos_get = ipsec_mb_pmd_info_get,
+
+	.queue_pair_setup = aesni_gcm_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
+
+	.sym_cpu_process = aesni_gcm_process_bulk,
+
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
+
+static int
+cryptodev_aesni_gcm_probe(struct rte_vdev_device *vdev)
+{
+	return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_GCM);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_gcm_pmd_drv = {
+	.probe = cryptodev_aesni_gcm_probe,
+	.remove = cryptodev_ipsec_mb_remove
+};
+
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD,
+		      cryptodev_aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+			      "max_nb_queue_pairs=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,
+			       cryptodev_aesni_gcm_pmd_drv.driver,
+			       pmd_driver_id_aesni_gcm);
+
+/* Constructor function to register aesni-gcm PMD */
+RTE_INIT(ipsec_mb_register_aesni_gcm)
+{
+	struct ipsec_mb_pmd_data *aesni_gcm_data =
+		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
+
+	aesni_gcm_data->caps = aesni_gcm_capabilities;
+	aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+	aesni_gcm_data->feature_flags =
+		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+		RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+	aesni_gcm_data->internals_priv_size = 0;
+	aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
+	aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
+	aesni_gcm_data->queue_pair_configure = NULL;
+	aesni_gcm_data->session_configure = aesni_gcm_session_configure;
+	aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 2b589eee47..3407c3c070 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -37,6 +37,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
 #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
 /**< IPSEC Multi buffer aesni_mb PMD device name */
 
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+/**< IPSEC Multi buffer PMD aesni_gcm device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -46,6 +49,7 @@ extern int ipsec_mb_logtype_driver;
 /** All supported device types */
 enum ipsec_mb_pmd_types {
 	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
+	IPSEC_MB_PMD_TYPE_AESNI_GCM,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -65,6 +69,7 @@ enum ipsec_mb_operation {
 };
 
 extern uint8_t pmd_driver_id_aesni_mb;
+extern uint8_t pmd_driver_id_aesni_gcm;
 
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
@@ -73,6 +78,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 	switch (pmd_type) {
 	case IPSEC_MB_PMD_TYPE_AESNI_MB:
 		return pmd_driver_id_aesni_mb;
+	case IPSEC_MB_PMD_TYPE_AESNI_GCM:
+		return pmd_driver_id_aesni_gcm;
 	default:
 		break;
 	}
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index b2ccea6f94..14a13f2263 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -7,7 +7,6 @@ endif
 
 drivers = [
         'ipsec_mb',
-        'aesni_gcm',
         'armv8',
         'bcmfs',
         'caam_jr',
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 05/10] drivers/crypto: move kasumi PMD to IPsec-mb framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (3 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 04/10] drivers/crypto: move aesni-gcm " Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 06/10] drivers/crypto: move snow3g " Ciara Power
                           ` (4 subsequent siblings)
  9 siblings, 0 replies; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Thomas Monjalon, Pablo de Lara, Ray Kinsella

From: Piotr Bronowski <piotrx.bronowski@intel.com>

This patch removes the crypto/kasumi folder and gathers all kasumi PMD
implementation-specific details into a single file, pmd_kasumi.c, in
the crypto/ipsec_mb folder.

Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v2: Updated maintainers file.
---
 MAINTAINERS                                   |   6 +-
 doc/guides/cryptodevs/kasumi.rst              |   3 +-
 drivers/crypto/ipsec_mb/meson.build           |   3 +-
 drivers/crypto/ipsec_mb/pmd_kasumi.c          | 556 +++++++++++++++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
 drivers/crypto/kasumi/kasumi_pmd_private.h    |  81 ---
 drivers/crypto/kasumi/meson.build             |  24 -
 drivers/crypto/kasumi/rte_kasumi_pmd.c        | 642 ------------------
 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c    | 316 ---------
 drivers/crypto/kasumi/version.map             |   3 -
 drivers/crypto/meson.build                    |   1 -
 11 files changed, 568 insertions(+), 1074 deletions(-)
 create mode 100644 drivers/crypto/ipsec_mb/pmd_kasumi.c
 delete mode 100644 drivers/crypto/kasumi/kasumi_pmd_private.h
 delete mode 100644 drivers/crypto/kasumi/meson.build
 delete mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd.c
 delete mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
 delete mode 100644 drivers/crypto/kasumi/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 6247e50687..794bad11c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1057,13 +1057,9 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: drivers/crypto/ipsec_mb/
 F: doc/guides/cryptodevs/aesni_gcm.rst
 F: doc/guides/cryptodevs/aesni_mb.rst
+F: doc/guides/cryptodevs/kasumi.rst
 F: doc/guides/cryptodevs/features/aesni_gcm.ini
 F: doc/guides/cryptodevs/features/aesni_mb.ini
-
-KASUMI
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: drivers/crypto/kasumi/
-F: doc/guides/cryptodevs/kasumi.rst
 F: doc/guides/cryptodevs/features/kasumi.ini
 
 Marvell cnxk crypto
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index 35c5941317..bc82744fcc 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -78,7 +78,8 @@ and the external crypto libraries supported by them:
    DPDK version   Crypto library version
    =============  ================================
    16.11 - 19.11  LibSSO KASUMI
-   20.02+         Multi-buffer library 0.53 - 1.0*
+   20.02 - 21.08  Multi-buffer library 0.53 - 1.0*
+   21.11+         Multi-buffer library 1.0*
    =============  ================================
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index 8550eaee9a..b0e2c6a0b7 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -24,6 +24,7 @@ endif
 sources = files('rte_ipsec_mb_pmd.c',
 		'rte_ipsec_mb_pmd_ops.c',
 		'pmd_aesni_mb.c',
-		'pmd_aesni_gcm.c'
+		'pmd_aesni_gcm.c',
+		'pmd_kasumi.c'
 		)
 deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/pmd_kasumi.c b/drivers/crypto/ipsec_mb/pmd_kasumi.c
new file mode 100644
index 0000000000..10f652a7ff
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/pmd_kasumi.c
@@ -0,0 +1,556 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Intel Corporation
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_cryptodev.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+#define KASUMI_DIGEST_LENGTH 4
+
+uint8_t pmd_driver_id_kasumi;
+
+static const struct rte_cryptodev_capabilities kasumi_capabilities[] = {
+	{	/* KASUMI (F9) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+				.block_size = 8,
+				.key_size = {
+					.min = KASUMI_KEY_LENGTH,
+					.max = KASUMI_KEY_LENGTH,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = KASUMI_DIGEST_LENGTH,
+					.max = KASUMI_DIGEST_LENGTH,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* KASUMI (F8) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+				.block_size = 8,
+				.key_size = {
+					.min = KASUMI_KEY_LENGTH,
+					.max = KASUMI_KEY_LENGTH,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = KASUMI_IV_LENGTH,
+					.max = KASUMI_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+	/* Keys have to be 16-byte aligned */
+	kasumi_key_sched_t pKeySched_cipher;
+	kasumi_key_sched_t pKeySched_hash;
+	enum ipsec_mb_operation op;
+	enum rte_crypto_auth_operation auth_op;
+	uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+struct kasumi_qp_data {
+	uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation).
+	 */
+};
+
+/** Parse crypto xform chain and set private session parameters. */
+static int
+kasumi_session_configure(IMB_MGR *mgr, void *priv_sess,
+			  const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+	enum ipsec_mb_operation mode;
+	struct kasumi_session *sess = (struct kasumi_session *)priv_sess;
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, NULL);
+
+	if (ret)
+		return ret;
+
+	if (cipher_xform) {
+		/* Only KASUMI F8 supported */
+		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+			IPSEC_MB_LOG(ERR, "Unsupported cipher algorithm ");
+			return -ENOTSUP;
+		}
+
+		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
+			IPSEC_MB_LOG(ERR, "Wrong IV length");
+			return -EINVAL;
+		}
+
+		/* Initialize key */
+		IMB_KASUMI_INIT_F8_KEY_SCHED(mgr,
+					      cipher_xform->cipher.key.data,
+					      &sess->pKeySched_cipher);
+	}
+
+	if (auth_xform) {
+		/* Only KASUMI F9 supported */
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+			IPSEC_MB_LOG(ERR, "Unsupported authentication");
+			return -ENOTSUP;
+		}
+
+		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
+			IPSEC_MB_LOG(ERR, "Wrong digest length");
+			return -EINVAL;
+		}
+
+		sess->auth_op = auth_xform->auth.op;
+
+		/* Initialize key */
+		IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
+					      &sess->pKeySched_hash);
+	}
+
+	sess->op = mode;
+	return ret;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
+			  struct kasumi_session *session, uint8_t num_ops)
+{
+	unsigned int i;
+	uint8_t processed_ops = 0;
+	const void *src[num_ops];
+	void *dst[num_ops];
+	uint8_t *iv_ptr;
+	uint64_t iv[num_ops];
+	uint32_t num_bytes[num_ops];
+
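+	/* Gather per-op src, dst, IV and byte-length arrays for a
+	 * single N-buffer call.
+	 */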
+	for (i = 0; i < num_ops; i++) {
+		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
+			 + (ops[i]->sym->cipher.data.offset >> 3);
+		dst[i] = ops[i]->sym->m_dst
+			     ? rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *)
+				   + (ops[i]->sym->cipher.data.offset >> 3)
+			     : rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
+				   + (ops[i]->sym->cipher.data.offset >> 3);
+		iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
+						    session->cipher_iv_offset);
+		iv[i] = *((uint64_t *)(iv_ptr));
+		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+		processed_ops++;
+	}
+
+	if (processed_ops != 0)
+		IMB_KASUMI_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher,
+					iv, src, dst, num_bytes,
+					processed_ops);
+
+	return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
+			      struct kasumi_session *session)
+{
+	uint8_t *src, *dst;
+	uint8_t *iv_ptr;
+	uint64_t iv;
+	uint32_t length_in_bits, offset_in_bits;
+
+	offset_in_bits = op->sym->cipher.data.offset;
+	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+	if (op->sym->m_dst == NULL)
+		dst = src;
+	else
+		dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+					    session->cipher_iv_offset);
+	iv = *((uint64_t *)(iv_ptr));
+	length_in_bits = op->sym->cipher.data.length;
+
+	IMB_KASUMI_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
+				    src, dst, length_in_bits, offset_in_bits);
+
+	return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
+			struct kasumi_session *session, uint8_t num_ops)
+{
+	unsigned int i;
+	uint8_t processed_ops = 0;
+	uint8_t *src, *dst;
+	uint32_t length_in_bits;
+	uint32_t num_bytes;
+	struct kasumi_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+
+	for (i = 0; i < num_ops; i++) {
+		/* Data must be byte aligned */
+		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			IPSEC_MB_LOG(ERR, "Invalid Offset");
+			break;
+		}
+
+		length_in_bits = ops[i]->sym->auth.data.length;
+
+		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *)
+		      + (ops[i]->sym->auth.data.offset >> 3);
+		/* Direction from next bit after end of message */
+		num_bytes = length_in_bits >> 3;
+
+		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+			dst = qp_data->temp_digest;
+			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
+						&session->pKeySched_hash, src,
+						num_bytes, dst);
+
+			/* Verify digest. */
+			if (memcmp(dst, ops[i]->sym->auth.digest.data,
+				    KASUMI_DIGEST_LENGTH)
+			    != 0)
+				ops[i]->status
+				    = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			dst = ops[i]->sym->auth.digest.data;
+
+			IMB_KASUMI_F9_1_BUFFER(qp->mb_mgr,
+						&session->pKeySched_hash, src,
+						num_bytes, dst);
+		}
+		processed_ops++;
+	}
+
+	return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+		struct ipsec_mb_qp *qp, uint8_t num_ops,
+		uint16_t *accumulated_enqueued_ops)
+{
+	unsigned int i;
+	unsigned int processed_ops;
+
+	switch (session->op) {
+	case IPSEC_MB_OP_ENCRYPT_ONLY:
+	case IPSEC_MB_OP_DECRYPT_ONLY:
+		processed_ops
+		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
+		break;
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+		processed_ops
+		    = process_kasumi_hash_op(qp, ops, session, num_ops);
+		break;
+	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
+		processed_ops
+		    = process_kasumi_cipher_op(qp, ops, session, num_ops);
+		process_kasumi_hash_op(qp, ops, session, processed_ops);
+		break;
+	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
+		processed_ops
+		    = process_kasumi_hash_op(qp, ops, session, num_ops);
+		process_kasumi_cipher_op(qp, ops, session, processed_ops);
+		break;
+	default:
+		/* Operation not supported. */
+		processed_ops = 0;
+	}
+
+	for (i = 0; i < num_ops; i++) {
+		/*
+		 * If there was no error/authentication failure,
+		 * change status to successful.
+		 */
+		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		/* Free session if a session-less crypto op. */
+		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+			memset(session, 0, sizeof(struct kasumi_session));
+			memset(
+			    ops[i]->sym->session, 0,
+			    rte_cryptodev_sym_get_existing_header_session_size(
+				ops[i]->sym->session));
+			rte_mempool_put(qp->sess_mp_priv, session);
+			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+			ops[i]->sym->session = NULL;
+		}
+	}
+
+	*accumulated_enqueued_ops += i;
+	return processed_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+		struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+	unsigned int processed_op;
+
+	switch (session->op) {
+		/* case KASUMI_OP_ONLY_CIPHER: */
+	case IPSEC_MB_OP_ENCRYPT_ONLY:
+	case IPSEC_MB_OP_DECRYPT_ONLY:
+		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
+		break;
+	/* case KASUMI_OP_ONLY_AUTH: */
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+		break;
+	/* case KASUMI_OP_CIPHER_AUTH: */
+	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
+		if (processed_op == 1)
+			process_kasumi_hash_op(qp, &op, session, 1);
+		break;
+	/* case KASUMI_OP_AUTH_CIPHER: */
+	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
+		if (processed_op == 1)
+			process_kasumi_cipher_op_bit(qp, op, session);
+		break;
+	default:
+		/* Operation not supported. */
+		processed_op = 0;
+	}
+
+	/*
+	 * If there was no error/authentication failure,
+	 * change status to successful.
+	 */
+	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Free session if a session-less crypto op. */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(op->sym->session, 0, sizeof(struct kasumi_session));
+		rte_cryptodev_sym_session_free(op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	*accumulated_enqueued_ops += processed_op;
+	return processed_op;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+			  uint16_t nb_ops)
+{
+	struct rte_crypto_op *c_ops[nb_ops];
+	struct rte_crypto_op *curr_c_op = NULL;
+
+	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+	struct ipsec_mb_qp *qp = queue_pair;
+	unsigned int i;
+	uint8_t burst_size = 0;
+	uint16_t enqueued_ops = 0;
+	uint8_t processed_ops;
+	unsigned int nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+					      (void **)ops, nb_ops, NULL);
+	for (i = 0; i < nb_dequeued; i++) {
+		curr_c_op = ops[i];
+
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src)
+		    || (curr_c_op->sym->m_dst != NULL
+			&& !rte_pktmbuf_is_contiguous(
+			    curr_c_op->sym->m_dst))) {
+			IPSEC_MB_LOG(ERR,
+				      "PMD supports only contiguous mbufs, op (%p) provides noncontiguous mbuf as source/destination buffer.",
+				      curr_c_op);
+			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			break;
+		}
+#endif
+
+		/* Set status as enqueued (not processed yet) by default. */
+		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+		curr_sess = (struct kasumi_session *)
+			ipsec_mb_get_session_private(qp, curr_c_op);
+		if (unlikely(curr_sess == NULL
+			      || curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
+			curr_c_op->status
+			    = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+			break;
+		}
+
+		/* If length/offset is at bit-level, process this buffer alone. */
+		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+		    || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) {
+			/* Process the ops of the previous session. */
+			if (prev_sess != NULL) {
+				processed_ops
+				    = process_ops(c_ops, prev_sess, qp,
+						   burst_size, &enqueued_ops);
+				if (processed_ops < burst_size) {
+					burst_size = 0;
+					break;
+				}
+
+				burst_size = 0;
+				prev_sess = NULL;
+			}
+
+			processed_ops = process_op_bit(curr_c_op, curr_sess,
+							qp, &enqueued_ops);
+			if (processed_ops != 1)
+				break;
+
+			continue;
+		}
+
+		/* Batch ops that share the same session. */
+		if (prev_sess == NULL) {
+			prev_sess = curr_sess;
+			c_ops[burst_size++] = curr_c_op;
+		} else if (curr_sess == prev_sess) {
+			c_ops[burst_size++] = curr_c_op;
+			/*
+			 * When there are enough ops to process in a batch,
+			 * process them, and start a new batch.
+			 */
+			if (burst_size == KASUMI_MAX_BURST) {
+				processed_ops
+				    = process_ops(c_ops, prev_sess, qp,
+						   burst_size, &enqueued_ops);
+				if (processed_ops < burst_size) {
+					burst_size = 0;
+					break;
+				}
+
+				burst_size = 0;
+				prev_sess = NULL;
+			}
+		} else {
+			/*
+			 * Different session, process the ops
+			 * of the previous session.
+			 */
+			processed_ops = process_ops(
+			    c_ops, prev_sess, qp, burst_size, &enqueued_ops);
+			if (processed_ops < burst_size) {
+				burst_size = 0;
+				break;
+			}
+
+			burst_size = 0;
+			prev_sess = curr_sess;
+
+			c_ops[burst_size++] = curr_c_op;
+		}
+	}
+
+	if (burst_size != 0) {
+		/* Process the crypto ops of the last session. */
+		processed_ops = process_ops(c_ops, prev_sess, qp, burst_size,
+					     &enqueued_ops);
+	}
+	qp->stats.dequeued_count += i;
+
+	return i;
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
+
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
+
+	.dev_infos_get = ipsec_mb_pmd_info_get,
+
+	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
+
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
+
+static int
+cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
+{
+	return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_KASUMI);
+}
+
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+	.probe = cryptodev_kasumi_probe,
+	.remove = cryptodev_ipsec_mb_remove
+};
+
+static struct cryptodev_driver kasumi_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+			       "max_nb_queue_pairs=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+				cryptodev_kasumi_pmd_drv.driver,
+				pmd_driver_id_kasumi);
+
+/* Constructor function to register kasumi PMD */
+RTE_INIT(ipsec_mb_register_kasumi)
+{
+	struct ipsec_mb_pmd_data *kasumi_data
+	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_KASUMI];
+
+	kasumi_data->caps = kasumi_capabilities;
+	kasumi_data->dequeue_burst = kasumi_pmd_dequeue_burst;
+	kasumi_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
+				| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
+				| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
+				| RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
+				| RTE_CRYPTODEV_FF_SYM_SESSIONLESS
+				| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+	kasumi_data->internals_priv_size = 0;
+	kasumi_data->ops = &kasumi_pmd_ops;
+	kasumi_data->qp_priv_size = sizeof(struct kasumi_qp_data);
+	kasumi_data->session_configure = kasumi_session_configure;
+	kasumi_data->session_priv_size = sizeof(struct kasumi_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 3407c3c070..10fa289017 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -40,6 +40,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
 #define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
 /**< IPSEC Multi buffer PMD aesni_gcm device name */
 
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< IPSEC Multi buffer PMD kasumi device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -50,6 +53,7 @@ extern int ipsec_mb_logtype_driver;
 enum ipsec_mb_pmd_types {
 	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
 	IPSEC_MB_PMD_TYPE_AESNI_GCM,
+	IPSEC_MB_PMD_TYPE_KASUMI,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -70,6 +74,7 @@ enum ipsec_mb_operation {
 
 extern uint8_t pmd_driver_id_aesni_mb;
 extern uint8_t pmd_driver_id_aesni_gcm;
+extern uint8_t pmd_driver_id_kasumi;
 
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
@@ -80,6 +85,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 		return pmd_driver_id_aesni_mb;
 	case IPSEC_MB_PMD_TYPE_AESNI_GCM:
 		return pmd_driver_id_aesni_gcm;
+	case IPSEC_MB_PMD_TYPE_KASUMI:
+		return pmd_driver_id_kasumi;
 	default:
 		break;
 	}
diff --git a/drivers/crypto/kasumi/kasumi_pmd_private.h b/drivers/crypto/kasumi/kasumi_pmd_private.h
deleted file mode 100644
index abedcd616d..0000000000
--- a/drivers/crypto/kasumi/kasumi_pmd_private.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#ifndef _KASUMI_PMD_PRIVATE_H_
-#define _KASUMI_PMD_PRIVATE_H_
-
-#include <intel-ipsec-mb.h>
-
-#define CRYPTODEV_NAME_KASUMI_PMD	crypto_kasumi
-/**< KASUMI PMD device name */
-
-/** KASUMI PMD LOGTYPE DRIVER */
-extern int kasumi_logtype_driver;
-
-#define KASUMI_LOG(level, fmt, ...)  \
-	rte_log(RTE_LOG_ ## level, kasumi_logtype_driver,  \
-			"%s() line %u: " fmt "\n", __func__, __LINE__,  \
-					## __VA_ARGS__)
-
-#define KASUMI_DIGEST_LENGTH 4
-
-/** private data structure for each virtual KASUMI device */
-struct kasumi_private {
-	unsigned max_nb_queue_pairs;
-	/**< Max number of queue pairs supported by device */
-	MB_MGR *mgr;
-	/**< Multi-buffer instance */
-};
-
-/** KASUMI buffer queue pair */
-struct kasumi_qp {
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	/**< Unique Queue Pair Name */
-	struct rte_ring *processed_ops;
-	/**< Ring for placing processed ops */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session Private Data Mempool */
-	struct rte_cryptodev_stats qp_stats;
-	/**< Queue pair statistics */
-	uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-	MB_MGR *mgr;
-	/**< Multi-buffer instance */
-} __rte_cache_aligned;
-
-enum kasumi_operation {
-	KASUMI_OP_ONLY_CIPHER,
-	KASUMI_OP_ONLY_AUTH,
-	KASUMI_OP_CIPHER_AUTH,
-	KASUMI_OP_AUTH_CIPHER,
-	KASUMI_OP_NOT_SUPPORTED
-};
-
-/** KASUMI private session structure */
-struct kasumi_session {
-	/* Keys have to be 16-byte aligned */
-	kasumi_key_sched_t pKeySched_cipher;
-	kasumi_key_sched_t pKeySched_hash;
-	enum kasumi_operation op;
-	enum rte_crypto_auth_operation auth_op;
-	uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-
-int
-kasumi_set_session_parameters(MB_MGR *mgr, struct kasumi_session *sess,
-		const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
-
-#endif /* _KASUMI_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/kasumi/meson.build b/drivers/crypto/kasumi/meson.build
deleted file mode 100644
index e6e0f08c3d..0000000000
--- a/drivers/crypto/kasumi/meson.build
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018-2020 Intel Corporation
-
-IMB_required_ver = '0.53.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
-    build = false
-    reason = 'missing dependency, "libIPSec_MB"'
-else
-    # version comes with quotes, so we split based on " and take the middle
-    imb_ver = cc.get_define('IMB_VERSION_STR',
-        prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
-        reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
-                IMB_required_ver, imb_ver)
-        build = false
-    endif
-
-endif
-
-ext_deps += lib
-sources = files('rte_kasumi_pmd.c', 'rte_kasumi_pmd_ops.c')
-deps += ['bus_vdev']
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
deleted file mode 100644
index d6f927417a..0000000000
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ /dev/null
@@ -1,642 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
-
-#include "kasumi_pmd_private.h"
-
-#define KASUMI_KEY_LENGTH 16
-#define KASUMI_IV_LENGTH 8
-#define KASUMI_MAX_BURST 4
-#define BYTE_LEN 8
-
-static uint8_t cryptodev_driver_id;
-
-/** Get xform chain order. */
-static enum kasumi_operation
-kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
-{
-	if (xform == NULL)
-		return KASUMI_OP_NOT_SUPPORTED;
-
-	if (xform->next)
-		if (xform->next->next != NULL)
-			return KASUMI_OP_NOT_SUPPORTED;
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		if (xform->next == NULL)
-			return KASUMI_OP_ONLY_AUTH;
-		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return KASUMI_OP_AUTH_CIPHER;
-		else
-			return KASUMI_OP_NOT_SUPPORTED;
-	}
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		if (xform->next == NULL)
-			return KASUMI_OP_ONLY_CIPHER;
-		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-			return KASUMI_OP_CIPHER_AUTH;
-		else
-			return KASUMI_OP_NOT_SUPPORTED;
-	}
-
-	return KASUMI_OP_NOT_SUPPORTED;
-}
-
-
-/** Parse crypto xform chain and set private session parameters. */
-int
-kasumi_set_session_parameters(MB_MGR *mgr, struct kasumi_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	enum kasumi_operation mode;
-
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	mode = kasumi_get_mode(xform);
-
-	switch (mode) {
-	case KASUMI_OP_CIPHER_AUTH:
-		auth_xform = xform->next;
-		/* Fall-through */
-	case KASUMI_OP_ONLY_CIPHER:
-		cipher_xform = xform;
-		break;
-	case KASUMI_OP_AUTH_CIPHER:
-		cipher_xform = xform->next;
-		/* Fall-through */
-	case KASUMI_OP_ONLY_AUTH:
-		auth_xform = xform;
-		break;
-	case KASUMI_OP_NOT_SUPPORTED:
-	default:
-		KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
-		return -ENOTSUP;
-	}
-
-	if (cipher_xform) {
-		/* Only KASUMI F8 supported */
-		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
-			KASUMI_LOG(ERR, "Unsupported cipher algorithm ");
-			return -ENOTSUP;
-		}
-
-		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
-		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
-			KASUMI_LOG(ERR, "Wrong IV length");
-			return -EINVAL;
-		}
-
-		/* Initialize key */
-		IMB_KASUMI_INIT_F8_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
-				&sess->pKeySched_cipher);
-	}
-
-	if (auth_xform) {
-		/* Only KASUMI F9 supported */
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
-			KASUMI_LOG(ERR, "Unsupported authentication");
-			return -ENOTSUP;
-		}
-
-		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
-			KASUMI_LOG(ERR, "Wrong digest length");
-			return -EINVAL;
-		}
-
-		sess->auth_op = auth_xform->auth.op;
-
-		/* Initialize key */
-		IMB_KASUMI_INIT_F9_KEY_SCHED(mgr, auth_xform->auth.key.data,
-				&sess->pKeySched_hash);
-	}
-
-
-	sess->op = mode;
-
-	return 0;
-}
-
-/** Get KASUMI session. */
-static struct kasumi_session *
-kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
-{
-	struct kasumi_session *sess = NULL;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (likely(op->sym->session != NULL))
-			sess = (struct kasumi_session *)
-					get_sym_session_private_data(
-					op->sym->session,
-					cryptodev_driver_id);
-	} else {
-		void *_sess = NULL;
-		void *_sess_private_data = NULL;
-
-		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
-			return NULL;
-
-		if (rte_mempool_get(qp->sess_mp_priv,
-				(void **)&_sess_private_data))
-			return NULL;
-
-		sess = (struct kasumi_session *)_sess_private_data;
-
-		if (unlikely(kasumi_set_session_parameters(qp->mgr, sess,
-				op->sym->xform) != 0)) {
-			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
-			sess = NULL;
-		}
-		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_sym_session_private_data(op->sym->session,
-				cryptodev_driver_id, _sess_private_data);
-	}
-
-	if (unlikely(sess == NULL))
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
-	return sess;
-}
-
-/** Encrypt/decrypt mbufs with same cipher key. */
-static uint8_t
-process_kasumi_cipher_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
-		struct kasumi_session *session, uint8_t num_ops)
-{
-	unsigned i;
-	uint8_t processed_ops = 0;
-	const void *src[num_ops];
-	void *dst[num_ops];
-	uint8_t *iv_ptr;
-	uint64_t iv[num_ops];
-	uint32_t num_bytes[num_ops];
-
-	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3) :
-			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		iv_ptr = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->cipher_iv_offset);
-		iv[i] = *((uint64_t *)(iv_ptr));
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
-		processed_ops++;
-	}
-
-	if (processed_ops != 0)
-		IMB_KASUMI_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
-			src, dst, num_bytes, processed_ops);
-
-	return processed_ops;
-}
-
-/** Encrypt/decrypt mbuf (bit level function). */
-static uint8_t
-process_kasumi_cipher_op_bit(struct kasumi_qp *qp, struct rte_crypto_op *op,
-		struct kasumi_session *session)
-{
-	uint8_t *src, *dst;
-	uint8_t *iv_ptr;
-	uint64_t iv;
-	uint32_t length_in_bits, offset_in_bits;
-
-	offset_in_bits = op->sym->cipher.data.offset;
-	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
-	if (op->sym->m_dst == NULL)
-		dst = src;
-	else
-		dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
-	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
-			session->cipher_iv_offset);
-	iv = *((uint64_t *)(iv_ptr));
-	length_in_bits = op->sym->cipher.data.length;
-
-	IMB_KASUMI_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
-			src, dst, length_in_bits, offset_in_bits);
-
-	return 1;
-}
-
-/** Generate/verify hash from mbufs with same hash key. */
-static int
-process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
-		struct kasumi_session *session,
-		uint8_t num_ops)
-{
-	unsigned i;
-	uint8_t processed_ops = 0;
-	uint8_t *src, *dst;
-	uint32_t length_in_bits;
-	uint32_t num_bytes;
-
-	for (i = 0; i < num_ops; i++) {
-		/* Data must be byte aligned */
-		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			KASUMI_LOG(ERR, "Invalid Offset");
-			break;
-		}
-
-		length_in_bits = ops[i]->sym->auth.data.length;
-
-		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->auth.data.offset >> 3);
-		/* Direction from next bit after end of message */
-		num_bytes = length_in_bits >> 3;
-
-		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
-			dst = qp->temp_digest;
-			IMB_KASUMI_F9_1_BUFFER(qp->mgr,
-					&session->pKeySched_hash, src,
-					num_bytes, dst);
-
-			/* Verify digest. */
-			if (memcmp(dst, ops[i]->sym->auth.digest.data,
-					KASUMI_DIGEST_LENGTH) != 0)
-				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else  {
-			dst = ops[i]->sym->auth.digest.data;
-
-			IMB_KASUMI_F9_1_BUFFER(qp->mgr,
-					&session->pKeySched_hash, src,
-					num_bytes, dst);
-		}
-		processed_ops++;
-	}
-
-	return processed_ops;
-}
-
-/** Process a batch of crypto ops which shares the same session. */
-static int
-process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
-		struct kasumi_qp *qp, uint8_t num_ops,
-		uint16_t *accumulated_enqueued_ops)
-{
-	unsigned i;
-	unsigned enqueued_ops, processed_ops;
-
-	switch (session->op) {
-	case KASUMI_OP_ONLY_CIPHER:
-		processed_ops = process_kasumi_cipher_op(qp, ops,
-				session, num_ops);
-		break;
-	case KASUMI_OP_ONLY_AUTH:
-		processed_ops = process_kasumi_hash_op(qp, ops, session,
-				num_ops);
-		break;
-	case KASUMI_OP_CIPHER_AUTH:
-		processed_ops = process_kasumi_cipher_op(qp, ops, session,
-				num_ops);
-		process_kasumi_hash_op(qp, ops, session, processed_ops);
-		break;
-	case KASUMI_OP_AUTH_CIPHER:
-		processed_ops = process_kasumi_hash_op(qp, ops, session,
-				num_ops);
-		process_kasumi_cipher_op(qp, ops, session, processed_ops);
-		break;
-	default:
-		/* Operation not supported. */
-		processed_ops = 0;
-	}
-
-	for (i = 0; i < num_ops; i++) {
-		/*
-		 * If there was no error/authentication failure,
-		 * change status to successful.
-		 */
-		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-		/* Free session if a session-less crypto op. */
-		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			memset(session, 0, sizeof(struct kasumi_session));
-			memset(ops[i]->sym->session, 0,
-			rte_cryptodev_sym_get_existing_header_session_size(
-					ops[i]->sym->session));
-			rte_mempool_put(qp->sess_mp_priv, session);
-			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
-			ops[i]->sym->session = NULL;
-		}
-	}
-
-	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
-				(void **)ops, processed_ops, NULL);
-	qp->qp_stats.enqueued_count += enqueued_ops;
-	*accumulated_enqueued_ops += enqueued_ops;
-
-	return enqueued_ops;
-}
-
-/** Process a crypto op with length/offset in bits. */
-static int
-process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
-		struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
-{
-	unsigned enqueued_op, processed_op;
-
-	switch (session->op) {
-	case KASUMI_OP_ONLY_CIPHER:
-		processed_op = process_kasumi_cipher_op_bit(qp, op,
-				session);
-		break;
-	case KASUMI_OP_ONLY_AUTH:
-		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
-		break;
-	case KASUMI_OP_CIPHER_AUTH:
-		processed_op = process_kasumi_cipher_op_bit(qp, op, session);
-		if (processed_op == 1)
-			process_kasumi_hash_op(qp, &op, session, 1);
-		break;
-	case KASUMI_OP_AUTH_CIPHER:
-		processed_op = process_kasumi_hash_op(qp, &op, session, 1);
-		if (processed_op == 1)
-			process_kasumi_cipher_op_bit(qp, op, session);
-		break;
-	default:
-		/* Operation not supported. */
-		processed_op = 0;
-	}
-
-	/*
-	 * If there was no error/authentication failure,
-	 * change status to successful.
-	 */
-	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-	/* Free session if a session-less crypto op. */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(op->sym->session, 0, sizeof(struct kasumi_session));
-		rte_cryptodev_sym_session_free(op->sym->session);
-		op->sym->session = NULL;
-	}
-
-	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
-				processed_op, NULL);
-	qp->qp_stats.enqueued_count += enqueued_op;
-	*accumulated_enqueued_ops += enqueued_op;
-
-	return enqueued_op;
-}
-
-static uint16_t
-kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
-{
-	struct rte_crypto_op *c_ops[nb_ops];
-	struct rte_crypto_op *curr_c_op;
-
-	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
-	struct kasumi_qp *qp = queue_pair;
-	unsigned i;
-	uint8_t burst_size = 0;
-	uint16_t enqueued_ops = 0;
-	uint8_t processed_ops;
-
-	for (i = 0; i < nb_ops; i++) {
-		curr_c_op = ops[i];
-
-#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
-		if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
-				(curr_c_op->sym->m_dst != NULL &&
-				!rte_pktmbuf_is_contiguous(
-						curr_c_op->sym->m_dst))) {
-			KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
-				"op (%p) provides noncontiguous mbuf as "
-				"source/destination buffer.", curr_c_op);
-			curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			break;
-		}
-#endif
-
-		/* Set status as enqueued (not processed yet) by default. */
-		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
-		curr_sess = kasumi_get_session(qp, curr_c_op);
-		if (unlikely(curr_sess == NULL ||
-				curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
-			curr_c_op->status =
-					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-			break;
-		}
-
-		/* If length/offset is at bit-level, process this buffer alone. */
-		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
-				|| ((ops[i]->sym->cipher.data.offset
-					% BYTE_LEN) != 0)) {
-			/* Process the ops of the previous session. */
-			if (prev_sess != NULL) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size, &enqueued_ops);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-				prev_sess = NULL;
-			}
-
-			processed_ops = process_op_bit(curr_c_op, curr_sess,
-						qp, &enqueued_ops);
-			if (processed_ops != 1)
-				break;
-
-			continue;
-		}
-
-		/* Batch ops that share the same session. */
-		if (prev_sess == NULL) {
-			prev_sess = curr_sess;
-			c_ops[burst_size++] = curr_c_op;
-		} else if (curr_sess == prev_sess) {
-			c_ops[burst_size++] = curr_c_op;
-			/*
-			 * When there are enough ops to process in a batch,
-			 * process them, and start a new batch.
-			 */
-			if (burst_size == KASUMI_MAX_BURST) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size, &enqueued_ops);
-				if (processed_ops < burst_size) {
-					burst_size = 0;
-					break;
-				}
-
-				burst_size = 0;
-				prev_sess = NULL;
-			}
-		} else {
-			/*
-			 * Different session, process the ops
-			 * of the previous session.
-			 */
-			processed_ops = process_ops(c_ops, prev_sess,
-					qp, burst_size, &enqueued_ops);
-			if (processed_ops < burst_size) {
-				burst_size = 0;
-				break;
-			}
-
-			burst_size = 0;
-			prev_sess = curr_sess;
-
-			c_ops[burst_size++] = curr_c_op;
-		}
-	}
-
-	if (burst_size != 0) {
-		/* Process the crypto ops of the last session. */
-		processed_ops = process_ops(c_ops, prev_sess,
-				qp, burst_size, &enqueued_ops);
-	}
-
-	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
-	return enqueued_ops;
-}
-
-static uint16_t
-kasumi_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-	struct kasumi_qp *qp = queue_pair;
-
-	unsigned nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
-			(void **)c_ops, nb_ops, NULL);
-	qp->qp_stats.dequeued_count += nb_dequeued;
-
-	return nb_dequeued;
-}
-
-static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
-
-static int
-cryptodev_kasumi_create(const char *name,
-			struct rte_vdev_device *vdev,
-			struct rte_cryptodev_pmd_init_params *init_params)
-{
-	struct rte_cryptodev *dev;
-	struct kasumi_private *internals;
-	MB_MGR *mgr;
-
-	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
-	if (dev == NULL) {
-		KASUMI_LOG(ERR, "failed to create cryptodev vdev");
-		goto init_error;
-	}
-
-	dev->driver_id = cryptodev_driver_id;
-	dev->dev_ops = rte_kasumi_pmd_ops;
-
-	/* Register RX/TX burst functions for data path. */
-	dev->dequeue_burst = kasumi_pmd_dequeue_burst;
-	dev->enqueue_burst = kasumi_pmd_enqueue_burst;
-
-	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
-
-	mgr = alloc_mb_mgr(0);
-	if (mgr == NULL)
-		return -ENOMEM;
-
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mgr);
-	} else {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mgr);
-	}
-
-	internals = dev->data->dev_private;
-
-	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
-	internals->mgr = mgr;
-
-	return 0;
-init_error:
-	KASUMI_LOG(ERR, "driver %s: failed",
-			init_params->name);
-
-	cryptodev_kasumi_remove(vdev);
-	return -EFAULT;
-}
-
-static int
-cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev_pmd_init_params init_params = {
-		"",
-		sizeof(struct kasumi_private),
-		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
-	};
-	const char *name;
-	const char *input_args;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-	input_args = rte_vdev_device_args(vdev);
-
-	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
-
-	return cryptodev_kasumi_create(name, vdev, &init_params);
-}
-
-static int
-cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev *cryptodev;
-	const char *name;
-	struct kasumi_private *internals;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	internals = cryptodev->data->dev_private;
-
-	free_mb_mgr(internals->mgr);
-
-	return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
-	.probe = cryptodev_kasumi_probe,
-	.remove = cryptodev_kasumi_remove
-};
-
-static struct cryptodev_driver kasumi_crypto_drv;
-
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
-RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
-	"max_nb_queue_pairs=<int> "
-	"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
-		cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
-
-RTE_LOG_REGISTER_DEFAULT(kasumi_logtype_driver, NOTICE);
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
deleted file mode 100644
index f075054807..0000000000
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <cryptodev_pmd.h>
-
-#include "kasumi_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
-	{	/* KASUMI (F9) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.iv_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* KASUMI (F8) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-/** Configure device */
-static int
-kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-/** Start device */
-static int
-kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-/** Stop device */
-static void
-kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-
-/** Get device statistics */
-static void
-kasumi_pmd_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
-
-		stats->enqueued_count += qp->qp_stats.enqueued_count;
-		stats->dequeued_count += qp->qp_stats.dequeued_count;
-
-		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
-		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
-	}
-}
-
-/** Reset device statistics */
-static void
-kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
-
-		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-	}
-}
-
-
-/** Get device info */
-static void
-kasumi_pmd_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *dev_info)
-{
-	struct kasumi_private *internals = dev->data->dev_private;
-
-	if (dev_info != NULL) {
-		dev_info->driver_id = dev->driver_id;
-		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		/* No limit of number of sessions */
-		dev_info->sym.max_nb_sessions = 0;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = kasumi_pmd_capabilities;
-	}
-}
-
-/** Release queue pair */
-static int
-kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
-	struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
-
-	if (qp != NULL) {
-		rte_ring_free(qp->processed_ops);
-		rte_free(qp);
-		dev->data->queue_pairs[qp_id] = NULL;
-	}
-	return 0;
-}
-
-/** set a unique name for the queue pair based on its name, dev_id and qp_id */
-static int
-kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
-		struct kasumi_qp *qp)
-{
-	unsigned n = snprintf(qp->name, sizeof(qp->name),
-			"kasumi_pmd_%u_qp_%u",
-			dev->data->dev_id, qp->id);
-
-	if (n >= sizeof(qp->name))
-		return -1;
-
-	return 0;
-}
-
-/** Create a ring to place processed ops on */
-static struct rte_ring *
-kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
-		unsigned ring_size, int socket_id)
-{
-	struct rte_ring *r;
-
-	r = rte_ring_lookup(qp->name);
-	if (r) {
-		if (rte_ring_get_size(r) == ring_size) {
-			KASUMI_LOG(INFO, "Reusing existing ring %s"
-					" for processed packets",
-					 qp->name);
-			return r;
-		}
-
-		KASUMI_LOG(ERR, "Unable to reuse existing ring %s"
-				" for processed packets",
-				 qp->name);
-		return NULL;
-	}
-
-	return rte_ring_create(qp->name, ring_size, socket_id,
-			RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
-{
-	struct kasumi_qp *qp = NULL;
-	struct kasumi_private *internals = dev->data->dev_private;
-
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		kasumi_pmd_qp_release(dev, qp_id);
-
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
-					RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return (-ENOMEM);
-
-	qp->id = qp_id;
-	dev->data->queue_pairs[qp_id] = qp;
-
-	if (kasumi_pmd_qp_set_unique_name(dev, qp))
-		goto qp_setup_cleanup;
-
-	qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_ops == NULL)
-		goto qp_setup_cleanup;
-
-	qp->mgr = internals->mgr;
-	qp->sess_mp = qp_conf->mp_session;
-	qp->sess_mp_priv = qp_conf->mp_session_private;
-
-	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-
-	return 0;
-
-qp_setup_cleanup:
-	rte_free(qp);
-
-	return -1;
-}
-
-/** Returns the size of the KASUMI session structure */
-static unsigned
-kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct kasumi_session);
-}
-
-/** Configure a KASUMI session from a crypto xform chain */
-static int
-kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_sym_xform *xform,
-		struct rte_cryptodev_sym_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	int ret;
-	struct kasumi_private *internals = dev->data->dev_private;
-
-	if (unlikely(sess == NULL)) {
-		KASUMI_LOG(ERR, "invalid session struct");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		KASUMI_LOG(ERR,
-				"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	ret = kasumi_set_session_parameters(internals->mgr,
-					sess_private_data, xform);
-	if (ret != 0) {
-		KASUMI_LOG(ERR, "failed configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_sym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_sym_session_private_data(sess, index);
-
-	/* Zero out the whole structure */
-	if (sess_priv) {
-		memset(sess_priv, 0, sizeof(struct kasumi_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_sym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-struct rte_cryptodev_ops kasumi_pmd_ops = {
-		.dev_configure      = kasumi_pmd_config,
-		.dev_start          = kasumi_pmd_start,
-		.dev_stop           = kasumi_pmd_stop,
-		.dev_close          = kasumi_pmd_close,
-
-		.stats_get          = kasumi_pmd_stats_get,
-		.stats_reset        = kasumi_pmd_stats_reset,
-
-		.dev_infos_get      = kasumi_pmd_info_get,
-
-		.queue_pair_setup   = kasumi_pmd_qp_setup,
-		.queue_pair_release = kasumi_pmd_qp_release,
-
-		.sym_session_get_size   = kasumi_pmd_sym_session_get_size,
-		.sym_session_configure  = kasumi_pmd_sym_session_configure,
-		.sym_session_clear      = kasumi_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/drivers/crypto/kasumi/version.map b/drivers/crypto/kasumi/version.map
deleted file mode 100644
index c2e0723b4c..0000000000
--- a/drivers/crypto/kasumi/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_22 {
-	local: *;
-};
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 14a13f2263..5608bf9573 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -14,7 +14,6 @@ drivers = [
         'cnxk',
         'dpaa_sec',
         'dpaa2_sec',
-        'kasumi',
         'mlx5',
         'mvsam',
         'nitrox',
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 06/10] drivers/crypto: move snow3g PMD to IPsec-mb framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (4 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 05/10] drivers/crypto: move kasumi " Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-10-04 12:45           ` De Lara Guarch, Pablo
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 07/10] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
                           ` (3 subsequent siblings)
  9 siblings, 1 reply; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Thomas Monjalon, Pablo de Lara, Ray Kinsella

From: Piotr Bronowski <piotrx.bronowski@intel.com>

This patch removes the crypto/snow3g folder and gathers all snow3g PMD
implementation-specific details into a single file, pmd_snow3g.c, in
the crypto/ipsec_mb folder.
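
The move does not change how the PMD is instantiated: the vdev name
(crypto_snow3g) and its parameters ("max_nb_queue_pairs" and
"socket_id", re-registered below) stay the same, so a purely
illustrative EAL command line would still look like:

  ./<dpdk-app> -l 0-1 --vdev "crypto_snow3g,max_nb_queue_pairs=2,socket_id=0"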

Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v3: Removed extra empty lines.
v2: Updated maintainers file.
---
 MAINTAINERS                                   |   8 +-
 doc/guides/cryptodevs/snow3g.rst              |   3 +-
 drivers/crypto/ipsec_mb/meson.build           |   3 +-
 .../pmd_snow3g.c}                             | 457 ++++++++----------
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
 drivers/crypto/meson.build                    |   1 -
 drivers/crypto/snow3g/meson.build             |  24 -
 drivers/crypto/snow3g/rte_snow3g_pmd_ops.c    | 323 -------------
 drivers/crypto/snow3g/snow3g_pmd_private.h    |  84 ----
 drivers/crypto/snow3g/version.map             |   3 -
 10 files changed, 205 insertions(+), 708 deletions(-)
 rename drivers/crypto/{snow3g/rte_snow3g_pmd.c => ipsec_mb/pmd_snow3g.c} (57%)
 delete mode 100644 drivers/crypto/snow3g/meson.build
 delete mode 100644 drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
 delete mode 100644 drivers/crypto/snow3g/snow3g_pmd_private.h
 delete mode 100644 drivers/crypto/snow3g/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 794bad11c2..28855222d6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1058,9 +1058,11 @@ F: drivers/crypto/ipsec_mb/
 F: doc/guides/cryptodevs/aesni_gcm.rst
 F: doc/guides/cryptodevs/aesni_mb.rst
 F: doc/guides/cryptodevs/kasumi.rst
+F: doc/guides/cryptodevs/snow3g.rst
 F: doc/guides/cryptodevs/features/aesni_gcm.ini
 F: doc/guides/cryptodevs/features/aesni_mb.ini
 F: doc/guides/cryptodevs/features/kasumi.ini
+F: doc/guides/cryptodevs/features/snow3g.ini
 
 Marvell cnxk crypto
 M: Ankur Dwivedi <adwivedi@marvell.com>
@@ -1131,12 +1133,6 @@ F: drivers/crypto/openssl/
 F: doc/guides/cryptodevs/openssl.rst
 F: doc/guides/cryptodevs/features/openssl.ini
 
-SNOW 3G
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: drivers/crypto/snow3g/
-F: doc/guides/cryptodevs/snow3g.rst
-F: doc/guides/cryptodevs/features/snow3g.ini
-
 Virtio
 M: Jay Zhou <jianjay.zhou@huawei.com>
 F: drivers/crypto/virtio/
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 0258b71bb4..4ba71d66ce 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -77,7 +77,8 @@ and the external crypto libraries supported by them:
    DPDK version   Crypto library version
    =============  ================================
    16.04 - 19.11  LibSSO SNOW3G
-   20.02+         Multi-buffer library 0.53 - 1.0*
+   20.02 - 21.08  Multi-buffer library 0.53 - 1.0*
+   21.11+         Multi-buffer library 1.0*
    =============  ================================
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index b0e2c6a0b7..e9d74eaad4 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -25,6 +25,7 @@ sources = files('rte_ipsec_mb_pmd.c',
 		'rte_ipsec_mb_pmd_ops.c',
 		'pmd_aesni_mb.c',
 		'pmd_aesni_gcm.c',
-		'pmd_kasumi.c'
+		'pmd_kasumi.c',
+		'pmd_snow3g.c'
 		)
 deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
similarity index 57%
rename from drivers/crypto/snow3g/rte_snow3g_pmd.c
rename to drivers/crypto/ipsec_mb/pmd_snow3g.c
index 8284ac0b66..cc1ad3fb24 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -1,87 +1,108 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
+ * Copyright(c) 2015-2021 Intel Corporation
  */
 
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
+#include <intel-ipsec-mb.h>
 
-#include "snow3g_pmd_private.h"
+#if defined(RTE_LIB_SECURITY)
+#define AESNI_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#include <rte_ether.h>
+#endif
+
+#include "rte_ipsec_mb_pmd_private.h"
 
 #define SNOW3G_IV_LENGTH 16
 #define SNOW3G_MAX_BURST 8
 #define BYTE_LEN 8
+#define SNOW3G_DIGEST_LENGTH 4
+#define SNOW3G_MAX_KEY_SIZE  128
+
+uint8_t pmd_driver_id_snow3g;
+
+static const struct rte_cryptodev_capabilities snow3g_capabilities[] = {
+	{	/* SNOW 3G (UIA2) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = SNOW3G_DIGEST_LENGTH,
+					.max = SNOW3G_DIGEST_LENGTH,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = SNOW3G_IV_LENGTH,
+					.max = SNOW3G_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* SNOW 3G (UEA2) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = SNOW3G_IV_LENGTH,
+					.max = SNOW3G_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
 
-static uint8_t cryptodev_driver_id;
-
-/** Get xform chain order. */
-static enum snow3g_operation
-snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
-{
-	if (xform == NULL)
-		return SNOW3G_OP_NOT_SUPPORTED;
-
-	if (xform->next)
-		if (xform->next->next != NULL)
-			return SNOW3G_OP_NOT_SUPPORTED;
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		if (xform->next == NULL)
-			return SNOW3G_OP_ONLY_AUTH;
-		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return SNOW3G_OP_AUTH_CIPHER;
-		else
-			return SNOW3G_OP_NOT_SUPPORTED;
-	}
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		if (xform->next == NULL)
-			return SNOW3G_OP_ONLY_CIPHER;
-		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-			return SNOW3G_OP_CIPHER_AUTH;
-		else
-			return SNOW3G_OP_NOT_SUPPORTED;
-	}
-
-	return SNOW3G_OP_NOT_SUPPORTED;
-}
-
+/** SNOW 3G private session structure */
+struct snow3g_session {
+	enum ipsec_mb_operation op;
+	enum rte_crypto_auth_operation auth_op;
+	snow3g_key_schedule_t pKeySched_cipher;
+	snow3g_key_schedule_t pKeySched_hash;
+	uint16_t cipher_iv_offset;
+	uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+struct snow3g_qp_data {
+	uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+};
 
 /** Parse crypto xform chain and set private session parameters. */
-int
-snow3g_set_session_parameters(MB_MGR *mgr, struct snow3g_session *sess,
+static int
+snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
 		const struct rte_crypto_sym_xform *xform)
 {
+	struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
 	const struct rte_crypto_sym_xform *auth_xform = NULL;
 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	enum snow3g_operation mode;
+	enum ipsec_mb_operation mode;
 
 	/* Select Crypto operation - hash then cipher / cipher then hash */
-	mode = snow3g_get_mode(xform);
-
-	switch (mode) {
-	case SNOW3G_OP_CIPHER_AUTH:
-		auth_xform = xform->next;
-
-		/* Fall-through */
-	case SNOW3G_OP_ONLY_CIPHER:
-		cipher_xform = xform;
-		break;
-	case SNOW3G_OP_AUTH_CIPHER:
-		cipher_xform = xform->next;
-		/* Fall-through */
-	case SNOW3G_OP_ONLY_AUTH:
-		auth_xform = xform;
-		break;
-	case SNOW3G_OP_NOT_SUPPORTED:
-	default:
-		SNOW3G_LOG(ERR, "Unsupported operation chain order parameter");
-		return -ENOTSUP;
-	}
+	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, NULL);
+	if (ret)
+		return ret;
 
 	if (cipher_xform) {
 		/* Only SNOW 3G UEA2 supported */
@@ -89,11 +110,11 @@ snow3g_set_session_parameters(MB_MGR *mgr, struct snow3g_session *sess,
 			return -ENOTSUP;
 
 		if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
-			SNOW3G_LOG(ERR, "Wrong IV length");
+			IPSEC_MB_LOG(ERR, "Wrong IV length");
 			return -EINVAL;
 		}
 		if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
-			SNOW3G_LOG(ERR, "Not enough memory to store the key");
+			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
 			return -ENOMEM;
 		}
 
@@ -110,18 +131,18 @@ snow3g_set_session_parameters(MB_MGR *mgr, struct snow3g_session *sess,
 			return -ENOTSUP;
 
 		if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
-			SNOW3G_LOG(ERR, "Wrong digest length");
+			IPSEC_MB_LOG(ERR, "Wrong digest length");
 			return -EINVAL;
 		}
 		if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
-			SNOW3G_LOG(ERR, "Not enough memory to store the key");
+			IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
 			return -ENOMEM;
 		}
 
 		sess->auth_op = auth_xform->auth.op;
 
 		if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
-			SNOW3G_LOG(ERR, "Wrong IV length");
+			IPSEC_MB_LOG(ERR, "Wrong IV length");
 			return -EINVAL;
 		}
 		sess->auth_iv_offset = auth_xform->auth.iv.offset;
@@ -136,56 +157,13 @@ snow3g_set_session_parameters(MB_MGR *mgr, struct snow3g_session *sess,
 	return 0;
 }
 
-/** Get SNOW 3G session. */
-static struct snow3g_session *
-snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
-{
-	struct snow3g_session *sess = NULL;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (likely(op->sym->session != NULL))
-			sess = (struct snow3g_session *)
-					get_sym_session_private_data(
-					op->sym->session,
-					cryptodev_driver_id);
-	} else {
-		void *_sess = NULL;
-		void *_sess_private_data = NULL;
-
-		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
-			return NULL;
-
-		if (rte_mempool_get(qp->sess_mp_priv,
-				(void **)&_sess_private_data))
-			return NULL;
-
-		sess = (struct snow3g_session *)_sess_private_data;
-
-		if (unlikely(snow3g_set_session_parameters(qp->mgr, sess,
-				op->sym->xform) != 0)) {
-			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
-			sess = NULL;
-		}
-		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_sym_session_private_data(op->sym->session,
-				cryptodev_driver_id, _sess_private_data);
-	}
-
-	if (unlikely(sess == NULL))
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
-
-	return sess;
-}
-
 /** Encrypt/decrypt mbufs with same cipher key. */
 static uint8_t
-process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
+process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 		struct snow3g_session *session,
 		uint8_t num_ops)
 {
-	unsigned i;
+	uint32_t i;
 	uint8_t processed_ops = 0;
 	const void *src[SNOW3G_MAX_BURST];
 	void *dst[SNOW3G_MAX_BURST];
@@ -207,7 +185,7 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 		processed_ops++;
 	}
 
-	IMB_SNOW3G_F8_N_BUFFER(qp->mgr, &session->pKeySched_cipher, iv,
+	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
 			src, dst, num_bytes, processed_ops);
 
 	return processed_ops;
@@ -215,7 +193,7 @@ process_snow3g_cipher_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 
 /** Encrypt/decrypt mbuf (bit level function). */
 static uint8_t
-process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
+process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
 		struct rte_crypto_op *op,
 		struct snow3g_session *session)
 {
@@ -227,7 +205,7 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
 	if (op->sym->m_dst == NULL) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-		SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
+		IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
 		return 0;
 	}
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
@@ -235,7 +213,7 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 				session->cipher_iv_offset);
 	length_in_bits = op->sym->cipher.data.length;
 
-	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mgr, &session->pKeySched_cipher, iv,
+	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
 			src, dst, length_in_bits, offset_in_bits);
 
 	return 1;
@@ -243,21 +221,22 @@ process_snow3g_cipher_op_bit(struct snow3g_qp *qp,
 
 /** Generate/verify hash from mbufs with same hash key. */
 static int
-process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
+process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 		struct snow3g_session *session,
 		uint8_t num_ops)
 {
-	unsigned i;
+	uint32_t i;
 	uint8_t processed_ops = 0;
 	uint8_t *src, *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
 		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			SNOW3G_LOG(ERR, "Offset");
+			IPSEC_MB_LOG(ERR, "Offset");
 			break;
 		}
 
@@ -269,19 +248,20 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 				session->auth_iv_offset);
 
 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
-			dst = qp->temp_digest;
+			dst = qp_data->temp_digest;
 
-			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
+			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
 			/* Verify digest. */
 			if (memcmp(dst, ops[i]->sym->auth.digest.data,
 					SNOW3G_DIGEST_LENGTH) != 0)
-				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+				ops[i]->status =
+					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 		} else  {
 			dst = ops[i]->sym->auth.digest.data;
 
-			IMB_SNOW3G_F9_1_BUFFER(qp->mgr,
+			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
 		}
@@ -294,11 +274,11 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
 /** Process a batch of crypto ops which shares the same session. */
 static int
 process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
-		struct snow3g_qp *qp, uint8_t num_ops,
+		struct ipsec_mb_qp *qp, uint8_t num_ops,
 		uint16_t *accumulated_enqueued_ops)
 {
-	unsigned i;
-	unsigned enqueued_ops, processed_ops;
+	uint32_t i;
+	uint32_t enqueued_ops, processed_ops;
 
 #ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
 	for (i = 0; i < num_ops; i++) {
@@ -306,7 +286,8 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 				(ops[i]->sym->m_dst != NULL &&
 				!rte_pktmbuf_is_contiguous(
 						ops[i]->sym->m_dst))) {
-			SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, "
+			IPSEC_MB_LOG(ERR,
+				"PMD supports only contiguous mbufs, "
 				"op (%p) provides noncontiguous mbuf as "
 				"source/destination buffer.\n", ops[i]);
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -316,20 +297,24 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 #endif
 
 	switch (session->op) {
-	case SNOW3G_OP_ONLY_CIPHER:
+	case IPSEC_MB_OP_ENCRYPT_ONLY:
+	case IPSEC_MB_OP_DECRYPT_ONLY:
 		processed_ops = process_snow3g_cipher_op(qp, ops,
 				session, num_ops);
 		break;
-	case SNOW3G_OP_ONLY_AUTH:
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
 		processed_ops = process_snow3g_hash_op(qp, ops, session,
 				num_ops);
 		break;
-	case SNOW3G_OP_CIPHER_AUTH:
+	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
 		processed_ops = process_snow3g_cipher_op(qp, ops, session,
 				num_ops);
 		process_snow3g_hash_op(qp, ops, session, processed_ops);
 		break;
-	case SNOW3G_OP_AUTH_CIPHER:
+	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
 		processed_ops = process_snow3g_hash_op(qp, ops, session,
 				num_ops);
 		process_snow3g_cipher_op(qp, ops, session, processed_ops);
@@ -358,9 +343,9 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 		}
 	}
 
-	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+	enqueued_ops = rte_ring_enqueue_burst(qp->ingress_queue,
 			(void **)ops, processed_ops, NULL);
-	qp->qp_stats.enqueued_count += enqueued_ops;
+	qp->stats.enqueued_count += enqueued_ops;
 	*accumulated_enqueued_ops += enqueued_ops;
 
 	return enqueued_ops;
@@ -369,24 +354,29 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
 /** Process a crypto op with length/offset in bits. */
 static int
 process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
-		struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+		struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
 {
-	unsigned enqueued_op, processed_op;
+	uint32_t enqueued_op, processed_op;
 
 	switch (session->op) {
-	case SNOW3G_OP_ONLY_CIPHER:
+	case IPSEC_MB_OP_ENCRYPT_ONLY:
+	case IPSEC_MB_OP_DECRYPT_ONLY:
+
 		processed_op = process_snow3g_cipher_op_bit(qp, op,
 				session);
 		break;
-	case SNOW3G_OP_ONLY_AUTH:
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
 		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
 		break;
-	case SNOW3G_OP_CIPHER_AUTH:
+	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
+	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
 		processed_op = process_snow3g_cipher_op_bit(qp, op, session);
 		if (processed_op == 1)
 			process_snow3g_hash_op(qp, &op, session, 1);
 		break;
-	case SNOW3G_OP_AUTH_CIPHER:
+	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
+	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
 		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
 		if (processed_op == 1)
 			process_snow3g_cipher_op_bit(qp, op, session);
@@ -410,43 +400,49 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
 		op->sym->session = NULL;
 	}
 
-	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+	enqueued_op = rte_ring_enqueue_burst(qp->ingress_queue,
 			(void **)&op, processed_op, NULL);
-	qp->qp_stats.enqueued_count += enqueued_op;
+	qp->stats.enqueued_count += enqueued_op;
 	*accumulated_enqueued_ops += enqueued_op;
 
 	return enqueued_op;
 }
 
 static uint16_t
-snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
+snow3g_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
+	struct ipsec_mb_qp *qp = queue_pair;
 	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
 	struct rte_crypto_op *curr_c_op;
 
 	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
-	struct snow3g_qp *qp = queue_pair;
-	unsigned i;
+	uint32_t i;
 	uint8_t burst_size = 0;
 	uint16_t enqueued_ops = 0;
 	uint8_t processed_ops;
+	uint32_t nb_dequeued;
 
-	for (i = 0; i < nb_ops; i++) {
+	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	for (i = 0; i < nb_dequeued; i++) {
 		curr_c_op = ops[i];
 
 		/* Set status as enqueued (not processed yet) by default. */
 		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 
-		curr_sess = snow3g_get_session(qp, curr_c_op);
+		curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
 		if (unlikely(curr_sess == NULL ||
-				curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
+				curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
 			curr_c_op->status =
 					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 			break;
 		}
 
-		/* If length/offset is at bit-level, process this buffer alone. */
+		/* If length/offset is at bit-level,
+		 * process this buffer alone.
+		 */
 		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
 				|| ((curr_c_op->sym->cipher.data.offset
 					% BYTE_LEN) != 0)) {
@@ -517,131 +513,43 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 				qp, burst_size, &enqueued_ops);
 	}
 
-	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
-	return enqueued_ops;
-}
-
-static uint16_t
-snow3g_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-	struct snow3g_qp *qp = queue_pair;
 
-	unsigned nb_dequeued;
 
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
-			(void **)c_ops, nb_ops, NULL);
-	qp->qp_stats.dequeued_count += nb_dequeued;
+	qp->stats.dequeued_count += i;
 
-	return nb_dequeued;
+	return i;
 }
 
-static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
-
-static int
-cryptodev_snow3g_create(const char *name,
-			struct rte_vdev_device *vdev,
-			struct rte_cryptodev_pmd_init_params *init_params)
-{
-	struct rte_cryptodev *dev;
-	struct snow3g_private *internals;
-	MB_MGR *mgr;
-
-	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
-	if (dev == NULL) {
-		SNOW3G_LOG(ERR, "failed to create cryptodev vdev");
-		goto init_error;
-	}
-
-	dev->driver_id = cryptodev_driver_id;
-	dev->dev_ops = rte_snow3g_pmd_ops;
+struct rte_cryptodev_ops snow3g_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
 
-	/* Register RX/TX burst functions for data path. */
-	dev->dequeue_burst = snow3g_pmd_dequeue_burst;
-	dev->enqueue_burst = snow3g_pmd_enqueue_burst;
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
 
-	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+	.dev_infos_get = ipsec_mb_pmd_info_get,
 
-	mgr = alloc_mb_mgr(0);
-	if (mgr == NULL)
-		return -ENOMEM;
-
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(mgr);
-	} else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mgr);
-	} else {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mgr);
-	}
+	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
 
-	internals = dev->data->dev_private;
-	internals->mgr = mgr;
-
-	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
-
-	return 0;
-init_error:
-	SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed",
-			init_params->name);
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
 
-	cryptodev_snow3g_remove(vdev);
-	return -EFAULT;
-}
+struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
 
 static int
 cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
 {
-	struct rte_cryptodev_pmd_init_params init_params = {
-		"",
-		sizeof(struct snow3g_private),
-		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
-	};
-	const char *name;
-	const char *input_args;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-	input_args = rte_vdev_device_args(vdev);
-
-	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
-
-	return cryptodev_snow3g_create(name, vdev, &init_params);
-}
-
-static int
-cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev *cryptodev;
-	const char *name;
-	struct snow3g_private *internals;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	internals = cryptodev->data->dev_private;
-
-	free_mb_mgr(internals->mgr);
-
-	return rte_cryptodev_pmd_destroy(cryptodev);
+	return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_SNOW3G);
 }
 
 static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
 	.probe = cryptodev_snow3g_probe,
-	.remove = cryptodev_snow3g_remove
+	.remove = cryptodev_ipsec_mb_remove
 };
 
 static struct cryptodev_driver snow3g_crypto_drv;
@@ -649,8 +557,27 @@ static struct cryptodev_driver snow3g_crypto_drv;
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
 RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
-	"max_nb_queue_pairs=<int> "
-	"socket_id=<int>");
+			       "max_nb_queue_pairs=<int> socket_id=<int>");
 RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
-		cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
-RTE_LOG_REGISTER_DEFAULT(snow3g_logtype_driver, INFO);
+				cryptodev_snow3g_pmd_drv.driver,
+				pmd_driver_id_snow3g);
+
+/* Constructor function to register snow3g PMD */
+RTE_INIT(ipsec_mb_register_snow3g)
+{
+	struct ipsec_mb_pmd_data *snow3g_data
+		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];
+
+	snow3g_data->caps = snow3g_capabilities;
+	snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
+	snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
+			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+	snow3g_data->internals_priv_size = 0;
+	snow3g_data->ops = &snow3g_pmd_ops;
+	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
+	snow3g_data->session_configure = snow3g_session_configure;
+	snow3g_data->session_priv_size = sizeof(struct snow3g_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 10fa289017..8ee8b73e55 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -43,6 +43,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
 #define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
 /**< IPSEC Multi buffer PMD kasumi device name */
 
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+/**< IPSEC Multi buffer PMD snow3g device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -54,6 +57,7 @@ enum ipsec_mb_pmd_types {
 	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
 	IPSEC_MB_PMD_TYPE_AESNI_GCM,
 	IPSEC_MB_PMD_TYPE_KASUMI,
+	IPSEC_MB_PMD_TYPE_SNOW3G,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -75,6 +79,7 @@ enum ipsec_mb_operation {
 extern uint8_t pmd_driver_id_aesni_mb;
 extern uint8_t pmd_driver_id_aesni_gcm;
 extern uint8_t pmd_driver_id_kasumi;
+extern uint8_t pmd_driver_id_snow3g;
 
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
@@ -87,6 +92,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 		return pmd_driver_id_aesni_gcm;
 	case IPSEC_MB_PMD_TYPE_KASUMI:
 		return pmd_driver_id_kasumi;
+	case IPSEC_MB_PMD_TYPE_SNOW3G:
+		return pmd_driver_id_snow3g;
 	default:
 		break;
 	}
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 5608bf9573..72b3b776a8 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -22,7 +22,6 @@ drivers = [
         'octeontx2',
         'openssl',
         'scheduler',
-        'snow3g',
         'virtio',
         'zuc',
 ]
diff --git a/drivers/crypto/snow3g/meson.build b/drivers/crypto/snow3g/meson.build
deleted file mode 100644
index 0c087baa2a..0000000000
--- a/drivers/crypto/snow3g/meson.build
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2019-2020 Intel Corporation
-
-IMB_required_ver = '0.53.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
-    build = false
-    reason = 'missing dependency, "libIPSec_MB"'
-else
-    # version comes with quotes, so we split based on " and take the middle
-    imb_ver = cc.get_define('IMB_VERSION_STR',
-        prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
-                reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
-                IMB_required_ver, imb_ver)
-        build = false
-    endif
-
-endif
-
-ext_deps += lib
-sources = files('rte_snow3g_pmd.c', 'rte_snow3g_pmd_ops.c')
-deps += ['bus_vdev', 'cryptodev']
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
deleted file mode 100644
index 3f46014b7d..0000000000
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <cryptodev_pmd.h>
-
-#include "snow3g_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
-	{	/* SNOW 3G (UIA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UEA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-/** Configure device */
-static int
-snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-/** Start device */
-static int
-snow3g_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-/** Stop device */
-static void
-snow3g_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-snow3g_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-
-/** Get device statistics */
-static void
-snow3g_pmd_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
-
-		stats->enqueued_count += qp->qp_stats.enqueued_count;
-		stats->dequeued_count += qp->qp_stats.dequeued_count;
-
-		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
-		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
-	}
-}
-
-/** Reset device statistics */
-static void
-snow3g_pmd_stats_reset(struct rte_cryptodev *dev)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
-
-		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-	}
-}
-
-
-/** Get device info */
-static void
-snow3g_pmd_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *dev_info)
-{
-	struct snow3g_private *internals = dev->data->dev_private;
-
-	if (dev_info != NULL) {
-		dev_info->driver_id = dev->driver_id;
-		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		/* No limit of number of sessions */
-		dev_info->sym.max_nb_sessions = 0;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = snow3g_pmd_capabilities;
-	}
-}
-
-/** Release queue pair */
-static int
-snow3g_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
-	if (dev->data->queue_pairs[qp_id] != NULL) {
-		struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
-
-		if (qp->processed_ops)
-			rte_ring_free(qp->processed_ops);
-
-		rte_free(dev->data->queue_pairs[qp_id]);
-		dev->data->queue_pairs[qp_id] = NULL;
-	}
-	return 0;
-}
-
-/** set a unique name for the queue pair based on its name, dev_id and qp_id */
-static int
-snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
-		struct snow3g_qp *qp)
-{
-	unsigned n = snprintf(qp->name, sizeof(qp->name),
-			"snow3g_pmd_%u_qp_%u",
-			dev->data->dev_id, qp->id);
-
-	if (n >= sizeof(qp->name))
-		return -1;
-
-	return 0;
-}
-
-/** Create a ring to place processed ops on */
-static struct rte_ring *
-snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
-		unsigned ring_size, int socket_id)
-{
-	struct rte_ring *r;
-
-	r = rte_ring_lookup(qp->name);
-	if (r) {
-		if (rte_ring_get_size(r) >= ring_size) {
-			SNOW3G_LOG(INFO, "Reusing existing ring %s"
-					" for processed packets",
-					 qp->name);
-			return r;
-		}
-
-		SNOW3G_LOG(ERR, "Unable to reuse existing ring %s"
-				" for processed packets",
-				 qp->name);
-		return NULL;
-	}
-
-	return rte_ring_create(qp->name, ring_size, socket_id,
-			RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
-{
-	struct snow3g_qp *qp = NULL;
-	struct snow3g_private *internals = dev->data->dev_private;
-
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		snow3g_pmd_qp_release(dev, qp_id);
-
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("SNOW 3G PMD Queue Pair", sizeof(*qp),
-					RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return (-ENOMEM);
-
-	qp->id = qp_id;
-	dev->data->queue_pairs[qp_id] = qp;
-
-	if (snow3g_pmd_qp_set_unique_name(dev, qp))
-		goto qp_setup_cleanup;
-
-	qp->processed_ops = snow3g_pmd_qp_create_processed_ops_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_ops == NULL)
-		goto qp_setup_cleanup;
-
-	qp->mgr = internals->mgr;
-	qp->sess_mp = qp_conf->mp_session;
-	qp->sess_mp_priv = qp_conf->mp_session_private;
-
-	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-
-	return 0;
-
-qp_setup_cleanup:
-	if (qp)
-		rte_free(qp);
-
-	return -1;
-}
-
-/** Returns the size of the SNOW 3G session structure */
-static unsigned
-snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct snow3g_session);
-}
-
-/** Configure a SNOW 3G session from a crypto xform chain */
-static int
-snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev,
-		struct rte_crypto_sym_xform *xform,
-		struct rte_cryptodev_sym_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	int ret;
-	struct snow3g_private *internals = dev->data->dev_private;
-
-	if (unlikely(sess == NULL)) {
-		SNOW3G_LOG(ERR, "invalid session struct");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		SNOW3G_LOG(ERR,
-			"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-
-	ret = snow3g_set_session_parameters(internals->mgr,
-					sess_private_data, xform);
-	if (ret != 0) {
-		SNOW3G_LOG(ERR, "failed configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_sym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_sym_session_private_data(sess, index);
-
-	/* Zero out the whole structure */
-	if (sess_priv) {
-		memset(sess_priv, 0, sizeof(struct snow3g_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_sym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-struct rte_cryptodev_ops snow3g_pmd_ops = {
-		.dev_configure      = snow3g_pmd_config,
-		.dev_start          = snow3g_pmd_start,
-		.dev_stop           = snow3g_pmd_stop,
-		.dev_close          = snow3g_pmd_close,
-
-		.stats_get          = snow3g_pmd_stats_get,
-		.stats_reset        = snow3g_pmd_stats_reset,
-
-		.dev_infos_get      = snow3g_pmd_info_get,
-
-		.queue_pair_setup   = snow3g_pmd_qp_setup,
-		.queue_pair_release = snow3g_pmd_qp_release,
-
-		.sym_session_get_size   = snow3g_pmd_sym_session_get_size,
-		.sym_session_configure  = snow3g_pmd_sym_session_configure,
-		.sym_session_clear      = snow3g_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
diff --git a/drivers/crypto/snow3g/snow3g_pmd_private.h b/drivers/crypto/snow3g/snow3g_pmd_private.h
deleted file mode 100644
index 23cf078a9c..0000000000
--- a/drivers/crypto/snow3g/snow3g_pmd_private.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2019 Intel Corporation
- */
-
-#ifndef _SNOW3G_PMD_PRIVATE_H_
-#define _SNOW3G_PMD_PRIVATE_H_
-
-#include <intel-ipsec-mb.h>
-
-#define CRYPTODEV_NAME_SNOW3G_PMD	crypto_snow3g
-/**< SNOW 3G PMD device name */
-
-/** SNOW 3G PMD LOGTYPE DRIVER */
-extern int snow3g_logtype_driver;
-
-#define SNOW3G_LOG(level, fmt, ...)  \
-	rte_log(RTE_LOG_ ## level, snow3g_logtype_driver,  \
-			"%s() line %u: " fmt "\n", __func__, __LINE__,  \
-					## __VA_ARGS__)
-
-#define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE  128
-
-/** private data structure for each virtual SNOW 3G device */
-struct snow3g_private {
-	unsigned max_nb_queue_pairs;
-	/**< Max number of queue pairs supported by device */
-	MB_MGR *mgr;
-	/**< Multi-buffer instance */
-};
-
-/** SNOW 3G buffer queue pair */
-struct snow3g_qp {
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	/**< Unique Queue Pair Name */
-	struct rte_ring *processed_ops;
-	/**< Ring for placing processed ops */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session Private Data Mempool */
-	struct rte_cryptodev_stats qp_stats;
-	/**< Queue pair statistics */
-	uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-	MB_MGR *mgr;
-	/**< Multi-buffer instance */
-} __rte_cache_aligned;
-
-enum snow3g_operation {
-	SNOW3G_OP_ONLY_CIPHER,
-	SNOW3G_OP_ONLY_AUTH,
-	SNOW3G_OP_CIPHER_AUTH,
-	SNOW3G_OP_AUTH_CIPHER,
-	SNOW3G_OP_NOT_SUPPORTED
-};
-
-/** SNOW 3G private session structure */
-struct snow3g_session {
-	enum snow3g_operation op;
-	enum rte_crypto_auth_operation auth_op;
-	snow3g_key_schedule_t pKeySched_cipher;
-	snow3g_key_schedule_t pKeySched_hash;
-	uint16_t cipher_iv_offset;
-	uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-
-extern int
-snow3g_set_session_parameters(MB_MGR *mgr, struct snow3g_session *sess,
-		const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
-
-
-
-#endif /* _SNOW3G_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/snow3g/version.map b/drivers/crypto/snow3g/version.map
deleted file mode 100644
index c2e0723b4c..0000000000
--- a/drivers/crypto/snow3g/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_22 {
-	local: *;
-};
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 07/10] crypto/ipsec_mb: add snow3g digest appended ops support
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (5 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 06/10] drivers/crypto: move snow3g " Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 08/10] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
                           ` (2 subsequent siblings)
  9 siblings, 0 replies; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Damian Nowak, Kai Ji,
	Ciara Power, Pablo de Lara

From: Piotr Bronowski <piotrx.bronowski@intel.com>

This patch enables out-of-place auth-cipher operations where the
digest is encrypted along with the rest of the raw data. It also
adds support for a partially encrypted digest when using auth-cipher
operations.
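
As a rough illustration of the layout this feature expects (the helper
name and lengths below are assumptions for the sketch, not part of the
patch): the 4-byte SNOW 3G digest sits in m_src immediately after the
authenticated region, and the cipher range is extended to cover it, so
the auth region ends before the cipher region and the digest is
encrypted into the out-of-place destination:

  #include <rte_crypto.h>
  #include <rte_mbuf.h>

  /* Hypothetical helper: lay out an out-of-place SNOW 3G op whose
   * appended 4-byte digest is covered by the cipher range.
   * SNOW 3G offsets/lengths are expressed in bits.
   */
  static void
  fill_digest_encrypted_op(struct rte_crypto_op *op,
                           struct rte_mbuf *m_src, struct rte_mbuf *m_dst,
                           uint32_t payload_bytes, uint8_t *digest_buf)
  {
          op->sym->m_src = m_src;
          op->sym->m_dst = m_dst;                 /* out-of-place */

          /* Authenticate the payload only. */
          op->sym->auth.data.offset = 0;
          op->sym->auth.data.length = payload_bytes << 3;

          /* Encrypt the payload plus the appended 4-byte digest, so
           * the auth region ends before the cipher region does.
           */
          op->sym->cipher.data.offset = 0;
          op->sym->cipher.data.length = (payload_bytes + 4) << 3;

          /* The PMD copies the computed digest back here as well. */
          op->sym->auth.digest.data = digest_buf;
  }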

Signed-off-by: Damian Nowak <damianx.nowak@intel.com>
Signed-off-by: Kai Ji <kai.ji@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v2: Added release note.
---
 doc/guides/cryptodevs/features/snow3g.ini |   1 +
 doc/guides/rel_notes/release_21_11.rst    |   7 +
 drivers/crypto/ipsec_mb/pmd_snow3g.c      | 148 +++++++++++++++++++---
 3 files changed, 139 insertions(+), 17 deletions(-)

diff --git a/doc/guides/cryptodevs/features/snow3g.ini b/doc/guides/cryptodevs/features/snow3g.ini
index 14ac7e4b6d..4d4c5b579b 100644
--- a/doc/guides/cryptodevs/features/snow3g.ini
+++ b/doc/guides/cryptodevs/features/snow3g.ini
@@ -8,6 +8,7 @@ Symmetric crypto       = Y
 Sym operation chaining = Y
 Symmetric sessionless  = Y
 Non-Byte aligned data  = Y
+Digest encrypted       = Y
 OOP LB  In LB  Out     = Y
 
 ;
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 3c9d7e19cb..696541dab7 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -69,6 +69,13 @@ New Features
   This feature makes use of an intel-ipsec-mb API found in v1.1,
   which is the minimum required version to use this multi-process support.
 
+* **Added digest appended ops support for Snow3G PMD.**
+
+  * Added support for out-of-place auth-cipher operations that encrypt
+    the digest along with the rest of the raw data.
+  * Added support for partially encrypted digest when using auth-cipher
+    operations.
+
 * **Updated Marvell cnxk crypto PMD.**
 
   * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index cc1ad3fb24..c61067f910 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -157,6 +157,24 @@ snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
 	return 0;
 }
 
+/** Check if conditions are met for digest-appended operations */
+static uint8_t *
+snow3g_digest_appended_in_src(struct rte_crypto_op *op)
+{
+	unsigned int auth_size, cipher_size;
+
+	auth_size = (op->sym->auth.data.offset >> 3) +
+		(op->sym->auth.data.length >> 3);
+	cipher_size = (op->sym->cipher.data.offset >> 3) +
+		(op->sym->cipher.data.length >> 3);
+
+	if (auth_size < cipher_size)
+		return rte_pktmbuf_mtod_offset(op->sym->m_src,
+				uint8_t *, auth_size);
+
+	return NULL;
+}
+
 /** Encrypt/decrypt mbufs with same cipher key. */
 static uint8_t
 process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
@@ -165,29 +183,75 @@ process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 {
 	uint32_t i;
 	uint8_t processed_ops = 0;
-	const void *src[SNOW3G_MAX_BURST];
-	void *dst[SNOW3G_MAX_BURST];
-	const void *iv[SNOW3G_MAX_BURST];
-	uint32_t num_bytes[SNOW3G_MAX_BURST];
+	const void *src[SNOW3G_MAX_BURST] = {NULL};
+	void *dst[SNOW3G_MAX_BURST] = {NULL};
+	uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
+	const void *iv[SNOW3G_MAX_BURST] = {NULL};
+	uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
+	uint32_t cipher_off, cipher_len;
+	int unencrypted_bytes = 0;
 
 	for (i = 0; i < num_ops; i++) {
-		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
-		dst[i] = ops[i]->sym->m_dst ?
-			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3) :
-			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
-				(ops[i]->sym->cipher.data.offset >> 3);
+
+		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
+		cipher_len = ops[i]->sym->cipher.data.length >> 3;
+		src[i] = rte_pktmbuf_mtod_offset(
+			ops[i]->sym->m_src,	uint8_t *, cipher_off);
+
+		/* If out-of-place operation */
+		if (ops[i]->sym->m_dst &&
+			ops[i]->sym->m_src != ops[i]->sym->m_dst) {
+			dst[i] = rte_pktmbuf_mtod_offset(
+				ops[i]->sym->m_dst, uint8_t *, cipher_off);
+
+			/* In case of out-of-place, auth-cipher operation
+			 * with partial encryption of the digest, copy
+			 * the remaining, unencrypted part.
+			 */
+			if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
+			    || session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
+				unencrypted_bytes =
+					(ops[i]->sym->auth.data.offset >> 3) +
+					(ops[i]->sym->auth.data.length >> 3) +
+					(SNOW3G_DIGEST_LENGTH) -
+					cipher_off - cipher_len;
+			if (unencrypted_bytes > 0)
+				rte_memcpy(
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_dst, uint8_t *,
+						cipher_off + cipher_len),
+					rte_pktmbuf_mtod_offset(
+						ops[i]->sym->m_src, uint8_t *,
+						cipher_off + cipher_len),
+					unencrypted_bytes);
+		} else
+			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
+						uint8_t *, cipher_off);
+
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
 				session->cipher_iv_offset);
-		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
-
+		num_bytes[i] = cipher_len;
 		processed_ops++;
 	}
 
 	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
 			src, dst, num_bytes, processed_ops);
 
+	/* Take care of the raw digest data in src buffer */
+	for (i = 0; i < num_ops; i++) {
+		if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
+			session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
+				ops[i]->sym->m_dst != NULL) {
+			digest_appended[i] =
+				snow3g_digest_appended_in_src(ops[i]);
+			/* Clear unencrypted digest from
+			 * the src buffer
+			 */
+			if (digest_appended[i] != NULL)
+				memset(digest_appended[i],
+					0, SNOW3G_DIGEST_LENGTH);
+		}
+	}
 	return processed_ops;
 }
 
@@ -200,6 +264,7 @@ process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
 	uint8_t *src, *dst;
 	uint8_t *iv;
 	uint32_t length_in_bits, offset_in_bits;
+	int unencrypted_bytes = 0;
 
 	offset_in_bits = op->sym->cipher.data.offset;
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
@@ -208,10 +273,32 @@ process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
 		IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
 		return 0;
 	}
+	length_in_bits = op->sym->cipher.data.length;
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+	/* In case of out-of-place, auth-cipher operation
+	 * with partial encryption of the digest, copy
+	 * the remaining, unencrypted part.
+	 */
+	if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
+		session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
+		unencrypted_bytes =
+			(op->sym->auth.data.offset >> 3) +
+			(op->sym->auth.data.length >> 3) +
+			(SNOW3G_DIGEST_LENGTH) -
+			(offset_in_bits >> 3) -
+			(length_in_bits >> 3);
+	if (unencrypted_bytes > 0)
+		rte_memcpy(
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_dst, uint8_t *,
+				(length_in_bits >> 3)),
+			rte_pktmbuf_mtod_offset(
+				op->sym->m_src, uint8_t *,
+				(length_in_bits >> 3)),
+				unencrypted_bytes);
+
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 				session->cipher_iv_offset);
-	length_in_bits = op->sym->cipher.data.length;
 
 	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
 			src, dst, length_in_bits, offset_in_bits);
@@ -230,6 +317,7 @@ process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 	uint8_t *src, *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	uint8_t digest_appended = 0;
 	struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
 
 	for (i = 0; i < num_ops; i++) {
@@ -240,6 +328,8 @@ process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}
 
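+		/* Reset dst for every op: a pointer left over from the
+		 * previous iteration must not be mistaken for an appended
+		 * digest below.
+		 */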
+		dst = NULL;
+
 		length_in_bits = ops[i]->sym->auth.data.length;
 
 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
@@ -249,6 +339,15 @@ process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 
 		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = qp_data->temp_digest;
+			/* Handle the auth-cipher verify out-of-place case */
+			if ((session->op ==
+				IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
+				session->op ==
+				IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
+				ops[i]->sym->m_dst != NULL)
+				src = rte_pktmbuf_mtod_offset(
+					ops[i]->sym->m_dst, uint8_t *,
+					ops[i]->sym->auth.data.offset >> 3);
 
 			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
 					&session->pKeySched_hash,
@@ -258,12 +357,26 @@ process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 					SNOW3G_DIGEST_LENGTH) != 0)
 				ops[i]->status =
 					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else  {
-			dst = ops[i]->sym->auth.digest.data;
+		} else {
+			if (session->op ==
+				IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
+				session->op ==
+				IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
+				dst = snow3g_digest_appended_in_src(ops[i]);
+
+			if (dst != NULL)
+				digest_appended = 1;
+			else
+				dst = ops[i]->sym->auth.digest.data;
 
 			IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
 					&session->pKeySched_hash,
 					iv, src, length_in_bits, dst);
+
+			/* Copy back digest from src to auth.digest.data */
+			if (digest_appended)
+				rte_memcpy(ops[i]->sym->auth.digest.data,
+					dst, SNOW3G_DIGEST_LENGTH);
 		}
 		processed_ops++;
 	}
@@ -574,7 +687,8 @@ RTE_INIT(ipsec_mb_register_snow3g)
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
 			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
 	snow3g_data->internals_priv_size = 0;
 	snow3g_data->ops = &snow3g_pmd_ops;
 	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
-- 
2.25.1

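For reference, the trailing-bytes computation shared by the byte- and
bit-level cipher paths above can be sketched as a standalone helper
(plain C; the helper name and freestanding form are illustrative, not
part of the patch):

/*
 * Number of digest bytes that fall outside the cipher range and must
 * be copied to the destination unmodified. All values are in bytes;
 * the PMD converts the bit-based offsets/lengths with ">> 3" first.
 */
static inline int
snow3g_unencrypted_tail(uint32_t auth_off, uint32_t auth_len,
		uint32_t cipher_off, uint32_t cipher_len)
{
	return (int)(auth_off + auth_len + SNOW3G_DIGEST_LENGTH -
			cipher_off - cipher_len);
}

For example, with a 48-byte authenticated region at offset 0, the 4-byte
digest appended at offset 48 and a cipher range covering the first 50
bytes, the helper returns 2: the last two digest bytes stay unencrypted
and must be copied verbatim from the source to the destination buffer.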

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 08/10] drivers/crypto: move zuc PMD to IPsec-mb framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (6 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 07/10] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 10/10] doc/rel_notes: added note for SW Crypto PMD change Ciara Power
  9 siblings, 0 replies; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power,
	Thomas Monjalon, Pablo de Lara, Ray Kinsella

From: Piotr Bronowski <piotrx.bronowski@intel.com>

This patch removes the crypto/zuc folder and gathers all zuc PMD
implementation specific details into a single file,
pmd_zuc.c in the crypto/ipsec_mb folder.

Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v2: Updated maintainers file.
---
 MAINTAINERS                                   |   8 +-
 doc/guides/cryptodevs/zuc.rst                 |   3 +-
 drivers/crypto/ipsec_mb/meson.build           |   3 +-
 .../{zuc/rte_zuc_pmd.c => ipsec_mb/pmd_zuc.c} | 459 +++++++-----------
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
 drivers/crypto/meson.build                    |   1 -
 drivers/crypto/zuc/meson.build                |  24 -
 drivers/crypto/zuc/rte_zuc_pmd_ops.c          | 322 ------------
 drivers/crypto/zuc/version.map                |   3 -
 drivers/crypto/zuc/zuc_pmd_private.h          |  83 ----
 10 files changed, 199 insertions(+), 714 deletions(-)
 rename drivers/crypto/{zuc/rte_zuc_pmd.c => ipsec_mb/pmd_zuc.c} (50%)
 delete mode 100644 drivers/crypto/zuc/meson.build
 delete mode 100644 drivers/crypto/zuc/rte_zuc_pmd_ops.c
 delete mode 100644 drivers/crypto/zuc/version.map
 delete mode 100644 drivers/crypto/zuc/zuc_pmd_private.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 28855222d6..4a4ed84997 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1059,10 +1059,12 @@ F: doc/guides/cryptodevs/aesni_gcm.rst
 F: doc/guides/cryptodevs/aesni_mb.rst
 F: doc/guides/cryptodevs/kasumi.rst
 F: doc/guides/cryptodevs/snow3g.rst
+F: doc/guides/cryptodevs/zuc.rst
 F: doc/guides/cryptodevs/features/aesni_gcm.ini
 F: doc/guides/cryptodevs/features/aesni_mb.ini
 F: doc/guides/cryptodevs/features/kasumi.ini
 F: doc/guides/cryptodevs/features/snow3g.ini
+F: doc/guides/cryptodevs/features/zuc.ini
 
 Marvell cnxk crypto
 M: Ankur Dwivedi <adwivedi@marvell.com>
@@ -1139,12 +1141,6 @@ F: drivers/crypto/virtio/
 F: doc/guides/cryptodevs/virtio.rst
 F: doc/guides/cryptodevs/features/virtio.ini
 
-ZUC
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: drivers/crypto/zuc/
-F: doc/guides/cryptodevs/zuc.rst
-F: doc/guides/cryptodevs/features/zuc.ini
-
 
 Compression Drivers
 -------------------
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index 988a79bc26..8c925665b5 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -77,7 +77,8 @@ and the external crypto libraries supported by them:
    DPDK version   Crypto library version
    =============  ================================
    16.11 - 19.11  LibSSO ZUC
-   20.02+         Multi-buffer library 0.53 - 1.0*
+   20.02 - 21.08  Multi-buffer library 0.53 - 1.0*
+   21.11+         Multi-buffer library 1.0*
    =============  ================================
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index e9d74eaad4..a1619c78ac 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -26,6 +26,7 @@ sources = files('rte_ipsec_mb_pmd.c',
 		'pmd_aesni_mb.c',
 		'pmd_aesni_gcm.c',
 		'pmd_kasumi.c',
-		'pmd_snow3g.c'
+		'pmd_snow3g.c',
+		'pmd_zuc.c'
 		)
 deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/ipsec_mb/pmd_zuc.c
similarity index 50%
rename from drivers/crypto/zuc/rte_zuc_pmd.c
rename to drivers/crypto/ipsec_mb/pmd_zuc.c
index d4b343a7af..35a560ed5f 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/ipsec_mb/pmd_zuc.c
@@ -1,85 +1,108 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
+ * Copyright(c) 2015-2021 Intel Corporation
  */
 
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
+#include <intel-ipsec-mb.h>
 
-#include "zuc_pmd_private.h"
-#define ZUC_MAX_BURST 16
-#define BYTE_LEN 8
-
-static uint8_t cryptodev_driver_id;
+#if defined(RTE_LIB_SECURITY)
+#define AESNI_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#include <rte_ether.h>
+#endif
 
-/** Get xform chain order. */
-static enum zuc_operation
-zuc_get_mode(const struct rte_crypto_sym_xform *xform)
-{
-	if (xform == NULL)
-		return ZUC_OP_NOT_SUPPORTED;
-
-	if (xform->next)
-		if (xform->next->next != NULL)
-			return ZUC_OP_NOT_SUPPORTED;
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		if (xform->next == NULL)
-			return ZUC_OP_ONLY_AUTH;
-		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return ZUC_OP_AUTH_CIPHER;
-		else
-			return ZUC_OP_NOT_SUPPORTED;
-	}
+#include "rte_ipsec_mb_pmd_private.h"
 
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		if (xform->next == NULL)
-			return ZUC_OP_ONLY_CIPHER;
-		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-			return ZUC_OP_CIPHER_AUTH;
-		else
-			return ZUC_OP_NOT_SUPPORTED;
-	}
+#define ZUC_IV_KEY_LENGTH 16
+#define ZUC_DIGEST_LENGTH 4
+#define ZUC_MAX_BURST 16
+#define BYTE_LEN 8
 
-	return ZUC_OP_NOT_SUPPORTED;
-}
+uint8_t pmd_driver_id_zuc;
+
+static const struct rte_cryptodev_capabilities zuc_capabilities[] = {
+	{	/* ZUC (EIA3) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = ZUC_DIGEST_LENGTH,
+					.max = ZUC_DIGEST_LENGTH,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = ZUC_IV_KEY_LENGTH,
+					.max = ZUC_IV_KEY_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* ZUC (EEA3) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = ZUC_IV_KEY_LENGTH,
+					.max = ZUC_IV_KEY_LENGTH,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
 
+/** ZUC private session structure */
+struct zuc_session {
+	enum ipsec_mb_operation op;
+	enum rte_crypto_auth_operation auth_op;
+	uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+	uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+	uint16_t cipher_iv_offset;
+	uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+struct zuc_qp_data {
+	uint8_t temp_digest[ZUC_MAX_BURST][ZUC_DIGEST_LENGTH];
+	/**< Per-op buffers used to store the digests generated
+	 * by the driver when verifying digests provided
+	 * by the user (using authentication verify operation)
+	 */
+};
 
 /** Parse crypto xform chain and set private session parameters. */
-int
-zuc_set_session_parameters(struct zuc_session *sess,
+static int
+zuc_session_configure(__rte_unused IMB_MGR * mgr, void *zuc_sess,
 		const struct rte_crypto_sym_xform *xform)
 {
+	struct zuc_session *sess = (struct zuc_session *) zuc_sess;
 	const struct rte_crypto_sym_xform *auth_xform = NULL;
 	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-	enum zuc_operation mode;
-
+	enum ipsec_mb_operation mode;
 	/* Select Crypto operation - hash then cipher / cipher then hash */
-	mode = zuc_get_mode(xform);
+	int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, NULL);
 
-	switch (mode) {
-	case ZUC_OP_CIPHER_AUTH:
-		auth_xform = xform->next;
-
-		/* Fall-through */
-	case ZUC_OP_ONLY_CIPHER:
-		cipher_xform = xform;
-		break;
-	case ZUC_OP_AUTH_CIPHER:
-		cipher_xform = xform->next;
-		/* Fall-through */
-	case ZUC_OP_ONLY_AUTH:
-		auth_xform = xform;
-		break;
-	case ZUC_OP_NOT_SUPPORTED:
-	default:
-		ZUC_LOG(ERR, "Unsupported operation chain order parameter");
-		return -ENOTSUP;
-	}
+	if (ret)
+		return ret;
 
 	if (cipher_xform) {
 		/* Only ZUC EEA3 supported */
@@ -87,7 +110,7 @@ zuc_set_session_parameters(struct zuc_session *sess,
 			return -ENOTSUP;
 
 		if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
-			ZUC_LOG(ERR, "Wrong IV length");
+			IPSEC_MB_LOG(ERR, "Wrong IV length");
 			return -EINVAL;
 		}
 		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
@@ -103,14 +126,14 @@ zuc_set_session_parameters(struct zuc_session *sess,
 			return -ENOTSUP;
 
 		if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
-			ZUC_LOG(ERR, "Wrong digest length");
+			IPSEC_MB_LOG(ERR, "Wrong digest length");
 			return -EINVAL;
 		}
 
 		sess->auth_op = auth_xform->auth.op;
 
 		if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
-			ZUC_LOG(ERR, "Wrong IV length");
+			IPSEC_MB_LOG(ERR, "Wrong IV length");
 			return -EINVAL;
 		}
 		sess->auth_iv_offset = auth_xform->auth.iv.offset;
@@ -120,61 +143,17 @@ zuc_set_session_parameters(struct zuc_session *sess,
 				ZUC_IV_KEY_LENGTH);
 	}
 
-
 	sess->op = mode;
-
 	return 0;
 }
 
-/** Get ZUC session. */
-static struct zuc_session *
-zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
-{
-	struct zuc_session *sess = NULL;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (likely(op->sym->session != NULL))
-			sess = (struct zuc_session *)get_sym_session_private_data(
-					op->sym->session,
-					cryptodev_driver_id);
-	} else {
-		void *_sess = NULL;
-		void *_sess_private_data = NULL;
-
-		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
-			return NULL;
-
-		if (rte_mempool_get(qp->sess_mp_priv,
-				(void **)&_sess_private_data))
-			return NULL;
-
-		sess = (struct zuc_session *)_sess_private_data;
-
-		if (unlikely(zuc_set_session_parameters(sess,
-				op->sym->xform) != 0)) {
-			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
-			sess = NULL;
-		}
-		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_sym_session_private_data(op->sym->session,
-				cryptodev_driver_id, _sess_private_data);
-	}
-
-	if (unlikely(sess == NULL))
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
-
-	return sess;
-}
-
 /** Encrypt/decrypt mbufs. */
 static uint8_t
-process_zuc_cipher_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
+process_zuc_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 		struct zuc_session **sessions,
 		uint8_t num_ops)
 {
-	unsigned i;
+	unsigned int i;
 	uint8_t processed_ops = 0;
 	const void *src[ZUC_MAX_BURST];
 	void *dst[ZUC_MAX_BURST];
@@ -188,7 +167,7 @@ process_zuc_cipher_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 				|| ((ops[i]->sym->cipher.data.offset
 					% BYTE_LEN) != 0)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			ZUC_LOG(ERR, "Data Length or offset");
+			IPSEC_MB_LOG(ERR, "Data Length or offset");
 			break;
 		}
 
@@ -199,9 +178,14 @@ process_zuc_cipher_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 				(ops[i]->sym->m_dst != NULL &&
 				!rte_pktmbuf_is_contiguous(
 						ops[i]->sym->m_dst))) {
-			ZUC_LOG(ERR, "PMD supports only contiguous mbufs, "
-				"op (%p) provides noncontiguous mbuf as "
-				"source/destination buffer.\n", ops[i]);
+			IPSEC_MB_LOG(ERR,
+				"PMD supports only contiguous mbufs, "
+				"op (%p) provides noncontiguous mbuf "
+				"as source/destination buffer.\n",
+				ops[i]);
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			break;
 		}
@@ -232,24 +216,26 @@ process_zuc_cipher_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 
 /** Generate/verify hash from mbufs. */
 static int
-process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
+process_zuc_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 		struct zuc_session **sessions,
 		uint8_t num_ops)
 {
 	unsigned int i;
 	uint8_t processed_ops = 0;
-	uint8_t *src[ZUC_MAX_BURST] = { 0 };
+	uint8_t *src[ZUC_MAX_BURST];
 	uint32_t *dst[ZUC_MAX_BURST];
-	uint32_t length_in_bits[ZUC_MAX_BURST] = { 0 };
-	uint8_t *iv[ZUC_MAX_BURST] = { 0 };
-	const void *hash_keys[ZUC_MAX_BURST] = { 0 };
+	uint32_t length_in_bits[ZUC_MAX_BURST];
+	uint8_t *iv[ZUC_MAX_BURST];
+	const void *hash_keys[ZUC_MAX_BURST];
 	struct zuc_session *sess;
+	struct zuc_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
 		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			ZUC_LOG(ERR, "Offset");
+			IPSEC_MB_LOG(ERR, "Offset");
 			break;
 		}
 
@@ -264,22 +250,16 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 
 		hash_keys[i] = sess->pKey_hash;
 		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
-			dst[i] = (uint32_t *)qp->temp_digest;
+			dst[i] = (uint32_t *)qp_data->temp_digest[i];
 		else
 			dst[i] = (uint32_t *)ops[i]->sym->auth.digest.data;
 
-#if IMB_VERSION_NUM < IMB_VERSION(0, 53, 3)
-		IMB_ZUC_EIA3_1_BUFFER(qp->mb_mgr, hash_keys[i],
-				iv[i], src[i], length_in_bits[i], dst[i]);
-#endif
 		processed_ops++;
 	}
 
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
 	IMB_ZUC_EIA3_N_BUFFER(qp->mb_mgr, (const void **)hash_keys,
 			(const void * const *)iv, (const void * const *)src,
 			length_in_bits, dst, processed_ops);
-#endif
 
 	/*
 	 * If tag needs to be verified, compare generated tag
@@ -289,36 +269,39 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 		if (sessions[i]->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
 			if (memcmp(dst[i], ops[i]->sym->auth.digest.data,
 					ZUC_DIGEST_LENGTH) != 0)
-				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+				ops[i]->status =
+					RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 
 	return processed_ops;
 }
 
 /** Process a batch of crypto ops which shares the same operation type. */
 static int
-process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+process_ops(struct rte_crypto_op **ops, enum ipsec_mb_operation op_type,
 		struct zuc_session **sessions,
-		struct zuc_qp *qp, uint8_t num_ops,
+		struct ipsec_mb_qp *qp, uint8_t num_ops,
 		uint16_t *accumulated_enqueued_ops)
 {
-	unsigned i;
-	unsigned enqueued_ops, processed_ops;
+	unsigned int i;
+	unsigned int processed_ops;
 
 	switch (op_type) {
-	case ZUC_OP_ONLY_CIPHER:
+	case IPSEC_MB_OP_ENCRYPT_ONLY:
+	case IPSEC_MB_OP_DECRYPT_ONLY:
 		processed_ops = process_zuc_cipher_op(qp, ops,
 				sessions, num_ops);
 		break;
-	case ZUC_OP_ONLY_AUTH:
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
 		processed_ops = process_zuc_hash_op(qp, ops, sessions,
 				num_ops);
 		break;
-	case ZUC_OP_CIPHER_AUTH:
+	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
 		processed_ops = process_zuc_cipher_op(qp, ops, sessions,
 				num_ops);
 		process_zuc_hash_op(qp, ops, sessions, processed_ops);
 		break;
-	case ZUC_OP_AUTH_CIPHER:
+	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
 		processed_ops = process_zuc_hash_op(qp, ops, sessions,
 				num_ops);
 		process_zuc_cipher_op(qp, ops, sessions, processed_ops);
@@ -347,35 +330,38 @@ process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
 		}
 	}
 
-	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
-			(void **)ops, processed_ops, NULL);
-	qp->qp_stats.enqueued_count += enqueued_ops;
-	*accumulated_enqueued_ops += enqueued_ops;
+	*accumulated_enqueued_ops += processed_ops;
 
-	return enqueued_ops;
+	return processed_ops;
 }
 
 static uint16_t
-zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
+zuc_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **c_ops, uint16_t nb_ops)
 {
-	struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
 	struct rte_crypto_op *curr_c_op;
 
 	struct zuc_session *curr_sess;
 	struct zuc_session *sessions[ZUC_MAX_BURST];
-	enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
-	enum zuc_operation curr_zuc_op;
-	struct zuc_qp *qp = queue_pair;
-	unsigned i;
+	enum ipsec_mb_operation prev_zuc_op = IPSEC_MB_OP_NOT_SUPPORTED;
+	enum ipsec_mb_operation curr_zuc_op;
+	struct ipsec_mb_qp *qp = queue_pair;
+	unsigned int nb_dequeued;
+	unsigned int i;
 	uint8_t burst_size = 0;
 	uint16_t enqueued_ops = 0;
 	uint8_t processed_ops;
 
-	for (i = 0; i < nb_ops; i++) {
-		curr_c_op = ops[i];
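+	/* In the ipsec_mb framework the ops are staged on the queue pair's
+	 * ingress ring by the common enqueue path; they are pulled and
+	 * processed here at dequeue time.
+	 */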
+	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+			(void **)c_ops, nb_ops, NULL);
+
-		curr_sess = zuc_get_session(qp, curr_c_op);
+	for (i = 0; i < nb_dequeued; i++) {
+		curr_c_op = c_ops[i];
+
+		curr_sess = (struct zuc_session *)
+			ipsec_mb_get_session_private(qp, curr_c_op);
 		if (unlikely(curr_sess == NULL)) {
 			curr_c_op->status =
 					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
@@ -441,143 +427,70 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 				&enqueued_ops);
 	}
 
-	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
-	return enqueued_ops;
-}
-
-static uint16_t
-zuc_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **c_ops, uint16_t nb_ops)
-{
-	struct zuc_qp *qp = queue_pair;
-
-	unsigned nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
-			(void **)c_ops, nb_ops, NULL);
-	qp->qp_stats.dequeued_count += nb_dequeued;
 
-	return nb_dequeued;
+	qp->stats.dequeued_count += i;
+	return i;
 }
 
-static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
+struct rte_cryptodev_ops zuc_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
 
-static int
-cryptodev_zuc_create(const char *name,
-		struct rte_vdev_device *vdev,
-		struct rte_cryptodev_pmd_init_params *init_params)
-{
-	struct rte_cryptodev *dev;
-	struct zuc_private *internals;
-	MB_MGR *mb_mgr;
-
-	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
-	if (dev == NULL) {
-		ZUC_LOG(ERR, "failed to create cryptodev vdev");
-		goto init_error;
-	}
-
-	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
-
-	mb_mgr = alloc_mb_mgr(0);
-	if (mb_mgr == NULL)
-		return -ENOMEM;
-
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
-		init_mb_mgr_avx512(mb_mgr);
-	} else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(mb_mgr);
-	} else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mb_mgr);
-	} else {
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mb_mgr);
-	}
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
 
-	dev->driver_id = cryptodev_driver_id;
-	dev->dev_ops = rte_zuc_pmd_ops;
+	.dev_infos_get = ipsec_mb_pmd_info_get,
 
-	/* Register RX/TX burst functions for data path. */
-	dev->dequeue_burst = zuc_pmd_dequeue_burst;
-	dev->enqueue_burst = zuc_pmd_enqueue_burst;
+	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
 
-	internals = dev->data->dev_private;
-	internals->mb_mgr = mb_mgr;
-
-	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
-
-	return 0;
-init_error:
-	ZUC_LOG(ERR, "driver %s: failed",
-			init_params->name);
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
 
-	cryptodev_zuc_remove(vdev);
-	return -EFAULT;
-}
+struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
 
 static int
 cryptodev_zuc_probe(struct rte_vdev_device *vdev)
 {
-	struct rte_cryptodev_pmd_init_params init_params = {
-		"",
-		sizeof(struct zuc_private),
-		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
-	};
-	const char *name;
-	const char *input_args;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-	input_args = rte_vdev_device_args(vdev);
-
-	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
-
-	return cryptodev_zuc_create(name, vdev, &init_params);
-}
-
-static int
-cryptodev_zuc_remove(struct rte_vdev_device *vdev)
-{
-
-	struct rte_cryptodev *cryptodev;
-	const char *name;
-	struct zuc_private *internals;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	internals = cryptodev->data->dev_private;
-
-	free_mb_mgr(internals->mb_mgr);
-
-	return rte_cryptodev_pmd_destroy(cryptodev);
+	return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_ZUC);
 }
 
 static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
 	.probe = cryptodev_zuc_probe,
-	.remove = cryptodev_zuc_remove
+	.remove = cryptodev_ipsec_mb_remove
+
 };
 
 static struct cryptodev_driver zuc_crypto_drv;
 
 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
-	"max_nb_queue_pairs=<int> "
-	"socket_id=<int>");
+	"max_nb_queue_pairs=<int> socket_id=<int>");
 RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
-		cryptodev_driver_id);
-RTE_LOG_REGISTER_DEFAULT(zuc_logtype_driver, INFO);
+		pmd_driver_id_zuc);
+
+/* Constructor function to register zuc PMD */
+RTE_INIT(ipsec_mb_register_zuc)
+{
+	struct ipsec_mb_pmd_data *zuc_data
+	    = &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_ZUC];
+
+	zuc_data->caps = zuc_capabilities;
+	zuc_data->dequeue_burst = zuc_pmd_dequeue_burst;
+	zuc_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
+			| RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
+			| RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA
+			| RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
+			| RTE_CRYPTODEV_FF_SYM_SESSIONLESS
+			| RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+	zuc_data->internals_priv_size = 0;
+	zuc_data->ops = &zuc_pmd_ops;
+	zuc_data->qp_priv_size = sizeof(struct zuc_qp_data);
+	zuc_data->session_configure = zuc_session_configure;
+	zuc_data->session_priv_size = sizeof(struct zuc_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 8ee8b73e55..b6a98a85ba 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -46,6 +46,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
 #define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
 /**< IPSEC Multi buffer PMD snow3g device name */
 
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+/**< IPSEC Multi buffer PMD zuc device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -58,6 +61,7 @@ enum ipsec_mb_pmd_types {
 	IPSEC_MB_PMD_TYPE_AESNI_GCM,
 	IPSEC_MB_PMD_TYPE_KASUMI,
 	IPSEC_MB_PMD_TYPE_SNOW3G,
+	IPSEC_MB_PMD_TYPE_ZUC,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -80,6 +84,7 @@ extern uint8_t pmd_driver_id_aesni_mb;
 extern uint8_t pmd_driver_id_aesni_gcm;
 extern uint8_t pmd_driver_id_kasumi;
 extern uint8_t pmd_driver_id_snow3g;
+extern uint8_t pmd_driver_id_zuc;
 
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
@@ -94,6 +99,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 		return pmd_driver_id_kasumi;
 	case IPSEC_MB_PMD_TYPE_SNOW3G:
 		return pmd_driver_id_snow3g;
+	case IPSEC_MB_PMD_TYPE_ZUC:
+		return pmd_driver_id_zuc;
 	default:
 		break;
 	}
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 72b3b776a8..cb14371e9d 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -23,7 +23,6 @@ drivers = [
         'openssl',
         'scheduler',
         'virtio',
-        'zuc',
 ]
 
 std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
diff --git a/drivers/crypto/zuc/meson.build b/drivers/crypto/zuc/meson.build
deleted file mode 100644
index a5f77a22d8..0000000000
--- a/drivers/crypto/zuc/meson.build
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018-2020 Intel Corporation
-
-IMB_required_ver = '0.53.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
-    build = false
-    reason = 'missing dependency, "libIPSec_MB"'
-else
-    # version comes with quotes, so we split based on " and take the middle
-    imb_ver = cc.get_define('IMB_VERSION_STR',
-        prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
-        reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
-                IMB_required_ver, imb_ver)
-        build = false
-    endif
-
-endif
-
-ext_deps += lib
-sources = files('rte_zuc_pmd.c', 'rte_zuc_pmd_ops.c')
-deps += ['bus_vdev']
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
deleted file mode 100644
index 38642d45ab..0000000000
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <cryptodev_pmd.h>
-
-#include "zuc_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
-	{	/* ZUC (EIA3) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* ZUC (EEA3) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-/** Configure device */
-static int
-zuc_pmd_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-/** Start device */
-static int
-zuc_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-/** Stop device */
-static void
-zuc_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-zuc_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-
-/** Get device statistics */
-static void
-zuc_pmd_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
-
-		stats->enqueued_count += qp->qp_stats.enqueued_count;
-		stats->dequeued_count += qp->qp_stats.dequeued_count;
-
-		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
-		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
-	}
-}
-
-/** Reset device statistics */
-static void
-zuc_pmd_stats_reset(struct rte_cryptodev *dev)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
-
-		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-	}
-}
-
-
-/** Get device info */
-static void
-zuc_pmd_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *dev_info)
-{
-	struct zuc_private *internals = dev->data->dev_private;
-
-	if (dev_info != NULL) {
-		dev_info->driver_id = dev->driver_id;
-		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		/* No limit of number of sessions */
-		dev_info->sym.max_nb_sessions = 0;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = zuc_pmd_capabilities;
-	}
-}
-
-/** Release queue pair */
-static int
-zuc_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
-	if (dev->data->queue_pairs[qp_id] != NULL) {
-		struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
-
-		if (qp->processed_ops)
-			rte_ring_free(qp->processed_ops);
-
-		rte_free(dev->data->queue_pairs[qp_id]);
-		dev->data->queue_pairs[qp_id] = NULL;
-	}
-	return 0;
-}
-
-/** set a unique name for the queue pair based on its name, dev_id and qp_id */
-static int
-zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
-		struct zuc_qp *qp)
-{
-	unsigned n = snprintf(qp->name, sizeof(qp->name),
-			"zuc_pmd_%u_qp_%u",
-			dev->data->dev_id, qp->id);
-
-	if (n >= sizeof(qp->name))
-		return -1;
-
-	return 0;
-}
-
-/** Create a ring to place processed ops on */
-static struct rte_ring *
-zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
-		unsigned ring_size, int socket_id)
-{
-	struct rte_ring *r;
-
-	r = rte_ring_lookup(qp->name);
-	if (r) {
-		if (rte_ring_get_size(r) >= ring_size) {
-			ZUC_LOG(INFO, "Reusing existing ring %s"
-					" for processed packets",
-					 qp->name);
-			return r;
-		}
-
-		ZUC_LOG(ERR, "Unable to reuse existing ring %s"
-				" for processed packets",
-				 qp->name);
-		return NULL;
-	}
-
-	return rte_ring_create(qp->name, ring_size, socket_id,
-			RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
-{
-	struct zuc_qp *qp = NULL;
-	struct zuc_private *internals = dev->data->dev_private;
-
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		zuc_pmd_qp_release(dev, qp_id);
-
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("ZUC PMD Queue Pair", sizeof(*qp),
-					RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return (-ENOMEM);
-
-	qp->id = qp_id;
-	dev->data->queue_pairs[qp_id] = qp;
-
-	if (zuc_pmd_qp_set_unique_name(dev, qp))
-		goto qp_setup_cleanup;
-
-	qp->processed_ops = zuc_pmd_qp_create_processed_ops_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_ops == NULL)
-		goto qp_setup_cleanup;
-
-	qp->mb_mgr = internals->mb_mgr;
-	qp->sess_mp = qp_conf->mp_session;
-	qp->sess_mp_priv = qp_conf->mp_session_private;
-
-	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-
-	return 0;
-
-qp_setup_cleanup:
-	if (qp)
-		rte_free(qp);
-
-	return -1;
-}
-
-/** Returns the size of the ZUC session structure */
-static unsigned
-zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct zuc_session);
-}
-
-/** Configure a ZUC session from a crypto xform chain */
-static int
-zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
-		struct rte_crypto_sym_xform *xform,
-		struct rte_cryptodev_sym_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	int ret;
-
-	if (unlikely(sess == NULL)) {
-		ZUC_LOG(ERR, "invalid session struct");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		ZUC_LOG(ERR,
-			"Couldn't get object from session mempool");
-
-		return -ENOMEM;
-	}
-
-	ret = zuc_set_session_parameters(sess_private_data, xform);
-	if (ret != 0) {
-		ZUC_LOG(ERR, "failed configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sym_session_private_data(sess, dev->driver_id,
-		sess_private_data);
-
-	return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-zuc_pmd_sym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_sym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_sym_session_private_data(sess, index);
-
-	/* Zero out the whole structure */
-	if (sess_priv) {
-		memset(sess_priv, 0, sizeof(struct zuc_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_sym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-struct rte_cryptodev_ops zuc_pmd_ops = {
-		.dev_configure      = zuc_pmd_config,
-		.dev_start          = zuc_pmd_start,
-		.dev_stop           = zuc_pmd_stop,
-		.dev_close          = zuc_pmd_close,
-
-		.stats_get          = zuc_pmd_stats_get,
-		.stats_reset        = zuc_pmd_stats_reset,
-
-		.dev_infos_get      = zuc_pmd_info_get,
-
-		.queue_pair_setup   = zuc_pmd_qp_setup,
-		.queue_pair_release = zuc_pmd_qp_release,
-
-		.sym_session_get_size   = zuc_pmd_sym_session_get_size,
-		.sym_session_configure  = zuc_pmd_sym_session_configure,
-		.sym_session_clear      = zuc_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/drivers/crypto/zuc/version.map b/drivers/crypto/zuc/version.map
deleted file mode 100644
index c2e0723b4c..0000000000
--- a/drivers/crypto/zuc/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_22 {
-	local: *;
-};
diff --git a/drivers/crypto/zuc/zuc_pmd_private.h b/drivers/crypto/zuc/zuc_pmd_private.h
deleted file mode 100644
index d8684891ee..0000000000
--- a/drivers/crypto/zuc/zuc_pmd_private.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#ifndef _ZUC_PMD_PRIVATE_H_
-#define _ZUC_PMD_PRIVATE_H_
-
-#include <intel-ipsec-mb.h>
-
-#define CRYPTODEV_NAME_ZUC_PMD		crypto_zuc
-/**< ZUC PMD device name */
-
-/** ZUC PMD LOGTYPE DRIVER */
-extern int zuc_logtype_driver;
-#define ZUC_LOG(level, fmt, ...)  \
-	rte_log(RTE_LOG_ ## level, zuc_logtype_driver,  \
-			"%s()... line %u: " fmt "\n", __func__, __LINE__,  \
-				## __VA_ARGS__)
-
-#define ZUC_IV_KEY_LENGTH 16
-#define ZUC_DIGEST_LENGTH 4
-
-/** private data structure for each virtual ZUC device */
-struct zuc_private {
-	unsigned max_nb_queue_pairs;
-	/**< Max number of queue pairs supported by device */
-	MB_MGR *mb_mgr;
-	/**< Multi-buffer instance */
-};
-
-/** ZUC buffer queue pair */
-struct zuc_qp {
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	/**< Unique Queue Pair Name */
-	struct rte_ring *processed_ops;
-	/**< Ring for placing processed ops */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session Private Data Mempool */
-	struct rte_cryptodev_stats qp_stats;
-	/**< Queue pair statistics */
-	uint8_t temp_digest[ZUC_DIGEST_LENGTH];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-	MB_MGR *mb_mgr;
-	/**< Multi-buffer instance */
-} __rte_cache_aligned;
-
-enum zuc_operation {
-	ZUC_OP_ONLY_CIPHER,
-	ZUC_OP_ONLY_AUTH,
-	ZUC_OP_CIPHER_AUTH,
-	ZUC_OP_AUTH_CIPHER,
-	ZUC_OP_NOT_SUPPORTED
-};
-
-/** ZUC private session structure */
-struct zuc_session {
-	enum zuc_operation op;
-	enum rte_crypto_auth_operation auth_op;
-	uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
-	uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
-	uint16_t cipher_iv_offset;
-	uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-
-extern int
-zuc_set_session_parameters(struct zuc_session *sess,
-		const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
-
-
-
-#endif /* _ZUC_PMD_PRIVATE_H_ */
-- 
2.25.1

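Since the probe path now delegates to cryptodev_ipsec_mb_create(), the
way applications instantiate the device is unchanged. A minimal usage
sketch (the function name and argument string below are illustrative):

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>

/* Create the ZUC vdev at runtime and resolve it back to a device id. */
static int
create_zuc_vdev(void)
{
	const char *name = "crypto_zuc";

	if (rte_vdev_init(name, "max_nb_queue_pairs=2,socket_id=0") < 0)
		return -1;

	return rte_cryptodev_get_dev_id(name);
}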

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (7 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 08/10] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  2021-10-06 14:48           ` [dpdk-dev] [EXT] " Akhil Goyal
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 10/10] doc/rel_notes: added note for SW Crypto PMD change Ciara Power
  9 siblings, 1 reply; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Kai Ji, Ciara Power,
	Declan Doherty, Pablo de Lara

From: Kai Ji <kai.ji@intel.com>

Add in new chacha20_poly1305 support in ipsec_mb.
Add in new chacha20_poly1305 test vector for SGL test.

Signed-off-by: Kai Ji <kai.ji@intel.com>
Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v3:
  - Fixed some formatting.
  - Removed unnecessary get session function.

v2:
  - Added unused tag to session configure parameter.
  - Added release note.
  - Added documentation for PMD.
---
 app/test/test_cryptodev.c                     |  23 +
 app/test/test_cryptodev.h                     |   1 +
 app/test/test_cryptodev_aead_test_vectors.h   | 114 +++++
 doc/guides/cryptodevs/chacha20_poly1305.rst   |  99 ++++
 .../cryptodevs/features/chacha20_poly1305.ini |  35 ++
 doc/guides/cryptodevs/index.rst               |   1 +
 doc/guides/rel_notes/release_21_11.rst        |   5 +
 drivers/crypto/ipsec_mb/meson.build           |   1 +
 drivers/crypto/ipsec_mb/pmd_chacha_poly.c     | 482 ++++++++++++++++++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
 10 files changed, 768 insertions(+)
 create mode 100644 doc/guides/cryptodevs/chacha20_poly1305.rst
 create mode 100644 doc/guides/cryptodevs/features/chacha20_poly1305.ini
 create mode 100644 drivers/crypto/ipsec_mb/pmd_chacha_poly.c

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 16d770a17f..92c9bd0141 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -13455,6 +13455,14 @@ test_chacha20_poly1305_decrypt_test_case_rfc8439(void)
 	return test_authenticated_decryption(&chacha20_poly1305_case_rfc8439);
 }
 
+static int
+test_chacha20_poly1305_encrypt_SGL_out_of_place(void)
+{
+	return test_authenticated_encryption_SGL(
+		&chacha20_poly1305_case_2, OUT_OF_PLACE, 32,
+		chacha20_poly1305_case_2.plaintext.len);
+}
+
 #ifdef RTE_CRYPTO_SCHEDULER
 
 /* global AESNI worker IDs for the scheduler test */
@@ -14063,6 +14071,8 @@ static struct unit_test_suite cryptodev_chacha20_poly1305_testsuite  = {
 			test_chacha20_poly1305_encrypt_test_case_rfc8439),
 		TEST_CASE_ST(ut_setup, ut_teardown,
 			test_chacha20_poly1305_decrypt_test_case_rfc8439),
+		TEST_CASE_ST(ut_setup, ut_teardown,
+			test_chacha20_poly1305_encrypt_SGL_out_of_place),
 		TEST_CASES_END()
 	}
 };
@@ -14629,6 +14639,17 @@ test_cryptodev_cpu_aesni_mb(void)
 	return rc;
 }
 
+static int
+test_cryptodev_chacha_poly_mb(void)
+{
+	int32_t rc;
+	enum rte_security_session_action_type at = gbl_action_type;
+	rc = run_cryptodev_testsuite(
+			RTE_STR(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD));
+	gbl_action_type = at;
+	return rc;
+}
+
 static int
 test_cryptodev_openssl(void)
 {
@@ -14888,6 +14909,8 @@ REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
 REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
 REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
 	test_cryptodev_cpu_aesni_mb);
+REGISTER_TEST_COMMAND(cryptodev_chacha_poly_mb_autotest,
+	test_cryptodev_chacha_poly_mb);
 REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
 REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_autotest, test_cryptodev_aesni_gcm);
 REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_gcm_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 1cdd84d01f..90c8287365 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -59,6 +59,7 @@
 #define CRYPTODEV_NAME_SNOW3G_PMD	crypto_snow3g
 #define CRYPTODEV_NAME_KASUMI_PMD	crypto_kasumi
 #define CRYPTODEV_NAME_ZUC_PMD		crypto_zuc
+#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD	crypto_chacha20_poly1305
 #define CRYPTODEV_NAME_ARMV8_PMD	crypto_armv8
 #define CRYPTODEV_NAME_DPAA_SEC_PMD	crypto_dpaa_sec
 #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
index 73cc143f10..07292620a4 100644
--- a/app/test/test_cryptodev_aead_test_vectors.h
+++ b/app/test/test_cryptodev_aead_test_vectors.h
@@ -3930,4 +3930,118 @@ static const struct aead_test_data chacha20_poly1305_case_rfc8439 = {
 		.len = 16
 	}
 };
+
+static uint8_t chacha_aad_2[] = {
+			0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x4e, 0x91
+};
+
+static const struct aead_test_data chacha20_poly1305_case_2 = {
+	.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+	.key = {
+		.data = {
+				0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+				0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+				0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+				0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+		},
+		.len = 32
+	},
+	.iv = {
+		.data = {
+				0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04,
+				0x05, 0x06, 0x07, 0x08
+		},
+		.len = 12
+	},
+	.aad = {
+		.data = chacha_aad_2,
+		.len = 12
+	},
+	.plaintext = {
+		.data = {
+				0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+				0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+				0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+				0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+				0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+				0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+				0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+				0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+				0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+				0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+				0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+				0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+				0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+				0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+				0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+				0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+				0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+				0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+				0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+				0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+				0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+				0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+				0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+				0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+				0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+				0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+				0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+				0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+				0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+				0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+				0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+				0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+				0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+				0x9d
+		},
+		.len = 265
+	},
+	.ciphertext = {
+		.data = {
+				0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+				0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+				0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+				0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+				0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+				0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+				0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+				0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+				0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+				0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+				0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+				0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+				0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+				0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+				0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+				0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+				0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+				0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+				0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+				0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+				0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+				0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+				0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+				0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+				0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+				0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+				0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+				0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+				0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+				0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+				0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+				0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+				0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+				0x9b
+		},
+		.len = 265
+	},
+	.auth_tag = {
+		.data = {
+				0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, 0x22,
+				0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, 0x38
+		},
+		.len = 16
+	}
+};
 #endif /* TEST_CRYPTODEV_AEAD_TEST_VECTORS_H_ */
diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
new file mode 100644
index 0000000000..e5f7368d6d
--- /dev/null
+++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
@@ -0,0 +1,99 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2016-2019 Intel Corporation.
+
+Chacha20-poly1305 Crypto Poll Mode Driver
+=========================================
+
+The Chacha20-poly1305 PMD provides poll mode crypto driver support for
+utilizing `Intel IPSec Multi-buffer library <https://github.com/01org/intel-ipsec-mb>`_.
+
+Features
+--------
+
+Chacha20-poly1305 PMD has support for:
+
+AEAD algorithms:
+
+* RTE_CRYPTO_AEAD_CHACHA20_POLY1305
+
+
+Installation
+------------
+
+To build DPDK with the Chacha20-poly1305 PMD, the user is required to download
+the multi-buffer library from `here <https://github.com/01org/intel-ipsec-mb>`_
+and compile it on their system before building DPDK.
+The latest version of the library supported by this PMD is v1.0, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v1.0.zip>`_.
+
+After downloading the library, the user needs to unpack and compile it
+on their system before building DPDK:
+
+.. code-block:: console
+
+    make
+    make install
+
+NASM is required to build the library. Depending on the library version, a
+minimum NASM version might be needed (e.g. library v0.54 requires at least NASM 2.14).
+
+NASM is packaged for most operating systems. However, on some of them the
+packaged version is too old, so a manual installation is required. In that
+case, NASM can be downloaded from
+`NASM website <https://www.nasm.us/pub/nasm/releasebuilds/?C=M;O=D>`_.
+Once it is downloaded, extract it and follow these steps:
+
+.. code-block:: console
+
+    ./configure
+    make
+    make install
+
+.. note::
+
+   Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
+   If a GCC version lower than 5.0 is used, the workaround proposed at the
+   following link should be applied: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
+
+As a reference, the following table shows a mapping between the past DPDK versions
+and the external crypto libraries supported by them:
+
+.. _table_chacha20_poly1305_versions:
+
+.. table:: DPDK and external crypto library version compatibility
+
+   =============  ================================
+   DPDK version   Crypto library version
+   =============  ================================
+   21.11+         Multi-buffer library 1.0*
+   =============  ================================
+
+\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
+
+Initialization
+--------------
+
+In order to enable this virtual crypto PMD, the user must:
+
+* Build the multi buffer library (explained in Installation section).
+
+To use the PMD in an application, the user must:
+
+* Call rte_vdev_init("crypto_chacha20_poly1305") within the application.
+
+* Use --vdev="crypto_chacha20_poly1305" in the EAL options, which will call
+  rte_vdev_init() internally.
+
+The following parameters (all optional) can be provided in the previous two calls:
+
+* socket_id: Specify the socket where the memory for the device is going to be allocated
+  (by default, socket_id will be the socket where the core that is creating the PMD is running on).
+
+* max_nb_queue_pairs: Specify the maximum number of queue pairs in the device (8 by default).
+
+* max_nb_sessions: Specify the maximum number of sessions that can be created (2048 by default).
+
+Example:
+
+.. code-block:: console
+
+    --vdev="crypto_chacha20_poly1305,socket_id=0,max_nb_sessions=128"
diff --git a/doc/guides/cryptodevs/features/chacha20_poly1305.ini b/doc/guides/cryptodevs/features/chacha20_poly1305.ini
new file mode 100644
index 0000000000..3353e031c9
--- /dev/null
+++ b/doc/guides/cryptodevs/features/chacha20_poly1305.ini
@@ -0,0 +1,35 @@
+;
+; Supported features of the 'chacha20_poly1305' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto       = Y
+Sym operation chaining = Y
+Symmetric sessionless  = Y
+Non-Byte aligned data  = Y
+In Place SGL           = Y
+OOP SGL In LB  Out     = Y
+OOP LB  In LB  Out     = Y
+CPU crypto             = Y
+
+;
+; Supported crypto algorithms of the 'chacha20_poly1305' crypto driver.
+;
+[Cipher]
+
+;
+; Supported authentication algorithms of the 'chacha20_poly1305' crypto driver.
+;
+[Auth]
+
+;
+; Supported AEAD algorithms of the 'chacha20_poly1305' crypto driver.
+;
+[AEAD]
+CHACHA20-POLY1305 = Y
+
+;
+; Supported Asymmetric algorithms of the 'chacha20_poly1305' crypto driver.
+;
+[Asymmetric]
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 0f981c77b5..3dcc2ecd2e 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -16,6 +16,7 @@ Crypto Device Drivers
     bcmfs
     caam_jr
     ccp
+    chacha20_poly1305
     cnxk
     dpaa2_sec
     dpaa_sec
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 696541dab7..3beecb2392 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -76,6 +76,11 @@ New Features
   * Added support for partially encrypted digest when using auth-cipher
     operations.
 
+* **Added Chacha20-poly1305 Crypto PMD.**
+
+  * Added PMD to support chacha20-poly1305 algorithms to IPSec_MB PMD framework.
+  * Test vector added for chacha20-poly1305 SGL test.
+
 * **Updated Marvell cnxk crypto PMD.**
 
   * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index a1619c78ac..6e0a5f8004 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -25,6 +25,7 @@ sources = files('rte_ipsec_mb_pmd.c',
 		'rte_ipsec_mb_pmd_ops.c',
 		'pmd_aesni_mb.c',
 		'pmd_aesni_gcm.c',
+		'pmd_chacha_poly.c',
 		'pmd_kasumi.c',
 		'pmd_snow3g.c',
 		'pmd_zuc.c'
diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
new file mode 100644
index 0000000000..814bc0761c
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#if defined(RTE_LIB_SECURITY)
+#define AESNI_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_ether.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#endif
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+#define CHACHA20_POLY1305_IV_LENGTH 12
+#define CHACHA20_POLY1305_DIGEST_LENGTH 16
+#define CHACHA20_POLY1305_KEY_SIZE  32
+
+static const
+struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
+	{/* CHACHA20-POLY1305 */
+	    .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+	    {.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+		    {.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 64,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0},
+			    },
+			}
+		},}
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t pmd_driver_id_chacha20_poly1305;
+
+/** CHACHA20 POLY1305 private session structure */
+struct chacha20_poly1305_session {
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	uint16_t aad_length;
+	/**< AAD length */
+	uint16_t req_digest_length;
+	/**< Requested digest length */
+	uint16_t gen_digest_length;
+	/**< Generated digest length */
+	uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
+	enum ipsec_mb_operation op;
+} __rte_cache_aligned;
+
+struct chacha20_poly1305_qp_data {
+	struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
+	uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+};
+
+/** Parse crypto xform chain and set private session parameters. */
+static int
+chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
+		void *priv_sess, const struct rte_crypto_sym_xform *xform)
+{
+	struct chacha20_poly1305_session *sess = priv_sess;
+	const struct rte_crypto_sym_xform *auth_xform;
+	const struct rte_crypto_sym_xform *cipher_xform;
+	const struct rte_crypto_sym_xform *aead_xform;
+
+	uint8_t key_length;
+	const uint8_t *key;
+	enum ipsec_mb_operation mode;
+	int ret = 0;
+
+	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, &aead_xform);
+	if (ret)
+		return ret;
+
+	sess->op = mode;
+
+	switch (sess->op) {
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+		if (aead_xform->aead.algo !=
+				RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+			IPSEC_MB_LOG(ERR,
+			"The only combined operation supported is CHACHA20 POLY1305");
+			ret = -ENOTSUP;
+			goto error_exit;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = aead_xform->aead.iv.offset;
+		sess->iv.length = aead_xform->aead.iv.length;
+		key_length = aead_xform->aead.key.length;
+		key = aead_xform->aead.key.data;
+		sess->aad_length = aead_xform->aead.aad_length;
+		sess->req_digest_length = aead_xform->aead.digest_length;
+		break;
+	default:
+		IPSEC_MB_LOG(
+		    ERR, "Wrong xform type, has to be AEAD or authentication");
+		ret = -ENOTSUP;
+		goto error_exit;
+	}
+
+	/* IV check */
+	if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
+		sess->iv.length != 0) {
+		IPSEC_MB_LOG(ERR, "Wrong IV length");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+
+	/* Check key length */
+	if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
+		IPSEC_MB_LOG(ERR, "Invalid key length");
+		ret = -EINVAL;
+		goto error_exit;
+	} else {
+		memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
+	}
+
+	/* Digest check */
+	if (sess->req_digest_length !=  CHACHA20_POLY1305_DIGEST_LENGTH) {
+		IPSEC_MB_LOG(ERR, "Invalid digest length");
+		ret = -EINVAL;
+		goto error_exit;
+	} else {
+		sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
+	}
+
+error_exit:
+	return ret;
+}
+
+/**
+ * Process a crypto operation, calling
+ * the direct chacha poly API from the multi buffer library.
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		chacha poly session
+ *
+ * @return
+ * - Return 0 if success
+ */
+static int
+chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
+		struct chacha20_poly1305_session *session)
+{
+	struct chacha20_poly1305_qp_data *qp_data =
+					ipsec_mb_get_qp_private_data(qp);
+	uint8_t *src, *dst;
+	uint8_t *iv_ptr;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct rte_mbuf *m_src = sym_op->m_src;
+	uint32_t offset, data_offset, data_length;
+	uint32_t part_len, data_len;
+	int total_len;
+	uint8_t *tag;
+	unsigned int oop = 0;
+
+	offset = sym_op->aead.data.offset;
+	data_offset = offset;
+	data_length = sym_op->aead.data.length;
+	RTE_ASSERT(m_src != NULL);
+
+	while (offset >= m_src->data_len && data_length != 0) {
+		offset -= m_src->data_len;
+		m_src = m_src->next;
+
+		RTE_ASSERT(m_src != NULL);
+	}
+
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
+	data_len = m_src->data_len - offset;
+	part_len = (data_len < data_length) ? data_len :
+			data_length;
+
+	/* In-place */
+	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
+		dst = src;
+	/* Out-of-place */
+	else {
+		oop = 1;
+		/* Segmented destination buffer is not supported
+		 * if operation is Out-of-place
+		 */
+		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
+		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+					data_offset);
+	}
+
+	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+
+	IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
+				&qp_data->chacha20_poly1305_ctx_data,
+				iv_ptr,	sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
+
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
+		IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
+				session->key,
+				&qp_data->chacha20_poly1305_ctx_data,
+				dst, src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			if (dst == NULL || src == NULL) {
+				IPSEC_MB_LOG(ERR, "Invalid src or dst input");
+				return -EINVAL;
+			}
+			IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
+					session->key,
+					&qp_data->chacha20_poly1305_ctx_data,
+					dst, src, (uint64_t)part_len);
+			total_len -= part_len;
+			if (total_len < 0) {
+				IPSEC_MB_LOG(ERR, "Invalid part len");
+				return -EINVAL;
+			}
+		}
+
+		tag = sym_op->aead.digest.data;
+		IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
+					&qp_data->chacha20_poly1305_ctx_data,
+					tag, session->gen_digest_length);
+
+	} else {
+		IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
+					session->key,
+					&qp_data->chacha20_poly1305_ctx_data,
+					dst, src, (uint64_t)part_len);
+
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			if (dst == NULL || src == NULL) {
+				IPSEC_MB_LOG(ERR, "Invalid src or dst input");
+				return -EINVAL;
+			}
+			IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
+					session->key,
+					&qp_data->chacha20_poly1305_ctx_data,
+					dst, src, (uint64_t)part_len);
+			total_len -= part_len;
+			if (total_len < 0) {
+				IPSEC_MB_LOG(ERR, "Invalid part len");
+				return -EINVAL;
+			}
+		}
+
+		tag = qp_data->temp_digest;
+		IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
+					&qp_data->chacha20_poly1305_ctx_data,
+					tag, session->gen_digest_length);
+	}
+
+	return 0;
+}
+
+/**
+ * Process a completed chacha poly op
+ *
+ * @param qp		Queue Pair to process
+ * @param op		Crypto operation
+ * @param sess		Crypto session
+ *
+ * @return
+ * - void
+ */
+static void
+post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op,
+		struct chacha20_poly1305_session *session)
+{
+	struct chacha20_poly1305_qp_data *qp_data =
+					ipsec_mb_get_qp_private_data(qp);
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	/* Verify digest if required */
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
+			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
+		uint8_t *digest = op->sym->aead.digest.data;
+		uint8_t *tag = qp_data->temp_digest;
+
+#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				digest, session->req_digest_length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, session->req_digest_length);
+#endif
+		if (memcmp(tag, digest,	session->req_digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+	}
+
+}
+
+/**
+ * Process a completed Chacha20_poly1305 request
+ *
+ * @param qp		Queue Pair to process
+ * @param op		Crypto operation
+ * @param sess		Crypto session
+ *
+ * @return
+ * - void
+ */
+static void
+handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op,
+		struct chacha20_poly1305_session *sess)
+{
+	post_process_chacha20_poly1305_crypto_op(qp, op, sess);
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct chacha20_poly1305_session));
+		memset(op->sym->session, 0,
+			rte_cryptodev_sym_get_existing_header_session_size(
+				op->sym->session));
+		rte_mempool_put(qp->sess_mp_priv, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+}
+
+static uint16_t
+chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct chacha20_poly1305_session *sess;
+	struct ipsec_mb_qp *qp = queue_pair;
+
+	int retval = 0;
+	unsigned int i = 0, nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	for (i = 0; i < nb_dequeued; i++) {
+
+		sess = ipsec_mb_get_session_private(qp, ops[i]);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
+	}
+
+	qp->stats.dequeued_count += i;
+
+	return i;
+}
+
+struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
+
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
+
+	.dev_infos_get = ipsec_mb_pmd_info_get,
+
+	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
+
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_chacha20_poly1305_pmd_ops =
+						&chacha20_poly1305_pmd_ops;
+
+static int
+cryptodev_chacha20_poly1305_probe(struct rte_vdev_device *vdev)
+{
+	return cryptodev_ipsec_mb_create(vdev,
+			IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305);
+}
+
+static struct rte_vdev_driver cryptodev_chacha20_poly1305_pmd_drv = {
+	.probe = cryptodev_chacha20_poly1305_probe,
+	.remove = cryptodev_ipsec_mb_remove
+};
+
+static struct cryptodev_driver chacha20_poly1305_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
+					cryptodev_chacha20_poly1305_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
+					cryptodev_chacha20_poly1305_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
+			       "max_nb_queue_pairs=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(chacha20_poly1305_crypto_drv,
+				cryptodev_chacha20_poly1305_pmd_drv.driver,
+				pmd_driver_id_chacha20_poly1305);
+
+/* Constructor function to register chacha20_poly1305 PMD */
+RTE_INIT(ipsec_mb_register_chacha20_poly1305)
+{
+	struct ipsec_mb_pmd_data *chacha_poly_data
+		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
+
+	chacha_poly_data->caps = chacha20_poly1305_capabilities;
+	chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
+	chacha_poly_data->feature_flags =
+		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+		RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+	chacha_poly_data->internals_priv_size = 0;
+	chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
+	chacha_poly_data->qp_priv_size =
+			sizeof(struct chacha20_poly1305_qp_data);
+	chacha_poly_data->session_configure =
+			chacha20_poly1305_session_configure;
+	chacha_poly_data->session_priv_size =
+			sizeof(struct chacha20_poly1305_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index b6a98a85ba..db36584f3a 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -49,6 +49,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
 #define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
 /**< IPSEC Multi buffer PMD zuc device name */
 
+#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD crypto_chacha20_poly1305
+/**< IPSEC Multi buffer PMD chacha20_poly1305 device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -62,6 +65,7 @@ enum ipsec_mb_pmd_types {
 	IPSEC_MB_PMD_TYPE_KASUMI,
 	IPSEC_MB_PMD_TYPE_SNOW3G,
 	IPSEC_MB_PMD_TYPE_ZUC,
+	IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -85,6 +89,7 @@ extern uint8_t pmd_driver_id_aesni_gcm;
 extern uint8_t pmd_driver_id_kasumi;
 extern uint8_t pmd_driver_id_snow3g;
 extern uint8_t pmd_driver_id_zuc;
+extern uint8_t pmd_driver_id_chacha20_poly1305;
 
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
@@ -101,6 +106,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 		return pmd_driver_id_snow3g;
 	case IPSEC_MB_PMD_TYPE_ZUC:
 		return pmd_driver_id_zuc;
+	case IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305:
+		return pmd_driver_id_chacha20_poly1305;
 	default:
 		break;
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* [dpdk-dev] [PATCH v3 10/10] doc/rel_notes: added note for SW Crypto PMD change
  2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
                           ` (8 preceding siblings ...)
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
@ 2021-09-29 16:30         ` Ciara Power
  9 siblings, 0 replies; 30+ messages in thread
From: Ciara Power @ 2021-09-29 16:30 UTC (permalink / raw)
  To: dev; +Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Ciara Power

The SW Crypto PMDs were consolidated into one IPSec_MB PMD.
This patch adds a release note to highlight this change.

Signed-off-by: Ciara Power <ciara.power@intel.com>

---
v3: Modified release note wording and added CHACHA20-POLY1305.
---
 doc/guides/rel_notes/release_21_11.rst | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 3beecb2392..7c5a46470c 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -81,6 +81,21 @@ New Features
  * Added PMD to support the chacha20-poly1305 algorithm in the IPSec_MB PMD framework.
  * Added a test vector for the chacha20-poly1305 SGL test.
 
+* **Consolidated SW Crypto PMDs with IPSec_MB dependency.**
+
+  Intel SW Crypto PMDs that depend on the IPSec_MB library were
+  consolidated into a single source folder so that common code can
+  be shared between them. The usage and EAL options of these PMDs
+  are the same as before.
+  This change includes:
+
+  * AESNI_MB PMD.
+  * AESNI_GCM PMD.
+  * KASUMI PMD.
+  * SNOW3G PMD.
+  * ZUC PMD.
+  * CHACHA20-POLY1305 PMD.
+
 * **Updated Marvell cnxk crypto PMD.**
 
   * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
-- 
2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework Ciara Power
@ 2021-09-30  9:51           ` Kinsella, Ray
  2021-10-06 13:50           ` [dpdk-dev] [EXT] " Akhil Goyal
  1 sibling, 0 replies; 30+ messages in thread
From: Kinsella, Ray @ 2021-09-30  9:51 UTC (permalink / raw)
  To: Ciara Power, dev
  Cc: roy.fan.zhang, piotrx.bronowski, gakhil, Thomas Monjalon, Pablo de Lara



On 29/09/2021 17:30, Ciara Power wrote:
> From: Fan Zhang <roy.fan.zhang@intel.com>
> 
> This patch introduces the new framework to share common code between
> the SW crypto PMDs that depend on the intel-ipsec-mb library.
> This change helps to reduce future effort on code maintenance and
> feature updates.
> 
> The PMDs that will be added to this framework in subsequent patches are:
>   - AESNI MB
>   - AESNI GCM
>   - KASUMI
>   - SNOW3G
>   - ZUC
> 
> The use of these PMDs will not change; they will still be supported for
> x86, and will use the same EAL args as before.
> 
> The minimum required version for the intel-ipsec-mb library is now v1.0.
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> 
> ---
> v3:
>   - Updated intel-ipsec-mb macros.
>   - Added use of auto init function for IMB_MGR.
>   - Added detail to commit log.
> v2:
>   - Added qp NULL check in get stats function.
>   - Added maintainers file entry.
>   - Replaced strlcpy with rte_strlcpy.
> ---
Acked-by: Ray Kinsella <mdr@ashroe.eu>
(for the series).

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [PATCH v3 06/10] drivers/crypto: move snow3g PMD to IPsec-mb framework
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 06/10] drivers/crypto: move snow3g " Ciara Power
@ 2021-10-04 12:45           ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 30+ messages in thread
From: De Lara Guarch, Pablo @ 2021-10-04 12:45 UTC (permalink / raw)
  To: Power, Ciara, dev
  Cc: Zhang, Roy Fan, Bronowski, PiotrX, gakhil, Thomas Monjalon, Ray Kinsella

Hi Ciara,

> -----Original Message-----
> From: Power, Ciara <ciara.power@intel.com>
> Sent: Wednesday, September 29, 2021 5:31 PM
> To: dev@dpdk.org
> Cc: Zhang, Roy Fan <roy.fan.zhang@intel.com>; Bronowski, PiotrX
> <piotrx.bronowski@intel.com>; gakhil@marvell.com; Power, Ciara
> <ciara.power@intel.com>; Thomas Monjalon <thomas@monjalon.net>; De Lara
> Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Ray Kinsella
> <mdr@ashroe.eu>
> Subject: [PATCH v3 06/10] drivers/crypto: move snow3g PMD to IPsec-mb
> framework
> 
> From: Piotr Bronowski <piotrx.bronowski@intel.com>
> 
> This patch removes the crypto/snow3g folder and gathers all snow3g PMD
> implementation-specific details into a single file, pmd_snow3g.c in the
> crypto/ipsec_mb folder.
> 
> Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> 
> ---
> v3: Removed extra empty lines.
> v2: Updated maintainers file.
> ---
>  MAINTAINERS                                   |   8 +-
>  doc/guides/cryptodevs/snow3g.rst              |   3 +-
>  drivers/crypto/ipsec_mb/meson.build           |   3 +-
>  .../pmd_snow3g.c}                             | 457 ++++++++----------
>  .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
>  drivers/crypto/meson.build                    |   1 -
>  drivers/crypto/snow3g/meson.build             |  24 -
>  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c    | 323 -------------
>  drivers/crypto/snow3g/snow3g_pmd_private.h    |  84 ----
>  drivers/crypto/snow3g/version.map             |   3 -
>  10 files changed, 205 insertions(+), 708 deletions(-)
>  rename drivers/crypto/{snow3g/rte_snow3g_pmd.c => ipsec_mb/pmd_snow3g.c} (57%)
>  delete mode 100644 drivers/crypto/snow3g/meson.build
>  delete mode 100644 drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
>  delete mode 100644 drivers/crypto/snow3g/snow3g_pmd_private.h
>  delete mode 100644 drivers/crypto/snow3g/version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 794bad11c2..28855222d6 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS

...

> -	case SNOW3G_OP_AUTH_CIPHER:
> +	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
> +	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
>  		processed_ops = process_snow3g_hash_op(qp, ops, session,
>  				num_ops);
>  		process_snow3g_cipher_op(qp, ops, session, processed_ops);
> @@ -358,9 +343,9 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
>  		}
>  	}
> 
> -	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
> +	enqueued_ops = rte_ring_enqueue_burst(qp->ingress_queue,
>  			(void **)ops, processed_ops, NULL);

Looks like there is a bug here: we don't need to enqueue the operations back onto the ring at this point.
We used to enqueue to the ring when the crypto processing was done at enqueue time, but that processing
is now part of dequeue, and the operations have already been taken off the ring.
As far as I know, the only ring enqueue should be the one done in enqueue_burst.
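
A rough sketch of what I would expect at the end of process_ops() instead
(untested, names taken from this patch):

	/* The ops were already taken off qp->ingress_queue by the burst
	 * dequeue function, so re-enqueuing them here would hand them out
	 * twice. Just account for them and return.
	 */
	qp->stats.dequeued_count += processed_ops;
	return processed_ops;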

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework Ciara Power
  2021-09-30  9:51           ` Kinsella, Ray
@ 2021-10-06 13:50           ` Akhil Goyal
  2021-10-06 15:45             ` Power, Ciara
  1 sibling, 1 reply; 30+ messages in thread
From: Akhil Goyal @ 2021-10-06 13:50 UTC (permalink / raw)
  To: Ciara Power, dev
  Cc: roy.fan.zhang, piotrx.bronowski, Thomas Monjalon, Pablo de Lara,
	Ray Kinsella

> From: Fan Zhang <roy.fan.zhang@intel.com>
> 
> This patch introduces the new framework to share common code between
> the SW crypto PMDs that depend on the intel-ipsec-mb library.
> This change helps to reduce future effort on code maintenance and
> feature updates.
> 
> The PMDs that will be added to this framework in subsequent patches are:
>   - AESNI MB
>   - AESNI GCM
>   - KASUMI
>   - SNOW3G
>   - ZUC
> 
> The use of these PMDs will not change; they will still be supported for
> x86, and will use the same EAL args as before.
> 
> The minimum required version for the intel-ipsec-mb library is now v1.0.
> 
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> 
> ---
> v3:
>   - Updated intel-ipsec-mb macros.
>   - Added use of auto init function for IMB_MGR.
>   - Added detail to commit log.
> v2:
>   - Added qp NULL check in get stats function.
>   - Added maintainers file entry.
>   - Replaced strlcpy with rte_strlcpy.
> ---
>  MAINTAINERS                                   |   4 +
>  drivers/crypto/ipsec_mb/meson.build           |  27 ++
>  drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    | 169 ++++++++++
>  .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 291 ++++++++++++++++++
>  .../ipsec_mb/rte_ipsec_mb_pmd_private.h       | 275 +++++++++++++++++
>  drivers/crypto/ipsec_mb/version.map           |   3 +
>  drivers/crypto/meson.build                    |   1 +
>  7 files changed, 770 insertions(+)
>  create mode 100644 drivers/crypto/ipsec_mb/meson.build
>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
>  create mode 100644 drivers/crypto/ipsec_mb/version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 1e0d303394..f1aaf7d408 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1065,6 +1065,10 @@ F: drivers/common/qat/
>  F: doc/guides/cryptodevs/qat.rst
>  F: doc/guides/cryptodevs/features/qat.ini
> 
> +IPsec MB

Not sure if the name ipsec_mb is appropriate for a crypto PMD which
also supports algorithms that are not specified for IPsec, like ZUC/SNOW/KASUMI.
Moreover, this is a crypto PMD and not an IPsec PMD.

> +M: Fan Zhang <roy.fan.zhang@intel.com>
> +F: drivers/crypto/ipsec_mb/
> +
>  KASUMI
>  M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
>  F: drivers/crypto/kasumi/
> diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
> new file mode 100644
> index 0000000000..3d48da60ed
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/meson.build
> @@ -0,0 +1,27 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2018 - 2021 Intel Corporation

These are newly created files; IMO the copyright should be 2021 only.
Check the other files as well.

> +
> +IMB_required_ver = '1.0.0'
> +lib = cc.find_library('IPSec_MB', required: false)
> +if not lib.found()
> +	build = false
> +	reason = 'missing dependency, "libIPSec_MB"'
> +else
> +	ext_deps += lib
> +
> +	# version comes with quotes, so we split based on " and take the middle
> +	imb_ver = cc.get_define('IMB_VERSION_STR',
> +		prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
> +
> +	if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
> +		reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
> +				IMB_required_ver, imb_ver)
> +		build = false
> +	endif
> +
> +endif
> +
> +sources = files('rte_ipsec_mb_pmd.c',
> +		'rte_ipsec_mb_pmd_ops.c',
> +		)
> +deps += ['bus_vdev', 'net', 'security']
> diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
> new file mode 100644
> index 0000000000..3f2cefed52
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
> @@ -0,0 +1,169 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2021 Intel Corporation
> + */
> +
> +#include <rte_bus_vdev.h>
> +#include <rte_common.h>
> +#include <rte_cryptodev.h>
> +
> +#include "rte_ipsec_mb_pmd_private.h"
> +
> +RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
> +
> +struct ipsec_mb_pmd_data ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
> +int ipsec_mb_logtype_driver;
> +enum ipsec_mb_vector_mode vector_mode;
> +
> +/**
> + * Generic burst enqueue, place crypto operations on ingress queue for
> + * processing.
> + *
> + * @param __qp         Queue Pair to process
> + * @param ops          Crypto operations for processing
> + * @param nb_ops       Number of crypto operations for processing
> + *
> + * @return
> + * - Number of crypto operations enqueued
> + */
> +static uint16_t
> +ipsec_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
> +		uint16_t nb_ops)
> +{
> +	struct ipsec_mb_qp *qp = __qp;
> +
> +	unsigned int nb_enqueued;
> +
> +	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
> +			(void **)ops, nb_ops, NULL);
> +
> +	qp->stats.enqueued_count += nb_enqueued;
> +	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
> +
> +	return nb_enqueued;
> +}
> +
> +int
> +cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
> +	enum ipsec_mb_pmd_types pmd_type)
> +{
> +	struct rte_cryptodev *dev;
> +	struct ipsec_mb_private *internals;
> +	struct ipsec_mb_pmd_data *pmd_data = &ipsec_mb_pmds[pmd_type];
> +	struct rte_cryptodev_pmd_init_params init_params = {};
> +	const char *name, *args;
> +	int retval;
> +
> +	if (vector_mode == IPSEC_MB_NOT_SUPPORTED) {
> +		/* Check CPU for supported vector instruction set */
> +		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
> +			vector_mode = IPSEC_MB_AVX512;
> +		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
> +			vector_mode = IPSEC_MB_AVX2;
> +		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
> +			vector_mode = IPSEC_MB_AVX;
> +		else
> +			vector_mode = IPSEC_MB_SSE;
> +	}
> +
> +	init_params.private_data_size = sizeof(struct ipsec_mb_private) +
> +		pmd_data->internals_priv_size;
> +	init_params.max_nb_queue_pairs =
> +		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS;
> +	init_params.socket_id = rte_socket_id();
> +
> +	name = rte_vdev_device_name(vdev);
> +	if (name == NULL)
> +		return -EINVAL;
> +
> +	args = rte_vdev_device_args(vdev);
> +
> +	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
> +	if (retval) {
> +		IPSEC_MB_LOG(
> +		    ERR, "Failed to parse initialisation arguments[%s]", args);
> +		return -EINVAL;
> +	}
> +
> +	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
> +	if (dev == NULL) {
> +		IPSEC_MB_LOG(ERR, "driver %s: create failed",
> +			     init_params.name);
> +		return -ENODEV;
> +	}
> +
> +	/* Set vector instructions mode supported */
> +	internals = dev->data->dev_private;
> +	internals->pmd_type = pmd_type;
> +	internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;
> +
> +	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
> +	if (dev->driver_id == UINT8_MAX) {
> +		IPSEC_MB_LOG(ERR, "driver %s: create failed",
> +			     init_params.name);
> +		return -ENODEV;
> +	}
> +	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
> +	dev->enqueue_burst = ipsec_mb_pmd_enqueue_burst;
> +	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
> +
> +	if (pmd_data->dev_config) {
> +		retval = (*pmd_data->dev_config)(dev);
> +		if (retval < 0) {
> +			IPSEC_MB_LOG(ERR,
> +				"Failed to configure device %s", name);
> +			rte_cryptodev_pmd_destroy(dev);
> +			return retval;
> +		}
> +	}
> +
> +	dev->feature_flags = pmd_data->feature_flags;
> +
> +	switch (vector_mode) {
> +	case IPSEC_MB_AVX512:
> +		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
> +		break;
> +	case IPSEC_MB_AVX2:
> +		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
> +		break;
> +	case IPSEC_MB_AVX:
> +		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
> +		break;
> +	case IPSEC_MB_SSE:
> +		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
> +		break;
> +	default:
> +		break;
> +	}
> +
> +	IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
> +		     imb_get_version_str());
> +
> +	return 0;
> +}
> +
> +int
> +cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev)
> +{
> +	struct rte_cryptodev *cryptodev;
> +	const char *name;
> +
> +	name = rte_vdev_device_name(vdev);
> +	if (name == NULL)
> +		return -EINVAL;
> +
> +	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
> +	if (cryptodev == NULL)
> +		return -ENODEV;
> +
> +	if (RTE_PER_LCORE(mb_mgr)) {
> +		free_mb_mgr(RTE_PER_LCORE(mb_mgr));
> +		RTE_PER_LCORE(mb_mgr) = NULL;
> +	}
> +
> +	if (cryptodev->security_ctx) {
> +		rte_free(cryptodev->security_ctx);
> +		cryptodev->security_ctx = NULL;
> +	}
> +
> +	return rte_cryptodev_pmd_destroy(cryptodev);
> +}
> diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
> new file mode 100644
> index 0000000000..1146297216
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
> @@ -0,0 +1,291 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2021 Intel Corporation
> + */
> +
> +#include <string.h>
> +
> +#include <rte_common.h>
> +#include <rte_malloc.h>
> +
> +#include "rte_ipsec_mb_pmd_private.h"
> +
> +/** Configure device */
> +int
> +ipsec_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
> +		    __rte_unused struct rte_cryptodev_config *config)
> +{
> +	return 0;
> +}
> +
> +/** Start device */
> +int
> +ipsec_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
> +{
> +	return 0;
> +}
> +
> +/** Stop device */
> +void
> +ipsec_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
> +{
> +}
> +
> +/** Close device */
> +int
> +ipsec_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
> +{
> +	return 0;
> +}
> +
> +/** Get device statistics */
> +void
> +ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
> +		struct rte_cryptodev_stats *stats)

I believe one instance of the ipsec_mb PMD will support only
one kind of operation (aesni_mb/aesni_gcm/zuc/snow/kasumi).
This cannot be changed during the lifetime of the process. Right?

> +{
> +	int qp_id;
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
> +		if (qp == NULL) {
> +			IPSEC_MB_LOG(DEBUG, "Uninitialised qp %d", qp_id);
> +			continue;
> +		}
> +
> +		stats->enqueued_count += qp->stats.enqueued_count;
> +		stats->dequeued_count += qp->stats.dequeued_count;
> +
> +		stats->enqueue_err_count += qp->stats.enqueue_err_count;
> +		stats->dequeue_err_count += qp->stats.dequeue_err_count;
> +	}
> +}
> +
> +/** Reset device statistics */
> +void
> +ipsec_mb_pmd_stats_reset(struct rte_cryptodev *dev)
> +{
> +	int qp_id;
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
> +
> +		memset(&qp->stats, 0, sizeof(qp->stats));
> +	}
> +}
> +
> +/** Get device info */
> +void
> +ipsec_mb_pmd_info_get(struct rte_cryptodev *dev,
> +		struct rte_cryptodev_info *dev_info)
> +{
> +	struct ipsec_mb_private *internals = dev->data->dev_private;
> +	struct ipsec_mb_pmd_data *pmd_info =
> +		&ipsec_mb_pmds[internals->pmd_type];
> +
> +	if (dev_info != NULL) {
> +		dev_info->driver_id = dev->driver_id;
> +		dev_info->feature_flags = dev->feature_flags;
> +		dev_info->capabilities = pmd_info->caps;
> +		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
> +		/* No limit of number of sessions */
> +		dev_info->sym.max_nb_sessions = 0;
> +	}
> +}
> +
> +/** Release queue pair */
> +int
> +ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
> +{
> +	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
> +	struct rte_ring *r = NULL;
> +
> +	if (qp != NULL) {
> +		r = rte_ring_lookup(qp->name);
> +		if (r)
> +			rte_ring_free(r);
> +		rte_free(qp);
> +		dev->data->queue_pairs[qp_id] = NULL;
> +	}
> +	return 0;
> +}
> +
> +/** Set a unique name for the queue pair */
> +int
> +ipsec_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
> +					   struct ipsec_mb_qp *qp)
> +{
> +	uint32_t n =
> +	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
> +		     dev->data->dev_id, qp->id);
> +
> +	if (n >= sizeof(qp->name))
> +		return -1;
> +
> +	return 0;
> +}
> +
> +/** Create a ring to place processed operations on */
> +static struct rte_ring
> +*ipsec_mb_pmd_qp_create_processed_ops_ring(
> +	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
> +{
> +	struct rte_ring *r;
> +	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
> +
> +	unsigned int n = rte_strlcpy(ring_name, qp->name, sizeof(ring_name));
> +
> +	if (n >= sizeof(ring_name))
> +		return NULL;
> +
> +	r = rte_ring_lookup(ring_name);
> +	if (r) {
> +		if (rte_ring_get_size(r) >= ring_size) {
> +			IPSEC_MB_LOG(
> +			    INFO, "Reusing existing ring %s for processed ops",
> +			    ring_name);
> +			return r;
> +		}
> +		IPSEC_MB_LOG(
> +		    ERR, "Unable to reuse existing ring %s for processed ops",
> +		    ring_name);
> +		return NULL;
> +	}
> +
> +	return rte_ring_create(ring_name, ring_size, socket_id,
> +			       RING_F_SP_ENQ | RING_F_SC_DEQ);
> +}
> +
> +/** Setup a queue pair */
> +int
> +ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
> +				const struct rte_cryptodev_qp_conf *qp_conf,
> +				int socket_id)
> +{
> +	struct ipsec_mb_qp *qp = NULL;
> +	struct ipsec_mb_private *internals = dev->data->dev_private;
> +	struct ipsec_mb_pmd_data *pmd_data =
> +		&ipsec_mb_pmds[internals->pmd_type];
> +	uint32_t qp_size;
> +	int ret = -1;
> +
> +	/* Free memory prior to re-allocation if needed. */
> +	if (dev->data->queue_pairs[qp_id] != NULL)
> +		ipsec_mb_pmd_qp_release(dev, qp_id);
> +
> +	qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
> +	/* Allocate the queue pair data structure. */
> +	qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
> +				RTE_CACHE_LINE_SIZE, socket_id);
> +	if (qp == NULL)
> +		return -ENOMEM;
> +
> +	qp->id = qp_id;
> +	dev->data->queue_pairs[qp_id] = qp;
> +	if (ipsec_mb_pmd_qp_set_unique_name(dev, qp))
> +		goto qp_setup_cleanup;
> +
> +	qp->pmd_type = internals->pmd_type;
> +	qp->sess_mp = qp_conf->mp_session;
> +	qp->sess_mp_priv = qp_conf->mp_session_private;
> +
> +	qp->ingress_queue = ipsec_mb_pmd_qp_create_processed_ops_ring(qp,
> +		qp_conf->nb_descriptors, socket_id);
> +	if (qp->ingress_queue == NULL) {
> +		ret = -1;

-1??

> +		goto qp_setup_cleanup;
> +	}
> +
> +	qp->mb_mgr = alloc_init_mb_mgr();
> +	if (!qp->mb_mgr) {
> +		ret = -ENOMEM;
> +		goto qp_setup_cleanup;
> +	}
> +
> +	memset(&qp->stats, 0, sizeof(qp->stats));
> +
> +	if (pmd_data->queue_pair_configure) {
> +		ret = pmd_data->queue_pair_configure(qp);
> +		if (ret < 0)
> +			goto qp_setup_cleanup;
> +	}
> +
> +	return 0;
> +
> +qp_setup_cleanup:
> +	if (qp->mb_mgr)
> +		free_mb_mgr(qp->mb_mgr);
> +	if (qp)
> +		rte_free(qp);
> +	return ret;
> +}
> +
> +/** Return the size of the specific pmd session structure */
> +unsigned
> +ipsec_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev)
> +{
> +	struct ipsec_mb_private *internals = dev->data->dev_private;
> +	struct ipsec_mb_pmd_data *pmd_data =
> +		&ipsec_mb_pmds[internals->pmd_type];
> +
> +	return pmd_data->session_priv_size;
> +}
> +
> +/** Configure pmd specific multi-buffer session from a crypto xform chain */
> +int
> +ipsec_mb_pmd_sym_session_configure(
> +	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
> +	struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool)
> +{
> +	void *sess_private_data;
> +	struct ipsec_mb_private *internals = dev->data->dev_private;
> +	struct ipsec_mb_pmd_data *pmd_data =
> +		&ipsec_mb_pmds[internals->pmd_type];
> +	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
> +	int ret = 0;
> +
> +	if (!mb_mgr)
> +		return -ENOMEM;
> +
> +	if (unlikely(sess == NULL)) {
> +		IPSEC_MB_LOG(ERR, "invalid session struct");
> +		free_mb_mgr(mb_mgr);
> +		return -EINVAL;
> +	}
> +
> +	if (rte_mempool_get(mempool, &sess_private_data)) {
> +		IPSEC_MB_LOG(ERR, "Couldn't get object from session
> mempool");
> +		free_mb_mgr(mb_mgr);
> +		return -ENOMEM;
> +	}
> +
> +	ret = (*pmd_data->session_configure)(mb_mgr, sess_private_data, xform);
> +	if (ret != 0) {
> +		IPSEC_MB_LOG(ERR, "failed configure session parameters");
> +
> +		/* Return session to mempool */
> +		rte_mempool_put(mempool, sess_private_data);
> +		free_mb_mgr(mb_mgr);
> +		return ret;
> +	}
> +
> +	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
> +
> +	return 0;
> +}
> +
> +/** Clear the session memory */
> +void
> +ipsec_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
> +			       struct rte_cryptodev_sym_session *sess)
> +{
> +	uint8_t index = dev->driver_id;
> +	void *sess_priv = get_sym_session_private_data(sess, index);
> +
> +	/* Zero out the whole structure */
> +	if (sess_priv) {
> +		memset(sess_priv, 0, ipsec_mb_pmd_sym_session_get_size(dev));
> +		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
> +
> +		set_sym_session_private_data(sess, index, NULL);
> +		rte_mempool_put(sess_mp, sess_priv);
> +	}
> +}
> diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> new file mode 100644
> index 0000000000..754259aa59
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> @@ -0,0 +1,275 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2021 Intel Corporation
> + */
> +
> +#ifndef _IPSEC_MB_PMD_PRIVATE_H_
> +#define _IPSEC_MB_PMD_PRIVATE_H_
> +
> +#include <intel-ipsec-mb.h>
> +#include <cryptodev_pmd.h>
> +#include <rte_bus_vdev.h>
> +
> +#if defined(RTE_LIB_SECURITY)
> +#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
> +#include <rte_security.h>
> +#include <rte_security_driver.h>
> +#endif
> +
> +/* Maximum length for digest */
> +#define DIGEST_LENGTH_MAX 64
> +
> +enum ipsec_mb_vector_mode {
> +	IPSEC_MB_NOT_SUPPORTED = 0,
> +	IPSEC_MB_SSE,
> +	IPSEC_MB_AVX,
> +	IPSEC_MB_AVX2,
> +	IPSEC_MB_AVX512
> +};
> +
> +extern enum ipsec_mb_vector_mode vector_mode;
> +
> +/** IMB_MGR instances, one per thread */
> +extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
> +
> +/** PMD LOGTYPE DRIVER, common to all PMDs */
> +extern int ipsec_mb_logtype_driver;
> +#define IPSEC_MB_LOG(level, fmt, ...)                                         \
> +	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
> +		"%s() line %u: " fmt "\n", __func__, __LINE__,
> ##__VA_ARGS__)
> +
> +/** All supported device types */
> +enum ipsec_mb_pmd_types {
> +	IPSEC_MB_N_PMD_TYPES
> +};
> +
> +/** Crypto operations */
> +enum ipsec_mb_operation {
> +	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
> +	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
> +	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
> +	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
> +	IPSEC_MB_OP_ENCRYPT_ONLY,
> +	IPSEC_MB_OP_DECRYPT_ONLY,
> +	IPSEC_MB_OP_HASH_GEN_ONLY,
> +	IPSEC_MB_OP_HASH_VERIFY_ONLY,
> +	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
> +	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
> +	IPSEC_MB_OP_NOT_SUPPORTED
> +};
> +
> +/** Helper function. Gets driver ID based on PMD type */
> +static __rte_always_inline uint8_t
> +ipsec_mb_get_driver_id(__rte_unused enum ipsec_mb_pmd_types pmd_type)
> +{
> +	return UINT8_MAX;
> +}
> +
> +/** Common private data structure for each PMD */
> +struct ipsec_mb_private {
> +	enum ipsec_mb_pmd_types pmd_type;
> +	/**< PMD  type */
> +	uint32_t max_nb_queue_pairs;
> +	/**< Max number of queue pairs supported by device */
> +	__extension__ uint8_t priv[0];
> +};
> +
> +/** IPSEC Multi buffer queue pair common queue pair data for all PMDs */
> +struct ipsec_mb_qp {
> +	uint16_t id;
> +	/**< Queue Pair Identifier */
> +	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
> +	struct rte_ring *ingress_queue;
> +	/**< Ring for placing operations ready for processing */
> +	struct rte_mempool *sess_mp;
> +	/**< Session Mempool */
> +	struct rte_mempool *sess_mp_priv;
> +	/**< Session Private Data Mempool */
> +	struct rte_cryptodev_stats stats;
> +	/**< Queue pair statistics */
> +	enum ipsec_mb_pmd_types pmd_type;
> +	/**< pmd type */
> +	uint8_t digest_idx;
> +	/**< Index of the next
> +	 * slot to be used in temp_digests,
> +	 * to store the digest for a given operation
> +	 */
> +	IMB_MGR *mb_mgr;
> +	/* Multi buffer manager */
> +	__extension__ uint8_t additional_data[0];
> +	/**< Storing PMD specific additional data */
> +};
> +
> +static __rte_always_inline void *
> +ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
> +{
> +	return (void *)qp->additional_data;
> +}
> +
> +/** Helper function. Allocates job manager */
> +static __rte_always_inline IMB_MGR *
> +alloc_init_mb_mgr(void)
> +{
> +	IMB_MGR *mb_mgr = alloc_mb_mgr(0);
> +
> +	if (unlikely(mb_mgr == NULL)) {
> +		IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n");
> +		return NULL;
> +	}
> +
> +	init_mb_mgr_auto(mb_mgr, NULL);
> +
> +	return mb_mgr;
> +}
> +
> +/** Helper function. Gets per thread job manager */
> +static __rte_always_inline IMB_MGR *
> +get_per_thread_mb_mgr(void)
> +{
> +	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
> +		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
> +
> +	return RTE_PER_LCORE(mb_mgr);
> +}
> +
> +/** Device creation function */
> +int
> +cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
> +	enum ipsec_mb_pmd_types pmd_type);
> +
> +/** Device remove function */
> +int
> +cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev);
> +
> +/** Configure queue pair PMD type specific data */
> +typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);
> +
> +/** Configure session PMD type specific data */
> +typedef int (*ipsec_mb_session_configure_t)(IMB_MGR *mbr_mgr,
> +		void *session_private,
> +		const struct rte_crypto_sym_xform *xform);
> +
> +/** Configure internals PMD type specific data */
> +typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
> +
> +/** Per PMD type operation and data */
> +struct ipsec_mb_pmd_data {
> +	uint8_t is_configured;
> +	dequeue_pkt_burst_t dequeue_burst;
> +	ipsec_mb_dev_configure_t dev_config;
> +	ipsec_mb_queue_pair_configure_t queue_pair_configure;
> +	ipsec_mb_session_configure_t session_configure;
> +	const struct rte_cryptodev_capabilities *caps;
> +	struct rte_cryptodev_ops *ops;
> +	struct rte_security_ops *security_ops;
> +	uint64_t feature_flags;
> +	uint32_t session_priv_size;
> +	uint32_t qp_priv_size;
> +	uint32_t internals_priv_size;
> +};
> +
> +/** Global PMD type specific data */
> +extern struct ipsec_mb_pmd_data ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
> +
> +int
> +ipsec_mb_pmd_config(struct rte_cryptodev *dev,
> +	struct rte_cryptodev_config *config);
> +
> +int
> +ipsec_mb_pmd_start(struct rte_cryptodev *dev);
> +
> +void
> +ipsec_mb_pmd_stop(struct rte_cryptodev *dev);
> +
> +int
> +ipsec_mb_pmd_close(struct rte_cryptodev *dev);
> +
> +void
> +ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
> +		struct rte_cryptodev_stats *stats);
> +
> +void
> +ipsec_mb_pmd_stats_reset(struct rte_cryptodev *dev);
> +
> +void
> +ipsec_mb_pmd_info_get(struct rte_cryptodev *dev,
> +		struct rte_cryptodev_info *dev_info);
> +
> +int
> +ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);
> +
> +int
> +ipsec_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
> +					   struct ipsec_mb_qp *qp);
> +
> +int
> +ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
> +				 const struct rte_cryptodev_qp_conf *qp_conf,
> +				 int socket_id);
> +
> +/** Returns the size of the aesni multi-buffer session structure */
> +unsigned
> +ipsec_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev);
> +
> +/** Configure an aesni multi-buffer session from a crypto xform chain */
> +int ipsec_mb_pmd_sym_session_configure(
> +	struct rte_cryptodev *dev,
> +	struct rte_crypto_sym_xform *xform,
> +	struct rte_cryptodev_sym_session *sess,
> +	struct rte_mempool *mempool);
> +
> +/** Clear the memory of session so it does not leave key material behind */
> +void
> +ipsec_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
> +				struct rte_cryptodev_sym_session *sess);
> +
> +/** Get session from op. If sessionless create a session */
> +static __rte_always_inline void *
> +ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
> +{
> +	void *sess = NULL;
> +	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
> +	struct rte_crypto_sym_op *sym_op = op->sym;
> +	uint8_t sess_type = op->sess_type;
> +	void *_sess;
> +	void *_sess_private_data = NULL;
> +	struct ipsec_mb_pmd_data *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
> +
> +	switch (sess_type) {
> +	case RTE_CRYPTO_OP_WITH_SESSION:
> +		if (likely(sym_op->session != NULL))
> +			sess = get_sym_session_private_data(sym_op->session,
> +							    driver_id);
> +	break;
> +	case RTE_CRYPTO_OP_SESSIONLESS:
> +		if (!qp->sess_mp ||
> +		    rte_mempool_get(qp->sess_mp, (void **)&_sess))
> +			return NULL;
> +
> +		if (!qp->sess_mp_priv ||
> +		    rte_mempool_get(qp->sess_mp_priv,
> +					(void **)&_sess_private_data))
> +			return NULL;
> +
> +		sess = _sess_private_data;
> +		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
> +				sess, sym_op->xform) != 0)) {
> +			rte_mempool_put(qp->sess_mp, _sess);
> +			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
> +			sess = NULL;
> +		}
> +
> +		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
> +		set_sym_session_private_data(sym_op->session, driver_id,
> +					     _sess_private_data);
> +	break;
> +	default:
> +		IPSEC_MB_LOG(ERR, "Unrecognized session type %u",
> sess_type);
> +	}
> +
> +	if (unlikely(sess == NULL))
> +		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
> +
> +	return sess;
> +}
> +
> +#endif /* _IPSEC_MB_PMD_PRIVATE_H_ */
> diff --git a/drivers/crypto/ipsec_mb/version.map b/drivers/crypto/ipsec_mb/version.map
> new file mode 100644
> index 0000000000..4a76d1d52d
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/version.map
> @@ -0,0 +1,3 @@
> +DPDK_21 {
> +	local: *;
> +};

This should be DPDK_22
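i.e. (assuming no other symbols are exported yet):

	DPDK_22 {
		local: *;
	};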


> diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
> index ea239f4c56..e40b18b17b 100644
> --- a/drivers/crypto/meson.build
> +++ b/drivers/crypto/meson.build
> @@ -6,6 +6,7 @@ if is_windows
>  endif
> 
>  drivers = [
> +        'ipsec_mb',
>          'aesni_gcm',
>          'aesni_mb',
>          'armv8',

Alphabetical order??
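
i.e. 'ipsec_mb' should go into its alphabetical slot, something like:

        drivers = [
                'aesni_gcm',
                'aesni_mb',
                'armv8',
                ...
                'ipsec_mb',
                ...
        ]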

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 02/10] crypto/ipsec_mb: add multiprocess support
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 02/10] crypto/ipsec_mb: add multiprocess support Ciara Power
@ 2021-10-06 14:01           ` Akhil Goyal
  0 siblings, 0 replies; 30+ messages in thread
From: Akhil Goyal @ 2021-10-06 14:01 UTC (permalink / raw)
  To: Ciara Power, dev
  Cc: roy.fan.zhang, piotrx.bronowski, Pablo de Lara, Anatoly Burakov

> The ipsec_mb SW PMD now has multiprocess support.
> The queue-pair IMB_MGR is stored in a memzone instead of being allocated
> externally by the Intel IPSec MB library, when v1.1 is used.
> If v1.0 is used, multi-process is not supported, and allocation is
> done as before.
> The secondary process needs to reconfigure the queue-pair to allow the
> IMB_MGR function pointers to be updated.
> 
> Intel IPsec MB library version 1.1 is required for this support.
> 
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> ---
>  doc/guides/rel_notes/release_21_11.rst        |   7 ++
>  .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 110 +++++++++++++++---
>  .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   5 +

I missed commenting on this in the first patch.
The rte_ prefix may be dropped from the internal files of the PMD;
rte_ is normally used only for files which are exposed to the user.
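For example, a header like rte_ipsec_mb_pmd_private.h could simply become
ipsec_mb_private.h.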

>  3 files changed, 106 insertions(+), 16 deletions(-)
> 
> diff --git a/doc/guides/rel_notes/release_21_11.rst
> b/doc/guides/rel_notes/release_21_11.rst
> index 43d367bcad..3c9d7e19cb 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -62,6 +62,13 @@ New Features
>    * Added bus-level parsing of the devargs syntax.
>    * Kept compatibility with the legacy syntax as parsing fallback.
> 
> +* **Added multi-process support for IPsec-mb PMD.**
> +
> +  Added multi-process support to IPsec-mb PMD, which will add support
> +  for PMDs that are moved to use this shared framework.
> +  This feature makes use of an intel-ipsec-mb API found in v1.1,
> +  which is the minimum required version to use this multi-process support.
> +

I believe the new PMD framework should first be introduced in the release note, and
a sub-bullet may be added to specify multi-process support.
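Something like:

  * **Added IPsec-mb framework for SW crypto PMDs.**

    * Added multi-process support, which requires intel-ipsec-mb v1.1.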

>  * **Updated Marvell cnxk crypto PMD.**
> 
>    * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for
> CN10K.
> diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
> index 1146297216..c7bcfd3dce 100644
> --- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
> +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
> @@ -9,6 +9,8 @@
> 
>  #include "rte_ipsec_mb_pmd_private.h"
> 
> +#define IMB_MP_REQ_VER_STR "1.1.0"
> +
>  /** Configure device */
>  int
>  ipsec_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
> @@ -98,10 +100,20 @@ ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
>  	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
>  	struct rte_ring *r = NULL;
> 
> -	if (qp != NULL) {
> +	if (qp != NULL && rte_eal_process_type() == RTE_PROC_PRIMARY) {
>  		r = rte_ring_lookup(qp->name);
>  		if (r)
>  			rte_ring_free(r);
> +
> +#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
> +		if (qp->mb_mgr)
> +			free_mb_mgr(qp->mb_mgr);
> +#else
> +		if (qp->mb_mgr_mz) {
> +			rte_memzone_free(qp->mb_mgr_mz);
> +			qp->mb_mgr = NULL;
> +		}
> +#endif
>  		rte_free(qp);
>  		dev->data->queue_pairs[qp_id] = NULL;
>  	}
> @@ -154,6 +166,42 @@ static struct rte_ring
>  			       RING_F_SP_ENQ | RING_F_SC_DEQ);
>  }
> 
> +#if IMB_VERSION(1, 1, 0) <= IMB_VERSION_NUM
> +static IMB_MGR *
> +ipsec_mb_pmd_alloc_mb_from_memzone(const struct rte_memzone
> **mb_mgr_mz,
> +		const char *mb_mgr_mz_name)
> +{
> +	IMB_MGR *mb_mgr;
> +
> +	if (rte_eal_process_type() ==  RTE_PROC_PRIMARY) {
> +		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
> +		if (*mb_mgr_mz == NULL) {
> +			*mb_mgr_mz = rte_memzone_reserve(mb_mgr_mz_name,
> +			imb_get_mb_mgr_size(),
> +			rte_socket_id(), 0);
> +		}
> +		if (*mb_mgr_mz == NULL) {
> +			IPSEC_MB_LOG(DEBUG, "Error allocating memzone
> for %s",
> +					mb_mgr_mz_name);
> +			return NULL;
> +		}
> +		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 1);
> +		init_mb_mgr_auto(mb_mgr, NULL);
> +	} else {
> +		*mb_mgr_mz = rte_memzone_lookup(mb_mgr_mz_name);
> +		if (*mb_mgr_mz == NULL) {
> +			IPSEC_MB_LOG(ERR,
> +				"Secondary can't find %s mz, did primary
> create it?",
> +				mb_mgr_mz_name);
> +			return NULL;
> +		}
> +		mb_mgr = imb_set_pointers_mb_mgr((*mb_mgr_mz)->addr, 0, 0);
> +		init_mb_mgr_auto(mb_mgr, NULL);
> +	}
> +	return mb_mgr;
> +}
> +#endif
> +
>  /** Setup a queue pair */
>  int
>  ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
> @@ -167,16 +215,44 @@ ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
>  	uint32_t qp_size;
>  	int ret = -1;
> 
> -	/* Free memory prior to re-allocation if needed. */
> -	if (dev->data->queue_pairs[qp_id] != NULL)
> -		ipsec_mb_pmd_qp_release(dev, qp_id);
> +	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
> +#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
> +		IPSEC_MB_LOG(ERR, "The intel-ipsec-mb version (%s) does
> not support multiprocess,"
> +				"the minimum version required for this
> feature is %s.",
> +				IMB_VERSION_STR, IMB_MP_REQ_VER_STR);
> +		return -EINVAL;
> +#endif
> +		if (dev->data->queue_pairs[qp_id] != NULL)
> +			qp = dev->data->queue_pairs[qp_id];
> +	} else {
> +		/* Free memory prior to re-allocation if needed. */
> +		if (dev->data->queue_pairs[qp_id] != NULL)
> +			ipsec_mb_pmd_qp_release(dev, qp_id);
> +
> +		qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
> +		/* Allocate the queue pair data structure. */
> +		qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
> +					RTE_CACHE_LINE_SIZE, socket_id);
> +		if (qp == NULL)
> +			return -ENOMEM;
> +	}
> +
> +#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
> +	qp->mb_mgr = alloc_init_mb_mgr();
> +#else
> +	char mz_name[IPSEC_MB_MAX_MZ_NAME];
> +	snprintf(mz_name, sizeof(mz_name), "IMB_MGR_DEV_%d_QP_%d",
> +			dev->data->dev_id, qp_id);
> +	qp->mb_mgr = ipsec_mb_pmd_alloc_mb_from_memzone(&(qp->mb_mgr_mz),
> +			mz_name);

The _pmd_ part may be dropped from the internal APIs; the function names are pretty long.
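For example, ipsec_mb_pmd_alloc_mb_from_memzone() could become
ipsec_mb_alloc_mb_from_memzone().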

> +#endif
> +	if (qp->mb_mgr == NULL) {
> +		ret = -ENOMEM;
> +		goto qp_setup_cleanup;
> +	}
> 
> -	qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
> -	/* Allocate the queue pair data structure. */
> -	qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
> -				RTE_CACHE_LINE_SIZE, socket_id);
> -	if (qp == NULL)
> -		return -ENOMEM;
> +	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
> +		return 0;
> 
>  	qp->id = qp_id;
>  	dev->data->queue_pairs[qp_id] = qp;
> @@ -194,12 +270,6 @@ ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
>  		goto qp_setup_cleanup;
>  	}
> 
> -	qp->mb_mgr = alloc_init_mb_mgr();
> -	if (!qp->mb_mgr) {
> -		ret = -ENOMEM;
> -		goto qp_setup_cleanup;
> -	}
> -
>  	memset(&qp->stats, 0, sizeof(qp->stats));
> 
>  	if (pmd_data->queue_pair_configure) {
> @@ -211,8 +281,15 @@ ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
>  	return 0;
> 
>  qp_setup_cleanup:
> +#if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM
>  	if (qp->mb_mgr)
>  		free_mb_mgr(qp->mb_mgr);
> +#else
> +	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
> +		return ret;
> +	if (qp->mb_mgr_mz)
> +		rte_memzone_free(qp->mb_mgr_mz);
> +#endif
>  	if (qp)
>  		rte_free(qp);
>  	return ret;
> @@ -269,6 +346,7 @@ ipsec_mb_pmd_sym_session_configure(
> 
>  	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
> 
> +	free_mb_mgr(mb_mgr);
>  	return 0;
>  }
> 
> diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> index 754259aa59..35860b1b10 100644
> --- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> @@ -18,6 +18,9 @@
>  /* Maximum length for digest */
>  #define DIGEST_LENGTH_MAX 64
> 
> +/* Maximum length for memzone name */
> +#define IPSEC_MB_MAX_MZ_NAME 32
> +
>  enum ipsec_mb_vector_mode {
>  	IPSEC_MB_NOT_SUPPORTED = 0,
>  	IPSEC_MB_SSE,
> @@ -95,6 +98,8 @@ struct ipsec_mb_qp {
>  	 */
>  	IMB_MGR *mb_mgr;
>  	/* Multi buffer manager */
> +	const struct rte_memzone *mb_mgr_mz;
> +	/* Shared memzone for storing mb_mgr */
>  	__extension__ uint8_t additional_data[0];
>  	/**< Storing PMD specific additional data */
>  };
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 04/10] drivers/crypto: move aesni-gcm PMD to IPsec-mb framework
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 04/10] drivers/crypto: move aesni-gcm " Ciara Power
@ 2021-10-06 14:31           ` Akhil Goyal
  0 siblings, 0 replies; 30+ messages in thread
From: Akhil Goyal @ 2021-10-06 14:31 UTC (permalink / raw)
  To: Ciara Power, dev
  Cc: roy.fan.zhang, piotrx.bronowski, Thomas Monjalon, Pablo de Lara,
	Ray Kinsella

> diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
> index bac5d85e26..8550eaee9a 100644
> --- a/drivers/crypto/ipsec_mb/meson.build
> +++ b/drivers/crypto/ipsec_mb/meson.build
> @@ -23,6 +23,7 @@ endif
> 
>  sources = files('rte_ipsec_mb_pmd.c',
>  		'rte_ipsec_mb_pmd_ops.c',
> -		'pmd_aesni_mb.c'
> +		'pmd_aesni_mb.c',
> +		'pmd_aesni_gcm.c'
>  		)
>  deps += ['bus_vdev', 'net', 'security']
> diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
> new file mode 100644
> index 0000000000..2fcfa97a63
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
> @@ -0,0 +1,1003 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2021 Intel Corporation

Copyright years are different from the original file

> + */
> +
> +#include <intel-ipsec-mb.h>
> +
> +#if defined(RTE_LIB_SECURITY)
> +#define AESNI_MB_DOCSIS_SEC_ENABLED 1
> +#include <rte_ether.h>
> +#include <rte_security.h>
> +#include <rte_security_driver.h>
> +#endif
> +
> +#include "rte_ipsec_mb_pmd_private.h"
> +
> +#define AESNI_GCM_IV_LENGTH 12
> +
> +static const struct rte_cryptodev_capabilities aesni_gcm_capabilities[] = {
> +	{	/* AES GMAC (AUTH) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.digest_size = {
> +					.min = 1,
> +					.max = 16,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = AESNI_GCM_IV_LENGTH,
> +					.max = AESNI_GCM_IV_LENGTH,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* AES GCM */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> +				.algo = RTE_CRYPTO_AEAD_AES_GCM,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.digest_size = {
> +					.min = 1,
> +					.max = 16,
> +					.increment = 1
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 65535,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = AESNI_GCM_IV_LENGTH,
> +					.max = AESNI_GCM_IV_LENGTH,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> +};
> +
> +uint8_t pmd_driver_id_aesni_gcm;
> +
> +enum aesni_gcm_key_length {
> +	GCM_KEY_128 = 0,
> +	GCM_KEY_192,
> +	GCM_KEY_256,
> +	GCM_NUM_KEY_TYPES
> +};
> +
> +typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
> +			    struct gcm_context_data *gcm_ctx_data,
> +			    uint8_t *out, const uint8_t *in,
> +			    uint64_t plaintext_len, const uint8_t *iv,
> +			    const uint8_t *aad, uint64_t aad_len,
> +			    uint8_t *auth_tag, uint64_t auth_tag_len);
> +
> +typedef void (*aesni_gcm_pre_t)(const void *key,
> +				struct gcm_key_data *gcm_data);
> +
> +typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
> +				 struct gcm_context_data *gcm_ctx_data,
> +				 const uint8_t *iv, uint8_t const *aad,
> +				 uint64_t aad_len);
> +
> +typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
> +				   struct gcm_context_data *gcm_ctx_data,
> +				   uint8_t *out, const uint8_t *in,
> +				   uint64_t plaintext_len);
> +
> +typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
> +				     struct gcm_context_data *gcm_ctx_data,
> +				     uint8_t *auth_tag, uint64_t auth_tag_len);
> +
> +typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
> +				  struct gcm_context_data *gcm_ctx_data,
> +				  const uint8_t *iv, const uint64_t iv_len);
> +
> +typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
> +				    struct gcm_context_data *gcm_ctx_data,
> +				    const uint8_t *in,
> +				    const uint64_t plaintext_len);
> +
> +typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
> +				      struct gcm_context_data *gcm_ctx_data,
> +				      uint8_t *auth_tag,
> +				      const uint64_t auth_tag_len);
> +
> +/** GCM operation handlers */
> +struct aesni_gcm_ops {
> +	aesni_gcm_t enc;
> +	aesni_gcm_t dec;
> +	aesni_gcm_pre_t pre;
> +	aesni_gcm_init_t init;
> +	aesni_gcm_update_t update_enc;
> +	aesni_gcm_update_t update_dec;
> +	aesni_gcm_finalize_t finalize_enc;
> +	aesni_gcm_finalize_t finalize_dec;
> +	aesni_gmac_init_t gmac_init;
> +	aesni_gmac_update_t gmac_update;
> +	aesni_gmac_finalize_t gmac_finalize;
> +};
> +
> +RTE_DEFINE_PER_LCORE(struct aesni_gcm_ops[GCM_NUM_KEY_TYPES], gcm_ops);
> +
> +struct aesni_gcm_qp_data {
> +	struct gcm_context_data gcm_ctx_data;
> +	uint8_t temp_digest[DIGEST_LENGTH_MAX];
> +	/**< Buffer used to store the digest generated
> +	 * by the driver when verifying a digest provided
> +	 * by the user (using authentication verify operation)
> +	 */
> +	struct aesni_gcm_ops ops[GCM_NUM_KEY_TYPES];
> +	/**< Operation Handlers */
> +};
> +
> +/** AESNI GCM private session structure */
> +struct aesni_gcm_session {
> +	struct {
> +		uint16_t length;
> +		uint16_t offset;
> +	} iv;
> +	/**< IV parameters */
> +	uint16_t aad_length;
> +	/**< AAD length */
> +	uint16_t req_digest_length;
> +	/**< Requested digest length */
> +	uint16_t gen_digest_length;
> +	/**< Generated digest length */
> +	enum ipsec_mb_operation op;
> +	/**< GCM operation type */
> +	struct gcm_key_data gdata_key;
> +	/**< GCM parameters */
> +	enum aesni_gcm_key_length key_length;
> +	/** Key Length */
> +};

Is it not better to move the above code into a header file, e.g. aesni_gcm_priv.h?
Similarly for the other PMDs?
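
A minimal sketch of that split, assuming a hypothetical pmd_aesni_gcm_priv.h; the GCM typedefs and the ops/qp-data structs quoted above would move there unchanged:

    /* pmd_aesni_gcm_priv.h - hypothetical private header for this PMD */
    #ifndef _PMD_AESNI_GCM_PRIV_H_
    #define _PMD_AESNI_GCM_PRIV_H_

    #include <intel-ipsec-mb.h>
    #include "rte_ipsec_mb_pmd_private.h"

    #define AESNI_GCM_IV_LENGTH 12

    enum aesni_gcm_key_length {
    	GCM_KEY_128 = 0,
    	GCM_KEY_192,
    	GCM_KEY_256,
    	GCM_NUM_KEY_TYPES
    };

    /* ... the aesni_gcm_t/aesni_gmac_t typedefs, struct aesni_gcm_ops and
     * struct aesni_gcm_qp_data from the patch would sit here too ...
     */

    /** AESNI GCM private session structure */
    struct aesni_gcm_session {
    	struct {
    		uint16_t length;
    		uint16_t offset;
    	} iv;				/**< IV parameters */
    	uint16_t aad_length;		/**< AAD length */
    	uint16_t req_digest_length;	/**< Requested digest length */
    	uint16_t gen_digest_length;	/**< Generated digest length */
    	enum ipsec_mb_operation op;	/**< GCM operation type */
    	struct gcm_key_data gdata_key;	/**< GCM parameters */
    	enum aesni_gcm_key_length key_length;	/**< Key length */
    };

    #endif /* _PMD_AESNI_GCM_PRIV_H_ */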


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
@ 2021-10-06 14:48           ` Akhil Goyal
  2021-10-07 15:07             ` Ji, Kai
  0 siblings, 1 reply; 30+ messages in thread
From: Akhil Goyal @ 2021-10-06 14:48 UTC (permalink / raw)
  To: Ciara Power, dev
  Cc: roy.fan.zhang, piotrx.bronowski, Kai Ji, Declan Doherty, Pablo de Lara

> From: Kai Ji <kai.ji@intel.com>
> 
> Add in new chacha20_poly1305 support in ipsec_mb.
> Add in new chacha20_poly1305 test vector for SGL test.
> 
> Signed-off-by: Kai Ji <kai.ji@intel.com>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> 
> ---
> v3:
>   - Fixed some formatting.
>   - Removed unnecessary get session function.
> 
> v2:
>   - Added unused tag to session configure parameter.
>   - Added release note.
>   - Added documentation for PMD.
> ---
>  app/test/test_cryptodev.c                     |  23 +
>  app/test/test_cryptodev.h                     |   1 +
>  app/test/test_cryptodev_aead_test_vectors.h   | 114 +++++

Separate out test app changes from this patch.

>  doc/guides/cryptodevs/chacha20_poly1305.rst   |  99 ++++
>  .../cryptodevs/features/chacha20_poly1305.ini |  35 ++
>  doc/guides/cryptodevs/index.rst               |   1 +
>  doc/guides/rel_notes/release_21_11.rst        |   5 +
>  drivers/crypto/ipsec_mb/meson.build           |   1 +
>  drivers/crypto/ipsec_mb/pmd_chacha_poly.c     | 482 ++++++++++++++++++
>  .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
>  10 files changed, 768 insertions(+)
>  create mode 100644 doc/guides/cryptodevs/chacha20_poly1305.rst
>  create mode 100644
> doc/guides/cryptodevs/features/chacha20_poly1305.ini
>  create mode 100644 drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> 
> diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
> index 16d770a17f..92c9bd0141 100644
> --- a/app/test/test_cryptodev.c
> +++ b/app/test/test_cryptodev.c
> @@ -13455,6 +13455,14 @@ test_chacha20_poly1305_decrypt_test_case_rfc8439(void)
>  	return test_authenticated_decryption(&chacha20_poly1305_case_rfc8439);
>  }
> 
> +static int
> +test_chacha20_poly1305_encrypt_SGL_out_of_place(void)
> +{
> +	return test_authenticated_encryption_SGL(
> +		&chacha20_poly1305_case_2, OUT_OF_PLACE, 32,
> +		chacha20_poly1305_case_2.plaintext.len);
> +}
> +
>  #ifdef RTE_CRYPTO_SCHEDULER
> 
>  /* global AESNI worker IDs for the scheduler test */
> @@ -14063,6 +14071,8 @@ static struct unit_test_suite cryptodev_chacha20_poly1305_testsuite  = {
>  			test_chacha20_poly1305_encrypt_test_case_rfc8439),
>  		TEST_CASE_ST(ut_setup, ut_teardown,
>  			test_chacha20_poly1305_decrypt_test_case_rfc8439),
> +		TEST_CASE_ST(ut_setup, ut_teardown,
> +			test_chacha20_poly1305_encrypt_SGL_out_of_place),
>  		TEST_CASES_END()
>  	}
>  };
> @@ -14629,6 +14639,17 @@ test_cryptodev_cpu_aesni_mb(void)
>  	return rc;
>  }
> 
> +static int
> +test_cryptodev_chacha_poly_mb(void)
> +{
> +	int32_t rc;
> +	enum rte_security_session_action_type at = gbl_action_type;
> +	rc = run_cryptodev_testsuite(
> +			RTE_STR(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD));
> +	gbl_action_type = at;
> +	return rc;
> +}
> +
>  static int
>  test_cryptodev_openssl(void)
>  {
> @@ -14888,6 +14909,8 @@ REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
>  REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
>  REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
>  	test_cryptodev_cpu_aesni_mb);
> +REGISTER_TEST_COMMAND(cryptodev_chacha_poly_mb_autotest,
> +	test_cryptodev_chacha_poly_mb);
>  REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
>  REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_autotest, test_cryptodev_aesni_gcm);
>  REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_gcm_autotest,
> diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
> index 1cdd84d01f..90c8287365 100644
> --- a/app/test/test_cryptodev.h
> +++ b/app/test/test_cryptodev.h
> @@ -59,6 +59,7 @@
>  #define CRYPTODEV_NAME_SNOW3G_PMD	crypto_snow3g
>  #define CRYPTODEV_NAME_KASUMI_PMD	crypto_kasumi
>  #define CRYPTODEV_NAME_ZUC_PMD		crypto_zuc
> +#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD	crypto_chacha20_poly1305
>  #define CRYPTODEV_NAME_ARMV8_PMD	crypto_armv8
>  #define CRYPTODEV_NAME_DPAA_SEC_PMD	crypto_dpaa_sec
>  #define CRYPTODEV_NAME_DPAA2_SEC_PMD	crypto_dpaa2_sec
> diff --git a/app/test/test_cryptodev_aead_test_vectors.h b/app/test/test_cryptodev_aead_test_vectors.h
> index 73cc143f10..07292620a4 100644
> --- a/app/test/test_cryptodev_aead_test_vectors.h
> +++ b/app/test/test_cryptodev_aead_test_vectors.h
> @@ -3930,4 +3930,118 @@ static const struct aead_test_data chacha20_poly1305_case_rfc8439 = {
>  		.len = 16
>  	}
>  };
> +

Any specific reason to add new vectors for chacha poly?
Are the ones already present not enough?

> +static uint8_t chacha_aad_2[] = {
> +			0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
> +			0x00, 0x00, 0x4e, 0x91
> +};
> +
> +static const struct aead_test_data chacha20_poly1305_case_2 = {
> +	.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +	.key = {
> +		.data = {
> +				0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
> +				0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
> +				0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
> +				0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
> +		},
> +		.len = 32
> +	},
> +	.iv = {
> +		.data = {
> +				0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04,
> +				0x05, 0x06, 0x07, 0x08
> +		},
> +		.len = 12
> +	},
> +	.aad = {
> +		.data = chacha_aad_2,
> +		.len = 12
> +	},
> +	.plaintext = {
> +		.data = {
> +				0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
> +				0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
> +				0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
> +				0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
> +				0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
> +				0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
> +				0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
> +				0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
> +				0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
> +				0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
> +				0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
> +				0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
> +				0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
> +				0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
> +				0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
> +				0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
> +				0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
> +				0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
> +				0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
> +				0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
> +				0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
> +				0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
> +				0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
> +				0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
> +				0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
> +				0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
> +				0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
> +				0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
> +				0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
> +				0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
> +				0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
> +				0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
> +				0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
> +				0x9d
> +		},
> +		.len = 265
> +	},
> +	.ciphertext = {
> +		.data = {
> +				0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
> +				0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
> +				0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
> +				0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
> +				0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
> +				0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
> +				0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
> +				0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
> +				0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
> +				0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
> +				0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
> +				0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
> +				0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
> +				0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
> +				0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
> +				0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
> +				0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
> +				0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
> +				0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
> +				0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
> +				0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
> +				0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
> +				0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
> +				0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
> +				0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
> +				0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
> +				0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
> +				0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
> +				0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
> +				0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
> +				0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
> +				0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
> +				0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
> +				0x9b
> +		},
> +		.len = 265
> +	},
> +	.auth_tag = {
> +		.data = {
> +				0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, 0x22,
> +				0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, 0x38
> +		},
> +		.len = 16
> +	}
> +};
>  #endif /* TEST_CRYPTODEV_AEAD_TEST_VECTORS_H_ */
> diff --git a/doc/guides/cryptodevs/chacha20_poly1305.rst b/doc/guides/cryptodevs/chacha20_poly1305.rst
> new file mode 100644
> index 0000000000..e5f7368d6d
> --- /dev/null
> +++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
> @@ -0,0 +1,99 @@
> +..  SPDX-License-Identifier: BSD-3-Clause
> +    Copyright(c) 2016-2019 Intel Corporation.
> +
> +Chacha20-poly1305 Crypto Poll Mode Driver
> +=========================================
> +
> +The Chacha20-poly1305 PMD provides poll mode crypto driver support for
> +utilizing `Intel IPSec Multi-buffer library
> +<https://github.com/01org/intel-ipsec-mb>`_.
> +
> +Features
> +--------
> +
> +Chacha20-poly1305 PMD has support for:
> +
> +AEAD algorithms:
> +
> +* RTE_CRYPTO_AEAD_CHACHA20_POLY1305
> +
> +
> +Installation
> +------------
> +
> +To build DPDK with the Chacha20-poly1305 PMD the user is required to download
> +the multi-buffer library from `here
> +<https://github.com/01org/intel-ipsec-mb>`_
> +and compile it on their user system before building DPDK.
> +The latest version of the library supported by this PMD is v1.0, which
> +can be downloaded from
> +`<https://github.com/01org/intel-ipsec-mb/archive/v1.0.zip>`_.
> +
> +After downloading the library, the user needs to unpack and compile it
> +on their system before building DPDK:
> +
> +.. code-block:: console
> +
> +    make
> +    make install
> +
> +The library requires NASM to be built. Depending on the library version, it might
> +require a minimum NASM version (e.g. v0.54 requires at least NASM 2.14).
> +
> +NASM is packaged for different OS. However, on some OS the version is too old,
> +so a manual installation is required. In that case, NASM can be downloaded from
> +`NASM website <https://www.nasm.us/pub/nasm/releasebuilds/?C=M;O=D>`_.
> +Once it is downloaded, extract it and follow these steps:
> +
> +.. code-block:: console
> +
> +    ./configure
> +    make
> +    make install
> +
> +.. note::
> +
> +   Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
> +   If a GCC version lower than 5.0 is used, the workaround proposed by the following
> +   link should be used: `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
> +
> +As a reference, the following table shows a mapping between the past DPDK versions
> +and the external crypto libraries supported by them:
> +
> +.. _table_zuc_versions:

ZUC ????

> +
> +.. table:: DPDK and external crypto library version compatibility
> +
> +   =============  ================================
> +   DPDK version   Crypto library version
> +   =============  ================================
> +   21.11+         Multi-buffer library 1.0*
> +   =============  ================================
> +
> +\* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
> +
> +Initialization
> +--------------
> +
> +In order to enable this virtual crypto PMD, user must:
> +
> +* Build the multi buffer library (explained in Installation section).
> +
> +To use the PMD in an application, user must:
> +
> +* Call rte_vdev_init("crypto_chacha20_poly1305") within the application.
> +
> +* Use --vdev="crypto_chacha20_poly1305" in the EAL options, which will call
> +  rte_vdev_init() internally.
> +
> +The following parameters (all optional) can be provided in the previous two calls:
> +
> +* socket_id: Specify the socket where the memory for the device is going to be allocated
> +  (by default, socket_id will be the socket where the core that is creating the PMD is running on).
> +
> +* max_nb_queue_pairs: Specify the maximum number of queue pairs in the
> +  device (8 by default).
> +
> +* max_nb_sessions: Specify the maximum number of sessions that can be
> +  created (2048 by default).
> +
> +Example:
> +
> +.. code-block:: console
> +
> +    --vdev="crypto_chacha20_poly1305,socket_id=0,max_nb_sessions=128"
> diff --git a/doc/guides/cryptodevs/features/chacha20_poly1305.ini b/doc/guides/cryptodevs/features/chacha20_poly1305.ini
> new file mode 100644
> index 0000000000..3353e031c9
> --- /dev/null
> +++ b/doc/guides/cryptodevs/features/chacha20_poly1305.ini
> @@ -0,0 +1,35 @@
> +;
> +; Supported features of the 'chacha20_poly1305' crypto driver.
> +;
> +; Refer to default.ini for the full list of available PMD features.
> +;
> +[Features]
> +Symmetric crypto       = Y
> +Sym operation chaining = Y
> +Symmetric sessionless  = Y
> +Non-Byte aligned data  = Y
> +In Place SGL           = Y
> +OOP SGL In LB  Out     = Y
> +OOP LB  In LB  Out     = Y
> +CPU crypto             = Y
> +
> +;
> +; Supported crypto algorithms of the 'chacha20_poly1305' crypto driver.
> +;
> +[Cipher]
> +
> +;
> +; Supported authentication algorithms of the 'chacha20_poly1305' crypto driver.
> +;
> +[Auth]
> +
> +;
> +; Supported AEAD algorithms of the 'chacha20_poly1305' crypto driver.
> +;
> +[AEAD]
> +CHACHA20-POLY1305 = Y
> +
> +;
> +; Supported Asymmetric algorithms of the 'chacha20_poly1305' crypto driver.
> +;
> +[Asymmetric]
> diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
> index 0f981c77b5..3dcc2ecd2e 100644
> --- a/doc/guides/cryptodevs/index.rst
> +++ b/doc/guides/cryptodevs/index.rst
> @@ -16,6 +16,7 @@ Crypto Device Drivers
>      bcmfs
>      caam_jr
>      ccp
> +    chacha20_poly1305
>      cnxk
>      dpaa2_sec
>      dpaa_sec
> diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
> index 696541dab7..3beecb2392 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -76,6 +76,11 @@ New Features
>    * Added support for partially encrypted digest when using auth-cipher
>      operations.
> 
> +* **Added Chacha20-poly1305 Crypto PMD.**
> +
> +  * Added PMD to support chacha20-poly1305 algorithms to IPSec_MB PMD framework.

A sub-bullet may be sufficient in the ipsec-mb update.

> +  * Test vector added for chacha20-poly1305 SGL test.
> +
>  * **Updated Marvell cnxk crypto PMD.**
> 
>    * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
> diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
> index a1619c78ac..6e0a5f8004 100644
> --- a/drivers/crypto/ipsec_mb/meson.build
> +++ b/drivers/crypto/ipsec_mb/meson.build
> @@ -25,6 +25,7 @@ sources = files('rte_ipsec_mb_pmd.c',
>  		'rte_ipsec_mb_pmd_ops.c',
>  		'pmd_aesni_mb.c',
>  		'pmd_aesni_gcm.c',
> +		'pmd_chacha_poly.c',
>  		'pmd_kasumi.c',
>  		'pmd_snow3g.c',
>  		'pmd_zuc.c'
> diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> new file mode 100644
> index 0000000000..814bc0761c
> --- /dev/null
> +++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> @@ -0,0 +1,482 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2021 Intel Corporation
> + */
> +
> +#include <intel-ipsec-mb.h>
> +
> +#if defined(RTE_LIB_SECURITY)
> +#define AESNI_MB_DOCSIS_SEC_ENABLED 1
> +#include <rte_ether.h>
> +#include <rte_security.h>
> +#include <rte_security_driver.h>
> +#endif
> +
> +#include "rte_ipsec_mb_pmd_private.h"
> +
> +#define CHACHA20_POLY1305_IV_LENGTH 12
> +#define CHACHA20_POLY1305_DIGEST_LENGTH 16
> +#define CHACHA20_POLY1305_KEY_SIZE  32
> +
> +static const
> +struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
> +	{/* CHACHA20-POLY1305 */
> +	    .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +	    {.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +		    {.aead = {
> +				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +				.block_size = 64,
> +				.key_size = {
> +					.min = 32,
> +					.max = 32,
> +					.increment = 0},
> +				.digest_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 240,
> +					.increment = 1},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0},
> +			    },
> +			}
> +		},}
> +	},
> +	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> +};
> +
> +uint8_t pmd_driver_id_chacha20_poly1305;
> +
> +/** CHACHA20 POLY1305 private session structure */
> +struct chacha20_poly1305_session {
> +	struct {
> +		uint16_t length;
> +		uint16_t offset;
> +	} iv;
> +	/**< IV parameters */
> +	uint16_t aad_length;
> +	/**< AAD length */
> +	uint16_t req_digest_length;
> +	/**< Requested digest length */
> +	uint16_t gen_digest_length;
> +	/**< Generated digest length */
> +	uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
> +	enum ipsec_mb_operation op;
> +} __rte_cache_aligned;
> +
> +struct chacha20_poly1305_qp_data {
> +	struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
> +	uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
> +	/**< Buffer used to store the digest generated
> +	 * by the driver when verifying a digest provided
> +	 * by the user (using authentication verify operation)
> +	 */
> +};
> +
> +/** Parse crypto xform chain and set private session parameters. */
> +static int
> +chacha20_poly1305_session_configure(IMB_MGR * mb_mgr __rte_unused,
> +		void *priv_sess, const struct rte_crypto_sym_xform *xform)
> +{
> +	struct chacha20_poly1305_session *sess = priv_sess;
> +	const struct rte_crypto_sym_xform *auth_xform;
> +	const struct rte_crypto_sym_xform *cipher_xform;
> +	const struct rte_crypto_sym_xform *aead_xform;
> +
> +	uint8_t key_length;
> +	const uint8_t *key;
> +	enum ipsec_mb_operation mode;
> +	int ret = 0;
> +
> +	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
> +				&cipher_xform, &aead_xform);
> +	if (ret)
> +		return ret;
> +
> +	sess->op = mode;
> +
> +	switch (sess->op) {
> +	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
> +	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
> +		if (aead_xform->aead.algo !=
> +				RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
> +			IPSEC_MB_LOG(ERR,
> +			"The only combined operation supported is CHACHA20 POLY1305");
> +			ret = -ENOTSUP;
> +			goto error_exit;
> +		}
> +		/* Set IV parameters */
> +		sess->iv.offset = aead_xform->aead.iv.offset;
> +		sess->iv.length = aead_xform->aead.iv.length;
> +		key_length = aead_xform->aead.key.length;
> +		key = aead_xform->aead.key.data;
> +		sess->aad_length = aead_xform->aead.aad_length;
> +		sess->req_digest_length = aead_xform->aead.digest_length;
> +		break;
> +	default:
> +		IPSEC_MB_LOG(
> +		    ERR, "Wrong xform type, has to be AEAD or authentication");
> +		ret = -ENOTSUP;
> +		goto error_exit;
> +	}
> +
> +	/* IV check */
> +	if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
> +		sess->iv.length != 0) {
> +		IPSEC_MB_LOG(ERR, "Wrong IV length");
> +		ret = -EINVAL;
> +		goto error_exit;
> +	}
> +
> +	/* Check key length */
> +	if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
> +		IPSEC_MB_LOG(ERR, "Invalid key length");
> +		ret = -EINVAL;
> +		goto error_exit;
> +	} else {
> +		memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
> +	}
> +
> +	/* Digest check */
> +	if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
> +		IPSEC_MB_LOG(ERR, "Invalid digest length");
> +		ret = -EINVAL;
> +		goto error_exit;
> +	} else {
> +		sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
> +	}
> +
> +error_exit:
> +	return ret;
> +}
> +
> +/**
> + * Process a crypto operation, calling
> + * the direct chacha poly API from the multi buffer library.
> + *
> + * @param	qp		queue pair
> + * @param	op		symmetric crypto operation
> + * @param	session		chacha poly session
> + *
> + * @return
> + * - Return 0 if success
> + */
> +static int
> +chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
> +		struct chacha20_poly1305_session *session)
> +{
> +	struct chacha20_poly1305_qp_data *qp_data =
> +					ipsec_mb_get_qp_private_data(qp);
> +	uint8_t *src, *dst;
> +	uint8_t *iv_ptr;
> +	struct rte_crypto_sym_op *sym_op = op->sym;
> +	struct rte_mbuf *m_src = sym_op->m_src;
> +	uint32_t offset, data_offset, data_length;
> +	uint32_t part_len, data_len;
> +	int total_len;
> +	uint8_t *tag;
> +	unsigned int oop = 0;
> +
> +	offset = sym_op->aead.data.offset;
> +	data_offset = offset;
> +	data_length = sym_op->aead.data.length;
> +	RTE_ASSERT(m_src != NULL);
> +
> +	while (offset >= m_src->data_len && data_length != 0) {
> +		offset -= m_src->data_len;
> +		m_src = m_src->next;
> +
> +		RTE_ASSERT(m_src != NULL);
> +	}
> +
> +	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
> +
> +	data_len = m_src->data_len - offset;
> +	part_len = (data_len < data_length) ? data_len :
> +			data_length;
> +
> +	/* In-place */
> +	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
> +		dst = src;
> +	/* Out-of-place */
> +	else {
> +		oop = 1;
> +		/* Segmented destination buffer is not supported
> +		 * if operation is Out-of-place
> +		 */
> +		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
> +		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
> +					data_offset);
> +	}
> +
> +	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
> +				session->iv.offset);
> +
> +	IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
> +				&qp_data->chacha20_poly1305_ctx_data,
> +				iv_ptr,	sym_op->aead.aad.data,
> +				(uint64_t)session->aad_length);
> +
> +	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
> +		IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
> +				session->key,
> +				&qp_data->chacha20_poly1305_ctx_data,
> +				dst, src, (uint64_t)part_len);
> +		total_len = data_length - part_len;
> +
> +		while (total_len) {
> +			m_src = m_src->next;
> +			RTE_ASSERT(m_src != NULL);
> +
> +			src = rte_pktmbuf_mtod(m_src, uint8_t *);
> +			if (oop)
> +				dst += part_len;
> +			else
> +				dst = src;
> +			part_len = (m_src->data_len < total_len) ?
> +					m_src->data_len : total_len;
> +
> +			if (dst == NULL || src == NULL) {
> +				IPSEC_MB_LOG(ERR, "Invalid src or dst input");
> +				return -EINVAL;
> +			}
> +			IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
> +					session->key,
> +					&qp_data->chacha20_poly1305_ctx_data,
> +					dst, src, (uint64_t)part_len);
> +			total_len -= part_len;
> +			if (total_len < 0) {
> +				IPSEC_MB_LOG(ERR, "Invalid part len");
> +				return -EINVAL;
> +			}
> +		}
> +
> +		tag = sym_op->aead.digest.data;
> +		IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
> +					&qp_data->chacha20_poly1305_ctx_data,
> +					tag, session->gen_digest_length);
> +
> +	} else {
> +		IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
> +					session->key,
> +					&qp_data->chacha20_poly1305_ctx_data,
> +					dst, src, (uint64_t)part_len);
> +
> +		total_len = data_length - part_len;
> +
> +		while (total_len) {
> +			m_src = m_src->next;
> +
> +			RTE_ASSERT(m_src != NULL);
> +
> +			src = rte_pktmbuf_mtod(m_src, uint8_t *);
> +			if (oop)
> +				dst += part_len;
> +			else
> +				dst = src;
> +			part_len = (m_src->data_len < total_len) ?
> +					m_src->data_len : total_len;
> +
> +			if (dst == NULL || src == NULL) {
> +				IPSEC_MB_LOG(ERR, "Invalid src or dst input");
> +				return -EINVAL;
> +			}
> +			IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
> +					session->key,
> +					&qp_data->chacha20_poly1305_ctx_data,
> +					dst, src, (uint64_t)part_len);
> +			total_len -= part_len;
> +			if (total_len < 0) {
> +				IPSEC_MB_LOG(ERR, "Invalid part len");
> +				return -EINVAL;
> +			}
> +		}
> +
> +		tag = qp_data->temp_digest;
> +		IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
> +					&qp_data->chacha20_poly1305_ctx_data,
> +					tag, session->gen_digest_length);
> +	}
> +
> +	return 0;
> +}
> +
> +/**
> + * Process a completed chacha poly op
> + *
> + * @param qp		Queue Pair to process
> + * @param op		Crypto operation
> + * @param sess		Crypto session
> + *
> + * @return
> + * - void
> + */
> +static void
> +post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
> +		struct rte_crypto_op *op,
> +		struct chacha20_poly1305_session *session)
> +{
> +	struct chacha20_poly1305_qp_data *qp_data =
> +					ipsec_mb_get_qp_private_data(qp);
> +
> +	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> +	/* Verify digest if required */
> +	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
> +			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
> +		uint8_t *digest = op->sym->aead.digest.data;
> +		uint8_t *tag = qp_data->temp_digest;
> +
> +#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
> +		rte_hexdump(stdout, "auth tag (orig):",
> +				digest, session->req_digest_length);
> +		rte_hexdump(stdout, "auth tag (calc):",
> +				tag, session->req_digest_length);
> +#endif
> +		if (memcmp(tag, digest, session->req_digest_length) != 0)
> +			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> +
> +	}
> +
> +}
> +
> +/**
> + * Process a completed Chacha20_poly1305 request
> + *
> + * @param qp		Queue Pair to process
> + * @param op		Crypto operation
> + * @param sess		Crypto session
> + *
> + * @return
> + * - void
> + */
> +static void
> +handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
> +		struct rte_crypto_op *op,
> +		struct chacha20_poly1305_session *sess)
> +{
> +	post_process_chacha20_poly1305_crypto_op(qp, op, sess);
> +
> +	/* Free session if a session-less crypto op */
> +	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> +		memset(sess, 0, sizeof(struct chacha20_poly1305_session));
> +		memset(op->sym->session, 0,
> +			rte_cryptodev_sym_get_existing_header_session_size(
> +				op->sym->session));
> +		rte_mempool_put(qp->sess_mp_priv, sess);
> +		rte_mempool_put(qp->sess_mp, op->sym->session);
> +		op->sym->session = NULL;
> +	}
> +}
> +
> +static uint16_t
> +chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
> +		struct rte_crypto_op **ops, uint16_t nb_ops)
> +{
> +	struct chacha20_poly1305_session *sess;
> +	struct ipsec_mb_qp *qp = queue_pair;
> +
> +	int retval = 0;
> +	unsigned int i = 0, nb_dequeued;
> +
> +	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
> +			(void **)ops, nb_ops, NULL);
> +
> +	for (i = 0; i < nb_dequeued; i++) {
> +
> +		sess = ipsec_mb_get_session_private(qp, ops[i]);
> +		if (unlikely(sess == NULL)) {
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +			qp->stats.dequeue_err_count++;
> +			break;
> +		}
> +
> +		retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
> +		if (retval < 0) {
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +			qp->stats.dequeue_err_count++;
> +			break;
> +		}
> +
> +		handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
> +	}
> +
> +	qp->stats.dequeued_count += i;
> +
> +	return i;
> +}
> +
> +struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
> +	.dev_configure = ipsec_mb_pmd_config,
> +	.dev_start = ipsec_mb_pmd_start,
> +	.dev_stop = ipsec_mb_pmd_stop,
> +	.dev_close = ipsec_mb_pmd_close,
> +
> +	.stats_get = ipsec_mb_pmd_stats_get,
> +	.stats_reset = ipsec_mb_pmd_stats_reset,
> +
> +	.dev_infos_get = ipsec_mb_pmd_info_get,
> +
> +	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
> +	.queue_pair_release = ipsec_mb_pmd_qp_release,
> +
> +	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
> +	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
> +	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
> +};
> +
> +struct rte_cryptodev_ops *rte_chacha20_poly1305_pmd_ops =
> +					&chacha20_poly1305_pmd_ops;
> +
> +static int
> +cryptodev_chacha20_poly1305_probe(struct rte_vdev_device *vdev)
> +{
> +	return cryptodev_ipsec_mb_create(vdev,
> +			IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305);
> +}
> +
> +static struct rte_vdev_driver cryptodev_chacha20_poly1305_pmd_drv = {
> +	.probe = cryptodev_chacha20_poly1305_probe,
> +	.remove = cryptodev_ipsec_mb_remove
> +};
> +
> +static struct cryptodev_driver chacha20_poly1305_crypto_drv;
> +
> +RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
> +		cryptodev_chacha20_poly1305_pmd_drv);
> +RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
> +		cryptodev_chacha20_poly1305_pmd);
> +RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
> +			       "max_nb_queue_pairs=<int> socket_id=<int>");
> +RTE_PMD_REGISTER_CRYPTO_DRIVER(chacha20_poly1305_crypto_drv,
> +		cryptodev_chacha20_poly1305_pmd_drv.driver,
> +		pmd_driver_id_chacha20_poly1305);
> +
> +/* Constructor function to register chacha20_poly1305 PMD */
> +RTE_INIT(ipsec_mb_register_chacha20_poly1305)
> +{
> +	struct ipsec_mb_pmd_data *chacha_poly_data
> +		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
> +
> +	chacha_poly_data->caps = chacha20_poly1305_capabilities;
> +	chacha_poly_data->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
> +	chacha_poly_data->feature_flags =
> +		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> +		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
> +		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
> +		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
> +		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
> +		RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
> +		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
> +	chacha_poly_data->internals_priv_size = 0;
> +	chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
> +	chacha_poly_data->qp_priv_size =
> +			sizeof(struct chacha20_poly1305_qp_data);
> +	chacha_poly_data->session_configure =
> +			chacha20_poly1305_session_configure;
> +	chacha_poly_data->session_priv_size =
> +			sizeof(struct chacha20_poly1305_session);
> +}
> diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> index b6a98a85ba..db36584f3a 100644
> --- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> @@ -49,6 +49,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
>  #define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
>  /**< IPSEC Multi buffer PMD zuc device name */
> 
> +#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD crypto_chacha20_poly1305
> +/**< IPSEC Multi buffer PMD chacha20_poly1305 device name */
> +
>  /** PMD LOGTYPE DRIVER, common to all PMDs */
>  extern int ipsec_mb_logtype_driver;
>  #define IPSEC_MB_LOG(level, fmt, ...)                                         \
> @@ -62,6 +65,7 @@ enum ipsec_mb_pmd_types {
>  	IPSEC_MB_PMD_TYPE_KASUMI,
>  	IPSEC_MB_PMD_TYPE_SNOW3G,
>  	IPSEC_MB_PMD_TYPE_ZUC,
> +	IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305,
>  	IPSEC_MB_N_PMD_TYPES
>  };
> 
> @@ -85,6 +89,7 @@ extern uint8_t pmd_driver_id_aesni_gcm;
>  extern uint8_t pmd_driver_id_kasumi;
>  extern uint8_t pmd_driver_id_snow3g;
>  extern uint8_t pmd_driver_id_zuc;
> +extern uint8_t pmd_driver_id_chacha20_poly1305;
> 
>  /** Helper function. Gets driver ID based on PMD type */
>  static __rte_always_inline uint8_t
> @@ -101,6 +106,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
>  		return pmd_driver_id_snow3g;
>  	case IPSEC_MB_PMD_TYPE_ZUC:
>  		return pmd_driver_id_zuc;
> +	case IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305:
> +		return pmd_driver_id_chacha20_poly1305;
>  	default:
>  		break;
>  	}
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework
  2021-10-06 13:50           ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-10-06 15:45             ` Power, Ciara
  2021-10-06 17:34               ` Akhil Goyal
  0 siblings, 1 reply; 30+ messages in thread
From: Power, Ciara @ 2021-10-06 15:45 UTC (permalink / raw)
  To: Akhil Goyal, dev
  Cc: Zhang, Roy Fan, Bronowski, PiotrX, Thomas Monjalon,
	De Lara Guarch, Pablo, Ray Kinsella

Hi Akhil,

>-----Original Message-----
>From: Akhil Goyal <gakhil@marvell.com>
>Sent: Wednesday 6 October 2021 14:51
>To: Power, Ciara <ciara.power@intel.com>; dev@dpdk.org
>Cc: Zhang, Roy Fan <roy.fan.zhang@intel.com>; Bronowski, PiotrX
><piotrx.bronowski@intel.com>; Thomas Monjalon <thomas@monjalon.net>; De
>Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Ray Kinsella
><mdr@ashroe.eu>
>Subject: RE: [EXT] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb
>framework
>
>> From: Fan Zhang <roy.fan.zhang@intel.com>
>>
>> This patch introduces the new framework to share common code between
>> the SW crypto PMDs that depend on the intel-ipsec-mb library.
>> This change helps to reduce future effort on the code maintenance and
>> feature updates.
>>
>> The PMDs that will be added to this framework in subsequent patches are:
>>   - AESNI MB
>>   - AESNI GCM
>>   - KASUMI
>>   - SNOW3G
>>   - ZUC
>>
>> The use of these PMDs will not change, they will still be supported
>> for x86, and will use the same EAL args as before.
>>
>> The minimum required version for the intel-ipsec-mb library is now v1.0.
>>
>> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
>> Signed-off-by: Ciara Power <ciara.power@intel.com>
>>
>> ---
>> v3:
>>   - Updated intel-ipsec-mb macros.
>>   - Added use of auto init function for IMB_MGR.
>>   - Added detail to commit log.
>> v2:
>>   - Added qp NULL check in get stats function.
>>   - Added maintainers file entry.
>>   - Replaced strlcpy with rte_strlcpy.
>> ---
>>  MAINTAINERS                                   |   4 +
>>  drivers/crypto/ipsec_mb/meson.build           |  27 ++
>>  drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    | 169 ++++++++++
>>  .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 291 ++++++++++++++++++
>>  .../ipsec_mb/rte_ipsec_mb_pmd_private.h       | 275 +++++++++++++++++
>>  drivers/crypto/ipsec_mb/version.map           |   3 +
>>  drivers/crypto/meson.build                    |   1 +
>>  7 files changed, 770 insertions(+)
>>  create mode 100644 drivers/crypto/ipsec_mb/meson.build
>>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
>>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
>>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
>>  create mode 100644 drivers/crypto/ipsec_mb/version.map
>>
>> diff --git a/MAINTAINERS b/MAINTAINERS index 1e0d303394..f1aaf7d408
>> 100644
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -1065,6 +1065,10 @@ F: drivers/common/qat/
>>  F: doc/guides/cryptodevs/qat.rst
>>  F: doc/guides/cryptodevs/features/qat.ini
>>
>> +IPsec MB
>
>Not sure if the name ipsec_mb is appropriate for a crypto PMD which also
>support algos which are not specified for IPsec like ZUC/SNOW/KASUMI.
>Moreover, this is a crypto PMD and not IPsec PMD.
>
<snip>
>> +/** Get device statistics */
>> +void
>> +ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
>> +		struct rte_cryptodev_stats *stats)
>
>I believe 1 instance of the ipsec_mb PMD will support only one kind of
>operation(aesni_mb/aesni_gcm/zuc/snow/kasumi).
>This cannot be changed during the lifetime of the process. Right?
>
<snip>

Yes - please treat ipsec_mb_pmd.c/ipsec_mb_pmd_ops.c as the shared code base for these existing SW crypto PMDs based on the intel-ipsec-mb library. There is no new PMD called ipsec_mb as such.

Maybe to make it less misleading we could rename the files like so:
ipsec_mb_pmd.c -> ipsec_mb_private.c
and make a similar change for ipsec_mb_pmd_ops.c and ipsec_mb_pmd_private.h.
What do you think?
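
To make the shared-code-base point concrete: each algorithm plugs into the framework by filling one slot of a common table from a constructor, so the framework files carry everything generic. Condensed from the chacha20-poly1305 patch earlier in this thread:

    /* One table slot per PMD; the framework dispatches through it. */
    RTE_INIT(ipsec_mb_register_chacha20_poly1305)
    {
    	struct ipsec_mb_pmd_data *d =
    		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];

    	d->caps = chacha20_poly1305_capabilities;
    	d->dequeue_burst = chacha20_poly1305_pmd_dequeue_burst;
    	d->ops = &chacha20_poly1305_pmd_ops;	/* shared dev ops */
    	d->session_configure = chacha20_poly1305_session_configure;
    	d->session_priv_size = sizeof(struct chacha20_poly1305_session);
    }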

Thanks,
Ciara



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework
  2021-10-06 15:45             ` Power, Ciara
@ 2021-10-06 17:34               ` Akhil Goyal
  0 siblings, 0 replies; 30+ messages in thread
From: Akhil Goyal @ 2021-10-06 17:34 UTC (permalink / raw)
  To: Power, Ciara, dev
  Cc: Zhang, Roy Fan, Bronowski, PiotrX, Thomas Monjalon,
	De Lara Guarch, Pablo, Ray Kinsella

> Hi Akhil,
> 
> >> From: Fan Zhang <roy.fan.zhang@intel.com>
> >>
> >> This patch introduces the new framework to share common code
> between
> >> the SW crypto PMDs that depend on the intel-ipsec-mb library.
> >> This change helps to reduce future effort on the code maintenance and
> >> feature updates.
> >>
> >> The PMDs that will be added to this framework in subsequent patches
> are:
> >>   - AESNI MB
> >>   - AESNI GCM
> >>   - KASUMI
> >>   - SNOW3G
> >>   - ZUC
> >>
> >> The use of these PMDs will not change, they will still be supported
> >> for x86, and will use the same EAL args as before.
> >>
> >> The minimum required version for the intel-ipsec-mb library is now v1.0.
> >>
> >> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> >> Signed-off-by: Ciara Power <ciara.power@intel.com>
> >>
> >> ---
> >> v3:
> >>   - Updated intel-ipsec-mb macros.
> >>   - Added use of auto init function for IMB_MGR.
> >>   - Added detail to commit log.
> >> v2:
> >>   - Added qp NULL check in get stats function.
> >>   - Added maintainers file entry.
> >>   - Replaced strlcpy with rte_strlcpy.
> >> ---
> >>  MAINTAINERS                                   |   4 +
> >>  drivers/crypto/ipsec_mb/meson.build           |  27 ++
> >>  drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    | 169 ++++++++++
> >>  .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 291
> ++++++++++++++++++
> >>  .../ipsec_mb/rte_ipsec_mb_pmd_private.h       | 275 +++++++++++++++++
> >>  drivers/crypto/ipsec_mb/version.map           |   3 +
> >>  drivers/crypto/meson.build                    |   1 +
> >>  7 files changed, 770 insertions(+)
> >>  create mode 100644 drivers/crypto/ipsec_mb/meson.build
> >>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
> >>  create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
> >>  create mode 100644
> drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> >>  create mode 100644 drivers/crypto/ipsec_mb/version.map
> >>
> >> diff --git a/MAINTAINERS b/MAINTAINERS index 1e0d303394..f1aaf7d408
> >> 100644
> >> --- a/MAINTAINERS
> >> +++ b/MAINTAINERS
> >> @@ -1065,6 +1065,10 @@ F: drivers/common/qat/
> >>  F: doc/guides/cryptodevs/qat.rst
> >>  F: doc/guides/cryptodevs/features/qat.ini
> >>
> >> +IPsec MB
> >
> >Not sure if the name ipsec_mb is appropriate for a crypto PMD which also
> >support algos which are not specified for IPsec like ZUC/SNOW/KASUMI.
> >Moreover, this is a crypto PMD and not IPsec PMD.
> >
> <snip>
> >> +/** Get device statistics */
> >> +void
> >> +ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
> >> +		struct rte_cryptodev_stats *stats)
> >
> >I believe 1 instance of the ipsec_mb PMD will support only one kind of
> >operation(aesni_mb/aesni_gcm/zuc/snow/kasumi).
> >This cannot be changed during the lifetime of the process. Right?
> >
> <snip>
> 
> Yes - please treat the ipsec_mb_pmd.c/pmd_ops.c as the shared code base
> for these existing SW crypto PMDs based on intel-ipsec-mb library. There is
> no new PMD called ipsec_mb as such.
> 
> Maybe to make it less misleading we could rename the file names like so:
> Ipsec_mb_pmd.c -> ipsec_mb_private.c
> And similar change for ipsec_mb_pmd_ops.c and ipsec_mb_pmd_private.h
> What do you think?
> 
Ok that would be better.


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework
  2021-10-06 14:48           ` [dpdk-dev] [EXT] " Akhil Goyal
@ 2021-10-07 15:07             ` Ji, Kai
  2021-10-07 15:22               ` Akhil Goyal
  0 siblings, 1 reply; 30+ messages in thread
From: Ji, Kai @ 2021-10-07 15:07 UTC (permalink / raw)
  To: Akhil Goyal, Power, Ciara, dev
  Cc: Zhang, Roy Fan, Bronowski, PiotrX, Doherty, Declan,
	De Lara Guarch, Pablo

Hi Akhil,

The additional chacha20-poly1305 test was added as a more robust test case to cover the out-of-place SGL scenario.

Regards

Kai 
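
For reference, the case in question (quoted from the patch) pushes the 265-byte chacha20_poly1305_case_2 vector through 32-byte mbuf segments, so the source becomes a 9-segment SGL chain while OUT_OF_PLACE forces a separate, linear destination buffer:

    static int
    test_chacha20_poly1305_encrypt_SGL_out_of_place(void)
    {
    	/* 32-byte fragments -> multi-segment source mbuf (SGL);
    	 * OUT_OF_PLACE -> distinct destination buffer.
    	 */
    	return test_authenticated_encryption_SGL(
    		&chacha20_poly1305_case_2, OUT_OF_PLACE, 32,
    		chacha20_poly1305_case_2.plaintext.len);
    }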

> 
> Any specific reason to add new vectors for chacha poly?
> Are the ones already present not enough?
> 
> > +static uint8_t chacha_aad_2[] = {
> > +			0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
> > +			0x00, 0x00, 0x4e, 0x91
> > +};
> > +
> > +static const struct aead_test_data chacha20_poly1305_case_2 = {
> > +	.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> > +	.key = {
> > +		.data = {
> > +				0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3,
> > 0x8a,
> > +				0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
> > +				0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80,
> > 0x09,
> > +				0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
> > +		},
> > +		.len = 32
> > +	},
> > +	.iv = {
> > +		.data = {
> > +				0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
> > 0x04,
> > +				0x05, 0x06, 0x07, 0x08
> > +		},
> > +		.len = 12
> > +	},
> > +	.aad = {
> > +		.data = chacha_aad_2,
> > +		.len = 12
> > +	},
> > +	.plaintext = {
> > +		.data = {
> > +				0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65,
> > 0x74,
> > +				0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73,
> > 0x20,
> > +				0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61,
> > 0x66,
> > +				0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d,
> > 0x65,
> > +				0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c,
> > 0x69,
> > +				0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61,
> > 0x20,
> > +				0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
> > 0x20,
> > +				0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20,
> > 0x6d,
> > +				0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61,
> > 0x6e,
> > +				0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62,
> > 0x65,
> > +				0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
> > 0x64,
> > +				0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61,
> > 0x63,
> > +				0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
> > +				0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65,
> > 0x64,
> > +				0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68,
> > 0x65,
> > +				0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d,
> > 0x65,
> > +				0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20,
> > 0x61,
> > +				0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65,
> > 0x2e,
> > +				0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20,
> > 0x69,
> > +				0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70,
> > 0x72,
> > +				0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f,
> > 0x20,
> > +				0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74,
> > 0x65,
> > +				0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72,
> > 0x61,
> > +				0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20,
> > 0x72,
> > +				0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
> > 0x65,
> > +				0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69,
> > 0x61,
> > +				0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
> > +				0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68,
> > 0x65,
> > +				0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72,
> > 0x20,
> > +				0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73,
> > 0x20,
> > +				0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72,
> > 0x6b,
> > +				0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f,
> > 0x67,
> > +				0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2,
> > 0x80,
> > +				0x9d
> > +		},
> > +		.len = 265
> > +	},
> > +	.ciphertext = {
> > +		.data = {
> > +				0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a,
> > 0xf4,
> > +				0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43,
> > 0xbd,
> > +				0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
> > +				0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c,
> > 0xb2,
> > +				0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43,
> > 0xee,
> > +				0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26,
> > 0xb0,
> > +				0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01,
> > 0x00,
> > +				0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
> > +				0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97,
> > 0xce,
> > +				0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94,
> > 0x81,
> > +				0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33,
> > 0xbd,
> > +				0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
> > +				0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
> > +				0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06,
> > 0x38,
> > +				0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02,
> > 0xb0,
> > +				0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67,
> > 0xe4,
> > +				0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d,
> > 0x46,
> > +				0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4,
> > 0xe9,
> > +				0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22,
> > 0x5e,
> > +				0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52,
> > 0x3e,
> > +				0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
> > +				0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54,
> > 0x6a,
> > +				0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e,
> > 0xea,
> > +				0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27,
> > 0x1a,
> > +				0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76,
> > 0x99,
> > +				0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43,
> > 0x4e,
> > +				0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a,
> > 0x10,
> > +				0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a,
> > 0x10,
> > +				0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10,
> > 0x94,
> > +				0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
> > +				0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04,
> > 0xdf,
> > +				0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c,
> > 0x29,
> > +				0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02,
> > 0x70,
> > +				0x9b
> > +		},
> > +		.len = 265
> > +	},
> > +	.auth_tag = {
> > +		.data = {
> > +				0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
> > 0x22,
> > +				0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, 0x38
> > +		},
> > +		.len = 16
> > +	}
> > +};
> >  #endif /* TEST_CRYPTODEV_AEAD_TEST_VECTORS_H_ */ diff --git
> > a/doc/guides/cryptodevs/chacha20_poly1305.rst
> > b/doc/guides/cryptodevs/chacha20_poly1305.rst
> > new file mode 100644
> > index 0000000000..e5f7368d6d
> > --- /dev/null
> > +++ b/doc/guides/cryptodevs/chacha20_poly1305.rst
> > @@ -0,0 +1,99 @@
> > +..  SPDX-License-Identifier: BSD-3-Clause
> > +    Copyright(c) 2016-2019 Intel Corporation.
> > +
> > +Chacha20-poly1305 Crypto Poll Mode Driver
> > +=========================================
> > +
> > +The Chacha20-poly1305 PMD provides poll mode crypto driver support
> > +for utilizing `Intel IPSec Multi-buffer library
> > <https://urldefense.proofpoint.com/v2/url?u=https-
> > 3A__github.com_01org_intel-2Dipsec-
> >
> 2Dmb&d=DwIDAg&c=nKjWec2b6R0mOyPaz7xtfQ&r=DnL7Si2wl_PRwpZ9TW
> e
> >
> y3eu68gBzn7DkPwuqhd6WNyo&m=cJBMpjPsWqfEmtw2rODDkdR7x3SsmW4
> > -
> >
> 54dwxIGI6os&s=FrXg2cbYTOa7noqANBGmUQgdAVNCP8wTOBeCtjNkUH8&e
> =
> > >`_.
> > +
> > +Features
> > +--------
> > +
> > +Chacha20-poly1305 PMD has support for:
> > +
> > +AEAD algorithms:
> > +
> > +* RTE_CRYPTO_AEAD_CHACHA20_POLY1305
> > +
> > +
> > +Installation
> > +------------
> > +
> > +To build DPDK with the Chacha20-poly1305 PMD the user is required to
> > download
> > +the multi-buffer library from `here
> > <https://urldefense.proofpoint.com/v2/url?u=https-
> > 3A__github.com_01org_intel-2Dipsec-
> >
> 2Dmb&d=DwIDAg&c=nKjWec2b6R0mOyPaz7xtfQ&r=DnL7Si2wl_PRwpZ9TW
> e
> >
> y3eu68gBzn7DkPwuqhd6WNyo&m=cJBMpjPsWqfEmtw2rODDkdR7x3SsmW4
> > -
> >
> 54dwxIGI6os&s=FrXg2cbYTOa7noqANBGmUQgdAVNCP8wTOBeCtjNkUH8&e
> =
> > >`_
> > +and compile it on their user system before building DPDK.
> > +The latest version of the library supported by this PMD is v1.0,
> > +which can be downloaded from
> > `<https://urldefense.proofpoint.com/v2/url?u=https-
> > 3A__github.com_01org_intel-2Dipsec-
> >
> 2Dmb_archive_v1.0.zip&d=DwIDAg&c=nKjWec2b6R0mOyPaz7xtfQ&r=DnL7S
> i
> >
> 2wl_PRwpZ9TWey3eu68gBzn7DkPwuqhd6WNyo&m=cJBMpjPsWqfEmtw2rO
> > DDkdR7x3SsmW4-54dwxIGI6os&s=EZeDlYo123VyTtXBBs_5FB5hrTLQTD5-
> > OfGMKFbqRXs&e= >`_.
> > +
> > +After downloading the library, the user needs to unpack and compile
> > +it on their system before building DPDK:
> > +
> > +.. code-block:: console
> > +
> > +    make
> > +    make install
> > +
> > +The library requires NASM to be built. Depending on the library
> > +version, it
> > might
> > +require a minimum NASM version (e.g. v0.54 requires at least NASM 2.14).
> > +
> > +NASM is packaged for different OS. However, on some OS the version is
> > +too
> > old,
> > +so a manual installation is required. In that case, NASM can be
> > +downloaded
> > from
> > +`NASM website <https://urldefense.proofpoint.com/v2/url?u=https-
> > 3A__www.nasm.us_pub_nasm_releasebuilds_-3FC-3DM-3BO-
> >
> 3DD&d=DwIDAg&c=nKjWec2b6R0mOyPaz7xtfQ&r=DnL7Si2wl_PRwpZ9TWey
> >
> 3eu68gBzn7DkPwuqhd6WNyo&m=cJBMpjPsWqfEmtw2rODDkdR7x3SsmW4-
> >
> 54dwxIGI6os&s=3BB7_2sRCgmORUOvnzI3Lc9AG4lq07D6K1OndgbWQVc&e=
> > >`_.
> > +Once it is downloaded, extract it and follow these steps:
> > +
> > +.. code-block:: console
> > +
> > +    ./configure
> > +    make
> > +    make install
> > +
> > +.. note::
> > +
> > +   Compilation of the Multi-Buffer library is broken when GCC < 5.0, if library <= v0.53.
> > +   If a GCC version lower than 5.0 is used, the workaround proposed in the following
> > +   link should be used:
> > +   `<https://github.com/intel/intel-ipsec-mb/issues/40>`_.
> > +
> > +As a reference, the following table shows a mapping between the past DPDK versions
> > +and the external crypto libraries supported by them:
> > +
> > +.. _table_zuc_versions:
> 
> ZUC ????
> 
> > +
> > +.. table:: DPDK and external crypto library version compatibility
> > +
> > +   =============  ================================
> > +   DPDK version   Crypto library version
> > +   =============  ================================
> > +   21.11+         Multi-buffer library 1.0*
> > +   =============  ================================
> > +
> > +\* Multi-buffer library 1.0 or newer only works with the Meson build system,
> > +not with Make.
> > +
> > +Initialization
> > +--------------
> > +
> > +In order to enable this virtual crypto PMD, the user must:
> > +
> > +* Build the multi-buffer library (explained in the Installation section).
> > +
> > +To use the PMD in an application, the user must:
> > +
> > +* Call rte_vdev_init("crypto_chacha20_poly1305") within the application.
> > +
> > +* Use --vdev="crypto_chacha20_poly1305" in the EAL options, which will call
> > +  rte_vdev_init() internally.
> > +
> > +The following parameters (all optional) can be provided in the previous two calls:
> > +
> > +* socket_id: Specify the socket where the memory for the device is going to be
> > +  allocated (by default, socket_id will be the socket where the core that is
> > +  creating the PMD is running on).
> > +
> > +* max_nb_queue_pairs: Specify the maximum number of queue pairs in the device
> > +  (8 by default).
> > +
> > +* max_nb_sessions: Specify the maximum number of sessions that can be created
> > +  (2048 by default).
> > +
> > +Example:
> > +
> > +.. code-block:: console
> > +
> > +    --vdev="crypto_chacha20_poly1305,socket_id=0,max_nb_sessions=128"
> > diff --git a/doc/guides/cryptodevs/features/chacha20_poly1305.ini
> > b/doc/guides/cryptodevs/features/chacha20_poly1305.ini
> > new file mode 100644
> > index 0000000000..3353e031c9
> > --- /dev/null
> > +++ b/doc/guides/cryptodevs/features/chacha20_poly1305.ini
> > @@ -0,0 +1,35 @@
> > +;
> > +; Supported features of the 'chacha20_poly1305' crypto driver.
> > +;
> > +; Refer to default.ini for the full list of available PMD features.
> > +;
> > +[Features]
> > +Symmetric crypto       = Y
> > +Sym operation chaining = Y
> > +Symmetric sessionless  = Y
> > +Non-Byte aligned data  = Y
> > +In Place SGL           = Y
> > +OOP SGL In LB  Out     = Y
> > +OOP LB  In LB  Out     = Y
> > +CPU crypto             = Y
> > +
> > +;
> > +; Supported crypto algorithms of the 'chacha20_poly1305' crypto driver.
> > +;
> > +[Cipher]
> > +
> > +;
> > +; Supported authentication algorithms of the 'chacha20_poly1305' crypto driver.
> > +;
> > +[Auth]
> > +
> > +;
> > +; Supported AEAD algorithms of the 'chacha20_poly1305' crypto driver.
> > +;
> > +[AEAD]
> > +CHACHA20-POLY1305 = Y
> > +
> > +;
> > +; Supported Asymmetric algorithms of the 'chacha20_poly1305' crypto driver.
> > +;
> > +[Asymmetric]
> > diff --git a/doc/guides/cryptodevs/index.rst
> > b/doc/guides/cryptodevs/index.rst
> > index 0f981c77b5..3dcc2ecd2e 100644
> > --- a/doc/guides/cryptodevs/index.rst
> > +++ b/doc/guides/cryptodevs/index.rst
> > @@ -16,6 +16,7 @@ Crypto Device Drivers
> >      bcmfs
> >      caam_jr
> >      ccp
> > +    chacha20_poly1305
> >      cnxk
> >      dpaa2_sec
> >      dpaa_sec
> > diff --git a/doc/guides/rel_notes/release_21_11.rst
> > b/doc/guides/rel_notes/release_21_11.rst
> > index 696541dab7..3beecb2392 100644
> > --- a/doc/guides/rel_notes/release_21_11.rst
> > +++ b/doc/guides/rel_notes/release_21_11.rst
> > @@ -76,6 +76,11 @@ New Features
> >    * Added support for partially encrypted digest when using auth-cipher
> >      operations.
> >
> > +* **Added Chacha20-poly1305 Crypto PMD.**
> > +
> > +  * Added PMD to support the chacha20-poly1305 algorithm within the IPSec_MB
> > +    PMD framework.
> 
> A sub-bullet may be sufficient in the ipsec-mb update.
> 
> > +  * Test vector added for chacha20-poly1305 SGL test.
> > +
> >  * **Updated Marvell cnxk crypto PMD.**
> >
> >    * Added AES-CBC SHA1-HMAC support in lookaside protocol (IPsec) for CN10K.
> > diff --git a/drivers/crypto/ipsec_mb/meson.build
> > b/drivers/crypto/ipsec_mb/meson.build
> > index a1619c78ac..6e0a5f8004 100644
> > --- a/drivers/crypto/ipsec_mb/meson.build
> > +++ b/drivers/crypto/ipsec_mb/meson.build
> > @@ -25,6 +25,7 @@ sources = files('rte_ipsec_mb_pmd.c',
> >  		'rte_ipsec_mb_pmd_ops.c',
> >  		'pmd_aesni_mb.c',
> >  		'pmd_aesni_gcm.c',
> > +		'pmd_chacha_poly.c',
> >  		'pmd_kasumi.c',
> >  		'pmd_snow3g.c',
> >  		'pmd_zuc.c'
> > diff --git a/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> > b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> > new file mode 100644
> > index 0000000000..814bc0761c
> > --- /dev/null
> > +++ b/drivers/crypto/ipsec_mb/pmd_chacha_poly.c
> > @@ -0,0 +1,482 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2015-2021 Intel Corporation
> > + */
> > +
> > +#include <intel-ipsec-mb.h>
> > +
> > +#if defined(RTE_LIB_SECURITY)
> > +#define AESNI_MB_DOCSIS_SEC_ENABLED 1
> > +#include <rte_ether.h>
> > +#include <rte_security.h>
> > +#include <rte_security_driver.h>
> > +#endif
> > +
> > +#include "rte_ipsec_mb_pmd_private.h"
> > +
> > +#define CHACHA20_POLY1305_IV_LENGTH 12
> > +#define CHACHA20_POLY1305_DIGEST_LENGTH 16
> > +#define CHACHA20_POLY1305_KEY_SIZE 32
> > +
> > +static const
> > +struct rte_cryptodev_capabilities chacha20_poly1305_capabilities[] = {
> > +	{/* CHACHA20-POLY1305 */
> > +	    .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> > +	    {.sym = {
> > +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> > +		    {.aead = {
> > +				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> > +				.block_size = 64,
> > +				.key_size = {
> > +					.min = 32,
> > +					.max = 32,
> > +					.increment = 0},
> > +				.digest_size = {
> > +					.min = 16,
> > +					.max = 16,
> > +					.increment = 0},
> > +				.aad_size = {
> > +					.min = 0,
> > +					.max = 240,
> > +					.increment = 1},
> > +				.iv_size = {
> > +					.min = 12,
> > +					.max = 12,
> > +					.increment = 0},
> > +			    },
> > +			}
> > +		},}
> > +	},
> > +	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> > +};
> > +
> > +uint8_t pmd_driver_id_chacha20_poly1305;
> > +
> > +/** CHACHA20 POLY1305 private session structure */
> > +struct chacha20_poly1305_session {
> > +	struct {
> > +		uint16_t length;
> > +		uint16_t offset;
> > +	} iv;
> > +	/**< IV parameters */
> > +	uint16_t aad_length;
> > +	/**< AAD length */
> > +	uint16_t req_digest_length;
> > +	/**< Requested digest length */
> > +	uint16_t gen_digest_length;
> > +	/**< Generated digest length */
> > +	uint8_t key[CHACHA20_POLY1305_KEY_SIZE];
> > +	enum ipsec_mb_operation op;
> > +} __rte_cache_aligned;
> > +
> > +struct chacha20_poly1305_qp_data {
> > +	struct chacha20_poly1305_context_data chacha20_poly1305_ctx_data;
> > +	uint8_t temp_digest[CHACHA20_POLY1305_DIGEST_LENGTH];
> > +	/**< Buffer used to store the digest generated
> > +	 * by the driver when verifying a digest provided
> > +	 * by the user (using authentication verify operation)
> > +	 */
> > +};
> > +
> > +/** Parse crypto xform chain and set private session parameters. */
> > +static int
> > +chacha20_poly1305_session_configure(IMB_MGR *mb_mgr __rte_unused,
> > +		void *priv_sess, const struct rte_crypto_sym_xform *xform)
> > +{
> > +	struct chacha20_poly1305_session *sess = priv_sess;
> > +	const struct rte_crypto_sym_xform *auth_xform;
> > +	const struct rte_crypto_sym_xform *cipher_xform;
> > +	const struct rte_crypto_sym_xform *aead_xform;
> > +
> > +	uint8_t key_length;
> > +	const uint8_t *key;
> > +	enum ipsec_mb_operation mode;
> > +	int ret = 0;
> > +
> > +	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
> > +				&cipher_xform, &aead_xform);
> > +	if (ret)
> > +		return ret;
> > +
> > +	sess->op = mode;
> > +
> > +	switch (sess->op) {
> > +	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
> > +	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
> > +		if (aead_xform->aead.algo !=
> > +				RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
> > +			IPSEC_MB_LOG(ERR,
> > +			"The only combined operation supported is
> > CHACHA20 POLY1305");
> > +			ret = -ENOTSUP;
> > +			goto error_exit;
> > +		}
> > +		/* Set IV parameters */
> > +		sess->iv.offset = aead_xform->aead.iv.offset;
> > +		sess->iv.length = aead_xform->aead.iv.length;
> > +		key_length = aead_xform->aead.key.length;
> > +		key = aead_xform->aead.key.data;
> > +		sess->aad_length = aead_xform->aead.aad_length;
> > +		sess->req_digest_length = aead_xform->aead.digest_length;
> > +		break;
> > +	default:
> > +		IPSEC_MB_LOG(
> > +		    ERR, "Wrong xform type, has to be AEAD or authentication");
> > +		ret = -ENOTSUP;
> > +		goto error_exit;
> > +	}
> > +
> > +	/* IV check */
> > +	if (sess->iv.length != CHACHA20_POLY1305_IV_LENGTH &&
> > +		sess->iv.length != 0) {
> > +		IPSEC_MB_LOG(ERR, "Wrong IV length");
> > +		ret = -EINVAL;
> > +		goto error_exit;
> > +	}
> > +
> > +	/* Check key length */
> > +	if (key_length != CHACHA20_POLY1305_KEY_SIZE) {
> > +		IPSEC_MB_LOG(ERR, "Invalid key length");
> > +		ret = -EINVAL;
> > +		goto error_exit;
> > +	} else {
> > +		memcpy(sess->key, key, CHACHA20_POLY1305_KEY_SIZE);
> > +	}
> > +
> > +	/* Digest check */
> > +	if (sess->req_digest_length != CHACHA20_POLY1305_DIGEST_LENGTH) {
> > +		IPSEC_MB_LOG(ERR, "Invalid digest length");
> > +		ret = -EINVAL;
> > +		goto error_exit;
> > +	} else {
> > +		sess->gen_digest_length = CHACHA20_POLY1305_DIGEST_LENGTH;
> > +	}
> > +
> > +error_exit:
> > +	return ret;
> > +}
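
As a point of reference, the AEAD xform that this function parses would be
built on the application side roughly as below (a sketch, not part of the
patch; key, aad_len and IV_OFFSET are placeholders, and the limits are
taken from the capability table above):

    struct rte_crypto_sym_xform aead_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AEAD,
            .aead = {
                    .op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
                    .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
                    .key = { .data = key, .length = 32 },
                    .iv = { .offset = IV_OFFSET, .length = 12 },
                    .digest_length = 16,
                    .aad_length = aad_len,  /* 0..240 per the capabilities */
            },
    };
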
> > +
> > +/**
> > + * Process a crypto operation, calling
> > + * the direct chacha poly API from the multi buffer library.
> > + *
> > + * @param	qp		queue pair
> > + * @param	op		symmetric crypto operation
> > + * @param	session		chacha poly session
> > + *
> > + * @return
> > + * - Return 0 if success
> > + */
> > +static int
> > +chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
> > +		struct chacha20_poly1305_session *session)
> > +{
> > +	struct chacha20_poly1305_qp_data *qp_data =
> > +					ipsec_mb_get_qp_private_data(qp);
> > +	uint8_t *src, *dst;
> > +	uint8_t *iv_ptr;
> > +	struct rte_crypto_sym_op *sym_op = op->sym;
> > +	struct rte_mbuf *m_src = sym_op->m_src;
> > +	uint32_t offset, data_offset, data_length;
> > +	uint32_t part_len, data_len;
> > +	int total_len;
> > +	uint8_t *tag;
> > +	unsigned int oop = 0;
> > +
> > +	offset = sym_op->aead.data.offset;
> > +	data_offset = offset;
> > +	data_length = sym_op->aead.data.length;
> > +	RTE_ASSERT(m_src != NULL);
> > +
> > +	while (offset >= m_src->data_len && data_length != 0) {
> > +		offset -= m_src->data_len;
> > +		m_src = m_src->next;
> > +
> > +		RTE_ASSERT(m_src != NULL);
> > +	}
> > +
> > +	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
> > +
> > +	data_len = m_src->data_len - offset;
> > +	part_len = (data_len < data_length) ? data_len :
> > +			data_length;
> > +
> > +	/* In-place */
> > +	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
> > +		dst = src;
> > +	/* Out-of-place */
> > +	else {
> > +		oop = 1;
> > +		/* Segmented destination buffer is not supported
> > +		 * if operation is Out-of-place
> > +		 */
> > +		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
> > +		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
> > +					data_offset);
> > +	}
> > +
> > +	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
> > +				session->iv.offset);
> > +
> > +	IMB_CHACHA20_POLY1305_INIT(qp->mb_mgr, session->key,
> > +				&qp_data->chacha20_poly1305_ctx_data,
> > +				iv_ptr,	sym_op->aead.aad.data,
> > +				(uint64_t)session->aad_length);
> > +
> > +	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
> > +		IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
> > +				session->key,
> > +				&qp_data->chacha20_poly1305_ctx_data,
> > +				dst, src, (uint64_t)part_len);
> > +		total_len = data_length - part_len;
> > +
> > +		while (total_len) {
> > +			m_src = m_src->next;
> > +			RTE_ASSERT(m_src != NULL);
> > +
> > +			src = rte_pktmbuf_mtod(m_src, uint8_t *);
> > +			if (oop)
> > +				dst += part_len;
> > +			else
> > +				dst = src;
> > +			part_len = (m_src->data_len < total_len) ?
> > +					m_src->data_len : total_len;
> > +
> > +			if (dst == NULL || src == NULL) {
> > +				IPSEC_MB_LOG(ERR, "Invalid src or dst
> > input");
> > +				return -EINVAL;
> > +			}
> > +			IMB_CHACHA20_POLY1305_ENC_UPDATE(qp->mb_mgr,
> > +					session->key,
> > +					&qp_data->chacha20_poly1305_ctx_data,
> > +					dst, src, (uint64_t)part_len);
> > +			total_len -= part_len;
> > +			if (total_len < 0) {
> > +				IPSEC_MB_LOG(ERR, "Invalid part len");
> > +				return -EINVAL;
> > +			}
> > +		}
> > +
> > +		tag = sym_op->aead.digest.data;
> > +		IMB_CHACHA20_POLY1305_ENC_FINALIZE(qp->mb_mgr,
> > +					&qp_data->chacha20_poly1305_ctx_data,
> > +					tag, session->gen_digest_length);
> > +
> > +	} else {
> > +		IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
> > +					session->key,
> > +					&qp_data->chacha20_poly1305_ctx_data,
> > +					dst, src, (uint64_t)part_len);
> > +
> > +		total_len = data_length - part_len;
> > +
> > +		while (total_len) {
> > +			m_src = m_src->next;
> > +
> > +			RTE_ASSERT(m_src != NULL);
> > +
> > +			src = rte_pktmbuf_mtod(m_src, uint8_t *);
> > +			if (oop)
> > +				dst += part_len;
> > +			else
> > +				dst = src;
> > +			part_len = (m_src->data_len < total_len) ?
> > +					m_src->data_len : total_len;
> > +
> > +			if (dst == NULL || src == NULL) {
> > +				IPSEC_MB_LOG(ERR, "Invalid src or dst
> > input");
> > +				return -EINVAL;
> > +			}
> > +			IMB_CHACHA20_POLY1305_DEC_UPDATE(qp->mb_mgr,
> > +					session->key,
> > +					&qp_data->chacha20_poly1305_ctx_data,
> > +					dst, src, (uint64_t)part_len);
> > +			total_len -= part_len;
> > +			if (total_len < 0) {
> > +				IPSEC_MB_LOG(ERR, "Invalid part len");
> > +				return -EINVAL;
> > +			}
> > +		}
> > +
> > +		tag = qp_data->temp_digest;
> > +		IMB_CHACHA20_POLY1305_DEC_FINALIZE(qp->mb_mgr,
> > +					&qp_data->chacha20_poly1305_ctx_data,
> > +					tag, session->gen_digest_length);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +/**
> > + * Process a completed chacha poly op
> > + *
> > + * @param qp		Queue Pair to process
> > + * @param op		Crypto operation
> > + * @param sess		Crypto session
> > + *
> > + * @return
> > + * - void
> > + */
> > +static void
> > +post_process_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
> > +		struct rte_crypto_op *op,
> > +		struct chacha20_poly1305_session *session)
> > +{
> > +	struct chacha20_poly1305_qp_data *qp_data =
> > +					ipsec_mb_get_qp_private_data(qp);
> > +
> > +	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> > +	/* Verify digest if required */
> > +	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
> > +			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
> > +		uint8_t *digest = op->sym->aead.digest.data;
> > +		uint8_t *tag = qp_data->temp_digest;
> > +
> > +#ifdef RTE_LIBRTE_PMD_CHACHA20_POLY1305_DEBUG
> > +		rte_hexdump(stdout, "auth tag (orig):",
> > +				digest, session->req_digest_length);
> > +		rte_hexdump(stdout, "auth tag (calc):",
> > +				tag, session->req_digest_length);
> > +#endif
> > +		if (memcmp(tag, digest, session->req_digest_length) != 0)
> > +			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> > +
> > +	}
> > +
> > +}
> > +
> > +/**
> > + * Process a completed Chacha20_poly1305 request
> > + *
> > + * @param qp		Queue Pair to process
> > + * @param op		Crypto operation
> > + * @param sess		Crypto session
> > + *
> > + * @return
> > + * - void
> > + */
> > +static void
> > +handle_completed_chacha20_poly1305_crypto_op(struct ipsec_mb_qp *qp,
> > +		struct rte_crypto_op *op,
> > +		struct chacha20_poly1305_session *sess)
> > +{
> > +	post_process_chacha20_poly1305_crypto_op(qp, op, sess);
> > +
> > +	/* Free session if a session-less crypto op */
> > +	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
> > +		memset(sess, 0, sizeof(struct chacha20_poly1305_session));
> > +		memset(op->sym->session, 0,
> > +			rte_cryptodev_sym_get_existing_header_session_size(
> > +				op->sym->session));
> > +		rte_mempool_put(qp->sess_mp_priv, sess);
> > +		rte_mempool_put(qp->sess_mp, op->sym->session);
> > +		op->sym->session = NULL;
> > +	}
> > +}
> > +
> > +static uint16_t
> > +chacha20_poly1305_pmd_dequeue_burst(void *queue_pair,
> > +		struct rte_crypto_op **ops, uint16_t nb_ops)
> > +{
> > +	struct chacha20_poly1305_session *sess;
> > +	struct ipsec_mb_qp *qp = queue_pair;
> > +
> > +	int retval = 0;
> > +	unsigned int i = 0, nb_dequeued;
> > +
> > +	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
> > +			(void **)ops, nb_ops, NULL);
> > +
> > +	for (i = 0; i < nb_dequeued; i++) {
> > +
> > +		sess = ipsec_mb_get_session_private(qp, ops[i]);
> > +		if (unlikely(sess == NULL)) {
> > +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> > +			qp->stats.dequeue_err_count++;
> > +			break;
> > +		}
> > +
> > +		retval = chacha20_poly1305_crypto_op(qp, ops[i], sess);
> > +		if (retval < 0) {
> > +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> > +			qp->stats.dequeue_err_count++;
> > +			break;
> > +		}
> > +
> > +		handle_completed_chacha20_poly1305_crypto_op(qp, ops[i], sess);
> > +	}
> > +
> > +	qp->stats.dequeued_count += i;
> > +
> > +	return i;
> > +}
> > +
> > +struct rte_cryptodev_ops chacha20_poly1305_pmd_ops = {
> > +	.dev_configure = ipsec_mb_pmd_config,
> > +	.dev_start = ipsec_mb_pmd_start,
> > +	.dev_stop = ipsec_mb_pmd_stop,
> > +	.dev_close = ipsec_mb_pmd_close,
> > +
> > +	.stats_get = ipsec_mb_pmd_stats_get,
> > +	.stats_reset = ipsec_mb_pmd_stats_reset,
> > +
> > +	.dev_infos_get = ipsec_mb_pmd_info_get,
> > +
> > +	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
> > +	.queue_pair_release = ipsec_mb_pmd_qp_release,
> > +
> > +	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
> > +	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
> > +	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
> > +};
> > +
> > +struct rte_cryptodev_ops *rte_chacha20_poly1305_pmd_ops =
> > +				&chacha20_poly1305_pmd_ops;
> > +
> > +static int
> > +cryptodev_chacha20_poly1305_probe(struct rte_vdev_device *vdev)
> > +{
> > +	return cryptodev_ipsec_mb_create(vdev,
> > +			IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305);
> > +}
> > +
> > +static struct rte_vdev_driver cryptodev_chacha20_poly1305_pmd_drv = {
> > +	.probe = cryptodev_chacha20_poly1305_probe,
> > +	.remove = cryptodev_ipsec_mb_remove
> > +};
> > +
> > +static struct cryptodev_driver chacha20_poly1305_crypto_drv;
> > +
> > +RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
> > +			cryptodev_chacha20_poly1305_pmd_drv);
> > +RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
> > +			cryptodev_chacha20_poly1305_pmd);
> > +RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CHACHA20_POLY1305_PMD,
> > +			       "max_nb_queue_pairs=<int> socket_id=<int>");
> > +RTE_PMD_REGISTER_CRYPTO_DRIVER(chacha20_poly1305_crypto_drv,
> > +			cryptodev_chacha20_poly1305_pmd_drv.driver,
> > +			pmd_driver_id_chacha20_poly1305);
> > +
> > +/* Constructor function to register chacha20_poly1305 PMD */
> > +RTE_INIT(ipsec_mb_register_chacha20_poly1305)
> > +{
> > +	struct ipsec_mb_pmd_data *chacha_poly_data
> > +		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305];
> > +
> > +	chacha_poly_data->caps = chacha20_poly1305_capabilities;
> > +	chacha_poly_data->dequeue_burst =
> > +			chacha20_poly1305_pmd_dequeue_burst;
> > +	chacha_poly_data->feature_flags =
> > +		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> > +		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
> > +		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
> > +		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
> > +		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
> > +		RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
> > +		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
> > +	chacha_poly_data->internals_priv_size = 0;
> > +	chacha_poly_data->ops = &chacha20_poly1305_pmd_ops;
> > +	chacha_poly_data->qp_priv_size =
> > +			sizeof(struct chacha20_poly1305_qp_data);
> > +	chacha_poly_data->session_configure =
> > +			chacha20_poly1305_session_configure;
> > +	chacha_poly_data->session_priv_size =
> > +			sizeof(struct chacha20_poly1305_session);
> > +}
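
Worth noting: as the dequeue path above shows, the PMD stages ops on the
queue pair's ingress ring at enqueue time and performs the actual crypto
work inside dequeue_burst. A sketch of the application-side burst calls
(dev_id, qp_id, ops and nb_ops are placeholders; standard cryptodev API):

    /* Enqueue only stages the ops on the PMD's ingress ring. */
    uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

    /* Dequeue runs chacha20_poly1305_crypto_op() per op and sets
     * op->status, e.g. RTE_CRYPTO_OP_STATUS_AUTH_FAILED on tag mismatch.
     */
    uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, sent);
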
> > diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> > b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> > index b6a98a85ba..db36584f3a 100644
> > --- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> > +++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
> > @@ -49,6 +49,9 @@ extern RTE_DEFINE_PER_LCORE(IMB_MGR *, mb_mgr);
> >  #define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
> >  /**< IPSEC Multi buffer PMD zuc device name */
> >
> > +#define CRYPTODEV_NAME_CHACHA20_POLY1305_PMD crypto_chacha20_poly1305
> > +/**< IPSEC Multi buffer PMD chacha20_poly1305 device name */
> > +
> >  /** PMD LOGTYPE DRIVER, common to all PMDs */
> >  extern int ipsec_mb_logtype_driver;
> >  #define IPSEC_MB_LOG(level, fmt, ...)                                         \
> > @@ -62,6 +65,7 @@ enum ipsec_mb_pmd_types {
> >  	IPSEC_MB_PMD_TYPE_KASUMI,
> >  	IPSEC_MB_PMD_TYPE_SNOW3G,
> >  	IPSEC_MB_PMD_TYPE_ZUC,
> > +	IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305,
> >  	IPSEC_MB_N_PMD_TYPES
> >  };
> >
> > @@ -85,6 +89,7 @@ extern uint8_t pmd_driver_id_aesni_gcm;
> >  extern uint8_t pmd_driver_id_kasumi;
> >  extern uint8_t pmd_driver_id_snow3g;
> >  extern uint8_t pmd_driver_id_zuc;
> > +extern uint8_t pmd_driver_id_chacha20_poly1305;
> >
> >  /** Helper function. Gets driver ID based on PMD type */
> >  static __rte_always_inline uint8_t
> > @@ -101,6 +106,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
> >  		return pmd_driver_id_snow3g;
> >  	case IPSEC_MB_PMD_TYPE_ZUC:
> >  		return pmd_driver_id_zuc;
> > +	case IPSEC_MB_PMD_TYPE_CHACHA20_POLY1305:
> > +		return pmd_driver_id_chacha20_poly1305;
> >  	default:
> >  		break;
> >  	}
> > --
> > 2.25.1


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [EXT] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework
  2021-10-07 15:07             ` Ji, Kai
@ 2021-10-07 15:22               ` Akhil Goyal
  0 siblings, 0 replies; 30+ messages in thread
From: Akhil Goyal @ 2021-10-07 15:22 UTC (permalink / raw)
  To: Ji, Kai, Power, Ciara, dev
  Cc: Zhang, Roy Fan, Bronowski, PiotrX, Doherty, Declan,
	De Lara Guarch, Pablo

> 
> The additional chacha20-poly test was added as a more robust test case to
> cover the OOP SGL test scenario.
> 
Ok. Split the test app changes in a separate patch.
Please avoid top-posting comments, and delete irrelevant content while replying.



^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [dpdk-dev] [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb framework
  2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
@ 2021-10-11 11:09           ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 30+ messages in thread
From: De Lara Guarch, Pablo @ 2021-10-11 11:09 UTC (permalink / raw)
  To: Power, Ciara, dev
  Cc: Zhang, Roy Fan, Bronowski, PiotrX, gakhil, Thomas Monjalon, Ray Kinsella



> -----Original Message-----
> From: Power, Ciara <ciara.power@intel.com>
> Sent: Wednesday, September 29, 2021 5:30 PM
> To: dev@dpdk.org
> Cc: Zhang, Roy Fan <roy.fan.zhang@intel.com>; Bronowski, PiotrX
> <piotrx.bronowski@intel.com>; gakhil@marvell.com; Power, Ciara
> <ciara.power@intel.com>; Thomas Monjalon <thomas@monjalon.net>; De Lara
> Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Ray Kinsella
> <mdr@ashroe.eu>
> Subject: [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb
> framework
> 
> From: Piotr Bronowski <piotrx.bronowski@intel.com>
> 
> This patch removes the crypto/aesni_mb folder and gathers all
> aesni-mb PMD implementation specific details into a single file,
> pmd_aesni_mb.c in crypto/ipsec_mb.
> 
> Now that intel-ipsec-mb v1.0 is the minimum supported version, old
> macros can be replaced with the newer macros supported by this version.
> 
> Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> Signed-off-by: Ciara Power <ciara.power@intel.com>
> 

...

>  deps += ['bus_vdev', 'net', 'security']
> diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c

...

> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> > +				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +				.block_size = 64,
> +				.key_size = {
> +					.min = 32,
> +					.max = 32,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 240,

We support more than 240 bytes of AAD. I think we can support up to 1024 bytes.
Could you change it here?
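Something along these lines (a sketch of the suggested change; the exact
maximum should be confirmed against the intel-ipsec-mb library):

		.aad_size = {
			.min = 0,
			.max = 1024,
			.increment = 1
		},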

> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +			}, }
> +		}, }
> +	},
> +	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()

...

> +static int
> +aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
...

> +		case IMB_KEY_192_BYTES:
> > +			IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
> > +				&sess->cipher.gcm_key);
> > +			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
> > +			break;
> > +		case IMB_KEY_256_BYTES:
> > +			IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
> > +				&sess->cipher.gcm_key);
> > +			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
> +			break;
> +		default:
> +			RTE_LOG(ERR, PMD, "failed to parse test type\n");

Wrong error message. This should say something like "Wrong authentication key length".
The same incorrect message is also used for the cipher key length check, for GCM.
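
For example (a sketch of the suggested fix; only the log messages change,
the surrounding error handling stays as in the patch):

		/* authentication key path */
		default:
			RTE_LOG(ERR, PMD, "Wrong authentication key length\n");

		/* cipher key path (GCM) */
		default:
			RTE_LOG(ERR, PMD, "Wrong cipher key length\n");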

Thanks,
Pablo

^ permalink raw reply	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2021-10-11 11:09 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-05-05 17:15 [dpdk-dev] [dpdk-dev v1] crypto/snow3g: add support for digest appended ops Kai Ji
2021-05-08 12:57 ` [dpdk-dev] [EXT] " Akhil Goyal
2021-05-10  9:50 ` [dpdk-dev] [dpdk-dev v2] " Kai Ji
2021-06-29 20:14   ` [dpdk-dev] [EXT] " Akhil Goyal
2021-06-30 12:08     ` Zhang, Roy Fan
2021-07-06 19:48       ` Akhil Goyal
2021-07-21  9:22   ` [dpdk-dev] [dpdk-dev v3] " Kai Ji
2021-07-27  8:38     ` [dpdk-dev] [dpdk-dev v4] " Fan Zhang
2021-09-29 16:30       ` [dpdk-dev] [PATCH v3 00/10] drivers/crypto: introduce ipsec_mb framework Ciara Power
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 01/10] drivers/crypto: introduce IPsec-mb framework Ciara Power
2021-09-30  9:51           ` Kinsella, Ray
2021-10-06 13:50           ` [dpdk-dev] [EXT] " Akhil Goyal
2021-10-06 15:45             ` Power, Ciara
2021-10-06 17:34               ` Akhil Goyal
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 02/10] crypto/ipsec_mb: add multiprocess support Ciara Power
2021-10-06 14:01           ` [dpdk-dev] [EXT] " Akhil Goyal
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 03/10] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
2021-10-11 11:09           ` De Lara Guarch, Pablo
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 04/10] drivers/crypto: move aesni-gcm " Ciara Power
2021-10-06 14:31           ` [dpdk-dev] [EXT] " Akhil Goyal
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 05/10] drivers/crypto: move kasumi " Ciara Power
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 06/10] drivers/crypto: move snow3g " Ciara Power
2021-10-04 12:45           ` De Lara Guarch, Pablo
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 07/10] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 08/10] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 09/10] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-10-06 14:48           ` [dpdk-dev] [EXT] " Akhil Goyal
2021-10-07 15:07             ` Ji, Kai
2021-10-07 15:22               ` Akhil Goyal
2021-09-29 16:30         ` [dpdk-dev] [PATCH v3 10/10] doc/rel_notes: added note for SW Crypto PMD change Ciara Power
