From: Ciara Power <ciara.power@intel.com>
To: dev@dpdk.org
Cc: roy.fan.zhang@intel.com, piotrx.bronowski@intel.com,
	Declan Doherty <declan.doherty@intel.com>,
	Pablo de Lara <pablo.de.lara.guarch@intel.com>,
	Ray Kinsella <mdr@ashroe.eu>
Subject: [dpdk-dev] [PATCH v1 3/8] drivers/crypto: move aesni-gcm PMD to IPsec-mb framework
Date: Thu, 26 Aug 2021 15:16:14 +0000
Message-ID: <20210826151619.577237-4-ciara.power@intel.com>
In-Reply-To: <20210826151619.577237-1-ciara.power@intel.com>

From: pbronowx <piotrx.bronowski@intel.com>

This patch removes the crypto/aesni_gcm folder and gathers all
aesni-gcm PMD implementation-specific details into a single file,
pmd_aesni_gcm.c, in the crypto/ipsec_mb folder.
A redundant check for the IV length is removed.

Signed-off-by: pbronowx <piotrx.bronowski@intel.com>
---
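Note: after this move the PMD is still expected to register under the
same vdev name and accept the same parameters as before, so existing
applications should not need changes. A minimal sketch of creating the
device at runtime (assuming EAL is already initialised; error handling
trimmed):

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>

/* Create the crypto_aesni_gcm vdev and look up its device id. The
 * parameters match those advertised via RTE_PMD_REGISTER_PARAM_STRING
 * for this PMD. */
static int
create_aesni_gcm_dev(void)
{
	if (rte_vdev_init("crypto_aesni_gcm",
			"max_nb_queue_pairs=2,socket_id=0") < 0)
		return -1;
	return rte_cryptodev_get_dev_id("crypto_aesni_gcm");
}
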
 doc/guides/cryptodevs/aesni_gcm.rst           |   4 +-
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h      | 104 --
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c      | 984 ------------------
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  | 333 ------
 .../crypto/aesni_gcm/aesni_gcm_pmd_private.h  | 123 ---
 drivers/crypto/aesni_gcm/meson.build          |  24 -
 drivers/crypto/aesni_gcm/version.map          |   3 -
 drivers/crypto/ipsec_mb/meson.build           |   3 +-
 drivers/crypto/ipsec_mb/pmd_aesni_gcm.c       | 956 +++++++++++++++++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       |   7 +
 drivers/crypto/meson.build                    |   1 -
 11 files changed, 968 insertions(+), 1574 deletions(-)
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_ops.h
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
 delete mode 100644 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
 delete mode 100644 drivers/crypto/aesni_gcm/meson.build
 delete mode 100644 drivers/crypto/aesni_gcm/version.map
 create mode 100644 drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
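
The capability table in pmd_aesni_gcm.c below pins the IV size to
12 bytes (AESNI_GCM_IV_LENGTH). For reference, a sketch of an AEAD
transform that those capabilities accept, using the public rte_crypto
API; the helper name and the IV offset handling are illustrative only:

#include <string.h>
#include <rte_crypto_sym.h>

/* Fill an AES-GCM AEAD transform: 128-bit key, fixed 12-byte IV taken
 * from the op private data at iv_offset, full 16-byte tag, no AAD. */
static void
fill_gcm_xform(struct rte_crypto_sym_xform *xform,
		const uint8_t *key, uint16_t iv_offset)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = 16;	/* 24 and 32 also supported */
	xform->aead.iv.offset = iv_offset;
	xform->aead.iv.length = 12;	/* must match AESNI_GCM_IV_LENGTH */
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 0;
}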

diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 11b23958d5..bbe9d99840 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -83,7 +83,9 @@ and the external crypto libraries supported by them:
    17.02 - 17.05  ISA-L Crypto v2.18
    17.08 - 18.02  Multi-buffer library 0.46 - 0.48
    18.05 - 19.02  Multi-buffer library 0.49 - 0.52
-   19.05+         Multi-buffer library 0.52 - 1.0*
+   19.05 - 20.08  Multi-buffer library 0.52 - 0.55
+   20.11 - 21.08  Multi-buffer library 0.53 - 1.0*
+   21.11+         Multi-buffer library 1.0*
    =============  ================================
 
 \* Multi-buffer library 1.0 or newer only works for Meson but not Make build system.
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
deleted file mode 100644
index 8a0d074b6e..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _AESNI_GCM_OPS_H_
-#define _AESNI_GCM_OPS_H_
-
-#ifndef LINUX
-#define LINUX
-#endif
-
-#include <intel-ipsec-mb.h>
-
-/** Supported vector modes */
-enum aesni_gcm_vector_mode {
-	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
-	RTE_AESNI_GCM_SSE,
-	RTE_AESNI_GCM_AVX,
-	RTE_AESNI_GCM_AVX2,
-	RTE_AESNI_GCM_AVX512,
-	RTE_AESNI_GCM_VECTOR_NUM
-};
-
-enum aesni_gcm_key {
-	GCM_KEY_128 = 0,
-	GCM_KEY_192,
-	GCM_KEY_256,
-	GCM_KEY_NUM
-};
-
-typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data, uint8_t *out,
-		const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
-		const uint8_t *aad, uint64_t aad_len,
-		uint8_t *auth_tag, uint64_t auth_tag_len);
-
-typedef void (*aesni_gcm_pre_t)(const void *key, struct gcm_key_data *gcm_data);
-
-typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		const uint8_t *iv,
-		uint8_t const *aad,
-		uint64_t aad_len);
-
-typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		uint8_t *out,
-		const uint8_t *in,
-		uint64_t plaintext_len);
-
-typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		uint8_t *auth_tag,
-		uint64_t auth_tag_len);
-
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		const uint8_t *iv,
-		const uint64_t iv_len);
-
-typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		const uint8_t *in,
-		const uint64_t plaintext_len);
-
-typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
-		struct gcm_context_data *gcm_ctx_data,
-		uint8_t *auth_tag,
-		const uint64_t auth_tag_len);
-#endif
-
-/** GCM library function pointer table */
-struct aesni_gcm_ops {
-	aesni_gcm_t enc;        /**< GCM encode function pointer */
-	aesni_gcm_t dec;        /**< GCM decode function pointer */
-	aesni_gcm_pre_t pre;    /**< GCM pre-compute */
-	aesni_gcm_init_t init;
-	aesni_gcm_update_t update_enc;
-	aesni_gcm_update_t update_dec;
-	aesni_gcm_finalize_t finalize_enc;
-	aesni_gcm_finalize_t finalize_dec;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	aesni_gmac_init_t gmac_init;
-	aesni_gmac_update_t gmac_update;
-	aesni_gmac_finalize_t gmac_finalize;
-#endif
-};
-
-/** GCM per-session operation handlers */
-struct aesni_gcm_session_ops {
-	aesni_gcm_t cipher;
-	aesni_gcm_pre_t pre;
-	aesni_gcm_init_t init;
-	aesni_gcm_update_t update;
-	aesni_gcm_finalize_t finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	aesni_gmac_init_t gmac_init;
-	aesni_gmac_update_t gmac_update;
-	aesni_gmac_finalize_t gmac_finalize;
-#endif
-};
-
-#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
deleted file mode 100644
index 886e2a5aaa..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ /dev/null
@@ -1,984 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
-#include <rte_bus_vdev.h>
-#include <rte_malloc.h>
-#include <rte_cpuflags.h>
-#include <rte_byteorder.h>
-
-#include "aesni_gcm_pmd_private.h"
-
-static uint8_t cryptodev_driver_id;
-
-/* setup session handlers */
-static void
-set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
-{
-	s->ops.pre = gcm_ops->pre;
-	s->ops.init = gcm_ops->init;
-
-	switch (s->op) {
-	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
-		s->ops.cipher = gcm_ops->enc;
-		s->ops.update = gcm_ops->update_enc;
-		s->ops.finalize = gcm_ops->finalize_enc;
-		break;
-	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
-		s->ops.cipher = gcm_ops->dec;
-		s->ops.update = gcm_ops->update_dec;
-		s->ops.finalize = gcm_ops->finalize_dec;
-		break;
-	case AESNI_GMAC_OP_GENERATE:
-	case AESNI_GMAC_OP_VERIFY:
-		s->ops.finalize = gcm_ops->finalize_enc;
-		break;
-	}
-}
-
-/** Parse crypto xform chain and set private session parameters */
-int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
-		struct aesni_gcm_session *sess,
-		const struct rte_crypto_sym_xform *xform)
-{
-	const struct rte_crypto_sym_xform *auth_xform;
-	const struct rte_crypto_sym_xform *aead_xform;
-	uint8_t key_length;
-	const uint8_t *key;
-
-	/* AES-GMAC */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = xform;
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
-			AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
-				"authentication only algorithm");
-			return -ENOTSUP;
-		}
-		/* Set IV parameters */
-		sess->iv.offset = auth_xform->auth.iv.offset;
-		sess->iv.length = auth_xform->auth.iv.length;
-
-		/* Select Crypto operation */
-		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
-			sess->op = AESNI_GMAC_OP_GENERATE;
-		else
-			sess->op = AESNI_GMAC_OP_VERIFY;
-
-		key_length = auth_xform->auth.key.length;
-		key = auth_xform->auth.key.data;
-		sess->req_digest_length = auth_xform->auth.digest_length;
-
-	/* AES-GCM */
-	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		aead_xform = xform;
-
-		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
-			AESNI_GCM_LOG(ERR, "The only combined operation "
-						"supported is AES GCM");
-			return -ENOTSUP;
-		}
-
-		/* Set IV parameters */
-		sess->iv.offset = aead_xform->aead.iv.offset;
-		sess->iv.length = aead_xform->aead.iv.length;
-
-		/* Select Crypto operation */
-		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
-			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-		/* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
-		else
-			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
-		key_length = aead_xform->aead.key.length;
-		key = aead_xform->aead.key.data;
-
-		sess->aad_length = aead_xform->aead.aad_length;
-		sess->req_digest_length = aead_xform->aead.digest_length;
-	} else {
-		AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
-		return -ENOTSUP;
-	}
-
-	/* IV check */
-	if (sess->iv.length != 16 && sess->iv.length != 12 &&
-			sess->iv.length != 0) {
-		AESNI_GCM_LOG(ERR, "Wrong IV length");
-		return -EINVAL;
-	}
-
-	/* Check key length and calculate GCM pre-compute. */
-	switch (key_length) {
-	case 16:
-		sess->key = GCM_KEY_128;
-		break;
-	case 24:
-		sess->key = GCM_KEY_192;
-		break;
-	case 32:
-		sess->key = GCM_KEY_256;
-		break;
-	default:
-		AESNI_GCM_LOG(ERR, "Invalid key length");
-		return -EINVAL;
-	}
-
-	/* setup session handlers */
-	set_func_ops(sess, &gcm_ops[sess->key]);
-
-	/* pre-generate key */
-	gcm_ops[sess->key].pre(key, &sess->gdata_key);
-
-	/* Digest check */
-	if (sess->req_digest_length > 16) {
-		AESNI_GCM_LOG(ERR, "Invalid digest length");
-		return -EINVAL;
-	}
-	/*
-	 * Multi-buffer lib supports digest sizes from 4 to 16 bytes
-	 * in version 0.50 and sizes of 8, 12 and 16 bytes,
-	 * in version 0.49.
-	 * If size requested is different, generate the full digest
-	 * (16 bytes) in a temporary location and then memcpy
-	 * the requested number of bytes.
-	 */
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-	if (sess->req_digest_length < 4)
-#else
-	if (sess->req_digest_length != 16 &&
-			sess->req_digest_length != 12 &&
-			sess->req_digest_length != 8)
-#endif
-		sess->gen_digest_length = 16;
-	else
-		sess->gen_digest_length = sess->req_digest_length;
-
-	return 0;
-}
-
-/** Get gcm session */
-static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
-{
-	struct aesni_gcm_session *sess = NULL;
-	struct rte_crypto_sym_op *sym_op = op->sym;
-
-	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-		if (likely(sym_op->session != NULL))
-			sess = (struct aesni_gcm_session *)
-					get_sym_session_private_data(
-					sym_op->session,
-					cryptodev_driver_id);
-	} else  {
-		void *_sess;
-		void *_sess_private_data = NULL;
-
-		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
-			return NULL;
-
-		if (rte_mempool_get(qp->sess_mp_priv,
-				(void **)&_sess_private_data))
-			return NULL;
-
-		sess = (struct aesni_gcm_session *)_sess_private_data;
-
-		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
-				sess, sym_op->xform) != 0)) {
-			rte_mempool_put(qp->sess_mp, _sess);
-			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
-			sess = NULL;
-		}
-		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_sym_session_private_data(sym_op->session,
-				cryptodev_driver_id, _sess_private_data);
-	}
-
-	if (unlikely(sess == NULL))
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-
-	return sess;
-}
-
-/**
- * Process a crypto operation, calling
- * the GCM API from the multi buffer library.
- *
- * @param	qp		queue pair
- * @param	op		symmetric crypto operation
- * @param	session		GCM session
- *
- * @return
- *
- */
-static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
-		struct aesni_gcm_session *session)
-{
-	uint8_t *src, *dst;
-	uint8_t *iv_ptr;
-	struct rte_crypto_sym_op *sym_op = op->sym;
-	struct rte_mbuf *m_src = sym_op->m_src;
-	uint32_t offset, data_offset, data_length;
-	uint32_t part_len, total_len, data_len;
-	uint8_t *tag;
-	unsigned int oop = 0;
-
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
-			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		offset = sym_op->aead.data.offset;
-		data_offset = offset;
-		data_length = sym_op->aead.data.length;
-	} else {
-		offset = sym_op->auth.data.offset;
-		data_offset = offset;
-		data_length = sym_op->auth.data.length;
-	}
-
-	RTE_ASSERT(m_src != NULL);
-
-	while (offset >= m_src->data_len && data_length != 0) {
-		offset -= m_src->data_len;
-		m_src = m_src->next;
-
-		RTE_ASSERT(m_src != NULL);
-	}
-
-	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
-
-	data_len = m_src->data_len - offset;
-	part_len = (data_len < data_length) ? data_len :
-			data_length;
-
-	RTE_ASSERT((sym_op->m_dst == NULL) ||
-			((sym_op->m_dst != NULL) &&
-					rte_pktmbuf_is_contiguous(sym_op->m_dst)));
-
-	/* In-place */
-	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
-		dst = src;
-	/* Out-of-place */
-	else {
-		oop = 1;
-		/* Segmented destination buffer is not supported if operation is
-		 * Out-of-place */
-		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
-		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
-					data_offset);
-	}
-
-	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
-				session->iv.offset);
-
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				sym_op->aead.aad.data,
-				(uint64_t)session->aad_length);
-
-		qp->ops[session->key].update_enc(&session->gdata_key,
-				&qp->gdata_ctx, dst, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			if (oop)
-				dst += part_len;
-			else
-				dst = src;
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].update_enc(&session->gdata_key,
-					&qp->gdata_ctx, dst, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		if (session->req_digest_length != session->gen_digest_length)
-			tag = qp->temp_digest;
-		else
-			tag = sym_op->aead.digest.data;
-
-		qp->ops[session->key].finalize_enc(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				sym_op->aead.aad.data,
-				(uint64_t)session->aad_length);
-
-		qp->ops[session->key].update_dec(&session->gdata_key,
-				&qp->gdata_ctx, dst, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			if (oop)
-				dst += part_len;
-			else
-				dst = src;
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].update_dec(&session->gdata_key,
-					&qp->gdata_ctx,
-					dst, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		tag = qp->temp_digest;
-		qp->ops[session->key].finalize_dec(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
-		qp->ops[session->key].gmac_init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				session->iv.length);
-
-		qp->ops[session->key].gmac_update(&session->gdata_key,
-				&qp->gdata_ctx, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].gmac_update(&session->gdata_key,
-					&qp->gdata_ctx, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		if (session->req_digest_length != session->gen_digest_length)
-			tag = qp->temp_digest;
-		else
-			tag = sym_op->auth.digest.data;
-
-		qp->ops[session->key].gmac_finalize(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	} else { /* AESNI_GMAC_OP_VERIFY */
-		qp->ops[session->key].gmac_init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				session->iv.length);
-
-		qp->ops[session->key].gmac_update(&session->gdata_key,
-				&qp->gdata_ctx, src,
-				(uint64_t)part_len);
-		total_len = data_length - part_len;
-
-		while (total_len) {
-			m_src = m_src->next;
-
-			RTE_ASSERT(m_src != NULL);
-
-			src = rte_pktmbuf_mtod(m_src, uint8_t *);
-			part_len = (m_src->data_len < total_len) ?
-					m_src->data_len : total_len;
-
-			qp->ops[session->key].gmac_update(&session->gdata_key,
-					&qp->gdata_ctx, src,
-					(uint64_t)part_len);
-			total_len -= part_len;
-		}
-
-		tag = qp->temp_digest;
-
-		qp->ops[session->key].gmac_finalize(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	}
-#else
-	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				src,
-				(uint64_t)data_length);
-		if (session->req_digest_length != session->gen_digest_length)
-			tag = qp->temp_digest;
-		else
-			tag = sym_op->auth.digest.data;
-		qp->ops[session->key].finalize_enc(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	} else { /* AESNI_GMAC_OP_VERIFY */
-		qp->ops[session->key].init(&session->gdata_key,
-				&qp->gdata_ctx,
-				iv_ptr,
-				src,
-				(uint64_t)data_length);
-
-		/*
-		 * Generate always 16 bytes and later compare only
-		 * the bytes passed.
-		 */
-		tag = qp->temp_digest;
-		qp->ops[session->key].finalize_enc(&session->gdata_key,
-				&qp->gdata_ctx,
-				tag,
-				session->gen_digest_length);
-	}
-#endif
-
-	return 0;
-}
-
-static inline void
-aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
-{
-	uint32_t i;
-
-	for (i = 0; i < vec->num; i++)
-		vec->status[i] = errnum;
-}
-
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, uint8_t *digest)
-{
-	if (s->req_digest_length != s->gen_digest_length) {
-		uint8_t tmpdigest[s->gen_digest_length];
-
-		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
-			s->gen_digest_length);
-		memcpy(digest, tmpdigest, s->req_digest_length);
-	} else {
-		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
-			s->gen_digest_length);
-	}
-
-	return 0;
-}
-
-static inline int32_t
-aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, uint8_t *digest)
-{
-	uint8_t tmpdigest[s->gen_digest_length];
-
-	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
-		s->gen_digest_length);
-
-	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
-		EBADMSG;
-}
-
-static inline void
-aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
-	void *iv, void *aad)
-{
-	uint32_t i;
-
-	/* init crypto operation */
-	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
-		(uint64_t)s->aad_length);
-
-	/* update with sgl data */
-	for (i = 0; i < sgl->num; i++) {
-		struct rte_crypto_vec *vec = &sgl->vec[i];
-
-		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
-			vec->len);
-	}
-}
-
-static inline void
-aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
-	void *iv)
-{
-	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
-		sgl->vec[0].len);
-}
-
-static inline uint32_t
-aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
-			vec->aad[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-static inline uint32_t
-aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va,
-			vec->aad[i].va);
-		 vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
-			vec->status[i] = ENOTSUP;
-			continue;
-		}
-
-		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-static inline uint32_t
-aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
-	struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
-{
-	uint32_t i, processed;
-
-	processed = 0;
-	for (i = 0; i < vec->num; ++i) {
-		if (vec->sgl[i].num != 1) {
-			vec->status[i] = ENOTSUP;
-			continue;
-		}
-
-		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
-			&vec->sgl[i], vec->iv[i].va);
-		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
-			gdata_ctx, vec->digest[i].va);
-		processed += (vec->status[i] == 0);
-	}
-
-	return processed;
-}
-
-/** Process CPU crypto bulk operations */
-uint32_t
-aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
-	struct rte_cryptodev_sym_session *sess,
-	__rte_unused union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_sym_vec *vec)
-{
-	void *sess_priv;
-	struct aesni_gcm_session *s;
-	struct gcm_context_data gdata_ctx;
-
-	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
-	if (unlikely(sess_priv == NULL)) {
-		aesni_gcm_fill_error_code(vec, EINVAL);
-		return 0;
-	}
-
-	s = sess_priv;
-	switch (s->op) {
-	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
-		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
-	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
-		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
-	case AESNI_GMAC_OP_GENERATE:
-		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
-	case AESNI_GMAC_OP_VERIFY:
-		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
-	default:
-		aesni_gcm_fill_error_code(vec, EINVAL);
-		return 0;
-	}
-}
-
-/**
- * Process a completed job and return rte_mbuf which job processed
- *
- * @param job	JOB_AES_HMAC job to process
- *
- * @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
- * - Returns NULL on invalid job
- */
-static void
-post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
-		struct rte_crypto_op *op,
-		struct aesni_gcm_session *session)
-{
-	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
-	/* Verify digest if required */
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
-			session->op == AESNI_GMAC_OP_VERIFY) {
-		uint8_t *digest;
-
-		uint8_t *tag = qp->temp_digest;
-
-		if (session->op == AESNI_GMAC_OP_VERIFY)
-			digest = op->sym->auth.digest.data;
-		else
-			digest = op->sym->aead.digest.data;
-
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
-		rte_hexdump(stdout, "auth tag (orig):",
-				digest, session->req_digest_length);
-		rte_hexdump(stdout, "auth tag (calc):",
-				tag, session->req_digest_length);
-#endif
-
-		if (memcmp(tag, digest,	session->req_digest_length) != 0)
-			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-	} else {
-		if (session->req_digest_length != session->gen_digest_length) {
-			if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
-				memcpy(op->sym->aead.digest.data, qp->temp_digest,
-						session->req_digest_length);
-			else
-				memcpy(op->sym->auth.digest.data, qp->temp_digest,
-						session->req_digest_length);
-		}
-	}
-}
-
-/**
- * Process a completed GCM request
- *
- * @param qp		Queue Pair to process
- * @param op		Crypto operation
- * @param job		JOB_AES_HMAC job
- *
- * @return
- * - Number of processed jobs
- */
-static void
-handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
-		struct rte_crypto_op *op,
-		struct aesni_gcm_session *sess)
-{
-	post_process_gcm_crypto_op(qp, op, sess);
-
-	/* Free session if a session-less crypto op */
-	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-		memset(sess, 0, sizeof(struct aesni_gcm_session));
-		memset(op->sym->session, 0,
-			rte_cryptodev_sym_get_existing_header_session_size(
-				op->sym->session));
-		rte_mempool_put(qp->sess_mp_priv, sess);
-		rte_mempool_put(qp->sess_mp, op->sym->session);
-		op->sym->session = NULL;
-	}
-}
-
-static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct aesni_gcm_session *sess;
-	struct aesni_gcm_qp *qp = queue_pair;
-
-	int retval = 0;
-	unsigned int i, nb_dequeued;
-
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
-			(void **)ops, nb_ops, NULL);
-
-	for (i = 0; i < nb_dequeued; i++) {
-
-		sess = aesni_gcm_get_session(qp, ops[i]);
-		if (unlikely(sess == NULL)) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.dequeue_err_count++;
-			break;
-		}
-
-		retval = process_gcm_crypto_op(qp, ops[i], sess);
-		if (retval < 0) {
-			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.dequeue_err_count++;
-			break;
-		}
-
-		handle_completed_gcm_crypto_op(qp, ops[i], sess);
-	}
-
-	qp->qp_stats.dequeued_count += i;
-
-	return i;
-}
-
-static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
-		struct rte_crypto_op **ops, uint16_t nb_ops)
-{
-	struct aesni_gcm_qp *qp = queue_pair;
-
-	unsigned int nb_enqueued;
-
-	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
-			(void **)ops, nb_ops, NULL);
-	qp->qp_stats.enqueued_count += nb_enqueued;
-
-	return nb_enqueued;
-}
-
-static int aesni_gcm_remove(struct rte_vdev_device *vdev);
-
-static int
-aesni_gcm_create(const char *name,
-		struct rte_vdev_device *vdev,
-		struct rte_cryptodev_pmd_init_params *init_params)
-{
-	struct rte_cryptodev *dev;
-	struct aesni_gcm_private *internals;
-	enum aesni_gcm_vector_mode vector_mode;
-	MB_MGR *mb_mgr;
-
-	dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
-	if (dev == NULL) {
-		AESNI_GCM_LOG(ERR, "driver %s: create failed",
-			init_params->name);
-		return -ENODEV;
-	}
-
-	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
-		vector_mode = RTE_AESNI_GCM_AVX512;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
-		vector_mode = RTE_AESNI_GCM_AVX2;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
-		vector_mode = RTE_AESNI_GCM_AVX;
-	else
-		vector_mode = RTE_AESNI_GCM_SSE;
-
-	dev->driver_id = cryptodev_driver_id;
-	dev->dev_ops = rte_aesni_gcm_pmd_ops;
-
-	/* register rx/tx burst functions for data path */
-	dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
-	dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
-
-	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
-			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
-			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
-			RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
-
-	/* Check CPU for support for AES instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
-	else
-		AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");
-
-	mb_mgr = alloc_mb_mgr(0);
-	if (mb_mgr == NULL)
-		return -ENOMEM;
-
-	switch (vector_mode) {
-	case RTE_AESNI_GCM_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		init_mb_mgr_sse(mb_mgr);
-		break;
-	case RTE_AESNI_GCM_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		init_mb_mgr_avx(mb_mgr);
-		break;
-	case RTE_AESNI_GCM_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		init_mb_mgr_avx2(mb_mgr);
-		break;
-	case RTE_AESNI_GCM_AVX512:
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_VAES)) {
-			dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
-			init_mb_mgr_avx512(mb_mgr);
-		} else {
-			dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-			init_mb_mgr_avx2(mb_mgr);
-			vector_mode = RTE_AESNI_GCM_AVX2;
-		}
-		break;
-	default:
-		AESNI_GCM_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
-		goto error_exit;
-	}
-
-	internals = dev->data->dev_private;
-
-	internals->vector_mode = vector_mode;
-	internals->mb_mgr = mb_mgr;
-
-	/* Set arch independent function pointers, based on key size */
-	internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
-	internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
-	internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
-	internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
-	internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
-	internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
-	internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
-	internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
-	internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
-	internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
-#endif
-
-	internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
-	internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
-	internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
-	internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
-	internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
-	internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
-	internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
-	internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
-	internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
-	internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
-#endif
-
-	internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
-	internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
-	internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
-	internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
-	internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
-	internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
-	internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
-	internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
-#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
-	internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
-	internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
-	internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
-#endif
-
-	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
-
-#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
-	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
-			imb_get_version_str());
-#else
-	AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
-#endif
-
-	return 0;
-
-error_exit:
-	if (mb_mgr)
-		free_mb_mgr(mb_mgr);
-
-	rte_cryptodev_pmd_destroy(dev);
-
-	return -1;
-}
-
-static int
-aesni_gcm_probe(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev_pmd_init_params init_params = {
-		"",
-		sizeof(struct aesni_gcm_private),
-		rte_socket_id(),
-		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
-	};
-	const char *name;
-	const char *input_args;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-	input_args = rte_vdev_device_args(vdev);
-	rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
-
-	return aesni_gcm_create(name, vdev, &init_params);
-}
-
-static int
-aesni_gcm_remove(struct rte_vdev_device *vdev)
-{
-	struct rte_cryptodev *cryptodev;
-	struct aesni_gcm_private *internals;
-	const char *name;
-
-	name = rte_vdev_device_name(vdev);
-	if (name == NULL)
-		return -EINVAL;
-
-	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
-	if (cryptodev == NULL)
-		return -ENODEV;
-
-	internals = cryptodev->data->dev_private;
-
-	free_mb_mgr(internals->mb_mgr);
-
-	return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_vdev_driver aesni_gcm_pmd_drv = {
-	.probe = aesni_gcm_probe,
-	.remove = aesni_gcm_remove
-};
-
-static struct cryptodev_driver aesni_gcm_crypto_drv;
-
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
-RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
-	"max_nb_queue_pairs=<int> "
-	"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
-		cryptodev_driver_id);
-RTE_LOG_REGISTER_DEFAULT(aesni_gcm_logtype_driver, NOTICE);
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
deleted file mode 100644
index 18dbc4c18c..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ /dev/null
@@ -1,333 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_cryptodev_pmd.h>
-
-#include "aesni_gcm_pmd_private.h"
-
-static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
-	{	/* AES GMAC (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GCM */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
-			{.aead = {
-				.algo = RTE_CRYPTO_AEAD_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 1,
-					.max = 16,
-					.increment = 1
-				},
-				.aad_size = {
-					.min = 0,
-					.max = 65535,
-					.increment = 1
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-/** Configure device */
-static int
-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused struct rte_cryptodev_config *config)
-{
-	return 0;
-}
-
-/** Start device */
-static int
-aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-/** Stop device */
-static void
-aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
-{
-}
-
-/** Close device */
-static int
-aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
-{
-	return 0;
-}
-
-
-/** Get device statistics */
-static void
-aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_stats *stats)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
-
-		stats->enqueued_count += qp->qp_stats.enqueued_count;
-		stats->dequeued_count += qp->qp_stats.dequeued_count;
-
-		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
-		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
-	}
-}
-
-/** Reset device statistics */
-static void
-aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
-{
-	int qp_id;
-
-	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
-		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
-
-		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-	}
-}
-
-
-/** Get device info */
-static void
-aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
-		struct rte_cryptodev_info *dev_info)
-{
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
-	if (dev_info != NULL) {
-		dev_info->driver_id = dev->driver_id;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = aesni_gcm_pmd_capabilities;
-
-		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-		/* No limit of number of sessions */
-		dev_info->sym.max_nb_sessions = 0;
-	}
-}
-
-/** Release queue pair */
-static int
-aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
-{
-	if (dev->data->queue_pairs[qp_id] != NULL) {
-		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
-
-		if (qp->processed_pkts)
-			rte_ring_free(qp->processed_pkts);
-
-		rte_free(dev->data->queue_pairs[qp_id]);
-		dev->data->queue_pairs[qp_id] = NULL;
-	}
-	return 0;
-}
-
-/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
-static int
-aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
-		struct aesni_gcm_qp *qp)
-{
-	unsigned n = snprintf(qp->name, sizeof(qp->name),
-			"aesni_gcm_pmd_%u_qp_%u",
-			dev->data->dev_id, qp->id);
-
-	if (n >= sizeof(qp->name))
-		return -1;
-
-	return 0;
-}
-
-/** Create a ring to place process packets on */
-static struct rte_ring *
-aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
-		unsigned ring_size, int socket_id)
-{
-	struct rte_ring *r;
-
-	r = rte_ring_lookup(qp->name);
-	if (r) {
-		if (rte_ring_get_size(r) >= ring_size) {
-			AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
-				" packets", qp->name);
-			return r;
-		}
-		AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
-				" packets", qp->name);
-		return NULL;
-	}
-
-	return rte_ring_create(qp->name, ring_size, socket_id,
-			RING_F_SP_ENQ | RING_F_SC_DEQ);
-}
-
-/** Setup a queue pair */
-static int
-aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
-		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
-{
-	struct aesni_gcm_qp *qp = NULL;
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
-	/* Free memory prior to re-allocation if needed. */
-	if (dev->data->queue_pairs[qp_id] != NULL)
-		aesni_gcm_pmd_qp_release(dev, qp_id);
-
-	/* Allocate the queue pair data structure. */
-	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
-					RTE_CACHE_LINE_SIZE, socket_id);
-	if (qp == NULL)
-		return (-ENOMEM);
-
-	qp->id = qp_id;
-	dev->data->queue_pairs[qp_id] = qp;
-
-	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
-		goto qp_setup_cleanup;
-
-	qp->ops = (const struct aesni_gcm_ops *)internals->ops;
-
-	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
-			qp_conf->nb_descriptors, socket_id);
-	if (qp->processed_pkts == NULL)
-		goto qp_setup_cleanup;
-
-	qp->sess_mp = qp_conf->mp_session;
-	qp->sess_mp_priv = qp_conf->mp_session_private;
-
-	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
-
-	return 0;
-
-qp_setup_cleanup:
-	if (qp)
-		rte_free(qp);
-
-	return -1;
-}
-
-/** Returns the size of the aesni gcm session structure */
-static unsigned
-aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
-{
-	return sizeof(struct aesni_gcm_session);
-}
-
-/** Configure a aesni gcm session from a crypto xform chain */
-static int
-aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
-		struct rte_crypto_sym_xform *xform,
-		struct rte_cryptodev_sym_session *sess,
-		struct rte_mempool *mempool)
-{
-	void *sess_private_data;
-	int ret;
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
-	if (unlikely(sess == NULL)) {
-		AESNI_GCM_LOG(ERR, "invalid session struct");
-		return -EINVAL;
-	}
-
-	if (rte_mempool_get(mempool, &sess_private_data)) {
-		AESNI_GCM_LOG(ERR,
-				"Couldn't get object from session mempool");
-		return -ENOMEM;
-	}
-	ret = aesni_gcm_set_session_parameters(internals->ops,
-				sess_private_data, xform);
-	if (ret != 0) {
-		AESNI_GCM_LOG(ERR, "failed configure session parameters");
-
-		/* Return session to mempool */
-		rte_mempool_put(mempool, sess_private_data);
-		return ret;
-	}
-
-	set_sym_session_private_data(sess, dev->driver_id,
-			sess_private_data);
-
-	return 0;
-}
-
-/** Clear the memory of session so it doesn't leave key material behind */
-static void
-aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
-		struct rte_cryptodev_sym_session *sess)
-{
-	uint8_t index = dev->driver_id;
-	void *sess_priv = get_sym_session_private_data(sess, index);
-
-	/* Zero out the whole structure */
-	if (sess_priv) {
-		memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
-		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
-		set_sym_session_private_data(sess, index, NULL);
-		rte_mempool_put(sess_mp, sess_priv);
-	}
-}
-
-struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
-		.dev_configure		= aesni_gcm_pmd_config,
-		.dev_start		= aesni_gcm_pmd_start,
-		.dev_stop		= aesni_gcm_pmd_stop,
-		.dev_close		= aesni_gcm_pmd_close,
-
-		.stats_get		= aesni_gcm_pmd_stats_get,
-		.stats_reset		= aesni_gcm_pmd_stats_reset,
-
-		.dev_infos_get		= aesni_gcm_pmd_info_get,
-
-		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
-		.queue_pair_release	= aesni_gcm_pmd_qp_release,
-
-		.sym_cpu_process        = aesni_gcm_pmd_cpu_crypto_process,
-
-		.sym_session_get_size	= aesni_gcm_pmd_sym_session_get_size,
-		.sym_session_configure	= aesni_gcm_pmd_sym_session_configure,
-		.sym_session_clear	= aesni_gcm_pmd_sym_session_clear
-};
-
-struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
deleted file mode 100644
index 2763d1c492..0000000000
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2020 Intel Corporation
- */
-
-#ifndef _AESNI_GCM_PMD_PRIVATE_H_
-#define _AESNI_GCM_PMD_PRIVATE_H_
-
-#include "aesni_gcm_ops.h"
-
-/*
- * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
- * so if macro is not defined, it means that the version is 0.49.
- */
-#if !defined(IMB_VERSION_NUM)
-#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
-#endif
-
-#define CRYPTODEV_NAME_AESNI_GCM_PMD	crypto_aesni_gcm
-/**< AES-NI GCM PMD device name */
-
-/** AES-NI GCM PMD  LOGTYPE DRIVER */
-extern int aesni_gcm_logtype_driver;
-#define AESNI_GCM_LOG(level, fmt, ...) \
-	rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver,	\
-			"%s() line %u: "fmt "\n", __func__, __LINE__,	\
-					## __VA_ARGS__)
-
-/* Maximum length for digest */
-#define DIGEST_LENGTH_MAX 16
-
-/** private data structure for each virtual AESNI GCM device */
-struct aesni_gcm_private {
-	enum aesni_gcm_vector_mode vector_mode;
-	/**< Vector mode */
-	unsigned max_nb_queue_pairs;
-	/**< Max number of queue pairs supported by device */
-	MB_MGR *mb_mgr;
-	/**< Multi-buffer instance */
-	struct aesni_gcm_ops ops[GCM_KEY_NUM];
-	/**< Function pointer table of the gcm APIs */
-};
-
-struct aesni_gcm_qp {
-	const struct aesni_gcm_ops *ops;
-	/**< Function pointer table of the gcm APIs */
-	struct rte_ring *processed_pkts;
-	/**< Ring for placing process packets */
-	struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
-	/**< GCM parameters */
-	struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
-	/**< Queue pair statistics */
-	struct rte_mempool *sess_mp;
-	/**< Session Mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session Private Data Mempool */
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-	/**< Unique Queue Pair Name */
-	uint8_t temp_digest[DIGEST_LENGTH_MAX];
-	/**< Buffer used to store the digest generated
-	 * by the driver when verifying a digest provided
-	 * by the user (using authentication verify operation)
-	 */
-} __rte_cache_aligned;
-
-
-enum aesni_gcm_operation {
-	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
-	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
-	AESNI_GMAC_OP_GENERATE,
-	AESNI_GMAC_OP_VERIFY
-};
-
-/** AESNI GCM private session structure */
-struct aesni_gcm_session {
-	struct {
-		uint16_t length;
-		uint16_t offset;
-	} iv;
-	/**< IV parameters */
-	uint16_t aad_length;
-	/**< AAD length */
-	uint16_t req_digest_length;
-	/**< Requested digest length */
-	uint16_t gen_digest_length;
-	/**< Generated digest length */
-	enum aesni_gcm_operation op;
-	/**< GCM operation type */
-	enum aesni_gcm_key key;
-	/**< GCM key type */
-	struct gcm_key_data gdata_key;
-	/**< GCM parameters */
-	struct aesni_gcm_session_ops ops;
-	/**< Session handlers */
-};
-
-
-/**
- * Setup GCM session parameters
- * @param	sess	aesni gcm session structure
- * @param	xform	crypto transform chain
- *
- * @return
- * - On success returns 0
- * - On failure returns error code < 0
- */
-extern int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
-		struct aesni_gcm_session *sess,
-		const struct rte_crypto_sym_xform *xform);
-
-/* Device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
-
-/** CPU crypto bulk process handler */
-uint32_t
-aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
-	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_sym_vec *vec);
-
-#endif /* _AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/meson.build b/drivers/crypto/aesni_gcm/meson.build
deleted file mode 100644
index 0fcac2a8eb..0000000000
--- a/drivers/crypto/aesni_gcm/meson.build
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Intel Corporation
-
-IMB_required_ver = '0.52.0'
-lib = cc.find_library('IPSec_MB', required: false)
-if not lib.found()
-    build = false
-    reason = 'missing dependency, "libIPSec_MB"'
-else
-    ext_deps += lib
-
-    # version comes with quotes, so we split based on " and take the middle
-    imb_ver = cc.get_define('IMB_VERSION_STR',
-        prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
-
-    if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
-        reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
-                IMB_required_ver, imb_ver)
-        build = false
-    endif
-endif
-
-sources = files('aesni_gcm_pmd.c', 'aesni_gcm_pmd_ops.c')
-deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_gcm/version.map b/drivers/crypto/aesni_gcm/version.map
deleted file mode 100644
index c2e0723b4c..0000000000
--- a/drivers/crypto/aesni_gcm/version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_22 {
-	local: *;
-};
diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
index bac5d85e26..8550eaee9a 100644
--- a/drivers/crypto/ipsec_mb/meson.build
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -23,6 +23,7 @@ endif
 
 sources = files('rte_ipsec_mb_pmd.c',
 		'rte_ipsec_mb_pmd_ops.c',
-		'pmd_aesni_mb.c'
+		'pmd_aesni_mb.c',
+		'pmd_aesni_gcm.c'
 		)
 deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
new file mode 100644
index 0000000000..72560c06be
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c
@@ -0,0 +1,956 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <intel-ipsec-mb.h>
+
+#if defined(RTE_LIB_SECURITY)
+#define AESNI_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_ether.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#endif
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+#define AESNI_GCM_IV_LENGTH 12
+
+static const struct rte_cryptodev_capabilities aesni_gcm_capabilities[] = {
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = AESNI_GCM_IV_LENGTH,
+					.max = AESNI_GCM_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 1,
+					.max = 16,
+					.increment = 1
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = AESNI_GCM_IV_LENGTH,
+					.max = AESNI_GCM_IV_LENGTH,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t pmd_driver_id_aesni_gcm;
+
+struct aesni_gcm_qp_data {
+	struct gcm_context_data gcm_ctx_data;
+	uint8_t temp_digest[DIGEST_LENGTH_MAX];
+	/**< Buffer used to store the digest generated
+	 * by the driver when verifying a digest provided
+	 * by the user (using authentication verify operation)
+	 */
+};
+
+enum aesni_gcm_key_length {
+	GCM_KEY_128 = 0,
+	GCM_KEY_192,
+	GCM_KEY_256,
+	GCM_NUM_KEY_TYPES
+};
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+			    struct gcm_context_data *gcm_ctx_data,
+			    uint8_t *out, const uint8_t *in,
+			    uint64_t plaintext_len, const uint8_t *iv,
+			    const uint8_t *aad, uint64_t aad_len,
+			    uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_pre_t)(const void *key,
+				struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+				 struct gcm_context_data *gcm_ctx_data,
+				 const uint8_t *iv, uint8_t const *aad,
+				 uint64_t aad_len);
+
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+				   struct gcm_context_data *gcm_ctx_data,
+				   uint8_t *out, const uint8_t *in,
+				   uint64_t plaintext_len);
+
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+				     struct gcm_context_data *gcm_ctx_data,
+				     uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gmac_init_t)(const struct gcm_key_data *gcm_key_data,
+				  struct gcm_context_data *gcm_ctx_data,
+				  const uint8_t *iv, const uint64_t iv_len);
+
+typedef void (*aesni_gmac_update_t)(const struct gcm_key_data *gcm_key_data,
+				    struct gcm_context_data *gcm_ctx_data,
+				    const uint8_t *in,
+				    const uint64_t plaintext_len);
+
+typedef void (*aesni_gmac_finalize_t)(const struct gcm_key_data *gcm_key_data,
+				      struct gcm_context_data *gcm_ctx_data,
+				      uint8_t *auth_tag,
+				      const uint64_t auth_tag_len);
+
+/** GCM per-session operation handlers */
+struct aesni_gcm_session_ops {
+	aesni_gcm_t cipher;
+	aesni_gcm_pre_t pre;
+	aesni_gcm_init_t init;
+	aesni_gcm_update_t update;
+	aesni_gcm_finalize_t finalize;
+	aesni_gmac_init_t gmac_init;
+	aesni_gmac_update_t gmac_update;
+	aesni_gmac_finalize_t gmac_finalize;
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	uint16_t aad_length;
+	/**< AAD length */
+	uint16_t req_digest_length;
+	/**< Requested digest length */
+	uint16_t gen_digest_length;
+	/**< Generated digest length */
+	enum ipsec_mb_operation op;
+	/**< GCM operation type */
+	struct gcm_key_data gdata_key;
+	/**< GCM parameters */
+	struct aesni_gcm_session_ops ops;
+	/**< Session handlers */
+};
+
+static int
+aesni_gcm_session_configure(MB_MGR *mb_mgr, void *session,
+			    const struct rte_crypto_sym_xform *xform)
+{
+	struct aesni_gcm_session *sess = session;
+	const struct rte_crypto_sym_xform *auth_xform;
+	const struct rte_crypto_sym_xform *cipher_xform;
+	const struct rte_crypto_sym_xform *aead_xform;
+
+	uint8_t key_length;
+	const uint8_t *key;
+	enum ipsec_mb_operation mode;
+	int ret = 0;
+
+	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
+				&cipher_xform, &aead_xform);
+	if (ret)
+		return ret;
+
+
+	sess->op = mode;
+
+	switch (sess->op) {
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+		/* AES-GMAC
+		 * auth_xform = xform;
+		 */
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+			IPSEC_MB_LOG(ERR,
+	"Only AES GMAC is supported as an authentication only algorithm");
+			ret = -ENOTSUP;
+			goto error_exit;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = auth_xform->auth.iv.offset;
+		sess->iv.length = auth_xform->auth.iv.length;
+		key_length = auth_xform->auth.key.length;
+		key = auth_xform->auth.key.data;
+		sess->req_digest_length = auth_xform->auth.digest_length;
+		break;
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+		/* AES-GCM
+		 * aead_xform = xform;
+		 */
+
+		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+			IPSEC_MB_LOG(ERR,
+			"The only combined operation supported is AES GCM");
+			ret = -ENOTSUP;
+			goto error_exit;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = aead_xform->aead.iv.offset;
+		sess->iv.length = aead_xform->aead.iv.length;
+		key_length = aead_xform->aead.key.length;
+		key = aead_xform->aead.key.data;
+		sess->aad_length = aead_xform->aead.aad_length;
+		sess->req_digest_length = aead_xform->aead.digest_length;
+		break;
+	default:
+		IPSEC_MB_LOG(
+		    ERR, "Wrong xform type, has to be AEAD or authentication");
+		ret = -ENOTSUP;
+		goto error_exit;
+	}
+
+	/* Check key length, setup session handlers
+	 * and calculate GCM pre-compute.
+	 */
+	switch (key_length) {
+	case 16:
+		sess->ops.pre = mb_mgr->gcm128_pre;
+		sess->ops.init = mb_mgr->gcm128_init;
+
+		if (sess->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
+			sess->ops.cipher = mb_mgr->gcm128_enc;
+			sess->ops.update = mb_mgr->gcm128_enc_update;
+			sess->ops.finalize = mb_mgr->gcm128_enc_finalize;
+		} else {
+			sess->ops.cipher = mb_mgr->gcm128_dec;
+			sess->ops.update = mb_mgr->gcm128_dec_update;
+			sess->ops.finalize = mb_mgr->gcm128_dec_finalize;
+		}
+		sess->ops.gmac_init = mb_mgr->gmac128_init;
+		sess->ops.gmac_update = mb_mgr->gmac128_update;
+		sess->ops.gmac_finalize = mb_mgr->gmac128_finalize;
+		break;
+	case 24:
+		sess->ops.pre = mb_mgr->gcm192_pre;
+		sess->ops.init = mb_mgr->gcm192_init;
+
+		if (sess->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
+			sess->ops.cipher = mb_mgr->gcm192_enc;
+			sess->ops.update = mb_mgr->gcm192_enc_update;
+			sess->ops.finalize = mb_mgr->gcm192_enc_finalize;
+		} else {
+			sess->ops.cipher = mb_mgr->gcm192_dec;
+			sess->ops.update = mb_mgr->gcm192_dec_update;
+			sess->ops.finalize = mb_mgr->gcm192_dec_finalize;
+		}
+		sess->ops.gmac_init = mb_mgr->gmac192_init;
+		sess->ops.gmac_update = mb_mgr->gmac192_update;
+		sess->ops.gmac_finalize = mb_mgr->gmac192_finalize;
+
+		break;
+	case 32:
+		sess->ops.pre = mb_mgr->gcm256_pre;
+		sess->ops.init = mb_mgr->gcm256_init;
+
+		if (sess->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
+			sess->ops.cipher = mb_mgr->gcm256_enc;
+			sess->ops.update = mb_mgr->gcm256_enc_update;
+			sess->ops.finalize = mb_mgr->gcm256_enc_finalize;
+		} else {
+			sess->ops.cipher = mb_mgr->gcm256_dec;
+			sess->ops.update = mb_mgr->gcm256_dec_update;
+			sess->ops.finalize = mb_mgr->gcm256_dec_finalize;
+		}
+		sess->ops.gmac_init = mb_mgr->gmac256_init;
+		sess->ops.gmac_update = mb_mgr->gmac256_update;
+		sess->ops.gmac_finalize = mb_mgr->gmac256_finalize;
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Invalid key length");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+
+	/* pre-generate key */
+	sess->ops.pre(key, &sess->gdata_key);
+
+	/* Digest check */
+	if (sess->req_digest_length > 16) {
+		IPSEC_MB_LOG(ERR, "Invalid digest length");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+	/*
+	 * The IPsec Multi-buffer library supports digest lengths from
+	 * 4 to 16 bytes. For shorter digests, generate the full 16-byte
+	 * digest in a temporary location and then memcpy the requested
+	 * number of bytes.
+	 */
+	if (sess->req_digest_length < 4)
+		sess->gen_digest_length = 16;
+	else
+		sess->gen_digest_length = sess->req_digest_length;
+
+error_exit:
+	return ret;
+}
+
+/**
+ * Post-process a completed GCM crypto operation.
+ *
+ * For decrypt/verify operations the computed digest is compared against
+ * the digest supplied with the operation; for encrypt/generate operations
+ * a truncated digest is copied out if a shorter length was requested.
+ *
+ * @param qp		Queue pair the operation was processed on
+ * @param op		Crypto operation to post-process
+ * @param session	GCM session associated with the operation
+ */
+static void
+post_process_gcm_crypto_op(struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *session)
+{
+	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	/* Verify digest if required */
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT ||
+			session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY) {
+		uint8_t *digest;
+
+		uint8_t *tag = qp_data->temp_digest;
+
+		if (session->op == IPSEC_MB_OP_HASH_VERIFY_ONLY)
+			digest = op->sym->auth.digest.data;
+		else
+			digest = op->sym->aead.digest.data;
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				digest, session->req_digest_length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, session->req_digest_length);
+#endif
+
+		if (memcmp(tag, digest, session->req_digest_length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		if (session->req_digest_length != session->gen_digest_length) {
+			if (session->op ==
+				IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT)
+				memcpy(op->sym->aead.digest.data,
+					qp_data->temp_digest,
+					session->req_digest_length);
+			else
+				memcpy(op->sym->auth.digest.data,
+					qp_data->temp_digest,
+					session->req_digest_length);
+		}
+	}
+}
+
+/**
+ * Handle a completed GCM request: post-process the operation and,
+ * for session-less operations, return the session to its mempools.
+ *
+ * @param qp		Queue pair the operation was processed on
+ * @param op		Crypto operation
+ * @param sess		GCM session
+ */
+static void
+handle_completed_gcm_crypto_op(struct ipsec_mb_qp *qp,
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *sess)
+{
+	post_process_gcm_crypto_op(qp, op, sess);
+
+	/* Free session if a session-less crypto op */
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+		memset(op->sym->session, 0,
+			rte_cryptodev_sym_get_existing_header_session_size(
+				op->sym->session));
+		rte_mempool_put(qp->sess_mp_priv, sess);
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+}
+
+/**
+ * Process a crypto operation, calling
+ * the GCM API from the multi-buffer library.
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		GCM session
+ *
+ * @return
+ * - 0 on success
+ */
+static int
+process_gcm_crypto_op(struct ipsec_mb_qp *qp, struct rte_crypto_op *op,
+		struct aesni_gcm_session *session)
+{
+	struct aesni_gcm_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+	uint8_t *src, *dst;
+	uint8_t *iv_ptr;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct rte_mbuf *m_src = sym_op->m_src;
+	uint32_t offset, data_offset, data_length;
+	uint32_t part_len, total_len, data_len;
+	uint8_t *tag;
+	unsigned int oop = 0;
+
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT ||
+			session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
+		offset = sym_op->aead.data.offset;
+		data_offset = offset;
+		data_length = sym_op->aead.data.length;
+	} else {
+		offset = sym_op->auth.data.offset;
+		data_offset = offset;
+		data_length = sym_op->auth.data.length;
+	}
+
+	RTE_ASSERT(m_src != NULL);
+
+	while (offset >= m_src->data_len && data_length != 0) {
+		offset -= m_src->data_len;
+		m_src = m_src->next;
+
+		RTE_ASSERT(m_src != NULL);
+	}
+
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
+	data_len = m_src->data_len - offset;
+	part_len = (data_len < data_length) ? data_len :
+			data_length;
+
+	RTE_ASSERT((sym_op->m_dst == NULL) ||
+			((sym_op->m_dst != NULL) &&
+				rte_pktmbuf_is_contiguous(sym_op->m_dst)));
+
+	/* In-place */
+	if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
+		dst = src;
+	/* Out-of-place */
+	else {
+		oop = 1;
+		/* Segmented destination buffer is not supported
+		 * if operation is Out-of-place
+		 */
+		RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
+		dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+					data_offset);
+	}
+
+	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+
+	if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT) {
+		session->ops.init(&session->gdata_key, &qp_data->gcm_ctx_data,
+				iv_ptr,
+				sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
+
+		session->ops.update(&session->gdata_key,
+			&qp_data->gcm_ctx_data, dst, src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			session->ops.update(&session->gdata_key,
+				&qp_data->gcm_ctx_data, dst, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp_data->temp_digest;
+		else
+			tag = sym_op->aead.digest.data;
+
+		session->ops.finalize(&session->gdata_key,
+			&qp_data->gcm_ctx_data, tag,
+				session->gen_digest_length);
+	} else if (session->op == IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT) {
+		session->ops.init(&session->gdata_key, &qp_data->gcm_ctx_data,
+				iv_ptr, sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
+
+		session->ops.update(&session->gdata_key,
+			&qp_data->gcm_ctx_data, dst, src, (uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			if (oop)
+				dst += part_len;
+			else
+				dst = src;
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			session->ops.update(&session->gdata_key,
+					&qp_data->gcm_ctx_data,
+					dst, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp_data->temp_digest;
+		session->ops.finalize(&session->gdata_key,
+				&qp_data->gcm_ctx_data,
+				tag,
+				session->gen_digest_length);
+	} else if (session->op == IPSEC_MB_OP_HASH_GEN_ONLY) {
+		session->ops.gmac_init(&session->gdata_key,
+				&qp_data->gcm_ctx_data,
+				iv_ptr,
+				session->iv.length);
+
+		session->ops.gmac_update(&session->gdata_key,
+				&qp_data->gcm_ctx_data, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			session->ops.gmac_update(&session->gdata_key,
+					&qp_data->gcm_ctx_data, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		if (session->req_digest_length != session->gen_digest_length)
+			tag = qp_data->temp_digest;
+		else
+			tag = sym_op->auth.digest.data;
+
+		session->ops.gmac_finalize(&session->gdata_key,
+				&qp_data->gcm_ctx_data,
+				tag,
+				session->gen_digest_length);
+	} else { /* IPSEC_MB_OP_HASH_VERIFY_ONLY */
+		session->ops.gmac_init(&session->gdata_key,
+				&qp_data->gcm_ctx_data,
+				iv_ptr,
+				session->iv.length);
+
+		session->ops.gmac_update(&session->gdata_key,
+				&qp_data->gcm_ctx_data, src,
+				(uint64_t)part_len);
+		total_len = data_length - part_len;
+
+		while (total_len) {
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			session->ops.gmac_update(&session->gdata_key,
+					&qp_data->gcm_ctx_data, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		tag = qp_data->temp_digest;
+
+		session->ops.gmac_finalize(&session->gdata_key,
+				&qp_data->gcm_ctx_data,
+				tag,
+				session->gen_digest_length);
+	}
+	return 0;
+}
+
+/** Get GCM session */
+static inline struct aesni_gcm_session *
+aesni_gcm_get_session(struct ipsec_mb_qp *qp,
+	     struct rte_crypto_op *op)
+{
+	struct aesni_gcm_session *sess = NULL;
+	uint32_t driver_id =
+	    ipsec_mb_get_driver_id(IPSEC_MB_PMD_TYPE_AESNI_GCM);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(sym_op->session != NULL))
+			sess = (struct aesni_gcm_session *)
+			    get_sym_session_private_data(sym_op->session,
+							 driver_id);
+	} else {
+		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (rte_mempool_get(qp->sess_mp_priv,
+				(void **)&_sess_private_data)) {
+			/* Return the session header to its mempool on failure */
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+		sess = (struct aesni_gcm_session *)_sess_private_data;
+
+		if (unlikely(aesni_gcm_session_configure(qp->mb_mgr,
+				 _sess_private_data, sym_op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
+			sess = NULL;
+		}
+		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(sym_op->session, driver_id,
+					     _sess_private_data);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
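+/** Dequeue a burst of operations from the queue pair's ingress ring,
+ * process each with the GCM/GMAC direct API and update queue statistics.
+ */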
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_session *sess;
+	struct ipsec_mb_qp *qp = queue_pair;
+
+	int retval = 0;
+	unsigned int i, nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	for (i = 0; i < nb_dequeued; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, ops[i], sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i], sess);
+	}
+
+	qp->stats.dequeued_count += i;
+
+	return i;
+}
+
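+/** Set the same error code in the status of every element of the vector */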
+static inline void
+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec,
+			  int32_t errnum)
+{
+	uint32_t i;
+
+	for (i = 0; i < vec->num; i++)
+		vec->status[i] = errnum;
+}
+
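+/** Finalize an SGL encryption: generate the digest, truncating it to the
+ * requested length when it is shorter than the generated one.
+ */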
+static inline int32_t
+aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
+				     struct gcm_context_data *gdata_ctx,
+				     uint8_t *digest)
+{
+	if (s->req_digest_length != s->gen_digest_length) {
+		uint8_t tmpdigest[s->gen_digest_length];
+
+		s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+				s->gen_digest_length);
+		memcpy(digest, tmpdigest, s->req_digest_length);
+	} else {
+		s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
+				s->gen_digest_length);
+	}
+
+	return 0;
+}
+
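+/** Finalize an SGL decryption: generate the digest and compare it with the
+ * one supplied, returning EBADMSG on mismatch.
+ */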
+static inline int32_t
+aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
+				     struct gcm_context_data *gdata_ctx,
+				     uint8_t *digest)
+{
+	uint8_t tmpdigest[s->gen_digest_length];
+
+	s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+			s->gen_digest_length);
+
+	return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0
+								    : EBADMSG;
+}
+
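+/** Initialize a GCM operation and update it in place over each SGL segment */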
+static inline void
+aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
+			     struct gcm_context_data *gdata_ctx,
+			     struct rte_crypto_sgl *sgl, void *iv, void *aad)
+{
+	uint32_t i;
+
+	/* init crypto operation */
+	s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
+		    (uint64_t)s->aad_length);
+
+	/* update with sgl data */
+	for (i = 0; i < sgl->num; i++) {
+		struct rte_crypto_vec *vec = &sgl->vec[i];
+
+		s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
+			      vec->len);
+	}
+}
+
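+/** Process a GMAC operation by passing the single SGL segment as AAD to the
+ * GCM init call.
+ */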
+static inline void
+aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
+			      struct gcm_context_data *gdata_ctx,
+			      struct rte_crypto_sgl *sgl, void *iv)
+{
+	s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
+		    sgl->vec[0].len);
+}
+
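+/** Encrypt a burst of SGL vectors, returning the number of successfully
+ * processed elements.
+ */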
+static inline uint32_t
+aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
+		      struct gcm_context_data *gdata_ctx,
+		      struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					     vec->iv[i].va, vec->aad[i].va);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
+		    s, gdata_ctx, vec->digest[i].va);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
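+/** Decrypt a burst of SGL vectors, returning the number of successfully
+ * processed elements.
+ */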
+static inline uint32_t
+aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
+		      struct gcm_context_data *gdata_ctx,
+		      struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		aesni_gcm_process_gcm_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					     vec->iv[i].va, vec->aad[i].va);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
+		    s, gdata_ctx, vec->digest[i].va);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
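+/** Generate GMAC digests for a burst of single-segment vectors;
+ * multi-segment inputs are flagged as unsupported.
+ */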
+static inline uint32_t
+aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
+			struct gcm_context_data *gdata_ctx,
+			struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					      vec->iv[i].va);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(
+		    s, gdata_ctx, vec->digest[i].va);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
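+/** Verify GMAC digests for a burst of single-segment vectors;
+ * multi-segment inputs are flagged as unsupported.
+ */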
+static inline uint32_t
+aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
+		      struct gcm_context_data *gdata_ctx,
+		      struct rte_crypto_sym_vec *vec)
+{
+	uint32_t i, processed;
+
+	processed = 0;
+	for (i = 0; i < vec->num; ++i) {
+		if (vec->sgl[i].num != 1) {
+			vec->status[i] = ENOTSUP;
+			continue;
+		}
+
+		aesni_gcm_process_gmac_sgl_op(s, gdata_ctx, &vec->sgl[i],
+					      vec->iv[i].va);
+		vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(
+		    s, gdata_ctx, vec->digest[i].va);
+		processed += (vec->status[i] == 0);
+	}
+
+	return processed;
+}
+
+/** Process CPU crypto bulk operations */
+static uint32_t
+aesni_gcm_process_bulk(struct rte_cryptodev *dev,
+			struct rte_cryptodev_sym_session *sess,
+			__rte_unused union rte_crypto_sym_ofs ofs,
+			struct rte_crypto_sym_vec *vec)
+{
+	void *sess_priv;
+	struct aesni_gcm_session *s;
+	struct gcm_context_data gdata_ctx;
+
+	sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+	if (unlikely(sess_priv == NULL)) {
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+
+	s = sess_priv;
+	switch (s->op) {
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
+		return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
+	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
+		return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
+	case IPSEC_MB_OP_HASH_GEN_ONLY:
+		return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
+	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
+		return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
+	default:
+		aesni_gcm_fill_error_code(vec, EINVAL);
+		return 0;
+	}
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+	.dev_configure = ipsec_mb_pmd_config,
+	.dev_start = ipsec_mb_pmd_start,
+	.dev_stop = ipsec_mb_pmd_stop,
+	.dev_close = ipsec_mb_pmd_close,
+
+	.stats_get = ipsec_mb_pmd_stats_get,
+	.stats_reset = ipsec_mb_pmd_stats_reset,
+
+	.dev_infos_get = ipsec_mb_pmd_info_get,
+
+	.queue_pair_setup = ipsec_mb_pmd_qp_setup,
+	.queue_pair_release = ipsec_mb_pmd_qp_release,
+
+	.sym_cpu_process = aesni_gcm_process_bulk,
+
+	.sym_session_get_size = ipsec_mb_pmd_sym_session_get_size,
+	.sym_session_configure = ipsec_mb_pmd_sym_session_configure,
+	.sym_session_clear = ipsec_mb_pmd_sym_session_clear
+};
+
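+/** Probe callback: create the aesni-gcm device through the common
+ * IPsec-mb framework.
+ */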
+static int
+cryptodev_aesni_gcm_probe(struct rte_vdev_device *vdev)
+{
+	return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_GCM);
+}
+
+static struct rte_vdev_driver cryptodev_aesni_gcm_pmd_drv = {
+	.probe = cryptodev_aesni_gcm_probe,
+	.remove = cryptodev_ipsec_mb_remove
+};
+
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD,
+		      cryptodev_aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+			      "max_nb_queue_pairs=<int> socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,
+			       cryptodev_aesni_gcm_pmd_drv.driver,
+			       pmd_driver_id_aesni_gcm);
+
+/* Constructor function to register aesni-gcm PMD */
+RTE_INIT(ipsec_mb_register_aesni_gcm)
+{
+	struct ipsec_mb_pmd_data *aesni_gcm_data =
+		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_GCM];
+
+	aesni_gcm_data->caps = aesni_gcm_capabilities;
+	aesni_gcm_data->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+	aesni_gcm_data->feature_flags =
+		RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+		RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+	aesni_gcm_data->internals_priv_size = 0;
+	aesni_gcm_data->ops = &aesni_gcm_pmd_ops;
+	aesni_gcm_data->qp_priv_size = sizeof(struct aesni_gcm_qp_data);
+	aesni_gcm_data->queue_pair_configure = NULL;
+	aesni_gcm_data->session_configure = aesni_gcm_session_configure;
+	aesni_gcm_data->session_priv_size = sizeof(struct aesni_gcm_session);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
index 979c4f66b4..ea8baf1dce 100644
--- a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -34,6 +34,9 @@ extern RTE_DEFINE_PER_LCORE(MB_MGR *, mb_mgr);
 #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
 /**< IPSEC Multi buffer aesni_mb PMD device name */
 
+#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+/**< IPSEC Multi buffer aesni_gcm PMD device name */
+
 /** PMD LOGTYPE DRIVER, common to all PMDs */
 extern int ipsec_mb_logtype_driver;
 #define IPSEC_MB_LOG(level, fmt, ...)                                         \
@@ -43,6 +46,7 @@ extern int ipsec_mb_logtype_driver;
 /** All supported device types */
 enum ipsec_mb_pmd_types {
 	IPSEC_MB_PMD_TYPE_AESNI_MB = 0,
+	IPSEC_MB_PMD_TYPE_AESNI_GCM,
 	IPSEC_MB_N_PMD_TYPES
 };
 
@@ -62,6 +66,7 @@ enum ipsec_mb_operation {
 };
 
 extern uint8_t pmd_driver_id_aesni_mb;
+extern uint8_t pmd_driver_id_aesni_gcm;
 
 /** Helper function. Gets driver ID based on PMD type */
 static __rte_always_inline uint8_t
@@ -70,6 +75,8 @@ ipsec_mb_get_driver_id(enum ipsec_mb_pmd_types pmd_type)
 	switch (pmd_type) {
 	case IPSEC_MB_PMD_TYPE_AESNI_MB:
 		return pmd_driver_id_aesni_mb;
+	case IPSEC_MB_PMD_TYPE_AESNI_GCM:
+		return pmd_driver_id_aesni_gcm;
 	default:
 		break;
 	}
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index b2ccea6f94..14a13f2263 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -7,7 +7,6 @@ endif
 
 drivers = [
         'ipsec_mb',
-        'aesni_gcm',
         'armv8',
         'bcmfs',
         'caam_jr',
-- 
2.25.1



Thread overview: 67+ messages
2021-06-18 12:17 [dpdk-dev] [RFC 0/7] crypto/ipsec_mb: introduce ipsec_mb framework pbronowx
2021-06-18 12:17 ` [dpdk-dev] [RFC 1/7] " pbronowx
2021-06-18 12:17 ` [dpdk-dev] [RFC 2/7] crypto/ipsec_mb: move aesni-mb PMD to " pbronowx
2021-06-18 12:17 ` [dpdk-dev] [RFC 3/7] crypto/ipsec_mb: move aesni-gcm " pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 4/7] crypto/ipsec_mb: move kasumi " pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 5/7] crypto/ipsec_mb: move snow3g " pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 6/7] crypto/snow3g: add support for digest appended ops pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 7/7] crypto/ipsec_mb: move zuc PMD to ipsec_mb framework pbronowx
2021-06-18 13:11 ` [dpdk-dev] [RFC 0/7] crypto/ipsec_mb: introduce " David Marchand
2021-06-18 16:05   ` [dpdk-dev] [EXT] " Akhil Goyal
2021-06-21  8:52     ` Zhang, Roy Fan
2021-08-26 15:16 ` [dpdk-dev] [PATCH v1 0/8] drivers/crypto: " Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 1/8] drivers/crypto: introduce IPsec-mb framework Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 2/8] drivers/crypto: move aesni-mb PMD to " Ciara Power
2021-08-26 15:16   ` Ciara Power [this message]
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 4/8] drivers/crypto: move kasumi " Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 5/8] drivers/crypto: move snow3g " Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 6/8] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 7/8] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 8/8] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-09-23 15:28 ` [dpdk-dev] [PATCH v2 0/9] drivers/crypto: introduce ipsec_mb framework Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 1/9] drivers/crypto: introduce IPsec-mb framework Ciara Power
2021-09-23 15:46     ` Thomas Monjalon
2021-09-27 10:02       ` Power, Ciara
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 2/9] drivers/crypto: move aesni-mb PMD to " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 3/9] drivers/crypto: move aesni-gcm " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 4/9] drivers/crypto: move kasumi " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 5/9] drivers/crypto: move snow3g " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 6/9] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 7/9] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 8/9] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 9/9] doc/rel_notes: added note for SW Crypto PMD change Ciara Power
2021-09-28 16:43   ` [dpdk-dev] [EXT] [PATCH v2 0/9] drivers/crypto: introduce ipsec_mb framework Akhil Goyal
2021-10-15 14:39 ` [dpdk-dev] [PATCH v4 00/14] " Ciara Power
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 01/14] drivers/crypto: introduce IPsec-mb framework Ciara Power
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 02/14] crypto/ipsec_mb: add multiprocess support Ciara Power
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 03/14] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
2021-10-18  7:38     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 04/14] crypto/ipsec_mb: support ZUC-256 for aesni_mb Ciara Power
2021-10-18  7:39     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 05/14] test/crypto: check cipher parameters Ciara Power
2021-10-18  7:40     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 06/14] test/crypto: check auth parameters Ciara Power
2021-10-18  7:41     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 07/14] test/crypto: add ZUC-256 vectors Ciara Power
2021-10-18  7:42     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 08/14] drivers/crypto: move aesni-gcm PMD to IPsec-mb framework Ciara Power
2021-10-18  7:43     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 09/14] drivers/crypto: move kasumi " Ciara Power
2021-10-18  7:42     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 10/14] drivers/crypto: move snow3g " Ciara Power
2021-10-18  7:42     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 11/14] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-10-18  7:43     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 12/14] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-10-18  7:44     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 13/14] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-10-18  7:44     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 14/14] test/crypto: add test for chacha20_poly1305 PMD Ciara Power
2021-10-18  7:44     ` Zhang, Roy Fan
2021-10-18 15:21   ` [dpdk-dev] [EXT] [PATCH v4 00/14] drivers/crypto: introduce ipsec_mb framework Akhil Goyal
2021-10-19 23:09     ` Thomas Monjalon
2021-10-20  4:23       ` Akhil Goyal
2021-10-20  8:31         ` Akhil Goyal
2021-10-20  9:01           ` Thomas Monjalon
2021-10-20  9:00         ` Zhang, Roy Fan
2021-10-20  9:04           ` Akhil Goyal
