DPDK patches and discussions
From: David Coyle <david.coyle@intel.com>
To: dev@dpdk.org
Cc: declan.doherty@intel.com, fiona.trahe@intel.com,
	pablo.de.lara.guarch@intel.com, brendan.ryan@intel.com,
	shreyansh.jain@nxp.com, hemant.agrawal@nxp.com,
	David Coyle <david.coyle@intel.com>,
	Mairtin o Loingsigh <mairtin.oloingsigh@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/4] raw/aesni_mb: add aesni_mb raw device
Date: Fri,  3 Apr 2020 17:36:54 +0100
Message-ID: <20200403163656.60545-3-david.coyle@intel.com>
In-Reply-To: <20200403163656.60545-1-david.coyle@intel.com>

Add an AESNI-MB raw device, thereby exposing AESNI-MB to the
rawdev API. The AESNI-MB raw device uses the multi-function
interface to allow combined operations to be sent to the AESNI-MB
software library.
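
For reference, below is a minimal usage sketch, based on the unit test
added in this patch, of how an application can chain a CRC xform with an
AES-DOCSISBPI cipher xform and submit the resulting operation chain
through the rawdev enqueue/dequeue API. The dev_id, op_pool and mbuf
variables are assumed to have been set up already (device configured and
started) and error handling is omitted:

    struct rte_multi_fn_xform crc_xform = {0}, cipher_xform = {0};
    struct rte_multi_fn_op *ops[2], *result;
    struct rte_multi_fn_session *sess;
    uint16_t qp_id = 0;

    /* CRC32 generation chained to AES-DOCSISBPI encryption */
    crc_xform.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
    crc_xform.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
    crc_xform.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
    crc_xform.next = &cipher_xform;

    cipher_xform.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
    cipher_xform.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
    cipher_xform.crypto_sym.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
    cipher_xform.crypto_sym.cipher.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI;
    /* key and IV fields filled in as in the unit test */
    cipher_xform.next = NULL;

    sess = rte_multi_fn_session_create(dev_id, &crc_xform, rte_socket_id());

    /* ops[0] carries the CRC op, ops[1] the cipher op */
    rte_multi_fn_op_bulk_alloc(op_pool, ops, 2);
    ops[0]->next = ops[1];
    ops[0]->m_src = mbuf;
    ops[0]->sess = sess;
    ops[1]->next = NULL;
    /* per-op data offsets and lengths filled in as in the unit test */

    rte_rawdev_enqueue_buffers(dev_id, (struct rte_rawdev_buf **)ops, 1,
                               (rte_rawdev_obj_t)&qp_id);
    while (rte_rawdev_dequeue_buffers(dev_id,
                                      (struct rte_rawdev_buf **)&result,
                                      1, (rte_rawdev_obj_t)&qp_id) < 1)
        ;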

Signed-off-by: David Coyle <david.coyle@intel.com>
Signed-off-by: Mairtin o Loingsigh <mairtin.oloingsigh@intel.com>
---
 config/common_base                            |    6 +
 drivers/raw/Makefile                          |    2 +
 drivers/raw/aesni_mb/Makefile                 |   47 +
 drivers/raw/aesni_mb/aesni_mb_rawdev.c        | 1536 +++++++++++++++++
 drivers/raw/aesni_mb/aesni_mb_rawdev.h        |  112 ++
 drivers/raw/aesni_mb/aesni_mb_rawdev_test.c   | 1102 ++++++++++++
 .../aesni_mb/aesni_mb_rawdev_test_vectors.h   | 1183 +++++++++++++
 drivers/raw/aesni_mb/meson.build              |   26 +
 .../aesni_mb/rte_rawdev_aesni_mb_version.map  |    3 +
 drivers/raw/meson.build                       |    3 +-
 mk/rte.app.mk                                 |    2 +
 11 files changed, 4021 insertions(+), 1 deletion(-)
 create mode 100644 drivers/raw/aesni_mb/Makefile
 create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev.c
 create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev.h
 create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev_test.c
 create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h
 create mode 100644 drivers/raw/aesni_mb/meson.build
 create mode 100644 drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map

diff --git a/config/common_base b/config/common_base
index 4f004968b..7ac6a3428 100644
--- a/config/common_base
+++ b/config/common_base
@@ -818,6 +818,12 @@ CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV=y
 #
 CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV=y
 
+#
+# Compile PMD for AESNI raw device
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG=n
+
 #
 # Compile multi-fn raw device interface
 #
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile
index e16da8d95..5aa608e1e 100644
--- a/drivers/raw/Makefile
+++ b/drivers/raw/Makefile
@@ -15,5 +15,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += ntb
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += octeontx2_dma
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += octeontx2_ep
 DIRS-y += common
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += aesni_mb
+DEPDIRS-aesni_mb := common
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/raw/aesni_mb/Makefile b/drivers/raw/aesni_mb/Makefile
new file mode 100644
index 000000000..0a40b75b4
--- /dev/null
+++ b/drivers/raw/aesni_mb/Makefile
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020 Intel Corporation.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_aesni_mb_rawdev.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# versioning export map
+EXPORT_MAP := rte_rawdev_aesni_mb_version.map
+
+# external library dependencies
+LDLIBS += -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_multi_fn
+
+ifneq ($(CONFIG_RTE_LIBRTE_MULTI_FN_COMMON),y)
+$(error "RTE_LIBRTE_MULTI_FN_COMMON is required to build aesni_mb raw device")
+endif
+
+IMB_HDR = $(shell echo '\#include <intel-ipsec-mb.h>' | \
+	$(CC) -E $(EXTRA_CFLAGS) - | grep 'intel-ipsec-mb.h' | \
+	head -n1 | cut -d'"' -f2)
+
+# Detect library version
+IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
+IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3)
+
+ifeq ($(IMB_VERSION),)
+$(error "IPSec_MB version >= 0.53.3 is required to build aesni_mb raw device")
+endif
+
+ifeq ($(shell expr $(IMB_VERSION_NUM) \< 0x3503), 1)
+$(error "IPSec_MB version >= 0.53.3 is required to build aesni_mb raw device")
+endif
+
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += aesni_mb_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += aesni_mb_rawdev_test.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev.c b/drivers/raw/aesni_mb/aesni_mb_rawdev.c
new file mode 100644
index 000000000..946bdd871
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev.c
@@ -0,0 +1,1536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <stdbool.h>
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_string_fns.h>
+#include <rte_multi_fn.h>
+#include <rte_ether.h>
+
+#include "aesni_mb_rawdev.h"
+
+#define MAX_QUEUES        (64)
+#define RING_NAME_MAX_LEN (64)
+
+#define PON_BIP_LEN             (4)
+#define PON_AUTH_TAG_CRC_OFFSET (4)
+
+static const uint16_t err_detect_output_byte_lengths[] = {
+	[IMB_AUTH_DOCSIS_CRC32] = RTE_ETHER_CRC_LEN,
+	[IMB_AUTH_PON_CRC_BIP] = (PON_BIP_LEN + RTE_ETHER_CRC_LEN),
+};
+
+static const char * const xstat_names[] = {
+		"successful_enqueues", "successful_dequeues",
+		"failed_enqueues", "failed_dequeues",
+};
+
+static const char *driver_name = "rawdev_aesni_mb";
+
+static int
+qp_unique_name_set(struct rte_rawdev *rawdev, struct aesni_mb_rawdev_qp *qp)
+{
+	unsigned int n = snprintf(qp->name,
+				  sizeof(qp->name),
+				  "aesni_mb_rawdev_pmd_%u_qp_%u",
+				  rawdev->dev_id,
+				  qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+static struct rte_ring *
+qp_processed_ops_ring_create(struct aesni_mb_rawdev_qp *qp,
+			     unsigned int ring_size,
+			     int socket_id)
+{
+	struct rte_ring *r;
+	char ring_name[RING_NAME_MAX_LEN];
+
+	unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			AESNI_MB_RAWDEV_DEBUG(
+				"Reusing existing ring %s for processed ops",
+				ring_name);
+			return r;
+		}
+
+		AESNI_MB_RAWDEV_ERR(
+			"Unable to reuse existing ring %s for processed ops",
+			ring_name);
+		return NULL;
+	}
+
+	return rte_ring_create(ring_name,
+			       ring_size,
+			       socket_id,
+			       RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static uint16_t
+err_detect_output_byte_length_get(JOB_HASH_ALG algo)
+{
+	return err_detect_output_byte_lengths[algo];
+}
+
+static bool
+docsis_crc_crypto_encrypt_check(struct rte_multi_fn_xform *xform)
+{
+	struct rte_crypto_sym_xform *crypto_sym;
+	struct rte_multi_fn_err_detect_xform *err_detect;
+	struct rte_multi_fn_xform *next;
+
+	if (xform->type == RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+		err_detect = &xform->err_detect;
+		next = xform->next;
+
+		if (err_detect->algo ==
+				RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+		    err_detect->op ==
+				RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+		    next != NULL &&
+		    next->type == RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM) {
+
+			crypto_sym = &next->crypto_sym;
+			next = next->next;
+
+			if (crypto_sym->type ==
+					RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			    crypto_sym->cipher.op ==
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+			    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+			    crypto_sym->cipher.key.length ==
+					IMB_KEY_AES_128_BYTES &&
+			    crypto_sym->cipher.iv.length ==
+					AES_BLOCK_SIZE &&
+			    next == NULL)
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static bool
+docsis_crypto_decrypt_crc_check(struct rte_multi_fn_xform *xform)
+{
+	struct rte_crypto_sym_xform *crypto_sym;
+	struct rte_multi_fn_err_detect_xform *err_detect;
+	struct rte_multi_fn_xform *next;
+
+	if (xform->type == RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM) {
+
+		crypto_sym = &xform->crypto_sym;
+		next = xform->next;
+
+		if (crypto_sym->type ==
+				RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		    crypto_sym->cipher.op ==
+				RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+		    crypto_sym->cipher.algo ==
+				RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+		    crypto_sym->cipher.key.length ==
+				IMB_KEY_AES_128_BYTES &&
+		    crypto_sym->cipher.iv.length ==
+				AES_BLOCK_SIZE &&
+		    next != NULL &&
+		    next->type == RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+			err_detect = &next->err_detect;
+			next = next->next;
+
+			if (err_detect->algo ==
+					RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+			    err_detect->op ==
+					RTE_MULTI_FN_ERR_DETECT_OP_VERIFY &&
+			    next == NULL)
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static bool
+pon_crc_crypto_encrypt_bip_check(struct rte_multi_fn_xform *xform)
+{
+	struct rte_crypto_sym_xform *crypto_sym;
+	struct rte_multi_fn_err_detect_xform *err_detect;
+	struct rte_multi_fn_xform *next;
+
+	if (xform->type == RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+		err_detect = &xform->err_detect;
+		next = xform->next;
+
+		if (err_detect->algo ==
+				RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+		    err_detect->op ==
+				RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+		    next != NULL &&
+		    next->type == RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM) {
+
+			crypto_sym = &next->crypto_sym;
+			next = next->next;
+
+			if (crypto_sym->type ==
+					RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			    crypto_sym->cipher.op ==
+					RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+			    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_CTR &&
+			    crypto_sym->cipher.key.length ==
+					IMB_KEY_AES_128_BYTES &&
+			    crypto_sym->cipher.iv.length ==
+					AES_BLOCK_SIZE &&
+			    next != NULL &&
+			    next->type ==
+				RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+				err_detect = &next->err_detect;
+				next = next->next;
+
+				if (err_detect->algo ==
+					RTE_MULTI_FN_ERR_DETECT_BIP32 &&
+				    err_detect->op ==
+					RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+				    next == NULL)
+					return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static bool
+pon_bip_crypto_decrypt_crc_check(struct rte_multi_fn_xform *xform)
+{
+	struct rte_crypto_sym_xform *crypto_sym;
+	struct rte_multi_fn_err_detect_xform *err_detect;
+	struct rte_multi_fn_xform *next;
+
+	if (xform->type == RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+		err_detect = &xform->err_detect;
+		next = xform->next;
+
+		if (err_detect->algo ==
+				RTE_MULTI_FN_ERR_DETECT_BIP32 &&
+		    err_detect->op ==
+				RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+		    next != NULL &&
+		    next->type == RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM) {
+
+			crypto_sym = &next->crypto_sym;
+			next = next->next;
+
+			if (crypto_sym->type ==
+					RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			    crypto_sym->cipher.op ==
+					RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+			    crypto_sym->cipher.algo ==
+					RTE_CRYPTO_CIPHER_AES_CTR &&
+			    crypto_sym->cipher.key.length ==
+					IMB_KEY_AES_128_BYTES &&
+			    crypto_sym->cipher.iv.length ==
+					AES_BLOCK_SIZE &&
+			    next != NULL &&
+			    next->type ==
+				RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+				err_detect = &next->err_detect;
+				next = next->next;
+
+				if (err_detect->algo ==
+					RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+				    err_detect->op ==
+					RTE_MULTI_FN_ERR_DETECT_OP_VERIFY &&
+				    next == NULL)
+					return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static enum aesni_mb_rawdev_op
+session_support_check(struct rte_multi_fn_xform *xform)
+{
+	enum aesni_mb_rawdev_op op = AESNI_MB_RAWDEV_OP_NOT_SUPPORTED;
+
+	if (docsis_crc_crypto_encrypt_check(xform))
+		op = AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO;
+	else if (docsis_crypto_decrypt_crc_check(xform))
+		op = AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC;
+	else if (pon_crc_crypto_encrypt_bip_check(xform))
+		op = AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP;
+	else if (pon_bip_crypto_decrypt_crc_check(xform))
+		op = AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC;
+
+	return op;
+}
+
+static int
+session_err_detect_parameters_set(struct aesni_mb_rawdev_session *sess)
+{
+	switch (sess->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+		sess->err_detect.operation =
+					RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+		sess->err_detect.algo = IMB_AUTH_DOCSIS_CRC32;
+		break;
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		sess->err_detect.operation = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+		sess->err_detect.algo = IMB_AUTH_DOCSIS_CRC32;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+		sess->err_detect.operation =
+					RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+		sess->err_detect.algo = IMB_AUTH_PON_CRC_BIP;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		sess->err_detect.operation = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+		sess->err_detect.algo = IMB_AUTH_PON_CRC_BIP;
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR(
+				"Unsupported operation for error detection");
+		return -ENOTSUP;
+	}
+
+	sess->err_detect.gen_output_len =
+		err_detect_output_byte_length_get(sess->err_detect.algo);
+
+	return 0;
+}
+
+static int
+session_cipher_parameters_set(const MB_MGR *mb_mgr,
+			      struct aesni_mb_rawdev_session *sess,
+			      const struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL) {
+		sess->cipher.mode = IMB_CIPHER_NULL;
+		return -EINVAL;
+	}
+
+	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		AESNI_MB_RAWDEV_ERR("Crypto xform not of type cipher");
+		return -EINVAL;
+	}
+
+	/* Select cipher direction */
+	switch (sess->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+		sess->cipher.direction = IMB_DIR_ENCRYPT;
+		sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
+		break;
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		sess->cipher.direction = IMB_DIR_DECRYPT;
+		sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+		sess->cipher.direction = IMB_DIR_ENCRYPT;
+		sess->cipher.mode = IMB_CIPHER_PON_AES_CNTR;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		sess->cipher.direction = IMB_DIR_DECRYPT;
+		sess->cipher.mode = IMB_CIPHER_PON_AES_CNTR;
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported operation for cipher");
+		return -ENOTSUP;
+	}
+
+	/* Set IV parameters */
+	sess->iv.offset = xform->cipher.iv.offset;
+	sess->iv.length = xform->cipher.iv.length;
+
+	/* Check key length and choose key expansion function for AES */
+	switch (xform->cipher.key.length) {
+	case IMB_KEY_AES_128_BYTES:
+		sess->cipher.key_length_in_bytes = IMB_KEY_AES_128_BYTES;
+		IMB_AES_KEYEXP_128(mb_mgr,
+				   xform->cipher.key.data,
+				   sess->cipher.expanded_aes_keys.encode,
+				   sess->cipher.expanded_aes_keys.decode);
+		break;
+	case IMB_KEY_AES_256_BYTES:
+		sess->cipher.key_length_in_bytes = IMB_KEY_AES_256_BYTES;
+		IMB_AES_KEYEXP_256(mb_mgr,
+				   xform->cipher.key.data,
+				   sess->cipher.expanded_aes_keys.encode,
+				   sess->cipher.expanded_aes_keys.decode);
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Invalid cipher key length");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline struct aesni_mb_rawdev_session *
+session_get(struct rte_multi_fn_op *op)
+{
+	struct aesni_mb_rawdev_session *sess = NULL;
+
+	if (likely(op->sess != NULL))
+		sess = op->sess->sess_private_data;
+	else
+		op->overall_status = RTE_MULTI_FN_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+static inline int
+op_chain_parse(struct aesni_mb_rawdev_session *sess,
+	       struct rte_multi_fn_op *op_chain,
+	       struct rte_multi_fn_op **cipher_op,
+	       struct rte_multi_fn_op **crc_op,
+	       struct rte_multi_fn_op **bip_op)
+{
+	*cipher_op = NULL;
+	*crc_op = NULL;
+	*bip_op = NULL;
+
+	switch (sess->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		if (unlikely(op_chain == NULL || op_chain->next == NULL)) {
+			return -EINVAL;
+		} else if (sess->op == AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO) {
+			*crc_op = op_chain;
+			*cipher_op = op_chain->next;
+		} else {
+			*cipher_op = op_chain;
+			*crc_op = op_chain->next;
+		}
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		if (unlikely(op_chain == NULL ||
+			     op_chain->next == NULL ||
+			     op_chain->next->next == NULL)) {
+			return -EINVAL;
+		} else if (sess->op == AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP) {
+			*crc_op = op_chain;
+			*cipher_op = op_chain->next;
+			*bip_op = op_chain->next->next;
+		} else {
+			*bip_op = op_chain;
+			*cipher_op = op_chain->next;
+			*crc_op = op_chain->next->next;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void
+op_statuses_set(struct rte_multi_fn_op *first_op,
+		struct rte_multi_fn_op *cipher_op,
+		struct rte_multi_fn_op *crc_op,
+		struct rte_multi_fn_op *bip_op,
+		enum rte_multi_fn_op_status overall_status,
+		uint8_t crypto_status,
+		uint8_t err_detect_status)
+{
+	first_op->overall_status = overall_status;
+
+	if (cipher_op != NULL)
+		cipher_op->op_status = crypto_status;
+	if (crc_op != NULL)
+		crc_op->op_status = err_detect_status;
+	if (bip_op != NULL)
+		bip_op->op_status = err_detect_status;
+}
+
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+#define DOCSIS_CIPHER_CRC_OFFSET_DIFF (RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN)
+#define DOCSIS_CIPHER_CRC_LENGTH_DIFF (RTE_ETHER_HDR_LEN - \
+					RTE_ETHER_TYPE_LEN - \
+					RTE_ETHER_CRC_LEN)
+
+static inline int
+docsis_crypto_crc_check(struct rte_multi_fn_op *first_op,
+			struct rte_multi_fn_op *cipher_op,
+			struct rte_multi_fn_op *crc_op)
+{
+	struct rte_multi_fn_op *err_op = NULL;
+	uint8_t err_op_status;
+	const uint32_t offset_diff = DOCSIS_CIPHER_CRC_OFFSET_DIFF;
+
+	if (cipher_op->crypto_sym.cipher.data.length &&
+	    crc_op->err_detect.data.length) {
+		/* Cipher offset must be at least 12 greater than CRC offset */
+		if (cipher_op->crypto_sym.cipher.data.offset <
+		    ((uint32_t)crc_op->err_detect.data.offset + offset_diff)) {
+			err_op = crc_op;
+			err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+		/*
+		 * Cipher length must be at least 8 less than CRC length,
+		 * taking into account the known difference between what is
+		 * ciphered and what is CRC'd
+		 */
+		} else if ((cipher_op->crypto_sym.cipher.data.length +
+				DOCSIS_CIPHER_CRC_LENGTH_DIFF) >
+			    crc_op->err_detect.data.length) {
+			err_op = crc_op;
+			err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+		}
+	}
+
+	if (err_op != NULL) {
+		err_op->op_status = err_op_status;
+		first_op->overall_status = RTE_MULTI_FN_OP_STATUS_FAILURE;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define PON_FRAME_HDR_SIZE      (8U)
+#define PON_FRAME_MULTIPLE_SIZE (4)
+#define PON_PLI_SHIFT_BITS      (2)
+
+static inline int
+pon_crypto_crc_bip_check(struct rte_multi_fn_op *first_op,
+			 struct rte_multi_fn_op *crc_op,
+			 struct rte_multi_fn_op *bip_op,
+			 struct rte_mbuf *m_src)
+{
+	struct rte_multi_fn_op *err_op = NULL;
+	uint8_t err_op_status;
+
+	/*
+	 * BIP length must be multiple of 4 and be at least a full PON header
+	 * in size
+	 */
+	if (bip_op->err_detect.data.length % PON_FRAME_MULTIPLE_SIZE != 0 ||
+	    bip_op->err_detect.data.length < PON_FRAME_HDR_SIZE) {
+		err_op = bip_op;
+		err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+	}
+
+	/*
+	 * Check the PLI field in the PON frame header matches the
+	 * CRC length
+	 */
+	uint16_t *pli_key_idx = rte_pktmbuf_mtod(m_src, uint16_t *);
+	uint16_t pli = rte_bswap16(*pli_key_idx) >> PON_PLI_SHIFT_BITS;
+	if (crc_op->err_detect.data.length != 0 &&
+	    crc_op->err_detect.data.length != (pli - RTE_ETHER_CRC_LEN)) {
+		err_op = crc_op;
+		err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+	}
+
+	if (err_op != NULL) {
+		err_op->op_status = err_op_status;
+		first_op->overall_status = RTE_MULTI_FN_OP_STATUS_FAILURE;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif /* RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG */
+
+static inline int
+mb_job_params_set(JOB_AES_HMAC *job,
+		  struct aesni_mb_rawdev_qp *qp,
+		  struct rte_multi_fn_op *op,
+		  uint8_t *output_idx)
+{
+	struct rte_mbuf *m_src, *m_dst;
+	struct rte_multi_fn_op *cipher_op;
+	struct rte_multi_fn_op *crc_op;
+	struct rte_multi_fn_op *bip_op;
+	uint32_t cipher_offset;
+	struct aesni_mb_rawdev_session *session;
+
+	session = session_get(op);
+	if (unlikely(session == NULL)) {
+		op->overall_status = RTE_MULTI_FN_STATUS_INVALID_SESSION;
+		return -EINVAL;
+	}
+
+	if (unlikely(op_chain_parse(session,
+				    op,
+				    &cipher_op,
+				    &crc_op,
+				    &bip_op) < 0)) {
+		op_statuses_set(
+			op,
+			cipher_op,
+			crc_op,
+			bip_op,
+			RTE_MULTI_FN_OP_STATUS_FAILURE,
+			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+			RTE_MULTI_FN_ERR_DETECT_OP_STATUS_NOT_PROCESSED);
+		return -EINVAL;
+	}
+
+	op_statuses_set(op,
+			cipher_op,
+			crc_op,
+			bip_op,
+			RTE_MULTI_FN_OP_STATUS_NOT_PROCESSED,
+			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+			RTE_MULTI_FN_ERR_DETECT_OP_STATUS_NOT_PROCESSED);
+
+	m_src = op->m_src;
+
+	if (op->m_dst == NULL || op->m_dst == op->m_src) {
+		/* in-place operation */
+		m_dst = m_src;
+	} else {
+		/* out-of-place operation not supported */
+		op->overall_status = RTE_MULTI_FN_OP_STATUS_FAILURE;
+		return -EINVAL;
+	}
+
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+	switch (session->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		if (docsis_crypto_crc_check(op, cipher_op, crc_op) < 0)
+			return -EINVAL;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+	/*
+	 * session->op is known to be ok at this point so ok to include
+	 * default case here
+	 */
+	default:
+		if (pon_crypto_crc_bip_check(op, crc_op, bip_op, m_src) < 0)
+			return -EINVAL;
+		break;
+	}
+#endif
+
+	/* Set order */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
+	job->enc_keys = session->cipher.expanded_aes_keys.encode;
+	job->dec_keys = session->cipher.expanded_aes_keys.decode;
+
+	/*
+	 * Set error detection parameters
+	 * In intel-ipsec-mb, error detection is treated as a hash algorithm
+	 */
+	job->hash_alg = session->err_detect.algo;
+
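+	/*
+	 * The CRC/BIP result is written to a per-queue temporary buffer,
+	 * cycling through MAX_JOBS slots so each in-flight job has its own
+	 * output location
+	 */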
+	job->auth_tag_output = qp->temp_outputs[*output_idx];
+	*output_idx = (*output_idx + 1) % MAX_JOBS;
+
+	job->auth_tag_output_len_in_bytes = session->err_detect.gen_output_len;
+
+	/* Set data parameters */
+	cipher_offset = cipher_op->crypto_sym.cipher.data.offset;
+
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, cipher_offset);
+
+	job->cipher_start_src_offset_in_bytes = cipher_offset;
+	job->msg_len_to_cipher_in_bytes =
+				cipher_op->crypto_sym.cipher.data.length;
+
+	switch (session->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		job->hash_start_src_offset_in_bytes =
+						crc_op->err_detect.data.offset;
+		job->msg_len_to_hash_in_bytes = crc_op->err_detect.data.length;
+
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+	/*
+	 * session->op is known to be ok at this point so ok to include
+	 * default case here
+	 */
+	default:
+		job->hash_start_src_offset_in_bytes =
+						bip_op->err_detect.data.offset;
+		job->msg_len_to_hash_in_bytes = bip_op->err_detect.data.length;
+
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+#endif
+		break;
+	}
+
+	/* Set IV parameters */
+	job->iv_len_in_bytes = session->iv.length;
+	job->iv = (uint8_t *)cipher_op + session->iv.offset;
+
+	job->user_data = op;
+
+	return 0;
+}
+
+static inline void
+bip_copy(JOB_AES_HMAC *job, struct rte_multi_fn_op *bip_op)
+{
+	if (bip_op->err_detect.data.length == 0)
+		return;
+
+	/* Copy BIP to output location */
+	memcpy(bip_op->err_detect.output.data,
+	       job->auth_tag_output,
+	       PON_BIP_LEN);
+}
+
+static inline void
+crc_verify(JOB_AES_HMAC *job,
+	   struct rte_multi_fn_op *crc_op,
+	   uint8_t auth_tag_crc_offset)
+{
+	if (crc_op->err_detect.data.length == 0)
+		return;
+
+	/* Verify CRC */
+	if (memcmp(job->auth_tag_output + auth_tag_crc_offset,
+		   crc_op->err_detect.output.data,
+		   RTE_ETHER_CRC_LEN) != 0)
+		crc_op->op_status =
+			RTE_MULTI_FN_ERR_DETECT_OP_STATUS_VERIFY_FAILED;
+}
+
+static inline struct rte_multi_fn_op *
+mb_job_post_process(JOB_AES_HMAC *job)
+{
+	struct rte_multi_fn_op *op = (struct rte_multi_fn_op *)job->user_data;
+	struct aesni_mb_rawdev_session *sess = op->sess->sess_private_data;
+	struct rte_multi_fn_op *cipher_op;
+	struct rte_multi_fn_op *crc_op;
+	struct rte_multi_fn_op *bip_op;
+
+	if (unlikely(op_chain_parse(sess,
+				    op,
+				    &cipher_op,
+				    &crc_op,
+				    &bip_op) < 0)) {
+		op_statuses_set(
+			op,
+			cipher_op,
+			crc_op,
+			bip_op,
+			RTE_MULTI_FN_OP_STATUS_FAILURE,
+			RTE_CRYPTO_OP_STATUS_ERROR,
+			RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR);
+
+	} else if (op->overall_status ==
+				RTE_MULTI_FN_OP_STATUS_NOT_PROCESSED) {
+		switch (job->status) {
+		case STS_COMPLETED:
+			if (unlikely(job->hash_alg == IMB_AUTH_NULL))
+				break;
+
+			op_statuses_set(
+				op,
+				cipher_op,
+				crc_op,
+				bip_op,
+				RTE_MULTI_FN_OP_STATUS_SUCCESS,
+				RTE_CRYPTO_OP_STATUS_SUCCESS,
+				RTE_MULTI_FN_ERR_DETECT_OP_STATUS_SUCCESS);
+
+			if (job->hash_alg == IMB_AUTH_PON_CRC_BIP)
+				bip_copy(job, bip_op);
+
+			if (sess->err_detect.operation ==
+					RTE_MULTI_FN_ERR_DETECT_OP_VERIFY)
+				crc_verify(
+					job,
+					crc_op,
+					job->hash_alg == IMB_AUTH_PON_CRC_BIP ?
+						PON_AUTH_TAG_CRC_OFFSET : 0);
+
+			if (crc_op->op_status !=
+				RTE_MULTI_FN_ERR_DETECT_OP_STATUS_SUCCESS)
+				op->overall_status =
+					RTE_MULTI_FN_OP_STATUS_FAILURE;
+			break;
+		default:
+			op_statuses_set(
+				op,
+				cipher_op,
+				crc_op,
+				bip_op,
+				RTE_MULTI_FN_OP_STATUS_FAILURE,
+				RTE_CRYPTO_OP_STATUS_ERROR,
+				RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR);
+			break;
+		}
+	}
+
+	return op;
+}
+
+static unsigned int
+completed_jobs_handle(struct aesni_mb_rawdev_qp *qp,
+		      JOB_AES_HMAC *job,
+		      struct rte_multi_fn_op **ops,
+		      uint16_t nb_ops)
+{
+	struct rte_multi_fn_op *op = NULL;
+	unsigned int processed_jobs = 0;
+
+	while (job != NULL) {
+		op = mb_job_post_process(job);
+
+		if (op) {
+			ops[processed_jobs++] = op;
+			qp->stats.dequeued_count++;
+		} else {
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+		if (processed_jobs == nb_ops)
+			break;
+
+		job = IMB_GET_COMPLETED_JOB(qp->mb_mgr);
+	}
+
+	return processed_jobs;
+}
+
+static inline uint16_t
+mb_mgr_flush(struct aesni_mb_rawdev_qp *qp,
+	     struct rte_multi_fn_op **ops,
+	     uint16_t nb_ops)
+{
+	int processed_ops = 0;
+
+	/* Flush the remaining jobs */
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
+
+	if (job)
+		processed_ops += completed_jobs_handle(qp,
+						       job,
+						       &ops[processed_ops],
+						       nb_ops - processed_ops);
+
+	return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+mb_job_params_null_set(JOB_AES_HMAC *job, struct rte_multi_fn_op *op)
+{
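+	/*
+	 * Build a NULL cipher/hash job so that an op which failed parameter
+	 * setting still passes through the MB manager and is returned to the
+	 * application with its error status already set
+	 */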
+	job->chain_order = IMB_ORDER_HASH_CIPHER;
+	job->cipher_mode = IMB_CIPHER_NULL;
+	job->hash_alg = IMB_AUTH_NULL;
+	job->cipher_direction = IMB_DIR_DECRYPT;
+
+	/* Set user data to be crypto operation data struct */
+	job->user_data = op;
+
+	return job;
+}
+
+static int
+aesni_mb_rawdev_pmd_config(const struct rte_rawdev *rawdev,
+			   rte_rawdev_obj_t config)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct rte_multi_fn_dev_config *conf = config;
+
+	aesni_mb_dev->nb_queue_pairs = conf->nb_queues;
+
+	aesni_mb_dev->queue_pairs =
+			rte_zmalloc_socket(
+				"aesni_mb_rawdev_qps",
+				aesni_mb_dev->nb_queue_pairs *
+					sizeof(struct aesni_mb_rawdev_qp *),
+				RTE_CACHE_LINE_SIZE,
+				rawdev->socket_id);
+
+	if (aesni_mb_dev->queue_pairs == NULL) {
+		AESNI_MB_RAWDEV_ERR("Unable to allocate queue pairs");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void
+aesni_mb_rawdev_pmd_info_get(struct rte_rawdev *rawdev,
+			     rte_rawdev_obj_t dev_info)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct rte_multi_fn_dev_info *info = dev_info;
+
+	if (info != NULL)
+		info->max_nb_queues = aesni_mb_dev->max_nb_queue_pairs;
+}
+
+static int
+aesni_mb_rawdev_pmd_start(__rte_unused struct rte_rawdev *rawdev)
+{
+	return 0;
+}
+
+static void
+aesni_mb_rawdev_pmd_stop(__rte_unused struct rte_rawdev *rawdev)
+{
+}
+
+static int
+aesni_mb_rawdev_pmd_close(struct rte_rawdev *rawdev)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+
+	if (aesni_mb_dev->queue_pairs != NULL)
+		rte_free(aesni_mb_dev->queue_pairs);
+
+	return 0;
+}
+
+static int
+aesni_mb_rawdev_pmd_qp_release(struct rte_rawdev *rawdev, uint16_t qp_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp = aesni_mb_dev->queue_pairs[qp_id];
+	struct rte_ring *r = NULL;
+
+	if (qp != NULL) {
+		r = rte_ring_lookup(qp->name);
+		if (r)
+			rte_ring_free(r);
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+		aesni_mb_dev->queue_pairs[qp_id] = NULL;
+	}
+
+	return 0;
+}
+
+static int
+aesni_mb_rawdev_pmd_qp_setup(struct rte_rawdev *rawdev,
+			     uint16_t qp_id,
+			     rte_rawdev_obj_t qp_c)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp = NULL;
+	const struct rte_multi_fn_qp_config *qp_conf =
+			(const struct rte_multi_fn_qp_config *)qp_c;
+	int ret = -1;
+
+	if (qp_id >= aesni_mb_dev->max_nb_queue_pairs) {
+		AESNI_MB_RAWDEV_ERR("Invalid queue pair id=%d", qp_id);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed */
+	if (aesni_mb_dev->queue_pairs[qp_id] != NULL)
+		aesni_mb_rawdev_pmd_qp_release(rawdev, qp_id);
+
+	/* Allocate the queue pair data structure */
+	qp = rte_zmalloc_socket("aesni_mb_rawdev_qp",
+				sizeof(struct aesni_mb_rawdev_qp),
+				RTE_CACHE_LINE_SIZE,
+				rawdev->socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	aesni_mb_dev->queue_pairs[qp_id] = qp;
+
+	if (qp_unique_name_set(rawdev, qp))
+		goto qp_setup_cleanup;
+
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	switch (aesni_mb_dev->vector_mode) {
+	case AESNI_MB_RAWDEV_SSE:
+		init_mb_mgr_sse(qp->mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX:
+		init_mb_mgr_avx(qp->mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX2:
+		init_mb_mgr_avx2(qp->mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX512:
+		init_mb_mgr_avx512(qp->mb_mgr);
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported vector mode %u",
+				    aesni_mb_dev->vector_mode);
+		goto qp_setup_cleanup;
+	}
+
+	qp->ingress_queue = qp_processed_ops_ring_create(
+						qp,
+						qp_conf->nb_descriptors,
+						rawdev->socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+	}
+
+	return ret;
+}
+
+static uint16_t
+aesni_mb_rawdev_pmd_qp_count(struct rte_rawdev *rawdev)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+
+	return aesni_mb_dev->nb_queue_pairs;
+}
+
+static int
+aesni_mb_rawdev_pmd_enq(struct rte_rawdev *rawdev,
+			struct rte_rawdev_buf **ops,
+			unsigned int nb_ops,
+			rte_rawdev_obj_t q_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp;
+	unsigned int nb_enqueued;
+
+	qp = aesni_mb_dev->queue_pairs[*(uint16_t *)q_id];
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+					     (void **)ops,
+					     nb_ops,
+					     NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+
+	return nb_enqueued;
+}
+
+static int
+aesni_mb_rawdev_pmd_deq(struct rte_rawdev *rawdev,
+			struct rte_rawdev_buf **ops,
+			unsigned int nb_ops,
+			rte_rawdev_obj_t q_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp;
+	struct rte_multi_fn_op *op;
+	JOB_AES_HMAC *job;
+	uint8_t output_idx;
+	unsigned int processed_jobs = 0;
+	int ret;
+
+	qp = aesni_mb_dev->queue_pairs[*(uint16_t *)q_id];
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	output_idx = qp->output_idx;
+
+	do {
+		/* Get next free mb job struct from mb manager */
+		job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		if (unlikely(job == NULL)) {
+			/* if no free mb job structs we need to flush mb_mgr */
+			processed_jobs += mb_mgr_flush(
+						qp,
+						(struct rte_multi_fn_op **)
+							&ops[processed_jobs],
+						nb_ops - processed_jobs);
+
+			if (nb_ops == processed_jobs)
+				break;
+
+			job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+		}
+
+		/*
+		 * Get next operation to process from ingress queue.
+		 * There is no need to return the job to the MB_MGR if there
+		 * are no more operations to process, since the MB_MGR can use
+		 * that pointer again in next get_next calls.
+		 */
+		ret = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+		if (ret < 0)
+			break;
+
+		ret = mb_job_params_set(job, qp, op, &output_idx);
+		if (unlikely(ret != 0)) {
+			qp->stats.dequeue_err_count++;
+			mb_job_params_null_set(job, op);
+		}
+
+		/* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+		job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+		job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
+		/*
+		 * If submit returns a processed job then handle it,
+		 * before submitting subsequent jobs
+		 */
+		if (job)
+			processed_jobs += completed_jobs_handle(
+						qp,
+						job,
+						(struct rte_multi_fn_op **)
+							&ops[processed_jobs],
+						nb_ops - processed_jobs);
+
+	} while (processed_jobs < nb_ops);
+
+	qp->output_idx = output_idx;
+
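+	/*
+	 * If no job completed during submission, flush the MB manager so
+	 * that queued jobs are processed and at least one op can be returned
+	 * to the caller
+	 */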
+	if (processed_jobs < 1)
+		processed_jobs += mb_mgr_flush(qp,
+					       (struct rte_multi_fn_op **)
+							&ops[processed_jobs],
+					       nb_ops - processed_jobs);
+
+	return processed_jobs;
+}
+
+static int
+aesni_mb_rawdev_pmd_xstats_get(const struct rte_rawdev *rawdev,
+			       const unsigned int ids[],
+			       uint64_t values[],
+			       unsigned int n)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp;
+	struct aesni_mb_rawdev_stats stats = {0};
+	int qp_id;
+	unsigned int i;
+
+	for (qp_id = 0; qp_id < aesni_mb_dev->nb_queue_pairs; qp_id++) {
+		qp = aesni_mb_dev->queue_pairs[qp_id];
+
+		stats.enqueued_count += qp->stats.enqueued_count;
+		stats.dequeued_count += qp->stats.dequeued_count;
+
+		stats.enqueue_err_count += qp->stats.enqueue_err_count;
+		stats.dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+
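+	/*
+	 * xstat ids map to xstat_names[]: 0 - successful enqueues,
+	 * 1 - successful dequeues, 2 - failed enqueues, 3 - failed dequeues
+	 */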
+	for (i = 0; i < n; i++) {
+		switch (ids[i]) {
+		case 0:
+			values[i] = stats.enqueued_count;
+			break;
+		case 1:
+			values[i] = stats.dequeued_count;
+			break;
+		case 2:
+			values[i] = stats.enqueue_err_count;
+			break;
+		case 3:
+			values[i] = stats.dequeue_err_count;
+			break;
+		default:
+			values[i] = 0;
+			break;
+		}
+	}
+
+	return n;
+}
+
+static int
+aesni_mb_rawdev_pmd_xstats_get_names(
+				__rte_unused const struct rte_rawdev *rawdev,
+				struct rte_rawdev_xstats_name *names,
+				unsigned int size)
+{
+	unsigned int i;
+
+	if (size < RTE_DIM(xstat_names))
+		return RTE_DIM(xstat_names);
+
+	for (i = 0; i < RTE_DIM(xstat_names); i++)
+		strlcpy(names[i].name, xstat_names[i], sizeof(names[i].name));
+
+	return RTE_DIM(xstat_names);
+}
+
+static int
+aesni_mb_rawdev_pmd_xstats_reset(struct rte_rawdev *rawdev,
+				 const uint32_t *ids,
+				 uint32_t nb_ids)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp;
+	unsigned int i;
+	int qp_id;
+
+	if (!ids) {
+		for (qp_id = 0; qp_id < aesni_mb_dev->nb_queue_pairs; qp_id++) {
+			qp = aesni_mb_dev->queue_pairs[qp_id];
+			qp->stats.enqueued_count = 0;
+			qp->stats.dequeued_count = 0;
+			qp->stats.enqueue_err_count = 0;
+			qp->stats.dequeue_err_count = 0;
+		}
+
+		return 0;
+	}
+
+	for (i = 0; i < nb_ids; i++) {
+		switch (ids[i]) {
+		case 0:
+			for (qp_id = 0;
+			     qp_id < aesni_mb_dev->nb_queue_pairs;
+			     qp_id++) {
+				qp = aesni_mb_dev->queue_pairs[qp_id];
+				qp->stats.enqueued_count = 0;
+			}
+			break;
+		case 1:
+			for (qp_id = 0;
+			     qp_id < aesni_mb_dev->nb_queue_pairs;
+			     qp_id++) {
+				qp = aesni_mb_dev->queue_pairs[qp_id];
+				qp->stats.dequeued_count = 0;
+			}
+			break;
+		case 2:
+			for (qp_id = 0;
+			     qp_id < aesni_mb_dev->nb_queue_pairs;
+			     qp_id++) {
+				qp = aesni_mb_dev->queue_pairs[qp_id];
+				qp->stats.enqueue_err_count = 0;
+			}
+			break;
+		case 3:
+			for (qp_id = 0;
+			     qp_id < aesni_mb_dev->nb_queue_pairs;
+			     qp_id++) {
+				qp = aesni_mb_dev->queue_pairs[qp_id];
+				qp->stats.dequeue_err_count = 0;
+			}
+			break;
+		default:
+			AESNI_MB_RAWDEV_ERR("Invalid xstat id - cannot reset");
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+aesni_mb_rawdev_pmd_selftest(uint16_t dev_id)
+{
+	return aesni_mb_rawdev_test(dev_id);
+}
+
+static struct rte_multi_fn_session *
+aesni_mb_rawdev_pmd_session_create(struct rte_rawdev *rawdev,
+				   struct rte_multi_fn_xform *xform,
+				   int socket_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_session *aesni_sess = NULL;
+	struct rte_multi_fn_session *session;
+	struct rte_crypto_sym_xform *cipher_xform;
+	enum aesni_mb_rawdev_op op;
+	int ret;
+
+	op = session_support_check(xform);
+
+	/* Allocate multi-function session */
+	session = rte_zmalloc_socket("multi_fn_session",
+				     sizeof(struct rte_multi_fn_session),
+				     RTE_CACHE_LINE_MIN_SIZE,
+				     socket_id);
+
+	if (session == NULL) {
+		AESNI_MB_RAWDEV_ERR("Multi-function session allocation failed");
+		return NULL;
+	}
+
+	/* Allocate AESNI-MB_rawdev session */
+	aesni_sess = rte_zmalloc_socket("aesni_mb_rawdev_session",
+					sizeof(struct aesni_mb_rawdev_session),
+					RTE_CACHE_LINE_MIN_SIZE,
+					socket_id);
+
+	if (aesni_sess == NULL) {
+		AESNI_MB_RAWDEV_ERR(
+				"AESNI-MB rawdev session allocation failed");
+		return NULL;
+	}
+
+	session->sess_private_data = aesni_sess;
+	aesni_sess->op = op;
+
+	switch (op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		cipher_xform = &xform->next->crypto_sym;
+		break;
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		cipher_xform = &xform->crypto_sym;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		cipher_xform = &xform->next->crypto_sym;
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported multi-function xform chain");
+		return NULL;
+	}
+
+	ret = session_err_detect_parameters_set(aesni_sess);
+
+	if (ret != 0) {
+		AESNI_MB_RAWDEV_ERR(
+				"Invalid/unsupported error detect parameters");
+		return NULL;
+	}
+
+	ret = session_cipher_parameters_set(aesni_mb_dev->mb_mgr,
+					    aesni_sess,
+					    cipher_xform);
+
+	if (ret != 0) {
+		AESNI_MB_RAWDEV_ERR("Invalid/unsupported cipher parameters");
+		return NULL;
+	}
+
+	return session;
+}
+
+static int
+aesni_mb_rawdev_pmd_session_destroy(__rte_unused struct rte_rawdev *rawdev,
+				    struct rte_multi_fn_session *sess)
+{
+	if (sess) {
+		if (sess->sess_private_data)
+			rte_free(sess->sess_private_data);
+		rte_free(sess);
+	}
+
+	return 0;
+}
+
+static const struct rte_rawdev_ops aesni_mb_rawdev_ops = {
+	.dev_configure = aesni_mb_rawdev_pmd_config,
+	.dev_info_get = aesni_mb_rawdev_pmd_info_get,
+	.dev_start = aesni_mb_rawdev_pmd_start,
+	.dev_stop = aesni_mb_rawdev_pmd_stop,
+	.dev_close = aesni_mb_rawdev_pmd_close,
+	.queue_setup = aesni_mb_rawdev_pmd_qp_setup,
+	.queue_release = aesni_mb_rawdev_pmd_qp_release,
+	.queue_count = aesni_mb_rawdev_pmd_qp_count,
+	.enqueue_bufs = aesni_mb_rawdev_pmd_enq,
+	.dequeue_bufs = aesni_mb_rawdev_pmd_deq,
+	.xstats_get = aesni_mb_rawdev_pmd_xstats_get,
+	.xstats_get_names = aesni_mb_rawdev_pmd_xstats_get_names,
+	.xstats_reset = aesni_mb_rawdev_pmd_xstats_reset,
+	.dev_selftest = aesni_mb_rawdev_pmd_selftest,
+};
+
+static const struct rte_multi_fn_ops mf_ops = {
+	.session_create = aesni_mb_rawdev_pmd_session_create,
+	.session_destroy = aesni_mb_rawdev_pmd_session_destroy,
+};
+
+static int
+aesni_mb_rawdev_create(const char *name,
+		       struct rte_vdev_device *vdev,
+		       unsigned int socket_id)
+{
+	struct rte_rawdev *rawdev;
+	struct aesni_mb_rawdev *aesni_mb_dev;
+	enum aesni_mb_rawdev_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
+
+	/* Allocate device structure */
+	rawdev = rte_rawdev_pmd_allocate(name,
+					 sizeof(struct aesni_mb_rawdev),
+					 socket_id);
+	if (!rawdev) {
+		AESNI_MB_RAWDEV_ERR("Unable to allocate raw device");
+		return -EINVAL;
+	}
+
+	rawdev->dev_ops = &aesni_mb_rawdev_ops;
+	rawdev->device = &vdev->device;
+	rawdev->driver_name = driver_name;
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = AESNI_MB_RAWDEV_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = AESNI_MB_RAWDEV_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = AESNI_MB_RAWDEV_AVX;
+	else
+		vector_mode = AESNI_MB_RAWDEV_SSE;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
+		AESNI_MB_RAWDEV_WARN("AES instructions not supported by CPU");
+
+	mb_mgr = alloc_mb_mgr(0);
+
+	if (mb_mgr == NULL)
+		return -ENOMEM;
+
+	switch (vector_mode) {
+	case AESNI_MB_RAWDEV_SSE:
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX:
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX2:
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX512:
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported vector mode %u",
+				    vector_mode);
+		free_mb_mgr(mb_mgr);
+		mb_mgr = NULL;
+		break;
+	}
+
+	if (mb_mgr == NULL) {
+		rte_rawdev_pmd_release(rawdev);
+		return -1;
+	}
+
+	/* Set the device's private data */
+	aesni_mb_dev = rawdev->dev_private;
+	aesni_mb_dev->mf_ops = &mf_ops;
+	aesni_mb_dev->vector_mode = vector_mode;
+	aesni_mb_dev->max_nb_queue_pairs = MAX_QUEUES;
+	aesni_mb_dev->mb_mgr = mb_mgr;
+
+	AESNI_MB_RAWDEV_INFO("IPSec Multi-buffer library version used: %s",
+			     imb_get_version_str());
+
+	return 0;
+}
+
+static int
+aesni_mb_rawdev_destroy(const char *name)
+{
+	struct rte_rawdev *rawdev;
+	struct aesni_mb_rawdev *aesni_mb_dev;
+	int ret;
+
+	rawdev = rte_rawdev_pmd_get_named_dev(name);
+	if (rawdev == NULL) {
+		AESNI_MB_RAWDEV_ERR("Invalid device name (%s)", name);
+		return -EINVAL;
+	}
+
+	aesni_mb_dev = rawdev->dev_private;
+	free_mb_mgr(aesni_mb_dev->mb_mgr);
+
+	ret = rte_rawdev_pmd_release(rawdev);
+	if (ret)
+		AESNI_MB_RAWDEV_DEBUG("Device cleanup failed");
+
+	return 0;
+}
+
+static int
+aesni_mb_rawdev_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	AESNI_MB_RAWDEV_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+	return aesni_mb_rawdev_create(name, vdev, rte_socket_id());
+}
+
+static int
+aesni_mb_rawdev_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	AESNI_MB_RAWDEV_INFO("Closing %s on NUMA node %d",
+			     name,
+			     rte_socket_id());
+
+	return aesni_mb_rawdev_destroy(name);
+}
+
+static struct rte_vdev_driver rawdev_aesni_mb_pmd_drv = {
+	.probe = aesni_mb_rawdev_probe,
+	.remove = aesni_mb_rawdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(rawdev_aesni_mb, rawdev_aesni_mb_pmd_drv);
+
+RTE_INIT(aesni_mb_raw_init_log)
+{
+	aesni_mb_rawdev_pmd_logtype = rte_log_register("rawdev.aesni_mb");
+	if (aesni_mb_rawdev_pmd_logtype >= 0)
+		rte_log_set_level(aesni_mb_rawdev_pmd_logtype, RTE_LOG_INFO);
+}
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev.h b/drivers/raw/aesni_mb/aesni_mb_rawdev.h
new file mode 100644
index 000000000..59d78b8d8
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _AESNI_MB_RAWDEV_H_
+#define _AESNI_MB_RAWDEV_H_
+
+#include <intel-ipsec-mb.h>
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#include <rte_multi_fn_driver.h>
+
+/* AESNI-MB Rawdev PMD logtype */
+int aesni_mb_rawdev_pmd_logtype;
+
+#define AESNI_MB_RAWDEV_LOG(level, fmt, args...)  \
+	rte_log(RTE_LOG_ ## level, aesni_mb_rawdev_pmd_logtype,  \
+		"%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ##args)
+#define AESNI_MB_RAWDEV_DEBUG(fmt, args...) \
+	AESNI_MB_RAWDEV_LOG(DEBUG, fmt, ## args)
+#define AESNI_MB_RAWDEV_INFO(fmt, args...) \
+	AESNI_MB_RAWDEV_LOG(INFO, fmt, ## args)
+#define AESNI_MB_RAWDEV_ERR(fmt, args...) \
+	AESNI_MB_RAWDEV_LOG(ERR, fmt, ## args)
+#define AESNI_MB_RAWDEV_WARN(fmt, args...) \
+	AESNI_MB_RAWDEV_LOG(WARNING, fmt, ## args)
+
+/* Maximum length for output */
+#define OUTPUT_LENGTH_MAX 8
+
+/* AESNI-MB supported operations */
+enum aesni_mb_rawdev_op {
+	AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO,  /* DOCSIS encrypt direction */
+	AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC,  /* DOCSIS decrypt direction */
+	AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP, /* PON encrypt direction */
+	AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC, /* PON decrypt direction */
+	AESNI_MB_RAWDEV_OP_NOT_SUPPORTED
+};
+
+/* AESNI-MB device statistics */
+struct aesni_mb_rawdev_stats {
+	uint64_t enqueued_count;
+	uint64_t dequeued_count;
+	uint64_t enqueue_err_count;
+	uint64_t dequeue_err_count;
+};
+
+/* AESNI-MB queue pair */
+struct aesni_mb_rawdev_qp {
+	uint16_t id;
+	char name[RTE_RAWDEV_NAME_MAX_LEN];
+	MB_MGR *mb_mgr;
+	struct rte_ring *ingress_queue;
+	struct aesni_mb_rawdev_stats stats;
+	uint8_t output_idx;
+	uint8_t temp_outputs[MAX_JOBS][OUTPUT_LENGTH_MAX];
+} __rte_cache_aligned;
+
+/* AESNI-MB vector modes */
+enum aesni_mb_rawdev_vector_mode {
+	AESNI_MB_RAWDEV_NOT_SUPPORTED = 0,
+	AESNI_MB_RAWDEV_SSE,
+	AESNI_MB_RAWDEV_AVX,
+	AESNI_MB_RAWDEV_AVX2,
+	AESNI_MB_RAWDEV_AVX512
+};
+
+/* AESNI-MB device data */
+struct aesni_mb_rawdev {
+	const struct rte_multi_fn_ops *mf_ops; /* MUST be first */
+	MB_MGR *mb_mgr;
+	struct aesni_mb_rawdev_qp **queue_pairs;
+	enum aesni_mb_rawdev_vector_mode vector_mode;
+	uint16_t max_nb_queue_pairs;
+	uint16_t nb_queue_pairs;
+};
+
+/* AESNI-MB private session structure */
+struct aesni_mb_rawdev_session {
+	enum aesni_mb_rawdev_op op;
+	JOB_CHAIN_ORDER chain_order;
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	struct {
+		JOB_CIPHER_DIRECTION direction;
+		JOB_CIPHER_MODE mode;
+
+		uint64_t key_length_in_bytes;
+
+		union {
+			struct {
+				uint32_t encode[60] __rte_aligned(16);
+				uint32_t decode[60] __rte_aligned(16);
+			} expanded_aes_keys;
+		};
+	} cipher;
+	struct {
+		JOB_HASH_ALG algo;
+		enum rte_multi_fn_err_detect_operation operation;
+		uint16_t gen_output_len;
+	} err_detect;
+} __rte_cache_aligned;
+
+int
+aesni_mb_rawdev_test(uint16_t dev_id);
+
+#endif /* _AESNI_MB_RAWDEV_H_ */
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev_test.c b/drivers/raw/aesni_mb/aesni_mb_rawdev_test.c
new file mode 100644
index 000000000..a8051cc80
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev_test.c
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_bus_vdev.h>
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#include <rte_ether.h>
+#include <rte_test.h>
+
+#include "aesni_mb_rawdev.h"
+#include "aesni_mb_rawdev_test_vectors.h"
+
+#define TEST_DEV_NAME "rawdev_aesni_mb"
+
+#define TEST(setup, teardown, run, data, suffix) \
+	test_run(setup, teardown, run, data, RTE_STR(run)"_"suffix)
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+#define QP_NB_DESC (4096)
+
+#define MBUF_POOL_NAME        "aesni_mb_rawdev_mbuf_pool"
+#define MBUF_POOL_SIZE        (8191)
+#define MBUF_CACHE_SIZE       (256)
+#define MBUF_DATAPAYLOAD_SIZE (2048)
+#define MBUF_SIZE             (sizeof(struct rte_mbuf) + \
+				RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
+
+#define OP_POOL_NAME  "aesni_mb_rawdev_op_pool"
+#define OP_POOL_SIZE  (8191)
+#define OP_PRIV_SIZE  (16)
+#define OP_CACHE_SIZE (256)
+
+#define MAX_OPS (3)
+
+static int eal_log_level;
+
+struct testsuite_params {
+	uint16_t dev_id;
+	struct rte_mempool *mbuf_pool;
+	struct rte_mempool *op_pool;
+};
+
+struct unittest_params {
+	struct rte_multi_fn_session *sess;
+	struct rte_multi_fn_op *ops[MAX_OPS];
+	struct rte_mbuf *ibuf;
+	struct rte_mbuf *obuf;
+};
+
+static struct testsuite_params testsuite_params;
+static struct unittest_params unittest_params;
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	struct testsuite_params *ts_params = &testsuite_params;
+	uint8_t count = rte_rawdev_count();
+
+	eal_log_level = rte_log_get_level(RTE_LOGTYPE_EAL);
+	rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
+
+	memset(ts_params, 0, sizeof(*ts_params));
+
+	if (!count) {
+		AESNI_MB_RAWDEV_INFO("No existing rawdev found - creating %s",
+				     TEST_DEV_NAME);
+		return rte_vdev_init(TEST_DEV_NAME, NULL);
+	}
+
+	ts_params->dev_id = dev_id;
+
+	ts_params->mbuf_pool = rte_mempool_lookup(MBUF_POOL_NAME);
+	if (ts_params->mbuf_pool == NULL) {
+		/* Not already created so create */
+		ts_params->mbuf_pool = rte_pktmbuf_pool_create(
+						MBUF_POOL_NAME,
+						MBUF_POOL_SIZE,
+						MBUF_CACHE_SIZE,
+						0,
+						MBUF_SIZE,
+						rte_socket_id());
+		if (ts_params->mbuf_pool == NULL) {
+			AESNI_MB_RAWDEV_ERR("Cannot create AESNI-MB rawdev "
+					    "mbuf pool");
+			return TEST_FAILED;
+		}
+	}
+
+	ts_params->op_pool = rte_multi_fn_op_pool_create(OP_POOL_NAME,
+							  OP_POOL_SIZE,
+							  OP_CACHE_SIZE,
+							  OP_PRIV_SIZE,
+							  rte_socket_id());
+
+	if (ts_params->op_pool == NULL) {
+		AESNI_MB_RAWDEV_ERR("Cannot create AESNI-MB rawdev operation "
+				     "pool");
+		return TEST_FAILED;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static void
+testsuite_teardown(void)
+{
+	struct testsuite_params *ts_params = &testsuite_params;
+
+	if (ts_params->mbuf_pool != NULL) {
+		rte_mempool_free(ts_params->mbuf_pool);
+		ts_params->mbuf_pool = NULL;
+	}
+
+	if (ts_params->op_pool != NULL) {
+		rte_mempool_free(ts_params->op_pool);
+		ts_params->op_pool = NULL;
+	}
+
+	rte_vdev_uninit(TEST_DEV_NAME);
+
+	rte_log_set_level(RTE_LOGTYPE_EAL, eal_log_level);
+}
+
+static int
+test_setup(void)
+{
+	struct testsuite_params *ts_params = &testsuite_params;
+	struct unittest_params *ut_params = &unittest_params;
+
+	struct rte_rawdev_info info = {0};
+	struct rte_multi_fn_dev_config mf_dev_conf = {0};
+	struct rte_multi_fn_qp_config qp_conf = {0};
+	uint16_t qp_id;
+	int ret;
+
+	/* Clear unit test parameters before running test */
+	memset(ut_params, 0, sizeof(*ut_params));
+
+	/* Configure device and queue pairs */
+	mf_dev_conf.nb_queues = 1;
+	info.dev_private = &mf_dev_conf;
+	qp_conf.nb_descriptors = QP_NB_DESC;
+
+	ret = rte_rawdev_configure(ts_params->dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to configure rawdev %u",
+				ts_params->dev_id);
+
+	for (qp_id = 0; qp_id < mf_dev_conf.nb_queues; qp_id++) {
+		ret = rte_rawdev_queue_setup(ts_params->dev_id,
+					     qp_id,
+					     &qp_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+					"Failed to setup queue pair %u on "
+					"rawdev %u",
+					qp_id,
+					ts_params->dev_id);
+	}
+
+	ret = rte_rawdev_xstats_reset(ts_params->dev_id, NULL, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to reset stats on rawdev %u",
+				ts_params->dev_id);
+
+	/* Start the device */
+	ret = rte_rawdev_start(ts_params->dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to start rawdev %u",
+				ts_params->dev_id);
+
+	return 0;
+}
+
+static void
+test_teardown(void)
+{
+	struct testsuite_params *ts_params = &testsuite_params;
+	struct unittest_params *ut_params = &unittest_params;
+
+	int i;
+
+	/* Free multi-function operations */
+	for (i = 0; i < MAX_OPS; i++) {
+		if (ut_params->ops[i] != NULL) {
+			rte_multi_fn_op_free(ut_params->ops[i]);
+			ut_params->ops[i] = NULL;
+		}
+	}
+
+	/* Free multi-function session */
+	if (ut_params->sess != NULL) {
+		rte_multi_fn_session_destroy(ts_params->dev_id,
+					     ut_params->sess);
+		ut_params->sess = NULL;
+	}
+
+	/*
+	 * Free mbufs - obuf and ibuf are usually the same, so check
+	 * whether they point to the same address to avoid freeing the
+	 * mbuf twice.
+	 */
+	if (ut_params->obuf != NULL) {
+		rte_pktmbuf_free(ut_params->obuf);
+		if (ut_params->ibuf == ut_params->obuf)
+			ut_params->ibuf = NULL;
+		ut_params->obuf = NULL;
+	}
+	if (ut_params->ibuf != NULL) {
+		rte_pktmbuf_free(ut_params->ibuf);
+		ut_params->ibuf = NULL;
+	}
+
+	/* Stop the device */
+	rte_rawdev_stop(ts_params->dev_id);
+}
+
+static int
+test_docsis_encrypt(void *vtdata)
+{
+	struct docsis_test_data *tdata = (struct docsis_test_data *)vtdata;
+	struct testsuite_params *ts_params = &testsuite_params;
+	struct unittest_params *ut_params = &unittest_params;
+
+	/* Xforms */
+	struct rte_multi_fn_xform xform1 = {0};
+	struct rte_multi_fn_xform xform2 = {0};
+	struct rte_crypto_cipher_xform *xform_cipher;
+
+	/* Operations */
+	struct rte_multi_fn_op *result;
+	struct rte_crypto_sym_op *cipher_op;
+	struct rte_multi_fn_err_detect_op *crc_op;
+
+	/* Cipher params */
+	int cipher_len = 0;
+	uint8_t *iv_ptr;
+
+	/* CRC params */
+	int crc_len = 0, crc_data_len = 0;
+
+	/* Test data */
+	uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+	/* Stats */
+	uint64_t stats[4] = {0};
+	struct rte_rawdev_xstats_name stats_names[4] = {0};
+	const unsigned int stats_id[4] = {0, 1, 2, 3};
+	int num_stats = 0, num_names = 0;
+
+	uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+	int i, ret = TEST_SUCCESS;
+
+	/* Setup source mbuf */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+				 "Failed to allocate source mbuf");
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+	       0,
+	       rte_pktmbuf_tailroom(ut_params->ibuf));
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+						  tdata->plaintext.len);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+	/* Create session */
+	xform1.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+	xform1.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+	xform1.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+	xform1.next = &xform2;
+
+	xform2.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+	xform2.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher = &xform2.crypto_sym.cipher;
+	xform_cipher->op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI;
+	xform_cipher->key.data = tdata->key.data;
+	xform_cipher->key.length = tdata->key.len;
+	xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+	xform_cipher->iv.length = tdata->cipher_iv.len;
+	xform2.next = NULL;
+
+	ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+						      &xform1,
+						      rte_socket_id());
+
+	RTE_TEST_ASSERT((ut_params->sess != NULL &&
+			 ut_params->sess->sess_private_data != NULL),
+			"Failed to create multi-function session");
+
+	/* Create operations */
+	nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+					    ut_params->ops,
+					    2);
+	RTE_TEST_ASSERT_EQUAL(nb_ops,
+			      2,
+			      "Failed to allocate multi-function operations");
+
+	ut_params->ops[0]->next = ut_params->ops[1];
+	ut_params->ops[0]->m_src = ut_params->ibuf;
+	ut_params->ops[0]->m_dst = NULL;
+	ut_params->ops[1]->next = NULL;
+
+	/* CRC op config */
+	crc_len = tdata->plaintext.no_crc == false ?
+					(tdata->plaintext.len -
+					 tdata->plaintext.crc_offset -
+					 RTE_ETHER_CRC_LEN) :
+					0;
+	crc_len = crc_len > 0 ? crc_len : 0;
+	crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+	crc_op = &ut_params->ops[0]->err_detect;
+	crc_op->data.offset = tdata->plaintext.crc_offset;
+	crc_op->data.length = crc_len;
+	crc_op->output.data = rte_pktmbuf_mtod_offset(
+						ut_params->ibuf,
+						uint8_t *,
+						ut_params->ibuf->data_len -
+							crc_data_len);
+
+	/* Cipher encrypt op config */
+	cipher_len = tdata->plaintext.no_cipher == false ?
+					(tdata->plaintext.len -
+					 tdata->plaintext.cipher_offset) :
+					0;
+	cipher_len = cipher_len > 0 ? cipher_len : 0;
+	cipher_op = &ut_params->ops[1]->crypto_sym;
+	cipher_op->cipher.data.offset = tdata->plaintext.cipher_offset;
+	cipher_op->cipher.data.length = cipher_len;
+	iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+				sizeof(struct rte_multi_fn_op);
+	rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+	/* Attach session to operation */
+	ut_params->ops[0]->sess = ut_params->sess;
+
+	/* Enqueue to device */
+	nb_enq = rte_rawdev_enqueue_buffers(
+				ts_params->dev_id,
+				(struct rte_rawdev_buf **)ut_params->ops,
+				1,
+				(rte_rawdev_obj_t)&qp_id);
+
+	RTE_TEST_ASSERT_EQUAL(nb_enq,
+			      1,
+			      "Failed to enqueue multi-function operations");
+
+	/* Dequeue from device */
+	do {
+		nb_deq = rte_rawdev_dequeue_buffers(
+					ts_params->dev_id,
+					(struct rte_rawdev_buf **)&result,
+					1,
+					(rte_rawdev_obj_t)&qp_id);
+	} while (nb_deq < 1);
+
+	RTE_TEST_ASSERT_EQUAL(nb_deq,
+			      1,
+			      "Failed to dequeue multi-function operations");
+
+	/* Check results */
+	ciphertext = plaintext;
+
+	/* Validate ciphertext */
+	ret = memcmp(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Ciphertext not as expected");
+
+	RTE_TEST_ASSERT_EQUAL(result->overall_status,
+			      RTE_MULTI_FN_OP_STATUS_SUCCESS,
+			      "Multi-function op processing failed");
+
+	/* Print stats */
+	num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+					  stats_id,
+					  stats,
+					  4);
+	num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+						stats_names,
+						4);
+	RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+	RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+	for (i = 0; i < num_stats; i++)
+		AESNI_MB_RAWDEV_DEBUG("%s:  %"PRIu64,
+				      stats_names[i].name,
+				      stats[i]);
+
+	return 0;
+}
+
+static int
+test_docsis_decrypt(void *vtdata)
+{
+	struct docsis_test_data *tdata = (struct docsis_test_data *)vtdata;
+	struct testsuite_params *ts_params = &testsuite_params;
+	struct unittest_params *ut_params = &unittest_params;
+
+	/* Xforms */
+	struct rte_multi_fn_xform xform1 = {0};
+	struct rte_multi_fn_xform xform2 = {0};
+	struct rte_crypto_cipher_xform *xform_cipher;
+
+	/* Operations */
+	struct rte_multi_fn_op *result;
+	struct rte_crypto_sym_op *cipher_op;
+	struct rte_multi_fn_err_detect_op *crc_op;
+
+	/* Cipher params */
+	int cipher_len = 0;
+	uint8_t *iv_ptr;
+
+	/* CRC params */
+	int crc_len = 0, crc_data_len;
+
+	/* Test data */
+	uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+	/* Stats */
+	uint64_t stats[4] = {0};
+	struct rte_rawdev_xstats_name stats_names[4] = {0};
+	const unsigned int stats_id[4] = {0, 1, 2, 3};
+	int num_stats = 0, num_names = 0;
+
+	uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+	int i, ret = TEST_SUCCESS;
+
+	/* Setup source mbuf */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+				 "Failed to allocate source mbuf");
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+	       0,
+	       rte_pktmbuf_tailroom(ut_params->ibuf));
+	ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+						   tdata->ciphertext.len);
+	memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+	/* Create session */
+	xform1.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+	xform1.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher = &xform1.crypto_sym.cipher;
+	xform_cipher->op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+	xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI;
+	xform_cipher->key.data = tdata->key.data;
+	xform_cipher->key.length = tdata->key.len;
+	xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+	xform_cipher->iv.length = tdata->cipher_iv.len;
+	xform1.next = &xform2;
+
+	xform2.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+	xform2.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+	xform2.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+	xform2.next = NULL;
+
+	ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+						      &xform1,
+						      rte_socket_id());
+
+	RTE_TEST_ASSERT((ut_params->sess != NULL &&
+			 ut_params->sess->sess_private_data != NULL),
+			"Failed to create multi-function session");
+
+	/* Create operations */
+	nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+					    ut_params->ops,
+					    2);
+	RTE_TEST_ASSERT_EQUAL(nb_ops,
+			      2,
+			      "Failed to allocate multi-function operations");
+
+	ut_params->ops[0]->next = ut_params->ops[1];
+	ut_params->ops[0]->m_src = ut_params->ibuf;
+	ut_params->ops[0]->m_dst = NULL;
+	ut_params->ops[1]->next = NULL;
+
+	/* Cipher decrypt op config */
+	cipher_len = tdata->ciphertext.no_cipher == false ?
+					(tdata->ciphertext.len -
+					 tdata->ciphertext.cipher_offset) :
+					0;
+	cipher_len = cipher_len > 0 ? cipher_len : 0;
+	cipher_op = &ut_params->ops[0]->crypto_sym;
+	cipher_op->cipher.data.offset = tdata->ciphertext.cipher_offset;
+	cipher_op->cipher.data.length = cipher_len;
+	iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+				sizeof(struct rte_multi_fn_op);
+	rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+	/* CRC op config */
+	crc_len = tdata->ciphertext.no_crc == false ?
+					(tdata->ciphertext.len -
+					 tdata->ciphertext.crc_offset -
+					 RTE_ETHER_CRC_LEN) :
+					0;
+	crc_len = crc_len > 0 ? crc_len : 0;
+	crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+	crc_op = &ut_params->ops[1]->err_detect;
+	crc_op->data.offset = tdata->ciphertext.crc_offset;
+	crc_op->data.length = crc_len;
+	crc_op->output.data = rte_pktmbuf_mtod_offset(
+						ut_params->ibuf,
+						uint8_t *,
+						ut_params->ibuf->data_len -
+							crc_data_len);
+
+	/* Attach session to operation */
+	ut_params->ops[0]->sess = ut_params->sess;
+
+	/* Enqueue to device */
+	nb_enq = rte_rawdev_enqueue_buffers(
+				ts_params->dev_id,
+				(struct rte_rawdev_buf **)ut_params->ops,
+				1,
+				(rte_rawdev_obj_t)&qp_id);
+
+	RTE_TEST_ASSERT_EQUAL(nb_enq,
+			      1,
+			      "Failed to enqueue multi-function operations");
+
+	/* Dequeue from device */
+	do {
+		nb_deq = rte_rawdev_dequeue_buffers(
+					ts_params->dev_id,
+					(struct rte_rawdev_buf **)&result,
+					1,
+					(rte_rawdev_obj_t)&qp_id);
+	} while (nb_deq < 1);
+
+	RTE_TEST_ASSERT_EQUAL(nb_deq,
+			      1,
+			      "Failed to dequeue multi-function operations");
+
+	/* Check results */
+	plaintext = ciphertext;
+
+	/* Validate plaintext */
+	ret = memcmp(plaintext,
+		     tdata->plaintext.data,
+		     /* Check only as far as CRC - CRC is checked internally */
+		     tdata->plaintext.len - crc_data_len);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Plaintext not as expected");
+
+	RTE_TEST_ASSERT_EQUAL(result->overall_status,
+			      RTE_MULTI_FN_OP_STATUS_SUCCESS,
+			      "Multi-function op processing failed");
+
+	/* Print stats */
+	num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+					  stats_id,
+					  stats,
+					  4);
+	num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+						stats_names,
+						4);
+	RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+	RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+	for (i = 0; i < num_stats; i++)
+		AESNI_MB_RAWDEV_DEBUG("%s:  %"PRIu64,
+				      stats_names[i].name,
+				      stats[i]);
+
+	return 0;
+}
+
+static int
+test_pon_encrypt(void *vtdata)
+{
+	struct pon_test_data *tdata = (struct pon_test_data *)vtdata;
+	struct testsuite_params *ts_params = &testsuite_params;
+	struct unittest_params *ut_params = &unittest_params;
+
+	/* Xforms */
+	struct rte_multi_fn_xform xform1 = {0};
+	struct rte_multi_fn_xform xform2 = {0};
+	struct rte_multi_fn_xform xform3 = {0};
+	struct rte_crypto_cipher_xform *xform_cipher;
+
+	/* Operations */
+	struct rte_multi_fn_op *result;
+	struct rte_crypto_sym_op *cipher_op;
+	struct rte_multi_fn_err_detect_op *crc_op;
+	struct rte_multi_fn_err_detect_op *bip_op;
+
+	/* Cipher params */
+	int cipher_len = 0;
+	uint8_t *iv_ptr;
+
+	/* CRC params */
+	int crc_len = 0, crc_data_len = 0;
+
+	/* BIP params */
+	int bip_len = 0;
+
+	/* Test data */
+	uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+	/* Stats */
+	uint64_t stats[4] = {0};
+	struct rte_rawdev_xstats_name stats_names[4] = {0};
+	const unsigned int stats_id[4] = {0, 1, 2, 3};
+	int num_stats = 0, num_names = 0;
+
+	uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+	int i, ret = TEST_SUCCESS;
+
+	/* Setup source mbuf */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+				 "Failed to allocate source mbuf");
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+	       0,
+	       rte_pktmbuf_tailroom(ut_params->ibuf));
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+						  tdata->plaintext.len);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+	/* Create session */
+	xform1.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+	xform1.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+	xform1.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+	xform1.next = &xform2;
+
+	xform2.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+	xform2.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher = &xform2.crypto_sym.cipher;
+	xform_cipher->op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_CTR;
+	xform_cipher->key.data = tdata->key.data;
+	xform_cipher->key.length = tdata->key.len;
+	xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+	xform_cipher->iv.length = tdata->cipher_iv.len;
+	xform2.next = &xform3;
+
+	xform3.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+	xform3.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_BIP32;
+	xform3.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+	xform3.next = NULL;
+
+	ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+						      &xform1,
+						      rte_socket_id());
+
+	RTE_TEST_ASSERT((ut_params->sess != NULL &&
+			 ut_params->sess->sess_private_data != NULL),
+			"Failed to create multi-function session");
+
+	/* Create operations */
+	nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+					    ut_params->ops,
+					    3);
+	RTE_TEST_ASSERT_EQUAL(nb_ops,
+			      3,
+			      "Failed to allocate multi-function operations");
+
+	ut_params->ops[0]->next = ut_params->ops[1];
+	ut_params->ops[0]->m_src = ut_params->ibuf;
+	ut_params->ops[0]->m_dst = NULL;
+	ut_params->ops[1]->next = ut_params->ops[2];
+	ut_params->ops[2]->next = NULL;
+
+	/* CRC op config */
+	crc_len = tdata->plaintext.len -
+			tdata->plaintext.crc_offset -
+			tdata->plaintext.padding_len -
+			RTE_ETHER_CRC_LEN;
+	crc_len = crc_len > 0 ? crc_len : 0;
+	crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+	crc_op = &ut_params->ops[0]->err_detect;
+	crc_op->data.offset = tdata->plaintext.crc_offset;
+	crc_op->data.length = crc_len;
+	crc_op->output.data = rte_pktmbuf_mtod_offset(
+					ut_params->ibuf,
+					uint8_t *,
+					ut_params->ibuf->data_len -
+						tdata->plaintext.padding_len -
+						crc_data_len);
+
+	/* Cipher encrypt op config */
+	cipher_len = tdata->plaintext.no_cipher == false ?
+					(tdata->plaintext.len -
+					 tdata->plaintext.cipher_offset) :
+					0;
+	cipher_len = cipher_len > 0 ? cipher_len : 0;
+	cipher_op = &ut_params->ops[1]->crypto_sym;
+	cipher_op->cipher.data.offset = tdata->plaintext.cipher_offset;
+	cipher_op->cipher.data.length = cipher_len;
+	iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+				sizeof(struct rte_multi_fn_op);
+	rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+	/* BIP op config */
+	bip_len = tdata->plaintext.len - tdata->plaintext.bip_offset;
+	bip_len = bip_len > 0 ? bip_len : 0;
+	bip_op = &ut_params->ops[2]->err_detect;
+	bip_op->data.offset = tdata->plaintext.bip_offset;
+	bip_op->data.length = bip_len;
+	bip_op->output.data = (uint8_t *)(ut_params->ops[2]) +
+				sizeof(struct rte_multi_fn_op);
+
+	/* Attach session to op */
+	ut_params->ops[0]->sess = ut_params->sess;
+
+	/* Enqueue to device */
+	nb_enq = rte_rawdev_enqueue_buffers(
+				ts_params->dev_id,
+				(struct rte_rawdev_buf **)ut_params->ops,
+				1,
+				(rte_rawdev_obj_t)&qp_id);
+
+	RTE_TEST_ASSERT_EQUAL(nb_enq,
+			      1,
+			      "Failed to enqueue multi-function operations");
+
+	/* Dequeue from device */
+	do {
+		nb_deq = rte_rawdev_dequeue_buffers(
+					ts_params->dev_id,
+					(struct rte_rawdev_buf **)&result,
+					1,
+					(rte_rawdev_obj_t)&qp_id);
+	} while (nb_deq < 1);
+
+	/* Check results */
+	ciphertext = plaintext;
+
+	/* Validate ciphertext */
+	ret = memcmp(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Ciphertext not as expected");
+
+	ret = memcmp(bip_op->output.data,
+		     tdata->output.data,
+		     tdata->output.len);
+	RTE_TEST_ASSERT_SUCCESS(ret, "BIP not as expected");
+
+	RTE_TEST_ASSERT_EQUAL(result->overall_status,
+			      RTE_MULTI_FN_OP_STATUS_SUCCESS,
+			      "Multi-function op processing failed");
+
+	/* Print stats */
+	num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+					  stats_id,
+					  stats,
+					  4);
+	num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+						stats_names,
+						4);
+	RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+	RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+	for (i = 0; i < num_stats; i++)
+		AESNI_MB_RAWDEV_DEBUG("%s:  %"PRIu64,
+				      stats_names[i].name,
+				      stats[i]);
+
+	return 0;
+}
+
+static int
+test_pon_decrypt(void *vtdata)
+{
+	struct pon_test_data *tdata = (struct pon_test_data *)vtdata;
+	struct testsuite_params *ts_params = &testsuite_params;
+	struct unittest_params *ut_params = &unittest_params;
+
+	/* Xforms */
+	struct rte_multi_fn_xform xform1 = {0};
+	struct rte_multi_fn_xform xform2 = {0};
+	struct rte_multi_fn_xform xform3 = {0};
+	struct rte_crypto_cipher_xform *xform_cipher;
+
+	/* Operations */
+	struct rte_multi_fn_op *result;
+	struct rte_crypto_sym_op *cipher_op;
+	struct rte_multi_fn_err_detect_op *crc_op;
+	struct rte_multi_fn_err_detect_op *bip_op;
+
+	/* Cipher params */
+	int cipher_len = 0;
+	uint8_t *iv_ptr;
+
+	/* CRC params */
+	int crc_len = 0, crc_data_len = 0;
+
+	/* BIP params */
+	int bip_len = 0;
+
+	/* Test data */
+	uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+	/* Stats */
+	uint64_t stats[4] = {0};
+	struct rte_rawdev_xstats_name stats_names[4] = {0};
+	const unsigned int stats_id[4] = {0, 1, 2, 3};
+	int num_stats = 0, num_names = 0;
+
+	uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+	int i, ret = TEST_SUCCESS;
+
+	/* Setup source mbuf */
+	ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+	RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+				 "Failed to allocate source mbuf");
+	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+	       0,
+	       rte_pktmbuf_tailroom(ut_params->ibuf));
+	ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+						   tdata->ciphertext.len);
+	memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+	/* Create session */
+	xform1.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+	xform1.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_BIP32;
+	xform1.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+	xform1.next = &xform2;
+
+	xform2.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+	xform2.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher = &xform2.crypto_sym.cipher;
+	xform_cipher->op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+	xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_CTR;
+	xform_cipher->key.data = tdata->key.data;
+	xform_cipher->key.length = tdata->key.len;
+	xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+	xform_cipher->iv.length = tdata->cipher_iv.len;
+	xform2.next = &xform3;
+
+	xform3.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+	xform3.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+	xform3.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+	xform3.next = NULL;
+
+	ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+						      &xform1,
+						      rte_socket_id());
+
+	RTE_TEST_ASSERT((ut_params->sess != NULL &&
+			 ut_params->sess->sess_private_data != NULL),
+			"Failed to create multi-function session");
+
+	/* Create operations */
+	nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+					    ut_params->ops,
+					    3);
+	RTE_TEST_ASSERT_EQUAL(nb_ops,
+			      3,
+			      "Failed to allocate multi-function operations");
+
+	ut_params->ops[0]->next = ut_params->ops[1];
+	ut_params->ops[0]->m_src = ut_params->ibuf;
+	ut_params->ops[0]->m_dst = NULL;
+	ut_params->ops[1]->next = ut_params->ops[2];
+	ut_params->ops[2]->next = NULL;
+
+	/* BIP op config */
+	bip_len = tdata->ciphertext.len - tdata->ciphertext.bip_offset;
+	bip_len = bip_len > 0 ? bip_len : 0;
+	bip_op = &ut_params->ops[0]->err_detect;
+	bip_op->data.offset = tdata->ciphertext.bip_offset;
+	bip_op->data.length = bip_len;
+	bip_op->output.data = (uint8_t *)(ut_params->ops[0]) +
+				sizeof(struct rte_multi_fn_op);
+
+	/* Cipher decrypt op config */
+	cipher_len = tdata->ciphertext.no_cipher == false ?
+					(tdata->ciphertext.len -
+					 tdata->ciphertext.cipher_offset) :
+					0;
+	cipher_len = cipher_len > 0 ? cipher_len : 0;
+	cipher_op = &ut_params->ops[1]->crypto_sym;
+	cipher_op->cipher.data.offset = tdata->ciphertext.cipher_offset;
+	cipher_op->cipher.data.length = cipher_len;
+	iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+				sizeof(struct rte_multi_fn_op);
+	rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+	/* CRC op config */
+	crc_len = tdata->ciphertext.len -
+			tdata->ciphertext.crc_offset -
+			tdata->ciphertext.padding_len -
+			RTE_ETHER_CRC_LEN;
+	crc_len = crc_len > 0 ? crc_len : 0;
+	crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+	crc_op = &ut_params->ops[2]->err_detect;
+	crc_op->data.offset = tdata->ciphertext.crc_offset;
+	crc_op->data.length = crc_len;
+	crc_op->output.data = rte_pktmbuf_mtod_offset(
+					ut_params->ibuf,
+					uint8_t *,
+					ut_params->ibuf->data_len -
+						tdata->ciphertext.padding_len -
+						crc_data_len);
+
+	/* Attach session to op */
+	ut_params->ops[0]->sess = ut_params->sess;
+
+	/* Enqueue to device */
+	nb_enq = rte_rawdev_enqueue_buffers(
+				ts_params->dev_id,
+				(struct rte_rawdev_buf **)ut_params->ops,
+				1,
+				(rte_rawdev_obj_t)&qp_id);
+
+	RTE_TEST_ASSERT_EQUAL(nb_enq,
+			      1,
+			      "Failed to enqueue multi-function operations");
+
+	/* Dequeue from device */
+	do {
+		nb_deq = rte_rawdev_dequeue_buffers(
+					ts_params->dev_id,
+					(struct rte_rawdev_buf **)&result,
+					1,
+					(rte_rawdev_obj_t)&qp_id);
+	} while (nb_deq < 1);
+
+	/* Check results */
+	plaintext = ciphertext;
+
+	/* Validate plaintext */
+	ret = memcmp(plaintext,
+		     tdata->plaintext.data,
+		     /* Check only as far as CRC - CRC is checked internally */
+		     tdata->plaintext.len -
+			tdata->plaintext.padding_len -
+			crc_data_len);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Plaintext not as expected");
+
+	ret = memcmp(bip_op->output.data,
+		     tdata->output.data,
+		     tdata->output.len);
+	RTE_TEST_ASSERT_SUCCESS(ret, "BIP not as expected");
+
+	RTE_TEST_ASSERT_EQUAL(result->overall_status,
+			      RTE_MULTI_FN_OP_STATUS_SUCCESS,
+			      "Multi-function op processing failed");
+
+	/* Print stats */
+	num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+					  stats_id,
+					  stats,
+					  4);
+	num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+						stats_names,
+						4);
+	RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+	RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+	for (i = 0; i < num_stats; i++)
+		AESNI_MB_RAWDEV_DEBUG("%s:  %"PRIu64,
+				      stats_names[i].name,
+				      stats[i]);
+
+	return 0;
+}
+
+static void
+test_run(int (*setup)(void),
+	 void (*teardown)(void),
+	 int (*run)(void *),
+	 void *data,
+	 const char *name)
+{
+	int ret = 0;
+
+	if (setup != NULL) {
+		ret = setup();
+		if (ret < 0) {
+			AESNI_MB_RAWDEV_INFO("Error setting up test %s", name);
+			unsupported++;
+			run = NULL; /* skip the test body on setup failure */
+		}
+	}
+
+	if (run != NULL) {
+		ret = run(data);
+		if (ret < 0) {
+			failed++;
+			AESNI_MB_RAWDEV_INFO("%s Failed", name);
+		} else {
+			passed++;
+			AESNI_MB_RAWDEV_INFO("%s Passed", name);
+		}
+	}
+
+	if (teardown != NULL)
+		teardown();
+
+	total++;
+}
+
+int
+aesni_mb_rawdev_test(uint16_t dev_id)
+{
+	if (testsuite_setup(dev_id) != TEST_SUCCESS) {
+		AESNI_MB_RAWDEV_ERR("Setup failed");
+		testsuite_teardown();
+		return TEST_FAILED;
+	}
+
+	/* DOCSIS: Crypto-CRC */
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_1, "1");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_2, "2");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_3, "3");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_4, "4");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_5, "5");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_6, "6");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_7, "7");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_8, "8");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_9, "9");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_10, "10");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_11, "11");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_12, "12");
+	TEST(test_setup, test_teardown, test_docsis_encrypt,
+	     &docsis_test_case_13, "13");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_1, "1");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_2, "2");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_3, "3");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_4, "4");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_5, "5");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_6, "6");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_7, "7");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_8, "8");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_9, "9");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_10, "10");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_11, "11");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_12, "12");
+	TEST(test_setup, test_teardown, test_docsis_decrypt,
+	     &docsis_test_case_13, "13");
+
+	/* PON: Crypto-CRC-BIP */
+	TEST(test_setup, test_teardown, test_pon_encrypt,
+	     &pon_test_case_1, "1");
+	TEST(test_setup, test_teardown, test_pon_encrypt,
+	     &pon_test_case_2, "2");
+	TEST(test_setup, test_teardown, test_pon_encrypt,
+	     &pon_test_case_3, "3");
+	TEST(test_setup, test_teardown, test_pon_encrypt,
+	     &pon_test_case_4, "4");
+	TEST(test_setup, test_teardown, test_pon_encrypt,
+	     &pon_test_case_5, "5");
+	TEST(test_setup, test_teardown, test_pon_encrypt,
+	     &pon_test_case_6, "6");
+	TEST(test_setup, test_teardown, test_pon_decrypt,
+	     &pon_test_case_1, "1");
+	TEST(test_setup, test_teardown, test_pon_decrypt,
+	     &pon_test_case_2, "2");
+	TEST(test_setup, test_teardown, test_pon_decrypt,
+	     &pon_test_case_3, "3");
+	TEST(test_setup, test_teardown, test_pon_decrypt,
+	     &pon_test_case_4, "4");
+	TEST(test_setup, test_teardown, test_pon_decrypt,
+	     &pon_test_case_5, "5");
+	TEST(test_setup, test_teardown, test_pon_decrypt,
+	     &pon_test_case_6, "6");
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+	printf("Not supported : %d\n", unsupported);
+
+	if (failed)
+		return TEST_FAILED;
+
+	return TEST_SUCCESS;
+}
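
For context, a minimal sketch (not part of this diff) of how the test above is reached through the generic rawdev API: the driver is expected to plug aesni_mb_rawdev_test() into the dev_selftest callback of its rte_rawdev_ops. The real wiring lives in aesni_mb_rawdev.c earlier in this patch, so the ops table below is only an illustration and its name is made up.

#include <rte_rawdev_pmd.h>

int aesni_mb_rawdev_test(uint16_t dev_id);

/* Illustrative ops table - only the selftest hook is shown */
static const struct rte_rawdev_ops aesni_mb_example_ops = {
	.dev_selftest = aesni_mb_rawdev_test,
};

With that hook in place, an application can run the whole suite via rte_rawdev_selftest(dev_id); patch 3/4 of this series presumably triggers it from the rawdev autotest.
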
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h b/drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h
new file mode 100644
index 000000000..46bb220f4
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h
@@ -0,0 +1,1183 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _AESNI_MB_RAWDEV_TEST_VECTORS_H_
+#define _AESNI_MB_RAWDEV_TEST_VECTORS_H_
+
+#include <stdbool.h>
+
+/*
+ * DOCSIS test data and cases
+ * - encrypt direction: CRC-Crypto
+ * - decrypt direction: Crypto-CRC
+ */
+struct docsis_test_data {
+	struct {
+		uint8_t data[16];
+		unsigned int len;
+	} key;
+
+	struct {
+		uint8_t data[16] __rte_aligned(16);
+		unsigned int len;
+	} cipher_iv;
+
+	struct {
+		uint8_t data[1024];
+		unsigned int len;
+		unsigned int cipher_offset;
+		unsigned int crc_offset;
+		bool no_cipher;
+		bool no_crc;
+	} plaintext;
+
+	struct {
+		uint8_t data[1024];
+		unsigned int len;
+		unsigned int cipher_offset;
+		unsigned int crc_offset;
+		bool no_cipher;
+		bool no_crc;
+	} ciphertext;
+};
+
+struct docsis_test_data docsis_test_case_1 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x7A, 0xF0,
+			/* CRC */
+			0x61, 0xF8, 0x63, 0x42
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_2 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 25,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x7A, 0xF0, 0xDF,
+			/* CRC */
+			0xFE, 0x12, 0x99, 0xE5
+		},
+		.len = 25,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_3 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 34,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0xD6, 0xE2, 0x70, 0x5C,
+			0xE6, 0x4D, 0xCC, 0x8C, 0x47, 0xB7, 0x09, 0xD6,
+			/* CRC */
+			0x54, 0x85, 0xF8, 0x32
+		},
+		.len = 34,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_4 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 35,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x92, 0x6A, 0xC2, 0xDC,
+			0xEE, 0x3B, 0x31, 0xEC, 0x03, 0xDE, 0x95, 0x33,
+			0x5E,
+			/* CRC */
+			0xFE, 0x47, 0x3E, 0x22
+		},
+		.len = 35,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_5 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 82,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x77, 0x74, 0x56, 0x05,
+			0xD1, 0x14, 0xA2, 0x8D, 0x2C, 0x9A, 0x11, 0xFC,
+			0x7D, 0xB0, 0xE7, 0x18, 0xCE, 0x75, 0x7C, 0x89,
+			0x14, 0x56, 0xE2, 0xF2, 0xB7, 0x47, 0x08, 0x27,
+			0xF7, 0x08, 0x7A, 0x13, 0x90, 0x81, 0x75, 0xB0,
+			0xC7, 0x91, 0x04, 0x83, 0xAD, 0x11, 0x46, 0x46,
+			0xF8, 0x54, 0x87, 0xA0, 0x42, 0xF3, 0x71, 0xA9,
+			0x8A, 0xCD, 0x59, 0x77, 0x67, 0x11, 0x1A, 0x87,
+			/* CRC */
+			0xAB, 0xED, 0x2C, 0x26
+		},
+		.len = 82,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_6 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 83,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x77, 0x74, 0x56, 0x05,
+			0xD1, 0x14, 0xA2, 0x8D, 0x2C, 0x9A, 0x11, 0xFC,
+			0x7D, 0xB0, 0xE7, 0x18, 0xCE, 0x75, 0x7C, 0x89,
+			0x14, 0x56, 0xE2, 0xF2, 0xB7, 0x47, 0x08, 0x27,
+			0xF7, 0x08, 0x7A, 0x13, 0x90, 0x81, 0x75, 0xB0,
+			0xC7, 0x91, 0x04, 0x83, 0xAD, 0x11, 0x46, 0x46,
+			0xF8, 0x54, 0x87, 0xA0, 0xA4, 0x0C, 0xC2, 0xF0,
+			0x81, 0x49, 0xA8, 0xA6, 0x6C, 0x48, 0xEB, 0x1F,
+			0x4B,
+			/* CRC */
+			0x2F, 0xD4, 0x48, 0x18
+		},
+		.len = 83,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_7 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0x3B, 0x9F, 0x72, 0x4C, 0xB5, 0x72,
+			0x3E, 0x56, 0x54, 0x49, 0x13, 0x53, 0xC4, 0xAA,
+			0xCD, 0xEA, 0x6A, 0x88, 0x99, 0x07, 0x86, 0xF4,
+			0xCF, 0x03, 0x4E, 0xDF, 0x65, 0x61, 0x47, 0x5B,
+			0x2F, 0x81, 0x09, 0x12, 0x9A, 0xC2, 0x24, 0x8C,
+			0x09,
+			/* CRC */
+			0x11, 0xB4, 0x06, 0x33
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_8 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = true
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x7A, 0xF0,
+			/* CRC */
+			0x8A, 0x0F, 0x74, 0xE8
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = true
+	}
+};
+
+struct docsis_test_data docsis_test_case_9 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = true
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0x3B, 0x9F, 0x72, 0x4C, 0xB5, 0x72,
+			0x3E, 0x56, 0x54, 0x49, 0x13, 0x53, 0xC4, 0xAA,
+			0xCD, 0xEA, 0x6A, 0x88, 0x99, 0x07, 0x86, 0xF4,
+			0xCF, 0x03, 0x4E, 0xDF, 0x65, 0x61, 0x47, 0x5B,
+			0x2F, 0x81, 0x09, 0x12, 0x9A, 0xC2, 0x24, 0x8C,
+			0x09,
+			/* CRC */
+			0x5D, 0x2B, 0x12, 0xF4
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = false,
+		.no_crc = true
+	}
+};
+
+struct docsis_test_data docsis_test_case_10 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+			/* CRC */
+			0x14, 0x08, 0xE8, 0x55
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_11 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = false
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xB3, 0x60, 0xEB, 0x38
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = false
+	}
+};
+
+struct docsis_test_data docsis_test_case_12 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = true
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 24,
+		.cipher_offset = 18,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = true
+	}
+};
+
+struct docsis_test_data docsis_test_case_13 = {
+	.key = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+			0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+			0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = true
+	},
+	.ciphertext = {
+		.data = {
+			/* DOCSIS header */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+			0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+			0xAA,
+			/* CRC */
+			0xFF, 0xFF, 0xFF, 0xFF
+		},
+		.len = 83,
+		.cipher_offset = 40,
+		.crc_offset = 6,
+		.no_cipher = true,
+		.no_crc = true
+	}
+};
+
+/*
+ * PON test data and cases
+ * - encrypt direction: CRC-Crypto-BIP
+ * - decrypt direction: BIP-Crypto-CRC
+ */
+struct pon_test_data {
+	struct {
+		uint8_t data[16];
+		unsigned int len;
+	} key;
+
+	struct {
+		uint8_t data[16] __rte_aligned(16);
+		unsigned int len;
+	} cipher_iv;
+
+	struct {
+		uint8_t data[1024];
+		unsigned int len;
+		unsigned int cipher_offset;
+		unsigned int crc_offset;
+		unsigned int bip_offset;
+		unsigned int padding_len;
+		bool no_cipher;
+	} plaintext;
+
+	struct {
+		uint8_t data[1024];
+		unsigned int len;
+		unsigned int cipher_offset;
+		unsigned int crc_offset;
+		unsigned int bip_offset;
+		unsigned int padding_len;
+		bool no_cipher;
+	} ciphertext;
+
+	struct {
+		uint8_t data[8];
+		unsigned int len;
+	} output;
+};
+
+struct pon_test_data pon_test_case_1 = {
+	.key = {
+		.data = {
+			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+			0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04,
+			/* CRC */
+			0x05, 0x06, 0x01, 0x01
+		},
+		.len = 16,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = false
+	},
+	.ciphertext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23,
+			/* Ethernet frame */
+			0xC7, 0x62, 0x82, 0xCA,
+			/* CRC */
+			0x3E, 0x92, 0xC8, 0x5A
+		},
+		.len = 16,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = false
+	},
+	.output = {
+		/* Expected BIP */
+		.data = {0xF9, 0xD0, 0x4C, 0xA2},
+		.len  = 4
+	}
+};
+
+struct pon_test_data pon_test_case_2 = {
+	.key = {
+		.data = {
+			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+			0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x01,
+			/* CRC */
+			0x81, 0x00, 0x00, 0x01
+		},
+		.len = 24,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = false
+	},
+	.ciphertext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C,
+			/* Ethernet frame */
+			0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED,
+			0xB7, 0x90, 0x1E, 0x02,
+			/* CRC */
+			0xEA, 0x38, 0xA1, 0x78
+		},
+		.len = 24,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = false
+	},
+	.output = {
+		/* Expected BIP */
+		.data = {0x6C, 0xE5, 0xC6, 0x70},
+		.len  = 4
+	}
+};
+
+struct pon_test_data pon_test_case_3 = {
+	.key = {
+		.data = {
+			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+			0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* XGEM header */
+			0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+			0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0xB0, 0x7E,
+			0x00, 0x00, 0x04, 0x06, 0x83, 0xBD, 0xC0, 0xA8,
+			0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+			0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+			0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+			0x00, 0x00, 0x30, 0x31,
+			/* CRC */
+			0x32, 0x33, 0x34, 0x35
+		},
+		.len = 72,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = false
+	},
+	.ciphertext = {
+		.data = {
+			/* XGEM header */
+			0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+			/* Ethernet frame */
+			0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED,
+			0xB7, 0x90, 0x1E, 0x02, 0x6B, 0x2C, 0x08, 0x7D,
+			0x3C, 0x90, 0xE8, 0x2C, 0x44, 0x30, 0x03, 0x29,
+			0x5F, 0x88, 0xA9, 0xD6, 0x1E, 0xF9, 0xD1, 0xF1,
+			0xD6, 0x16, 0x8C, 0x72, 0xA4, 0xCD, 0xD2, 0x8F,
+			0x63, 0x26, 0xC9, 0x66, 0xB0, 0x65, 0x24, 0x9B,
+			0x60, 0x5B, 0x18, 0x60, 0xBD, 0xD5, 0x06, 0x13,
+			0x40, 0xC9, 0x60, 0x64,
+			/* CRC */
+			0x36, 0x5F, 0x86, 0x8C
+		},
+		.len = 72,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = false
+	},
+	.output = {
+		/* Expected BIP */
+		.data = {0xDF, 0xE0, 0xAD, 0xFB},
+		.len  = 4
+	}
+};
+
+struct pon_test_data pon_test_case_4 = {
+	.key = {
+		.data = {
+			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+			0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x39, 0x03, 0xFD, 0x00, 0x00, 0xB3, 0x6A,
+			/* Ethernet frame */
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x10, 0x11,
+			/* CRC */
+			0x20, 0x21, 0x22, 0x23,
+			/* Padding */
+			0x55, 0x55
+		},
+		.len = 24,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 2,
+		.no_cipher = false
+	},
+	.ciphertext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x39, 0x03, 0xFD, 0x00, 0x00, 0xB3, 0x6A,
+			/* Ethernet frame */
+			0x73, 0xE0, 0x5D, 0x5D, 0x32, 0x9C, 0x3B, 0xFA,
+			0x6B, 0x66,
+			/* CRC */
+			0xF6, 0x8E, 0x5B, 0xD5,
+			/* Padding */
+			0xAB, 0xCD
+		},
+		.len = 24,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 2,
+		.no_cipher = false
+	},
+	.output = {
+		/* Expected BIP */
+		.data = {0x71, 0xF6, 0x8B, 0x73},
+		.len  = 4
+	}
+};
+
+struct pon_test_data pon_test_case_5 = {
+	.key = {
+		.data = {
+			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+			0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x05, 0x03, 0xFD, 0x00, 0x00, 0xB9, 0xB4,
+			/* Ethernet frame */
+			0x08,
+			/* Padding */
+			0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55
+		},
+		.len = 16,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 7,
+		.no_cipher = false
+	},
+	.ciphertext = {
+		.data = {
+			/* XGEM header */
+			0x00, 0x05, 0x03, 0xFD, 0x00, 0x00, 0xB9, 0xB4,
+			/* Ethernet frame */
+			0x73,
+			/* Padding */
+			0xBC, 0x02, 0x03, 0x6B, 0xC4, 0x60, 0xA0
+		},
+		.len = 16,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 7,
+		.no_cipher = false
+	},
+	.output = {
+		/* Expected BIP */
+		.data = {0x18, 0x7D, 0xD8, 0xEA},
+		.len  = 4
+	}
+};
+
+struct pon_test_data pon_test_case_6 = {
+	.key = {
+		.data = {
+			0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+			0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+		},
+		.len = 16
+	},
+	.cipher_iv = {
+		.data = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+		},
+		.len = 16
+	},
+	.plaintext = {
+		.data = {
+			/* XGEM header */
+			0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+			0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0xB0, 0x7E,
+			0x00, 0x00, 0x04, 0x06, 0x83, 0xBD, 0xC0, 0xA8,
+			0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+			0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+			0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+			0x00, 0x00, 0x30, 0x31,
+			/* CRC */
+			0x32, 0x33, 0x34, 0x35
+		},
+		.len = 72,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = true
+	},
+	.ciphertext = {
+		.data = {
+			/* XGEM header */
+			0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+			/* Ethernet frame */
+			0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+			0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0xB0, 0x7E,
+			0x00, 0x00, 0x04, 0x06, 0x83, 0xBD, 0xC0, 0xA8,
+			0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+			0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+			0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+			0x00, 0x00, 0x30, 0x31,
+			/* CRC */
+			0x53, 0xC1, 0xE6, 0x0C
+		},
+		.len = 72,
+		.cipher_offset = 8,
+		.crc_offset = 8,
+		.bip_offset = 0,
+		.padding_len = 0,
+		.no_cipher = true
+	},
+	.output = {
+		/* Expected BIP */
+		.data = {0x6A, 0xD5, 0xC2, 0xAB},
+		.len  = 4
+	}
+};
+#endif /* _AESNI_MB_RAWDEV_TEST_VECTORS_H_ */
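
When adding new DOCSIS vectors, the expected CRC bytes can be cross-checked on the host with librte_net, which is already a dependency of this driver. A sketch, not part of the patch: it computes the CRC over the same span the test code programs into the CRC op (crc_offset up to, but not including, the 4 trailing CRC bytes); how the 32-bit result maps onto the byte order used in the vectors still has to be checked against the driver's convention.

#include <rte_ether.h>
#include <rte_net_crc.h>

/* Reference CRC32 (Ethernet) over the plaintext region of a vector */
static uint32_t
docsis_vector_ref_crc(const struct docsis_test_data *t)
{
	const uint8_t *start = &t->plaintext.data[t->plaintext.crc_offset];
	uint32_t len = t->plaintext.len - t->plaintext.crc_offset -
			RTE_ETHER_CRC_LEN;

	return rte_net_crc_calc(start, len, RTE_NET_CRC32_ETH);
}
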
diff --git a/drivers/raw/aesni_mb/meson.build b/drivers/raw/aesni_mb/meson.build
new file mode 100644
index 000000000..085f629be
--- /dev/null
+++ b/drivers/raw/aesni_mb/meson.build
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020 Intel Corporation.
+
+IMB_required_ver = '0.53.3-dev'
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+	build = false
+	reason = 'missing dependency, "libIPSec_MB"'
+else
+	ext_deps += lib
+
+	# version comes with quotes, so we split based on " and take the middle
+	imb_ver = cc.get_define('IMB_VERSION_STR',
+		prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
+
+	if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
+		reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
+				IMB_required_ver, imb_ver)
+		build = false
+	endif
+
+endif
+
+sources = files('aesni_mb_rawdev.c', 'aesni_mb_rawdev_test.c')
+allow_experimental_apis = true
+deps += ['bus_vdev', 'net', 'rawdev', 'cryptodev', 'common_multi_fn']
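
The snippet below is a run-time counterpart of the build-time version gate above, assuming the imb_get_version()/IMB_VERSION() helpers that recent intel-ipsec-mb releases export; it is not something this patch adds, just an illustration of how the same 0.53.3 floor could be enforced where the library is resolved at run time.

#include <errno.h>
#include <intel-ipsec-mb.h>

/* Mirror of IMB_required_ver from the build file */
static int
aesni_mb_check_ipsec_mb_version(void)
{
	if (imb_get_version() < IMB_VERSION(0, 53, 3))
		return -ENOTSUP;

	return 0;
}
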
diff --git a/drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map b/drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map
new file mode 100644
index 000000000..fa9e17c29
--- /dev/null
+++ b/drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+	local: *;
+};
\ No newline at end of file
diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build
index bb5797760..c0bbc84a6 100644
--- a/drivers/raw/meson.build
+++ b/drivers/raw/meson.build
@@ -5,7 +5,8 @@ drivers = ['dpaa2_cmdif', 'dpaa2_qdma',
 	'ifpga', 'ioat', 'ntb',
 	'octeontx2_dma',
 	'octeontx2_ep',
-	'skeleton']
+	'skeleton',
+	'aesni_mb']
 std_deps = ['rawdev']
 config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
 driver_name_fmt = 'rte_rawdev_@0@'
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index b836d220d..a6e1e925f 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -347,6 +347,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV)   += -lrte_rawdev_ioat
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += -lrte_rawdev_ntb
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += -lrte_rawdev_octeontx2_dma
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += -lrte_rawdev_octeontx2_ep
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += -lrte_pmd_aesni_mb_rawdev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += -lIPSec_MB
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MULTI_FN_COMMON) += -lrte_multi_fn
 endif # CONFIG_RTE_LIBRTE_RAWDEV
 
-- 
2.17.1
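
For completeness, a sketch of how an application might exercise the new PMD end to end once the series is applied. The vdev name "rawdev_aesni_mb" and the selftest hook-up are assumptions taken from the driver naming convention, not confirmed by this hunk, so check aesni_mb_rawdev.c before relying on them.

#include <rte_bus_vdev.h>
#include <rte_rawdev.h>

static int
run_aesni_mb_rawdev_selftest(void)
{
	uint16_t dev_id;

	/* Create the software raw device (name is an assumption) */
	if (rte_vdev_init("rawdev_aesni_mb", NULL) < 0)
		return -1;

	dev_id = rte_rawdev_get_dev_id("rawdev_aesni_mb");

	/* Runs aesni_mb_rawdev_test() if wired into dev_selftest */
	return rte_rawdev_selftest(dev_id);
}
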

