* [dpdk-dev] [PATCH v2 2/4] raw/aesni_mb: add aesni_mb raw device
2020-04-03 16:36 [dpdk-dev] [PATCH v2 0/4] introduce multi-function processing support David Coyle
2020-04-03 16:36 ` [dpdk-dev] [PATCH v2 1/4] raw/common: add multi-function interface David Coyle
@ 2020-04-03 16:36 ` David Coyle
2020-04-07 18:51 ` De Lara Guarch, Pablo
2020-04-03 16:36 ` [dpdk-dev] [PATCH v2 3/4] test/rawdev: add aesni_mb raw device tests David Coyle
` (2 subsequent siblings)
4 siblings, 1 reply; 22+ messages in thread
From: David Coyle @ 2020-04-03 16:36 UTC (permalink / raw)
To: dev
Cc: declan.doherty, fiona.trahe, pablo.de.lara.guarch, brendan.ryan,
shreyansh.jain, hemant.agrawal, David Coyle, Mairtin o Loingsigh
Adding an AESNI-MB raw device, thereby exposing AESNI-MB to the
rawdev API. The AESNI-MB raw device will use the multi-function
interface to allow combined operations to be sent to the AESNI-MB
software library.
Signed-off-by: David Coyle <david.coyle@intel.com>
Signed-off-by: Mairtin o Loingsigh <mairtin.oloingsigh@intel.com>
---
config/common_base | 6 +
drivers/raw/Makefile | 2 +
drivers/raw/aesni_mb/Makefile | 47 +
drivers/raw/aesni_mb/aesni_mb_rawdev.c | 1536 +++++++++++++++++
drivers/raw/aesni_mb/aesni_mb_rawdev.h | 112 ++
drivers/raw/aesni_mb/aesni_mb_rawdev_test.c | 1102 ++++++++++++
.../aesni_mb/aesni_mb_rawdev_test_vectors.h | 1183 +++++++++++++
drivers/raw/aesni_mb/meson.build | 26 +
.../aesni_mb/rte_rawdev_aesni_mb_version.map | 3 +
drivers/raw/meson.build | 3 +-
mk/rte.app.mk | 2 +
11 files changed, 4021 insertions(+), 1 deletion(-)
create mode 100644 drivers/raw/aesni_mb/Makefile
create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev.c
create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev.h
create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev_test.c
create mode 100644 drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h
create mode 100644 drivers/raw/aesni_mb/meson.build
create mode 100644 drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map
diff --git a/config/common_base b/config/common_base
index 4f004968b..7ac6a3428 100644
--- a/config/common_base
+++ b/config/common_base
@@ -818,6 +818,12 @@ CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV=y
#
CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV=y
+#
+# Compile PMD for AESNI raw device
+#
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV=n
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG=n
+
#
# Compile multi-fn raw device interface
#
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile
index e16da8d95..5aa608e1e 100644
--- a/drivers/raw/Makefile
+++ b/drivers/raw/Makefile
@@ -15,5 +15,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += ntb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += octeontx2_dma
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += octeontx2_ep
DIRS-y += common
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += aesni_mb
+DEPDIRS-aesni_mb := common
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/raw/aesni_mb/Makefile b/drivers/raw/aesni_mb/Makefile
new file mode 100644
index 000000000..0a40b75b4
--- /dev/null
+++ b/drivers/raw/aesni_mb/Makefile
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020 Intel Corporation.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_aesni_mb_rawdev.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# versioning export map
+EXPORT_MAP := rte_rawdev_aesni_mb_version.map
+
+# external library dependencies
+LDLIBS += -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_multi_fn
+
+# the multi-fn raw device interface is a hard build dependency
+ifneq ($(CONFIG_RTE_LIBRTE_MULTI_FN_COMMON),y)
+$(error "RTE_LIBRTE_MULTI_FN_COMMON is required to build aesni_mb raw device")
+endif
+
+# resolve the on-disk path of intel-ipsec-mb.h by running the preprocessor
+IMB_HDR = $(shell echo '\#include <intel-ipsec-mb.h>' | \
+ $(CC) -E $(EXTRA_CFLAGS) - | grep 'intel-ipsec-mb.h' | \
+ head -n1 | cut -d'"' -f2)
+
+# Detect library version
+IMB_VERSION = $(shell grep -e "IMB_VERSION_STR" $(IMB_HDR) | cut -d'"' -f2)
+IMB_VERSION_NUM = $(shell grep -e "IMB_VERSION_NUM" $(IMB_HDR) | cut -d' ' -f3)
+
+# an empty version string means the header was not found or not parseable
+ifeq ($(IMB_VERSION),)
+$(error "IPSec_MB version >= 0.53.3 is required to build aesni_mb raw device")
+endif
+
+# NOTE(review): presumably 0x3503 is IMB_VERSION_NUM for 0.53.3 - verify
+# against the IMB_VERSION() encoding in intel-ipsec-mb.h
+ifeq ($(shell expr $(IMB_VERSION_NUM) \< 0x3503), 1)
+$(error "IPSec_MB version >= 0.53.3 is required to build aesni_mb raw device")
+endif
+
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += aesni_mb_rawdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += aesni_mb_rawdev_test.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev.c b/drivers/raw/aesni_mb/aesni_mb_rawdev.c
new file mode 100644
index 000000000..946bdd871
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev.c
@@ -0,0 +1,1536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <stdbool.h>
+
+#include <intel-ipsec-mb.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_string_fns.h>
+#include <rte_multi_fn.h>
+#include <rte_ether.h>
+
+#include "aesni_mb_rawdev.h"
+
+#define MAX_QUEUES (64)
+#define RING_NAME_MAX_LEN (64)
+
+#define PON_BIP_LEN (4)
+#define PON_AUTH_TAG_CRC_OFFSET (4)
+
+static const uint16_t err_detect_output_byte_lengths[] = {
+ [IMB_AUTH_DOCSIS_CRC32] = RTE_ETHER_CRC_LEN,
+ [IMB_AUTH_PON_CRC_BIP] = (PON_BIP_LEN + RTE_ETHER_CRC_LEN),
+};
+
+static const char * const xstat_names[] = {
+ "successful_enqueues", "successful_dequeues",
+ "failed_enqueues", "failed_dequeues",
+};
+
+static const char *driver_name = "rawdev_aesni_mb";
+
+/* Build the globally unique name used for this queue pair and its ring. */
+static int
+qp_unique_name_set(struct rte_rawdev *rawdev, struct aesni_mb_rawdev_qp *qp)
+{
+	unsigned int written;
+
+	written = snprintf(qp->name, sizeof(qp->name),
+			   "aesni_mb_rawdev_pmd_%u_qp_%u",
+			   rawdev->dev_id, qp->id);
+
+	/* snprintf returns the untruncated length - fail on truncation */
+	return (written >= sizeof(qp->name)) ? -1 : 0;
+}
+
+/*
+ * Create the ring used to feed operations into a queue pair, or reuse
+ * an existing ring of the same name if it is already large enough.
+ * Returns NULL on failure.
+ */
+static struct rte_ring *
+qp_processed_ops_ring_create(struct aesni_mb_rawdev_qp *qp,
+			     unsigned int ring_size,
+			     int socket_id)
+{
+	char ring_name[RING_NAME_MAX_LEN];
+	struct rte_ring *r;
+
+	if (strlcpy(ring_name, qp->name, sizeof(ring_name)) >=
+			sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r == NULL)
+		return rte_ring_create(ring_name,
+				       ring_size,
+				       socket_id,
+				       RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+	if (rte_ring_get_size(r) >= ring_size) {
+		AESNI_MB_RAWDEV_DEBUG(
+			"Reusing existing ring %s for processed ops",
+			ring_name);
+		return r;
+	}
+
+	AESNI_MB_RAWDEV_ERR(
+		"Unable to reuse existing ring %s for processed ops",
+		ring_name);
+	return NULL;
+}
+
+/*
+ * Byte length of the generated error-detection output for 'algo':
+ * 4-byte CRC for DOCSIS, 4-byte BIP plus 4-byte CRC for PON (see
+ * err_detect_output_byte_lengths).
+ * NOTE(review): assumes 'algo' is one of the two populated indices -
+ * other values read an unpopulated (zero) or out-of-range entry.
+ */
+static uint16_t
+err_detect_output_byte_length_get(JOB_HASH_ALG algo)
+{
+ return err_detect_output_byte_lengths[algo];
+}
+
+/*
+ * Check for the DOCSIS downlink combination: CRC32 generation followed
+ * by AES-DOCSIS-BPI encryption with a 128-bit key and block-sized IV.
+ */
+static bool
+docsis_crc_crypto_encrypt_check(struct rte_multi_fn_xform *xform)
+{
+	struct rte_crypto_sym_xform *crypto_sym;
+	struct rte_multi_fn_err_detect_xform *err_detect;
+	struct rte_multi_fn_xform *next;
+
+	if (xform->type != RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT)
+		return false;
+
+	err_detect = &xform->err_detect;
+	next = xform->next;
+
+	if (err_detect->algo != RTE_MULTI_FN_ERR_DETECT_CRC32_ETH ||
+	    err_detect->op != RTE_MULTI_FN_ERR_DETECT_OP_GENERATE ||
+	    next == NULL ||
+	    next->type != RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM)
+		return false;
+
+	crypto_sym = &next->crypto_sym;
+	next = next->next;
+
+	/* The cipher xform must terminate the chain */
+	return crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+	       crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+	       crypto_sym->cipher.algo == RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
+	       crypto_sym->cipher.key.length == IMB_KEY_AES_128_BYTES &&
+	       crypto_sym->cipher.iv.length == AES_BLOCK_SIZE &&
+	       next == NULL;
+}
+
+/*
+ * Check for the DOCSIS uplink combination: AES-DOCSIS-BPI decryption
+ * with a 128-bit key and block-sized IV, followed by CRC32 verification.
+ */
+static bool
+docsis_crypto_decrypt_crc_check(struct rte_multi_fn_xform *xform)
+{
+	struct rte_crypto_sym_xform *crypto_sym;
+	struct rte_multi_fn_err_detect_xform *err_detect;
+	struct rte_multi_fn_xform *next;
+
+	if (xform->type != RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM)
+		return false;
+
+	crypto_sym = &xform->crypto_sym;
+	next = xform->next;
+
+	if (crypto_sym->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
+	    crypto_sym->cipher.op != RTE_CRYPTO_CIPHER_OP_DECRYPT ||
+	    crypto_sym->cipher.algo != RTE_CRYPTO_CIPHER_AES_DOCSISBPI ||
+	    crypto_sym->cipher.key.length != IMB_KEY_AES_128_BYTES ||
+	    crypto_sym->cipher.iv.length != AES_BLOCK_SIZE ||
+	    next == NULL ||
+	    next->type != RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT)
+		return false;
+
+	err_detect = &next->err_detect;
+	next = next->next;
+
+	/* The error-detection xform must terminate the chain */
+	return err_detect->algo == RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+	       err_detect->op == RTE_MULTI_FN_ERR_DETECT_OP_VERIFY &&
+	       next == NULL;
+}
+
+/*
+ * Check for the PON downstream (encrypt) combination - a 3-element
+ * chain of: CRC32 generation -> AES-CTR encryption (128-bit key,
+ * block-sized IV) -> BIP32 generation.
+ */
+static bool
+pon_crc_crypto_encrypt_bip_check(struct rte_multi_fn_xform *xform)
+{
+ struct rte_crypto_sym_xform *crypto_sym;
+ struct rte_multi_fn_err_detect_xform *err_detect;
+ struct rte_multi_fn_xform *next;
+
+ if (xform->type == RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+ err_detect = &xform->err_detect;
+ next = xform->next;
+
+ if (err_detect->algo ==
+ RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+ err_detect->op ==
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+ next != NULL &&
+ next->type == RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM) {
+
+ crypto_sym = &next->crypto_sym;
+ next = next->next;
+
+ if (crypto_sym->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ crypto_sym->cipher.op ==
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ crypto_sym->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_CTR &&
+ crypto_sym->cipher.key.length ==
+ IMB_KEY_AES_128_BYTES &&
+ crypto_sym->cipher.iv.length ==
+ AES_BLOCK_SIZE &&
+ next != NULL &&
+ next->type ==
+ RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+ err_detect = &next->err_detect;
+ next = next->next;
+
+ /* BIP32 generation must terminate the chain */
+ if (err_detect->algo ==
+ RTE_MULTI_FN_ERR_DETECT_BIP32 &&
+ err_detect->op ==
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+ next == NULL)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Check for the PON upstream (decrypt) combination - a 3-element chain
+ * of: BIP32 generation -> AES-CTR decryption (128-bit key, block-sized
+ * IV) -> CRC32 verification.
+ */
+static bool
+pon_bip_crypto_decrypt_crc_check(struct rte_multi_fn_xform *xform)
+{
+ struct rte_crypto_sym_xform *crypto_sym;
+ struct rte_multi_fn_err_detect_xform *err_detect;
+ struct rte_multi_fn_xform *next;
+
+ if (xform->type == RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+ err_detect = &xform->err_detect;
+ next = xform->next;
+
+ if (err_detect->algo ==
+ RTE_MULTI_FN_ERR_DETECT_BIP32 &&
+ err_detect->op ==
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE &&
+ next != NULL &&
+ next->type == RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM) {
+
+ crypto_sym = &next->crypto_sym;
+ next = next->next;
+
+ if (crypto_sym->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ crypto_sym->cipher.op ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ crypto_sym->cipher.algo ==
+ RTE_CRYPTO_CIPHER_AES_CTR &&
+ crypto_sym->cipher.key.length ==
+ IMB_KEY_AES_128_BYTES &&
+ crypto_sym->cipher.iv.length ==
+ AES_BLOCK_SIZE &&
+ next != NULL &&
+ next->type ==
+ RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT) {
+
+ err_detect = &next->err_detect;
+ next = next->next;
+
+ /* CRC32 verification must terminate the chain */
+ if (err_detect->algo ==
+ RTE_MULTI_FN_ERR_DETECT_CRC32_ETH &&
+ err_detect->op ==
+ RTE_MULTI_FN_ERR_DETECT_OP_VERIFY &&
+ next == NULL)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Map an xform chain onto one of the supported combined operations,
+ * or AESNI_MB_RAWDEV_OP_NOT_SUPPORTED if it matches none of them.
+ */
+static enum aesni_mb_rawdev_op
+session_support_check(struct rte_multi_fn_xform *xform)
+{
+	if (docsis_crc_crypto_encrypt_check(xform))
+		return AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO;
+	if (docsis_crypto_decrypt_crc_check(xform))
+		return AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC;
+	if (pon_crc_crypto_encrypt_bip_check(xform))
+		return AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP;
+	if (pon_bip_crypto_decrypt_crc_check(xform))
+		return AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC;
+
+	return AESNI_MB_RAWDEV_OP_NOT_SUPPORTED;
+}
+
+/*
+ * Derive the session's error-detection parameters (generate/verify
+ * operation, algorithm and generated output length) from the session
+ * operation type. In intel-ipsec-mb the CRC/BIP computation is
+ * modelled as a hash algorithm, hence the IMB_AUTH_* values.
+ * Returns 0 on success, -ENOTSUP for an unknown session operation.
+ */
+static int
+session_err_detect_parameters_set(struct aesni_mb_rawdev_session *sess)
+{
+ switch (sess->op) {
+ case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+ sess->err_detect.operation =
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ sess->err_detect.algo = IMB_AUTH_DOCSIS_CRC32;
+ break;
+ case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+ sess->err_detect.operation = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+ sess->err_detect.algo = IMB_AUTH_DOCSIS_CRC32;
+ break;
+ case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+ sess->err_detect.operation =
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ sess->err_detect.algo = IMB_AUTH_PON_CRC_BIP;
+ break;
+ case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+ sess->err_detect.operation = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+ sess->err_detect.algo = IMB_AUTH_PON_CRC_BIP;
+ break;
+ default:
+ AESNI_MB_RAWDEV_ERR(
+ "Unsupported operation for error detection");
+ return -ENOTSUP;
+ }
+
+ sess->err_detect.gen_output_len =
+ err_detect_output_byte_length_get(sess->err_detect.algo);
+
+ return 0;
+}
+
+/*
+ * Set the session's cipher parameters - direction, mode, IV offset and
+ * length, and the expanded AES round keys - from the crypto xform.
+ * Returns 0 on success, negative errno on invalid parameters.
+ */
+static int
+session_cipher_parameters_set(const MB_MGR *mb_mgr,
+ struct aesni_mb_rawdev_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+
+ /*
+ * NOTE(review): a NULL xform sets the NULL cipher mode but still
+ * returns -EINVAL - confirm a cipher-less chain really is an error
+ * here, or whether this should return 0
+ */
+ if (xform == NULL) {
+ sess->cipher.mode = IMB_CIPHER_NULL;
+ return -EINVAL;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ AESNI_MB_RAWDEV_ERR("Crypto xform not of type cipher");
+ return -EINVAL;
+ }
+
+ /* Select cipher direction */
+ switch (sess->op) {
+ case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+ sess->cipher.direction = IMB_DIR_ENCRYPT;
+ sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
+ break;
+ case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+ sess->cipher.direction = IMB_DIR_DECRYPT;
+ sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
+ break;
+ case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+ sess->cipher.direction = IMB_DIR_ENCRYPT;
+ sess->cipher.mode = IMB_CIPHER_PON_AES_CNTR;
+ break;
+ case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+ sess->cipher.direction = IMB_DIR_DECRYPT;
+ sess->cipher.mode = IMB_CIPHER_PON_AES_CNTR;
+ break;
+ default:
+ AESNI_MB_RAWDEV_ERR("Unsupported operation for cipher");
+ return -ENOTSUP;
+ }
+
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
+
+ /* Check key length and choose key expansion function for AES */
+ switch (xform->cipher.key.length) {
+ case IMB_KEY_AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = IMB_KEY_AES_128_BYTES;
+ IMB_AES_KEYEXP_128(mb_mgr,
+ xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+ break;
+ case IMB_KEY_AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = IMB_KEY_AES_256_BYTES;
+ IMB_AES_KEYEXP_256(mb_mgr,
+ xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+ break;
+ default:
+ AESNI_MB_RAWDEV_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Fetch the private session data attached to an op; marks the op as
+ * having an invalid session and returns NULL if none is attached.
+ */
+static inline struct aesni_mb_rawdev_session *
+session_get(struct rte_multi_fn_op *op)
+{
+	if (unlikely(op->sess == NULL)) {
+		op->overall_status = RTE_MULTI_FN_STATUS_INVALID_SESSION;
+		return NULL;
+	}
+
+	return op->sess->sess_private_data;
+}
+
+/*
+ * Split an op chain into its cipher, CRC and BIP components based on
+ * the session operation: DOCSIS chains carry 2 ops, PON chains carry 3,
+ * ordered to match the xform chain validated at session creation.
+ * Unused outputs are set to NULL.
+ * Returns 0 on success, -EINVAL for a malformed/short chain.
+ */
+static inline int
+op_chain_parse(struct aesni_mb_rawdev_session *sess,
+ struct rte_multi_fn_op *op_chain,
+ struct rte_multi_fn_op **cipher_op,
+ struct rte_multi_fn_op **crc_op,
+ struct rte_multi_fn_op **bip_op)
+{
+ *cipher_op = NULL;
+ *crc_op = NULL;
+ *bip_op = NULL;
+
+ switch (sess->op) {
+ case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+ case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+ if (unlikely(op_chain == NULL || op_chain->next == NULL)) {
+ return -EINVAL;
+ } else if (sess->op == AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO) {
+ *crc_op = op_chain;
+ *cipher_op = op_chain->next;
+ } else {
+ *cipher_op = op_chain;
+ *crc_op = op_chain->next;
+ }
+ break;
+ case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+ case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+ if (unlikely(op_chain == NULL ||
+ op_chain->next == NULL ||
+ op_chain->next->next == NULL)) {
+ return -EINVAL;
+ } else if (sess->op == AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP) {
+ *crc_op = op_chain;
+ *cipher_op = op_chain->next;
+ *bip_op = op_chain->next->next;
+ } else {
+ *bip_op = op_chain;
+ *cipher_op = op_chain->next;
+ *crc_op = op_chain->next->next;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Set the overall status on the first op of a chain, and the per-op
+ * statuses on whichever component ops are present (NULLs are skipped).
+ */
+static inline void
+op_statuses_set(struct rte_multi_fn_op *first_op,
+		struct rte_multi_fn_op *cipher_op,
+		struct rte_multi_fn_op *crc_op,
+		struct rte_multi_fn_op *bip_op,
+		enum rte_multi_fn_op_status overall_status,
+		uint8_t crypto_status,
+		uint8_t err_detect_status)
+{
+	first_op->overall_status = overall_status;
+
+	if (cipher_op)
+		cipher_op->op_status = crypto_status;
+
+	if (crc_op)
+		crc_op->op_status = err_detect_status;
+
+	if (bip_op)
+		bip_op->op_status = err_detect_status;
+}
+
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+#define DOCSIS_CIPHER_CRC_OFFSET_DIFF (RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN)
+#define DOCSIS_CIPHER_CRC_LENGTH_DIFF (RTE_ETHER_HDR_LEN - \
+ RTE_ETHER_TYPE_LEN - \
+ RTE_ETHER_CRC_LEN)
+
+/*
+ * Debug-only sanity check on the relationship between the cipher and
+ * CRC regions of a DOCSIS op. On failure the CRC op and the chain's
+ * overall status are marked failed and -EINVAL is returned.
+ */
+static inline int
+docsis_crypto_crc_check(struct rte_multi_fn_op *first_op,
+			struct rte_multi_fn_op *cipher_op,
+			struct rte_multi_fn_op *crc_op)
+{
+	struct rte_multi_fn_op *err_op = NULL;
+	/* Initialized to avoid maybe-uninitialized warnings under -Werror */
+	uint8_t err_op_status = 0;
+	const uint32_t offset_diff = DOCSIS_CIPHER_CRC_OFFSET_DIFF;
+
+	if (cipher_op->crypto_sym.cipher.data.length &&
+	    crc_op->err_detect.data.length) {
+		/* Cipher offset must be at least 12 greater than CRC offset */
+		if (cipher_op->crypto_sym.cipher.data.offset <
+		    ((uint32_t)crc_op->err_detect.data.offset + offset_diff)) {
+			err_op = crc_op;
+			err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+		/*
+		 * Cipher length must be at least 8 less than CRC length,
+		 * taking known differences of what is ciphered and what is
+		 * crc'ed into account
+		 */
+		} else if ((cipher_op->crypto_sym.cipher.data.length +
+			    DOCSIS_CIPHER_CRC_LENGTH_DIFF) >
+			   crc_op->err_detect.data.length) {
+			err_op = crc_op;
+			err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+		}
+	}
+
+	if (err_op != NULL) {
+		err_op->op_status = err_op_status;
+		first_op->overall_status = RTE_MULTI_FN_OP_STATUS_FAILURE;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define PON_FRAME_HDR_SIZE (8U)
+#define PON_FRAME_MULTIPLE_SIZE (4)
+#define PON_PLI_SHIFT_BITS (2)
+
+/*
+ * Debug-only sanity check on the BIP and CRC regions of a PON op:
+ * the BIP region must be a multiple of 4 bytes and at least a full
+ * PON header long, and the CRC region must match the PLI field in the
+ * PON frame header. On failure the offending op and the chain's
+ * overall status are marked failed and -EINVAL is returned.
+ */
+static inline int
+pon_crypto_crc_bip_check(struct rte_multi_fn_op *first_op,
+			 struct rte_multi_fn_op *crc_op,
+			 struct rte_multi_fn_op *bip_op,
+			 struct rte_mbuf *m_src)
+{
+	struct rte_multi_fn_op *err_op = NULL;
+	/* Initialized to avoid maybe-uninitialized warnings under -Werror */
+	uint8_t err_op_status = 0;
+
+	/*
+	 * BIP length must be multiple of 4 and be at least a full PON
+	 * header in size
+	 */
+	if (bip_op->err_detect.data.length % PON_FRAME_MULTIPLE_SIZE != 0 ||
+	    bip_op->err_detect.data.length < PON_FRAME_HDR_SIZE) {
+		err_op = bip_op;
+		err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+	}
+
+	/*
+	 * Check the PLI field in the PON frame header matches the
+	 * CRC length (a CRC failure overrides a prior BIP failure)
+	 */
+	uint16_t *pli_key_idx = rte_pktmbuf_mtod(m_src, uint16_t *);
+	uint16_t pli = rte_bswap16(*pli_key_idx) >> PON_PLI_SHIFT_BITS;
+	if (crc_op->err_detect.data.length != 0 &&
+	    crc_op->err_detect.data.length != (pli - RTE_ETHER_CRC_LEN)) {
+		err_op = crc_op;
+		err_op_status = RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR;
+	}
+
+	if (err_op != NULL) {
+		err_op->op_status = err_op_status;
+		first_op->overall_status = RTE_MULTI_FN_OP_STATUS_FAILURE;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif /* RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG */
+
+/*
+ * Populate an intel-ipsec-mb job from a multi-function op chain.
+ *
+ * On any failure the relevant op statuses are set and -EINVAL is
+ * returned; the caller then converts the job to a NULL job instead.
+ * Only in-place (m_dst == NULL or m_dst == m_src) operation is
+ * supported.
+ */
+static inline int
+mb_job_params_set(JOB_AES_HMAC *job,
+		  struct aesni_mb_rawdev_qp *qp,
+		  struct rte_multi_fn_op *op,
+		  uint8_t *output_idx)
+{
+	struct rte_mbuf *m_src, *m_dst;
+	struct rte_multi_fn_op *cipher_op;
+	struct rte_multi_fn_op *crc_op;
+	struct rte_multi_fn_op *bip_op;
+	uint32_t cipher_offset;
+	struct aesni_mb_rawdev_session *session;
+
+	session = session_get(op);
+	if (unlikely(session == NULL)) {
+		op->overall_status = RTE_MULTI_FN_STATUS_INVALID_SESSION;
+		return -EINVAL;
+	}
+
+	if (unlikely(op_chain_parse(session,
+				    op,
+				    &cipher_op,
+				    &crc_op,
+				    &bip_op) < 0)) {
+		op_statuses_set(
+			op,
+			cipher_op,
+			crc_op,
+			bip_op,
+			RTE_MULTI_FN_OP_STATUS_FAILURE,
+			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+			RTE_MULTI_FN_ERR_DETECT_OP_STATUS_NOT_PROCESSED);
+		return -EINVAL;
+	}
+
+	op_statuses_set(op,
+			cipher_op,
+			crc_op,
+			bip_op,
+			RTE_MULTI_FN_OP_STATUS_NOT_PROCESSED,
+			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+			RTE_MULTI_FN_ERR_DETECT_OP_STATUS_NOT_PROCESSED);
+
+	m_src = op->m_src;
+
+	if (op->m_dst == NULL || op->m_dst == op->m_src) {
+		/* in-place operation */
+		m_dst = m_src;
+	} else {
+		/* out-of-place operation not supported */
+		op->overall_status = RTE_MULTI_FN_OP_STATUS_FAILURE;
+		return -EINVAL;
+	}
+
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+	switch (session->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		if (docsis_crypto_crc_check(op, cipher_op, crc_op) < 0)
+			return -EINVAL;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		/*
+		 * session->op is known to be ok at this point so ok to
+		 * include default case here
+		 */
+	default:
+		if (pon_crypto_crc_bip_check(op, crc_op, bip_op, m_src) < 0)
+			return -EINVAL;
+		break;
+	}
+#endif
+
+	/* Set order */
+	job->chain_order = session->chain_order;
+
+	/* Set cipher parameters */
+	job->cipher_direction = session->cipher.direction;
+	job->cipher_mode = session->cipher.mode;
+
+	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
+	job->enc_keys = session->cipher.expanded_aes_keys.encode;
+	job->dec_keys = session->cipher.expanded_aes_keys.decode;
+
+	/*
+	 * Set error detection parameters
+	 * In intel-ipsec-mb, error detection is treated as a hash algorithm
+	 */
+	job->hash_alg = session->err_detect.algo;
+
+	/* Generated CRC/BIP goes to a per-qp scratch slot, used round-robin */
+	job->auth_tag_output = qp->temp_outputs[*output_idx];
+	*output_idx = (*output_idx + 1) % MAX_JOBS;
+
+	job->auth_tag_output_len_in_bytes = session->err_detect.gen_output_len;
+
+	/* Set data parameters */
+	cipher_offset = cipher_op->crypto_sym.cipher.data.offset;
+
+	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, cipher_offset);
+
+	job->cipher_start_src_offset_in_bytes = cipher_offset;
+	job->msg_len_to_cipher_in_bytes =
+		cipher_op->crypto_sym.cipher.data.length;
+
+	switch (session->op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		job->hash_start_src_offset_in_bytes =
+			crc_op->err_detect.data.offset;
+		job->msg_len_to_hash_in_bytes = crc_op->err_detect.data.length;
+
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		/*
+		 * session->op is known to be ok at this point so ok to
+		 * include default case here
+		 */
+	default:
+		job->hash_start_src_offset_in_bytes =
+			bip_op->err_detect.data.offset;
+		job->msg_len_to_hash_in_bytes = bip_op->err_detect.data.length;
+
+		break;
+	}
+
+	/* Set IV parameters: IV is session->iv.offset bytes past cipher_op */
+	job->iv_len_in_bytes = session->iv.length;
+	job->iv = (uint8_t *)cipher_op + session->iv.offset;
+
+	job->user_data = op;
+
+	return 0;
+}
+
+/*
+ * Copy the 4-byte BIP computed by the job into the BIP op's output
+ * buffer; nothing is copied when the BIP region has zero length.
+ */
+static inline void
+bip_copy(JOB_AES_HMAC *job, struct rte_multi_fn_op *bip_op)
+{
+	if (bip_op->err_detect.data.length == 0)
+		return;
+
+	memcpy(bip_op->err_detect.output.data, job->auth_tag_output,
+	       PON_BIP_LEN);
+}
+
+/*
+ * Compare the CRC computed by the job (located 'auth_tag_crc_offset'
+ * bytes into the job's auth tag output) against the expected CRC in
+ * the op's output buffer, marking the op VERIFY_FAILED on mismatch.
+ * A zero-length CRC region is skipped.
+ */
+static inline void
+crc_verify(JOB_AES_HMAC *job,
+ struct rte_multi_fn_op *crc_op,
+ uint8_t auth_tag_crc_offset)
+{
+ if (crc_op->err_detect.data.length == 0)
+ return;
+
+ /* Verify CRC */
+ if (memcmp(job->auth_tag_output + auth_tag_crc_offset,
+ crc_op->err_detect.output.data,
+ RTE_ETHER_CRC_LEN) != 0)
+ crc_op->op_status =
+ RTE_MULTI_FN_ERR_DETECT_OP_STATUS_VERIFY_FAILED;
+}
+
+/*
+ * Convert a completed multi-buffer job back into its multi-function
+ * op: set per-op and overall statuses, copy out the BIP for PON jobs
+ * and verify the CRC for verify-direction sessions. Ops whose
+ * statuses were already set (e.g. by a failed mb_job_params_set, which
+ * submits a NULL-alg job) are passed through untouched.
+ * Returns the op recovered from job->user_data.
+ */
+static inline struct rte_multi_fn_op *
+mb_job_post_process(JOB_AES_HMAC *job)
+{
+ struct rte_multi_fn_op *op = (struct rte_multi_fn_op *)job->user_data;
+ struct aesni_mb_rawdev_session *sess = op->sess->sess_private_data;
+ struct rte_multi_fn_op *cipher_op;
+ struct rte_multi_fn_op *crc_op;
+ struct rte_multi_fn_op *bip_op;
+
+ if (unlikely(op_chain_parse(sess,
+ op,
+ &cipher_op,
+ &crc_op,
+ &bip_op) < 0)) {
+ op_statuses_set(
+ op,
+ cipher_op,
+ crc_op,
+ bip_op,
+ RTE_MULTI_FN_OP_STATUS_FAILURE,
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR);
+
+ } else if (op->overall_status ==
+ RTE_MULTI_FN_OP_STATUS_NOT_PROCESSED) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ /* NULL-alg jobs carry pre-set statuses - leave them */
+ if (unlikely(job->hash_alg == IMB_AUTH_NULL))
+ break;
+
+ op_statuses_set(
+ op,
+ cipher_op,
+ crc_op,
+ bip_op,
+ RTE_MULTI_FN_OP_STATUS_SUCCESS,
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ RTE_MULTI_FN_ERR_DETECT_OP_STATUS_SUCCESS);
+
+ if (job->hash_alg == IMB_AUTH_PON_CRC_BIP)
+ bip_copy(job, bip_op);
+
+ /* For PON, the CRC sits 4 bytes into the tag output */
+ if (sess->err_detect.operation ==
+ RTE_MULTI_FN_ERR_DETECT_OP_VERIFY)
+ crc_verify(
+ job,
+ crc_op,
+ job->hash_alg == IMB_AUTH_PON_CRC_BIP ?
+ PON_AUTH_TAG_CRC_OFFSET : 0);
+
+ if (crc_op->op_status !=
+ RTE_MULTI_FN_ERR_DETECT_OP_STATUS_SUCCESS)
+ op->overall_status =
+ RTE_MULTI_FN_OP_STATUS_FAILURE;
+ break;
+ default:
+ op_statuses_set(
+ op,
+ cipher_op,
+ crc_op,
+ bip_op,
+ RTE_MULTI_FN_OP_STATUS_FAILURE,
+ RTE_CRYPTO_OP_STATUS_ERROR,
+ RTE_MULTI_FN_ERR_DETECT_OP_STATUS_ERROR);
+ break;
+ }
+ }
+
+ return op;
+}
+
+/*
+ * Post-process 'job' and any further jobs already completed in the
+ * queue pair's multi-buffer manager, writing the resulting ops to
+ * 'ops' (at most nb_ops). Returns the number of ops written.
+ */
+static unsigned
+completed_jobs_handle(struct aesni_mb_rawdev_qp *qp,
+		      JOB_AES_HMAC *job,
+		      struct rte_multi_fn_op **ops,
+		      uint16_t nb_ops)
+{
+	unsigned int nb_done = 0;
+
+	for (; job != NULL; job = IMB_GET_COMPLETED_JOB(qp->mb_mgr)) {
+		struct rte_multi_fn_op *op = mb_job_post_process(job);
+
+		if (op == NULL) {
+			qp->stats.dequeue_err_count++;
+			break;
+		}
+
+		ops[nb_done++] = op;
+		qp->stats.dequeued_count++;
+
+		if (nb_done == nb_ops)
+			break;
+	}
+
+	return nb_done;
+}
+
+/*
+ * Force completion of in-flight jobs in the queue pair's multi-buffer
+ * manager and collect the resulting ops. Returns the number of ops
+ * written to 'ops'.
+ */
+static inline uint16_t
+mb_mgr_flush(struct aesni_mb_rawdev_qp *qp,
+	     struct rte_multi_fn_op **ops,
+	     uint16_t nb_ops)
+{
+	JOB_AES_HMAC *job = IMB_FLUSH_JOB(qp->mb_mgr);
+
+	if (job == NULL)
+		return 0;
+
+	return completed_jobs_handle(qp, job, ops, nb_ops);
+}
+
+/*
+ * Turn 'job' into a NULL cipher/NULL hash job so that an op whose
+ * parameters could not be set still flows through the multi-buffer
+ * manager and is returned to the caller with its failure statuses.
+ */
+static inline JOB_AES_HMAC *
+mb_job_params_null_set(JOB_AES_HMAC *job, struct rte_multi_fn_op *op)
+{
+	job->chain_order = IMB_ORDER_HASH_CIPHER;
+	job->cipher_mode = IMB_CIPHER_NULL;
+	job->cipher_direction = IMB_DIR_DECRYPT;
+	job->hash_alg = IMB_AUTH_NULL;
+
+	/* Keep the op so it can be recovered when the job completes */
+	job->user_data = op;
+
+	return job;
+}
+
+/*
+ * Configure the device: (re)allocate the queue pair pointer array for
+ * the requested number of queues.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int
+aesni_mb_rawdev_pmd_config(const struct rte_rawdev *rawdev,
+			   rte_rawdev_obj_t config)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct rte_multi_fn_dev_config *conf = config;
+
+	/*
+	 * Don't leak a previously allocated array on reconfigure
+	 * (rte_free(NULL) is a no-op)
+	 */
+	rte_free(aesni_mb_dev->queue_pairs);
+
+	aesni_mb_dev->nb_queue_pairs = conf->nb_queues;
+
+	aesni_mb_dev->queue_pairs =
+		rte_zmalloc_socket(
+			"aesni_mb_rawdev_qps",
+			aesni_mb_dev->nb_queue_pairs *
+				sizeof(struct aesni_mb_rawdev_qp *),
+			RTE_CACHE_LINE_SIZE,
+			rawdev->socket_id);
+
+	if (aesni_mb_dev->queue_pairs == NULL) {
+		AESNI_MB_RAWDEV_ERR("Unable to allocate queue pairs");
+		/* Don't advertise queues that were never allocated */
+		aesni_mb_dev->nb_queue_pairs = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Report device capabilities (currently only the max queue count). */
+static void
+aesni_mb_rawdev_pmd_info_get(struct rte_rawdev *rawdev,
+			     rte_rawdev_obj_t dev_info)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct rte_multi_fn_dev_info *info = dev_info;
+
+	if (info == NULL)
+		return;
+
+	info->max_nb_queues = aesni_mb_dev->max_nb_queue_pairs;
+}
+
+/* Device start - nothing to do for this PMD; always succeeds. */
+static int
+aesni_mb_rawdev_pmd_start(__rte_unused struct rte_rawdev *rawdev)
+{
+ return 0;
+}
+
+/* Device stop - nothing to do for this PMD. */
+static void
+aesni_mb_rawdev_pmd_stop(__rte_unused struct rte_rawdev *rawdev)
+{
+}
+
+/*
+ * Device close: release the queue pair pointer array.
+ * NOTE(review): individual queue pairs are expected to have been
+ * released via qp_release before close - confirm against the rawdev
+ * teardown sequence.
+ */
+static int
+aesni_mb_rawdev_pmd_close(struct rte_rawdev *rawdev)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+
+	/* rte_free(NULL) is a no-op, so no guard is needed */
+	rte_free(aesni_mb_dev->queue_pairs);
+	aesni_mb_dev->queue_pairs = NULL;
+
+	return 0;
+}
+
+/*
+ * Release a queue pair: free its ring, its multi-buffer manager and
+ * the qp structure itself, and clear the device's slot for it.
+ * A not-set-up qp id is a no-op. Always returns 0.
+ */
+static int
+aesni_mb_rawdev_pmd_qp_release(struct rte_rawdev *rawdev, uint16_t qp_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp = aesni_mb_dev->queue_pairs[qp_id];
+
+	if (qp == NULL)
+		return 0;
+
+	/* rte_ring_free() accepts NULL, so the lookup needs no guard */
+	rte_ring_free(rte_ring_lookup(qp->name));
+
+	if (qp->mb_mgr)
+		free_mb_mgr(qp->mb_mgr);
+
+	rte_free(qp);
+	aesni_mb_dev->queue_pairs[qp_id] = NULL;
+
+	return 0;
+}
+
+/*
+ * Set up a queue pair: allocate the qp structure, its multi-buffer
+ * manager (initialised for the vector mode chosen at probe) and its
+ * ingress ring. Any existing qp with this id is released first.
+ * Returns 0 on success, negative on failure.
+ */
+static int
+aesni_mb_rawdev_pmd_qp_setup(struct rte_rawdev *rawdev,
+			     uint16_t qp_id,
+			     rte_rawdev_obj_t qp_c)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp = NULL;
+	const struct rte_multi_fn_qp_config *qp_conf =
+			(const struct rte_multi_fn_qp_config *)qp_c;
+	int ret = -1;
+
+	if (qp_id >= aesni_mb_dev->max_nb_queue_pairs) {
+		AESNI_MB_RAWDEV_ERR("Invalid queue pair id=%d", qp_id);
+		return -EINVAL;
+	}
+
+	/* Free memory prior to re-allocation if needed */
+	if (aesni_mb_dev->queue_pairs[qp_id] != NULL)
+		aesni_mb_rawdev_pmd_qp_release(rawdev, qp_id);
+
+	/* Allocate the queue pair data structure */
+	qp = rte_zmalloc_socket("aesni_mb_rawdev_qp",
+				sizeof(struct aesni_mb_rawdev_qp),
+				RTE_CACHE_LINE_SIZE,
+				rawdev->socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	aesni_mb_dev->queue_pairs[qp_id] = qp;
+
+	if (qp_unique_name_set(rawdev, qp))
+		goto qp_setup_cleanup;
+
+	qp->mb_mgr = alloc_mb_mgr(0);
+	if (qp->mb_mgr == NULL) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	switch (aesni_mb_dev->vector_mode) {
+	case AESNI_MB_RAWDEV_SSE:
+		init_mb_mgr_sse(qp->mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX:
+		init_mb_mgr_avx(qp->mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX2:
+		init_mb_mgr_avx2(qp->mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX512:
+		init_mb_mgr_avx512(qp->mb_mgr);
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported vector mode %u",
+				    aesni_mb_dev->vector_mode);
+		goto qp_setup_cleanup;
+	}
+
+	qp->ingress_queue = qp_processed_ops_ring_create(
+					qp,
+					qp_conf->nb_descriptors,
+					rawdev->socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp) {
+		if (qp->mb_mgr)
+			free_mb_mgr(qp->mb_mgr);
+		rte_free(qp);
+		/* Don't leave a dangling pointer to the freed qp behind */
+		aesni_mb_dev->queue_pairs[qp_id] = NULL;
+	}
+
+	return ret;
+}
+
+/* Return the number of queue pairs configured on the device. */
+static uint16_t
+aesni_mb_rawdev_pmd_qp_count(struct rte_rawdev *rawdev)
+{
+ struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+
+ return aesni_mb_dev->nb_queue_pairs;
+}
+
+/*
+ * Enqueue burst: ops are buffered on the queue pair's ingress ring and
+ * only submitted to the multi-buffer manager on dequeue.
+ * Returns the number of ops actually enqueued.
+ */
+static int
+aesni_mb_rawdev_pmd_enq(struct rte_rawdev *rawdev,
+			struct rte_rawdev_buf **ops,
+			unsigned int nb_ops,
+			rte_rawdev_obj_t q_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_qp *qp;
+	unsigned int nb_enqueued;
+
+	qp = aesni_mb_dev->queue_pairs[*(uint16_t *)q_id];
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+					     (void **)ops,
+					     nb_ops,
+					     NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+	/*
+	 * Count ops that didn't fit in the ring, so the "failed_enqueues"
+	 * xstat is actually maintained
+	 */
+	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
+
+	return nb_enqueued;
+}
+
+/*
+ * Dequeue burst: pull ops from the queue pair's ingress ring, submit
+ * them to the multi-buffer manager, and return any ops whose jobs have
+ * completed. The manager is flushed when it has no free job slot, and
+ * once more at the end if nothing completed during the loop.
+ * Returns the number of completed ops written to 'ops'.
+ */
+static int
+aesni_mb_rawdev_pmd_deq(struct rte_rawdev *rawdev,
+ struct rte_rawdev_buf **ops,
+ unsigned int nb_ops,
+ rte_rawdev_obj_t q_id)
+{
+ struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+ struct aesni_mb_rawdev_qp *qp;
+ struct rte_multi_fn_op *op;
+ JOB_AES_HMAC *job;
+ uint8_t output_idx;
+ unsigned int processed_jobs = 0;
+ int ret;
+
+ qp = aesni_mb_dev->queue_pairs[*(uint16_t *)q_id];
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* Work on a local copy of the scratch-output index */
+ output_idx = qp->output_idx;
+
+ do {
+ /* Get next free mb job struct from mb manager */
+ job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+ if (unlikely(job == NULL)) {
+ /* if no free mb job structs we need to flush mb_mgr */
+ processed_jobs += mb_mgr_flush(
+ qp,
+ (struct rte_multi_fn_op **)
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ if (nb_ops == processed_jobs)
+ break;
+
+ job = IMB_GET_NEXT_JOB(qp->mb_mgr);
+ }
+
+ /*
+ * Get next operation to process from ingress queue.
+ * There is no need to return the job to the MB_MGR if there
+ * are no more operations to process, since the MB_MGR can use
+ * that pointer again in next get_next calls.
+ */
+ ret = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (ret < 0)
+ break;
+
+ /* A bad op is still submitted (as a NULL job) so it returns */
+ ret = mb_job_params_set(job, qp, op, &output_idx);
+ if (unlikely(ret != 0)) {
+ qp->stats.dequeue_err_count++;
+ mb_job_params_null_set(job, op);
+ }
+
+ /* Submit job to multi-buffer for processing */
+#ifdef RTE_LIBRTE_PMD_AESNI_MB_RAWDEV_DEBUG
+ job = IMB_SUBMIT_JOB(qp->mb_mgr);
+#else
+ job = IMB_SUBMIT_JOB_NOCHECK(qp->mb_mgr);
+#endif
+ /*
+ * If submit returns a processed job then handle it,
+ * before submitting subsequent jobs
+ */
+ if (job)
+ processed_jobs += completed_jobs_handle(
+ qp,
+ job,
+ (struct rte_multi_fn_op **)
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ } while (processed_jobs < nb_ops);
+
+ qp->output_idx = output_idx;
+
+ /* Nothing completed in the loop - flush to make forward progress */
+ if (processed_jobs < 1)
+ processed_jobs += mb_mgr_flush(qp,
+ (struct rte_multi_fn_op **)
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ return processed_jobs;
+}
+
+/*
+ * Get extended stats, aggregated across all queue pairs.
+ * Stat ids: 0 = enqueued, 1 = dequeued, 2 = enqueue errors,
+ * 3 = dequeue errors. Unknown ids report 0.
+ */
+static int
+aesni_mb_rawdev_pmd_xstats_get(const struct rte_rawdev *rawdev,
+ const unsigned int ids[],
+ uint64_t values[],
+ unsigned int n)
+{
+ struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+ struct aesni_mb_rawdev_qp *qp;
+ struct aesni_mb_rawdev_stats stats = {0};
+ int qp_id;
+ unsigned int i;
+
+ /* Sum per-queue-pair counters into device-wide totals */
+ for (qp_id = 0; qp_id < aesni_mb_dev->nb_queue_pairs; qp_id++) {
+ qp = aesni_mb_dev->queue_pairs[qp_id];
+
+ stats.enqueued_count += qp->stats.enqueued_count;
+ stats.dequeued_count += qp->stats.dequeued_count;
+
+ stats.enqueue_err_count += qp->stats.enqueue_err_count;
+ stats.dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+
+ for (i = 0; i < n; i++) {
+ switch (ids[i]) {
+ case 0:
+ values[i] = stats.enqueued_count;
+ break;
+ case 1:
+ values[i] = stats.dequeued_count;
+ break;
+ case 2:
+ values[i] = stats.enqueue_err_count;
+ break;
+ case 3:
+ values[i] = stats.dequeue_err_count;
+ break;
+ default:
+ values[i] = 0;
+ break;
+ }
+ }
+
+ return n;
+}
+
+/*
+ * Fill 'names' with the extended stat name strings.
+ *
+ * If 'size' is smaller than the number of stats, nothing is written
+ * and the required count is returned so the caller can resize.
+ * Otherwise returns the number of names written.
+ */
+static int
+aesni_mb_rawdev_pmd_xstats_get_names(
+		__rte_unused const struct rte_rawdev *rawdev,
+		struct rte_rawdev_xstats_name *names,
+		unsigned int size)
+{
+	unsigned int i;
+
+	if (size < RTE_DIM(xstat_names))
+		return RTE_DIM(xstat_names);
+
+	for (i = 0; i < RTE_DIM(xstat_names); i++)
+		/*
+		 * Bound the copy by the destination field, not the whole
+		 * struct, so the bound stays correct if the struct ever
+		 * gains more members.
+		 */
+		strlcpy(names[i].name, xstat_names[i], sizeof(names[i].name));
+
+	return RTE_DIM(xstat_names);
+}
+
+/*
+ * Reset extended stats. If 'ids' is NULL, all counters on all queue
+ * pairs are cleared; otherwise only the listed stat ids are cleared
+ * (same id mapping as xstats_get). Invalid ids are logged and skipped.
+ */
+static int
+aesni_mb_rawdev_pmd_xstats_reset(struct rte_rawdev *rawdev,
+ const uint32_t *ids,
+ uint32_t nb_ids)
+{
+ struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+ struct aesni_mb_rawdev_qp *qp;
+ unsigned int i;
+ int qp_id;
+
+ /* NULL ids means "reset everything" */
+ if (!ids) {
+ for (qp_id = 0; qp_id < aesni_mb_dev->nb_queue_pairs; qp_id++) {
+ qp = aesni_mb_dev->queue_pairs[qp_id];
+ qp->stats.enqueued_count = 0;
+ qp->stats.dequeued_count = 0;
+ qp->stats.enqueue_err_count = 0;
+ qp->stats.dequeue_err_count = 0;
+ }
+
+ return 0;
+ }
+
+ for (i = 0; i < nb_ids; i++) {
+ switch (ids[i]) {
+ case 0:
+ for (qp_id = 0;
+ qp_id < aesni_mb_dev->nb_queue_pairs;
+ qp_id++) {
+ qp = aesni_mb_dev->queue_pairs[qp_id];
+ qp->stats.enqueued_count = 0;
+ }
+ break;
+ case 1:
+ for (qp_id = 0;
+ qp_id < aesni_mb_dev->nb_queue_pairs;
+ qp_id++) {
+ qp = aesni_mb_dev->queue_pairs[qp_id];
+ qp->stats.dequeued_count = 0;
+ }
+ break;
+ case 2:
+ for (qp_id = 0;
+ qp_id < aesni_mb_dev->nb_queue_pairs;
+ qp_id++) {
+ qp = aesni_mb_dev->queue_pairs[qp_id];
+ qp->stats.enqueue_err_count = 0;
+ }
+ break;
+ case 3:
+ for (qp_id = 0;
+ qp_id < aesni_mb_dev->nb_queue_pairs;
+ qp_id++) {
+ qp = aesni_mb_dev->queue_pairs[qp_id];
+ qp->stats.dequeue_err_count = 0;
+ }
+ break;
+ default:
+ AESNI_MB_RAWDEV_ERR("Invalid xstat id - cannot reset");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Rawdev self-test hook; delegates to the PMD's own test suite. */
+static int
+aesni_mb_rawdev_pmd_selftest(uint16_t dev_id)
+{
+ return aesni_mb_rawdev_test(dev_id);
+}
+
+/*
+ * Create a multi-function session for a supported DOCSIS or PON xform
+ * chain.
+ *
+ * Allocates the generic multi-function session plus the PMD-private
+ * session, derives the MB chain order and the cipher xform from the
+ * detected operation type, then initialises the error-detect and
+ * cipher parameters.
+ *
+ * Returns the session on success, NULL on failure. All allocations
+ * are freed on every error path (the original code leaked them).
+ */
+static struct rte_multi_fn_session *
+aesni_mb_rawdev_pmd_session_create(struct rte_rawdev *rawdev,
+				   struct rte_multi_fn_xform *xform,
+				   int socket_id)
+{
+	struct aesni_mb_rawdev *aesni_mb_dev = rawdev->dev_private;
+	struct aesni_mb_rawdev_session *aesni_sess = NULL;
+	struct rte_multi_fn_session *session;
+	struct rte_crypto_sym_xform *cipher_xform;
+	enum aesni_mb_rawdev_op op;
+	int ret;
+
+	op = session_support_check(xform);
+
+	/* Allocate multi-function session */
+	session = rte_zmalloc_socket("multi_fn_session",
+				     sizeof(struct rte_multi_fn_session),
+				     RTE_CACHE_LINE_MIN_SIZE,
+				     socket_id);
+	if (session == NULL) {
+		AESNI_MB_RAWDEV_ERR("Multi-function session allocation failed");
+		return NULL;
+	}
+
+	/* Allocate AESNI-MB rawdev private session */
+	aesni_sess = rte_zmalloc_socket("aesni_mb_rawdev_session",
+					sizeof(struct aesni_mb_rawdev_session),
+					RTE_CACHE_LINE_MIN_SIZE,
+					socket_id);
+	if (aesni_sess == NULL) {
+		AESNI_MB_RAWDEV_ERR(
+			"AESNI-MB rawdev session allocation failed");
+		goto err_exit;
+	}
+
+	session->sess_private_data = aesni_sess;
+	aesni_sess->op = op;
+
+	/* Chain order and cipher xform location depend on the direction */
+	switch (op) {
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO:
+	case AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP:
+		aesni_sess->chain_order = IMB_ORDER_HASH_CIPHER;
+		cipher_xform = &xform->next->crypto_sym;
+		break;
+	case AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC:
+		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		cipher_xform = &xform->crypto_sym;
+		break;
+	case AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC:
+		aesni_sess->chain_order = IMB_ORDER_CIPHER_HASH;
+		cipher_xform = &xform->next->crypto_sym;
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported multi-function xform chain");
+		goto err_exit;
+	}
+
+	ret = session_err_detect_parameters_set(aesni_sess);
+	if (ret != 0) {
+		AESNI_MB_RAWDEV_ERR(
+			"Invalid/unsupported error detect parameters");
+		goto err_exit;
+	}
+
+	ret = session_cipher_parameters_set(aesni_mb_dev->mb_mgr,
+					    aesni_sess,
+					    cipher_xform);
+	if (ret != 0) {
+		AESNI_MB_RAWDEV_ERR("Invalid/unsupported cipher parameters");
+		goto err_exit;
+	}
+
+	return session;
+
+err_exit:
+	/* rte_free() accepts NULL so aesni_sess needs no guard */
+	rte_free(aesni_sess);
+	rte_free(session);
+	return NULL;
+}
+
+/*
+ * Destroy a multi-function session, freeing the PMD-private data and
+ * the session itself. Safe to call with a NULL session.
+ */
+static int
+aesni_mb_rawdev_pmd_session_destroy(__rte_unused struct rte_rawdev *rawdev,
+				    struct rte_multi_fn_session *sess)
+{
+	if (sess) {
+		/* rte_free() is a no-op on NULL - no guard needed */
+		rte_free(sess->sess_private_data);
+		rte_free(sess);
+	}
+
+	return 0;
+}
+
+/* Rawdev operation table wiring the generic rawdev API to this PMD */
+static const struct rte_rawdev_ops aesni_mb_rawdev_ops = {
+ .dev_configure = aesni_mb_rawdev_pmd_config,
+ .dev_info_get = aesni_mb_rawdev_pmd_info_get,
+ .dev_start = aesni_mb_rawdev_pmd_start,
+ .dev_stop = aesni_mb_rawdev_pmd_stop,
+ .dev_close = aesni_mb_rawdev_pmd_close,
+ .queue_setup = aesni_mb_rawdev_pmd_qp_setup,
+ .queue_release = aesni_mb_rawdev_pmd_qp_release,
+ .queue_count = aesni_mb_rawdev_pmd_qp_count,
+ .enqueue_bufs = aesni_mb_rawdev_pmd_enq,
+ .dequeue_bufs = aesni_mb_rawdev_pmd_deq,
+ .xstats_get = aesni_mb_rawdev_pmd_xstats_get,
+ .xstats_get_names = aesni_mb_rawdev_pmd_xstats_get_names,
+ .xstats_reset = aesni_mb_rawdev_pmd_xstats_reset,
+ .dev_selftest = aesni_mb_rawdev_pmd_selftest,
+};
+
+/* Multi-function session ops exposed via the device private data */
+static const struct rte_multi_fn_ops mf_ops = {
+ .session_create = aesni_mb_rawdev_pmd_session_create,
+ .session_destroy = aesni_mb_rawdev_pmd_session_destroy,
+};
+
+/*
+ * Create and initialise an AESNI-MB raw device.
+ *
+ * Allocates the rawdev, selects the best multi-buffer vector mode the
+ * CPU supports (AVX512 > AVX2 > AVX > SSE), allocates and initialises
+ * the MB manager accordingly and fills in the device private data.
+ *
+ * Returns 0 on success, negative errno on failure. The rawdev is
+ * released on every failure path (the original leaked it when
+ * alloc_mb_mgr() failed).
+ */
+static int
+aesni_mb_rawdev_create(const char *name,
+		       struct rte_vdev_device *vdev,
+		       unsigned int socket_id)
+{
+	struct rte_rawdev *rawdev;
+	struct aesni_mb_rawdev *aesni_mb_dev;
+	enum aesni_mb_rawdev_vector_mode vector_mode;
+	MB_MGR *mb_mgr;
+
+	/* Allocate device structure */
+	rawdev = rte_rawdev_pmd_allocate(name,
+					 sizeof(struct aesni_mb_rawdev),
+					 socket_id);
+	if (!rawdev) {
+		AESNI_MB_RAWDEV_ERR("Unable to allocate raw device");
+		return -EINVAL;
+	}
+
+	rawdev->dev_ops = &aesni_mb_rawdev_ops;
+	rawdev->device = &vdev->device;
+	rawdev->driver_name = driver_name;
+
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		vector_mode = AESNI_MB_RAWDEV_AVX512;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = AESNI_MB_RAWDEV_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = AESNI_MB_RAWDEV_AVX;
+	else
+		vector_mode = AESNI_MB_RAWDEV_SSE;
+
+	/* Check CPU for support for AES instruction set */
+	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
+		AESNI_MB_RAWDEV_WARN("AES instructions not supported by CPU");
+
+	mb_mgr = alloc_mb_mgr(0);
+	if (mb_mgr == NULL) {
+		/* Release the rawdev so it is not leaked on failure */
+		rte_rawdev_pmd_release(rawdev);
+		return -ENOMEM;
+	}
+
+	switch (vector_mode) {
+	case AESNI_MB_RAWDEV_SSE:
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX:
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX2:
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case AESNI_MB_RAWDEV_AVX512:
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		AESNI_MB_RAWDEV_ERR("Unsupported vector mode %u",
+				    vector_mode);
+		free_mb_mgr(mb_mgr);
+		rte_rawdev_pmd_release(rawdev);
+		return -1;
+	}
+
+	/* Set the device's private data */
+	aesni_mb_dev = rawdev->dev_private;
+	aesni_mb_dev->mf_ops = &mf_ops;
+	aesni_mb_dev->vector_mode = vector_mode;
+	aesni_mb_dev->max_nb_queue_pairs = MAX_QUEUES;
+	aesni_mb_dev->mb_mgr = mb_mgr;
+
+	AESNI_MB_RAWDEV_INFO("IPSec Multi-buffer library version used: %s",
+			     imb_get_version_str());
+
+	return 0;
+}
+
+/*
+ * Destroy the named AESNI-MB raw device: free its MB manager and
+ * release the rawdev.
+ *
+ * Returns 0 on success, -EINVAL if the name does not resolve to a
+ * device, or the rte_rawdev_pmd_release() error code (the original
+ * logged the release failure but always returned 0).
+ */
+static int
+aesni_mb_rawdev_destroy(const char *name)
+{
+	struct rte_rawdev *rawdev;
+	struct aesni_mb_rawdev *aesni_mb_dev;
+	int ret;
+
+	rawdev = rte_rawdev_pmd_get_named_dev(name);
+	if (rawdev == NULL) {
+		AESNI_MB_RAWDEV_ERR("Invalid device name (%s)", name);
+		return -EINVAL;
+	}
+
+	aesni_mb_dev = rawdev->dev_private;
+	free_mb_mgr(aesni_mb_dev->mb_mgr);
+
+	ret = rte_rawdev_pmd_release(rawdev);
+	if (ret)
+		AESNI_MB_RAWDEV_DEBUG("Device cleanup failed");
+
+	/* Propagate the release result instead of swallowing it */
+	return ret;
+}
+
+/* vdev probe hook: create the device on the probing core's NUMA node. */
+static int
+aesni_mb_rawdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ AESNI_MB_RAWDEV_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ return aesni_mb_rawdev_create(name, vdev, rte_socket_id());
+}
+
+/* vdev remove hook: tear down the named device. */
+static int
+aesni_mb_rawdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -1;
+
+ AESNI_MB_RAWDEV_INFO("Closing %s on NUMA node %d",
+ name,
+ rte_socket_id());
+
+ return aesni_mb_rawdev_destroy(name);
+}
+
+/* Virtual device driver hooks */
+static struct rte_vdev_driver rawdev_aesni_mb_pmd_drv = {
+ .probe = aesni_mb_rawdev_probe,
+ .remove = aesni_mb_rawdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(rawdev_aesni_mb, rawdev_aesni_mb_pmd_drv);
+
+/* Register the PMD log type at constructor time; default level INFO */
+RTE_INIT(aesni_mb_raw_init_log)
+{
+ aesni_mb_rawdev_pmd_logtype = rte_log_register("rawdev.aesni_mb");
+ if (aesni_mb_rawdev_pmd_logtype >= 0)
+ rte_log_set_level(aesni_mb_rawdev_pmd_logtype, RTE_LOG_INFO);
+}
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev.h b/drivers/raw/aesni_mb/aesni_mb_rawdev.h
new file mode 100644
index 000000000..59d78b8d8
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _AESNI_MB_RAWDEV_H_
+#define _AESNI_MB_RAWDEV_H_
+
+#include <intel-ipsec-mb.h>
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#include <rte_multi_fn_driver.h>
+
+/* AESNI-MB Rawdev PMD logtype */
+/*
+ * NOTE(review): this is a definition (no 'extern') in a header - if the
+ * header is ever included from more than one translation unit it will
+ * cause duplicate-definition link errors. Confirm single includer or
+ * change to 'extern' with the definition in the .c file.
+ */
+int aesni_mb_rawdev_pmd_logtype;
+
+/* Logging helpers: prefix each message with function name and line */
+#define AESNI_MB_RAWDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_rawdev_pmd_logtype, \
+ "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ##args)
+#define AESNI_MB_RAWDEV_DEBUG(fmt, args...) \
+ AESNI_MB_RAWDEV_LOG(DEBUG, fmt, ## args)
+#define AESNI_MB_RAWDEV_INFO(fmt, args...) \
+ AESNI_MB_RAWDEV_LOG(INFO, fmt, ## args)
+#define AESNI_MB_RAWDEV_ERR(fmt, args...) \
+ AESNI_MB_RAWDEV_LOG(ERR, fmt, ## args)
+#define AESNI_MB_RAWDEV_WARN(fmt, args...) \
+ AESNI_MB_RAWDEV_LOG(WARNING, fmt, ## args)
+
+
+/* Maximum length for output */
+#define OUTPUT_LENGTH_MAX 8
+
+/* AESNI-MB supported operations */
+enum aesni_mb_rawdev_op {
+ AESNI_MB_RAWDEV_OP_DOCSIS_CRC_CRYPTO, /* DOCSIS encrypt direction */
+ AESNI_MB_RAWDEV_OP_DOCSIS_CRYPTO_CRC, /* DOCSIS decrypt direction */
+ AESNI_MB_RAWDEV_OP_PON_CRC_CRYPTO_BIP, /* PON encrypt direction */
+ AESNI_MB_RAWDEV_OP_PON_BIP_CRYPTO_CRC, /* PON decrypt direction */
+ AESNI_MB_RAWDEV_OP_NOT_SUPPORTED
+};
+
+/* AESNI-MB device statistics */
+struct aesni_mb_rawdev_stats {
+ uint64_t enqueued_count;
+ uint64_t dequeued_count;
+ uint64_t enqueue_err_count;
+ uint64_t dequeue_err_count;
+};
+
+/* AESNI-MB queue pair */
+struct aesni_mb_rawdev_qp {
+ uint16_t id;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ MB_MGR *mb_mgr;
+ /* Ring staging ops between enqueue and dequeue/processing */
+ struct rte_ring *ingress_queue;
+ struct aesni_mb_rawdev_stats stats;
+ uint8_t output_idx;
+ /*
+ * Per-job scratch buffers, presumably for generated CRC/BIP
+ * output before it is written back - confirm against
+ * mb_job_params_set()
+ */
+ uint8_t temp_outputs[MAX_JOBS][OUTPUT_LENGTH_MAX];
+} __rte_cache_aligned;
+
+/* AESNI-MB vector modes */
+enum aesni_mb_rawdev_vector_mode {
+ AESNI_MB_RAWDEV_NOT_SUPPORTED = 0,
+ AESNI_MB_RAWDEV_SSE,
+ AESNI_MB_RAWDEV_AVX,
+ AESNI_MB_RAWDEV_AVX2,
+ AESNI_MB_RAWDEV_AVX512
+};
+
+/* AESNI-MB device data */
+struct aesni_mb_rawdev {
+ const struct rte_multi_fn_ops *mf_ops; /* MUST be first */
+ MB_MGR *mb_mgr;
+ struct aesni_mb_rawdev_qp **queue_pairs;
+ enum aesni_mb_rawdev_vector_mode vector_mode;
+ uint16_t max_nb_queue_pairs;
+ uint16_t nb_queue_pairs;
+};
+
+/* AESNI-MB private session structure */
+struct aesni_mb_rawdev_session {
+ enum aesni_mb_rawdev_op op;
+ JOB_CHAIN_ORDER chain_order;
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ struct {
+ JOB_CIPHER_DIRECTION direction;
+ JOB_CIPHER_MODE mode;
+
+ uint64_t key_length_in_bytes;
+
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ uint32_t decode[60] __rte_aligned(16);
+ } expanded_aes_keys;
+ };
+ } cipher;
+ struct {
+ JOB_HASH_ALG algo;
+ enum rte_multi_fn_err_detect_operation operation;
+ uint16_t gen_output_len;
+
+ } err_detect;
+} __rte_cache_aligned;
+
+/* Self-test entry point, wired to the rawdev dev_selftest op */
+int
+aesni_mb_rawdev_test(uint16_t dev_id);
+
+#endif /* _AESNI_MB_RAWDEV_H_ */
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev_test.c b/drivers/raw/aesni_mb/aesni_mb_rawdev_test.c
new file mode 100644
index 000000000..a8051cc80
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev_test.c
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_dev.h>
+#include <rte_bus_vdev.h>
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#include <rte_ether.h>
+#include <rte_test.h>
+
+#include "aesni_mb_rawdev.h"
+#include "aesni_mb_rawdev_test_vectors.h"
+
+#define TEST_DEV_NAME "rawdev_aesni_mb"
+
+/* Wrap test_run() so each case is named after its run function */
+#define TEST(setup, teardown, run, data, suffix) \
+ test_run(setup, teardown, run, data, RTE_STR(run)"_"suffix)
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED -1
+
+#define QP_NB_DESC (4096)
+
+/* mbuf pool sizing for test packets */
+#define MBUF_POOL_NAME "aesni_mb_rawdev_mbuf_pool"
+#define MBUF_POOL_SIZE (8191)
+#define MBUF_CACHE_SIZE (256)
+#define MBUF_DATAPAYLOAD_SIZE (2048)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
+
+/* Multi-function op pool sizing */
+#define OP_POOL_NAME "aesni_mb_rawdev_op_pool"
+#define OP_POOL_SIZE (8191)
+#define OP_PRIV_SIZE (16)
+#define OP_CACHE_SIZE (256)
+
+/* Longest op chain used by any test (PON: CRC + cipher + BIP) */
+#define MAX_OPS (3)
+
+/* EAL log level saved in setup and restored in teardown */
+static int eal_log_level;
+
+/* State shared by the whole suite (device id and pools) */
+struct testsuite_params {
+ uint16_t dev_id;
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *op_pool;
+};
+
+/* Per-test state, reset before each case */
+struct unittest_params {
+ struct rte_multi_fn_session *sess;
+ struct rte_multi_fn_op *ops[MAX_OPS];
+ struct rte_mbuf *ibuf;
+ struct rte_mbuf *obuf;
+};
+
+static struct testsuite_params testsuite_params;
+static struct unittest_params unittest_params;
+
+/* Suite-level result counters */
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+/*
+ * Suite setup: record the device under test and create (or look up)
+ * the mbuf and multi-function op pools.
+ */
+static int
+testsuite_setup(uint16_t dev_id)
+{
+ struct testsuite_params *ts_params = &testsuite_params;
+ uint8_t count = rte_rawdev_count();
+
+ eal_log_level = rte_log_get_level(RTE_LOGTYPE_EAL);
+ rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
+
+ memset(ts_params, 0, sizeof(*ts_params));
+
+ /*
+ * NOTE(review): this path returns before any pools are created -
+ * presumably rte_vdev_init() re-enters the self-test on the newly
+ * created device so setup runs again with count > 0. Confirm.
+ */
+ if (!count) {
+ AESNI_MB_RAWDEV_INFO("No existing rawdev found - creating %s",
+ TEST_DEV_NAME);
+ return rte_vdev_init(TEST_DEV_NAME, NULL);
+ }
+
+ ts_params->dev_id = dev_id;
+
+ ts_params->mbuf_pool = rte_mempool_lookup(MBUF_POOL_NAME);
+ if (ts_params->mbuf_pool == NULL) {
+ /* Not already created so create */
+ ts_params->mbuf_pool = rte_pktmbuf_pool_create(
+ MBUF_POOL_NAME,
+ MBUF_POOL_SIZE,
+ MBUF_CACHE_SIZE,
+ 0,
+ MBUF_SIZE,
+ rte_socket_id());
+ if (ts_params->mbuf_pool == NULL) {
+ AESNI_MB_RAWDEV_ERR("Cannot create AESNI-MB rawdev "
+ "mbuf pool");
+ return TEST_FAILED;
+ }
+ }
+
+ ts_params->op_pool = rte_multi_fn_op_pool_create(OP_POOL_NAME,
+ OP_POOL_SIZE,
+ OP_CACHE_SIZE,
+ OP_PRIV_SIZE,
+ rte_socket_id());
+
+ if (ts_params->op_pool == NULL) {
+ AESNI_MB_RAWDEV_ERR("Cannot create AESNI-MB rawdev operation "
+ "pool");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+/*
+ * Suite teardown: free the pools, remove the test vdev and restore
+ * the EAL log level saved in testsuite_setup().
+ */
+static void
+testsuite_teardown(void)
+{
+ struct testsuite_params *ts_params = &testsuite_params;
+
+ if (ts_params->mbuf_pool != NULL) {
+ rte_mempool_free(ts_params->mbuf_pool);
+ ts_params->mbuf_pool = NULL;
+ }
+
+ if (ts_params->op_pool != NULL) {
+ rte_mempool_free(ts_params->op_pool);
+ ts_params->op_pool = NULL;
+ }
+
+ rte_vdev_uninit(TEST_DEV_NAME);
+
+ rte_log_set_level(RTE_LOGTYPE_EAL, eal_log_level);
+}
+
+/*
+ * Per-test setup: configure the device with one queue pair, reset
+ * stats and start the device.
+ */
+static int
+test_setup(void)
+{
+ struct testsuite_params *ts_params = &testsuite_params;
+ struct unittest_params *ut_params = &unittest_params;
+
+ struct rte_rawdev_info info = {0};
+ struct rte_multi_fn_dev_config mf_dev_conf = {0};
+ struct rte_multi_fn_qp_config qp_conf = {0};
+ uint16_t qp_id;
+ int ret;
+
+ /* Clear unit test parameters before running test */
+ memset(ut_params, 0, sizeof(*ut_params));
+
+ /* Configure device and queue pairs */
+ mf_dev_conf.nb_queues = 1;
+ info.dev_private = &mf_dev_conf;
+ qp_conf.nb_descriptors = QP_NB_DESC;
+
+ ret = rte_rawdev_configure(ts_params->dev_id, &info);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to configure rawdev %u",
+ ts_params->dev_id);
+
+ for (qp_id = 0; qp_id < mf_dev_conf.nb_queues; qp_id++) {
+ ret = rte_rawdev_queue_setup(ts_params->dev_id,
+ qp_id,
+ &qp_conf);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to setup queue pair %u on "
+ "rawdev %u",
+ qp_id,
+ ts_params->dev_id);
+ }
+
+ ret = rte_rawdev_xstats_reset(ts_params->dev_id, NULL, 0);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to reset stats on rawdev %u",
+ ts_params->dev_id);
+
+ /* Start the device */
+ ret = rte_rawdev_start(ts_params->dev_id);
+ RTE_TEST_ASSERT_SUCCESS(ret,
+ "Failed to start rawdev %u",
+ ts_params->dev_id);
+
+ return 0;
+}
+
+/*
+ * Per-test teardown: free ops, session and mbufs allocated by the
+ * test, then stop the device.
+ */
+static void
+test_teardown(void)
+{
+ struct testsuite_params *ts_params = &testsuite_params;
+ struct unittest_params *ut_params = &unittest_params;
+
+ int i;
+
+ /* Free multi-function operations */
+ for (i = 0; i < MAX_OPS; i++) {
+ if (ut_params->ops[i] != NULL) {
+ rte_multi_fn_op_free(ut_params->ops[i]);
+ ut_params->ops[i] = NULL;
+ }
+ }
+
+ /* Free multi-function session */
+ if (ut_params->sess != NULL) {
+ rte_multi_fn_session_destroy(ts_params->dev_id,
+ ut_params->sess);
+ ut_params->sess = NULL;
+ }
+
+ /*
+ * Free mbuf - both obuf and ibuf are usually the same,
+ * so check if they point at the same address is necessary,
+ * to avoid freeing the mbuf twice.
+ */
+ if (ut_params->obuf != NULL) {
+ rte_pktmbuf_free(ut_params->obuf);
+ if (ut_params->ibuf == ut_params->obuf)
+ ut_params->ibuf = NULL;
+ ut_params->obuf = NULL;
+ }
+ if (ut_params->ibuf != NULL) {
+ rte_pktmbuf_free(ut_params->ibuf);
+ ut_params->ibuf = NULL;
+ }
+
+ /* Stop the device */
+ rte_rawdev_stop(ts_params->dev_id);
+}
+
+/*
+ * DOCSIS encrypt-direction test: CRC generate followed by
+ * AES-DOCSISBPI encrypt, performed in place on a single mbuf, then
+ * the ciphertext and op status are validated and the device xstats
+ * are dumped.
+ */
+static int
+test_docsis_encrypt(void *vtdata)
+{
+ struct docsis_test_data *tdata = (struct docsis_test_data *)vtdata;
+ struct testsuite_params *ts_params = &testsuite_params;
+ struct unittest_params *ut_params = &unittest_params;
+
+ /* Xforms */
+ struct rte_multi_fn_xform xform1 = {0};
+ struct rte_multi_fn_xform xform2 = {0};
+ struct rte_crypto_cipher_xform *xform_cipher;
+
+ /* Operations */
+ struct rte_multi_fn_op *result;
+ struct rte_crypto_sym_op *cipher_op;
+ struct rte_multi_fn_err_detect_op *crc_op;
+
+ /* Cipher params */
+ int cipher_len = 0;
+ uint8_t *iv_ptr;
+
+ /* CRC params */
+ int crc_len = 0, crc_data_len = 0;
+
+ /* Test data */
+ uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+ /* Stats */
+ uint64_t stats[4] = {0};
+ struct rte_rawdev_xstats_name stats_names[4] = {0};
+ const unsigned int stats_id[4] = {0, 1, 2, 3};
+ int num_stats = 0, num_names = 0;
+
+ uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+ int i, ret = TEST_SUCCESS;
+
+ /* Setup source mbuf */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+ "Failed to allocate source mbuf");
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+ 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->plaintext.len);
+ memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+ /* Create session: CRC generate chained into cipher encrypt */
+ xform1.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ xform1.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+ xform1.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ xform1.next = &xform2;
+
+ xform2.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+ xform2.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher = &xform2.crypto_sym.cipher;
+ xform_cipher->op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI;
+ xform_cipher->key.data = tdata->key.data;
+ xform_cipher->key.length = tdata->key.len;
+ xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+ xform_cipher->iv.length = tdata->cipher_iv.len;
+ xform2.next = NULL;
+
+ ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+ &xform1,
+ rte_socket_id());
+
+ RTE_TEST_ASSERT((ut_params->sess != NULL &&
+ ut_params->sess->sess_private_data != NULL),
+ "Failed to create multi-function session");
+
+ /* Create operations */
+ nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+ ut_params->ops,
+ 2);
+ RTE_TEST_ASSERT_EQUAL(nb_ops,
+ 2,
+ "Failed to allocate multi-function operations");
+
+ /* Chain op[0] (CRC) -> op[1] (cipher); only op[0] carries the mbuf */
+ ut_params->ops[0]->next = ut_params->ops[1];
+ ut_params->ops[0]->m_src = ut_params->ibuf;
+ ut_params->ops[0]->m_dst = NULL;
+ ut_params->ops[1]->next = NULL;
+
+ /* CRC op config */
+ crc_len = tdata->plaintext.no_crc == false ?
+ (tdata->plaintext.len -
+ tdata->plaintext.crc_offset -
+ RTE_ETHER_CRC_LEN) :
+ 0;
+ crc_len = crc_len > 0 ? crc_len : 0;
+ crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+ crc_op = &ut_params->ops[0]->err_detect;
+ crc_op->data.offset = tdata->plaintext.crc_offset;
+ crc_op->data.length = crc_len;
+ crc_op->output.data = rte_pktmbuf_mtod_offset(
+ ut_params->ibuf,
+ uint8_t *,
+ ut_params->ibuf->data_len -
+ crc_data_len);
+
+ /* Cipher encrypt op config */
+ cipher_len = tdata->plaintext.no_cipher == false ?
+ (tdata->plaintext.len -
+ tdata->plaintext.cipher_offset) :
+ 0;
+ cipher_len = cipher_len > 0 ? cipher_len : 0;
+ cipher_op = &ut_params->ops[1]->crypto_sym;
+ cipher_op->cipher.data.offset = tdata->plaintext.cipher_offset;
+ cipher_op->cipher.data.length = cipher_len;
+ /* IV lives immediately after the op struct (matches iv.offset) */
+ iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+ sizeof(struct rte_multi_fn_op);
+ rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+ /* Attach session to operation */
+ ut_params->ops[0]->sess = ut_params->sess;
+
+ /* Enqueue to device - only the head of the op chain is enqueued */
+ nb_enq = rte_rawdev_enqueue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)ut_params->ops,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+
+ RTE_TEST_ASSERT_EQUAL(nb_enq,
+ 1,
+ "Failed to enqueue multi-function operations");
+
+ /* Dequeue from device */
+ do {
+ nb_deq = rte_rawdev_dequeue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)&result,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+ } while (nb_deq < 1);
+
+ RTE_TEST_ASSERT_EQUAL(nb_deq,
+ 1,
+ "Failed to dequeue multi-function operations");
+
+ /* Check results - operation was in place, so buffers alias */
+ ciphertext = plaintext;
+
+ /* Validate ciphertext */
+ ret = memcmp(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Ciphertext not as expected");
+
+ RTE_TEST_ASSERT_EQUAL(result->overall_status,
+ RTE_MULTI_FN_OP_STATUS_SUCCESS,
+ "Multi-function op processing failed");
+
+ /* Print stats */
+ num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+ stats_id,
+ stats,
+ 4);
+ num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+ stats_names,
+ 4);
+ RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+ RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+ for (i = 0; i < num_stats; i++)
+ AESNI_MB_RAWDEV_DEBUG("%s: %"PRIu64,
+ stats_names[i].name,
+ stats[i]);
+
+ return 0;
+}
+
+/*
+ * DOCSIS decrypt-direction test: AES-DOCSISBPI decrypt followed by
+ * CRC verify, performed in place on a single mbuf, then the plaintext
+ * and op status are validated and the device xstats are dumped.
+ */
+static int
+test_docsis_decrypt(void *vtdata)
+{
+ struct docsis_test_data *tdata = (struct docsis_test_data *)vtdata;
+ struct testsuite_params *ts_params = &testsuite_params;
+ struct unittest_params *ut_params = &unittest_params;
+
+ /* Xforms */
+ struct rte_multi_fn_xform xform1 = {0};
+ struct rte_multi_fn_xform xform2 = {0};
+ struct rte_crypto_cipher_xform *xform_cipher;
+
+ /* Operations */
+ struct rte_multi_fn_op *result;
+ struct rte_crypto_sym_op *cipher_op;
+ struct rte_multi_fn_err_detect_op *crc_op;
+
+ /* Cipher params */
+ int cipher_len = 0;
+ uint8_t *iv_ptr;
+
+ /* CRC params */
+ int crc_len = 0, crc_data_len;
+
+ /* Test data */
+ uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+ /* Stats */
+ uint64_t stats[4] = {0};
+ struct rte_rawdev_xstats_name stats_names[4] = {0};
+ const unsigned int stats_id[4] = {0, 1, 2, 3};
+ int num_stats = 0, num_names = 0;
+
+ uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+ int i, ret = TEST_SUCCESS;
+
+ /* Setup source mbuf */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+ "Failed to allocate source mbuf");
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+ 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->ciphertext.len);
+ memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+ /* Create session: cipher decrypt chained into CRC verify */
+ xform1.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+ xform1.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher = &xform1.crypto_sym.cipher;
+ xform_cipher->op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI;
+ xform_cipher->key.data = tdata->key.data;
+ xform_cipher->key.length = tdata->key.len;
+ xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+ xform_cipher->iv.length = tdata->cipher_iv.len;
+ xform1.next = &xform2;
+
+ xform2.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ xform2.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+ xform2.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+ xform2.next = NULL;
+
+ ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+ &xform1,
+ rte_socket_id());
+
+ RTE_TEST_ASSERT((ut_params->sess != NULL &&
+ ut_params->sess->sess_private_data != NULL),
+ "Failed to create multi-function session");
+
+ /* Create operations */
+ nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+ ut_params->ops,
+ 2);
+ RTE_TEST_ASSERT_EQUAL(nb_ops,
+ 2,
+ "Failed to allocate multi-function operations");
+
+ /* Chain op[0] (cipher) -> op[1] (CRC); only op[0] carries the mbuf */
+ ut_params->ops[0]->next = ut_params->ops[1];
+ ut_params->ops[0]->m_src = ut_params->ibuf;
+ ut_params->ops[0]->m_dst = NULL;
+ ut_params->ops[1]->next = NULL;
+
+ /* Cipher decrypt op config */
+ cipher_len = tdata->ciphertext.no_cipher == false ?
+ (tdata->ciphertext.len -
+ tdata->ciphertext.cipher_offset) :
+ 0;
+ cipher_len = cipher_len > 0 ? cipher_len : 0;
+ cipher_op = &ut_params->ops[0]->crypto_sym;
+ cipher_op->cipher.data.offset = tdata->ciphertext.cipher_offset;
+ cipher_op->cipher.data.length = cipher_len;
+ /*
+ * NOTE(review): the IV is written after ops[1] here, but the cipher
+ * op in this direction is ops[0] (the mirror image of the encrypt
+ * test, where the cipher op is ops[1]). Confirm where the driver
+ * resolves iv.offset from - this looks like a copy-paste slip.
+ */
+ iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+ sizeof(struct rte_multi_fn_op);
+ rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+ /* CRC op config */
+ /*
+ * NOTE(review): the flag checked is plaintext.no_crc while the
+ * lengths/offsets come from the ciphertext - confirm intentional.
+ */
+ crc_len = tdata->plaintext.no_crc == false ?
+ (tdata->ciphertext.len -
+ tdata->ciphertext.crc_offset -
+ RTE_ETHER_CRC_LEN) :
+ 0;
+ crc_len = crc_len > 0 ? crc_len : 0;
+ crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+ crc_op = &ut_params->ops[1]->err_detect;
+ crc_op->data.offset = tdata->ciphertext.crc_offset;
+ crc_op->data.length = crc_len;
+ crc_op->output.data = rte_pktmbuf_mtod_offset(
+ ut_params->ibuf,
+ uint8_t *,
+ ut_params->ibuf->data_len -
+ crc_data_len);
+
+ /* Attach session to operation */
+ ut_params->ops[0]->sess = ut_params->sess;
+
+ /* Enqueue to device - only the head of the op chain is enqueued */
+ nb_enq = rte_rawdev_enqueue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)ut_params->ops,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+
+ RTE_TEST_ASSERT_EQUAL(nb_enq,
+ 1,
+ "Failed to enqueue multi-function operations");
+
+ /* Dequeue to device */
+ do {
+ nb_deq = rte_rawdev_dequeue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)&result,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+ } while (nb_deq < 1);
+
+ RTE_TEST_ASSERT_EQUAL(nb_deq,
+ 1,
+ "Failed to dequeue multi-function operations");
+
+ /* Check results - operation was in place, so buffers alias */
+ plaintext = ciphertext;
+
+ /* Validate plaintext */
+ ret = memcmp(plaintext,
+ tdata->plaintext.data,
+ /* Check only as far as CRC - CRC is checked internally */
+ tdata->plaintext.len - crc_data_len);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Plaintext not as expected");
+
+ RTE_TEST_ASSERT_EQUAL(result->overall_status,
+ RTE_MULTI_FN_OP_STATUS_SUCCESS,
+ "Multi-function op processing failed");
+
+ /* Print stats */
+ num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+ stats_id,
+ stats,
+ 4);
+ num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+ stats_names,
+ 4);
+ RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+ RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+ for (i = 0; i < num_stats; i++)
+ AESNI_MB_RAWDEV_DEBUG("%s: %"PRIu64,
+ stats_names[i].name,
+ stats[i]);
+
+ return 0;
+}
+
+static int
+test_pon_encrypt(void *vtdata)
+{
+ struct pon_test_data *tdata = (struct pon_test_data *)vtdata;
+ struct testsuite_params *ts_params = &testsuite_params;
+ struct unittest_params *ut_params = &unittest_params;
+
+ /* Xforms */
+ struct rte_multi_fn_xform xform1 = {0};
+ struct rte_multi_fn_xform xform2 = {0};
+ struct rte_multi_fn_xform xform3 = {0};
+ struct rte_crypto_cipher_xform *xform_cipher;
+
+ /* Operations */
+ struct rte_multi_fn_op *result;
+ struct rte_crypto_sym_op *cipher_op;
+ struct rte_multi_fn_err_detect_op *crc_op;
+ struct rte_multi_fn_err_detect_op *bip_op;
+
+ /* Cipher params */
+ int cipher_len = 0;
+ uint8_t *iv_ptr;
+
+ /* CRC params */
+ int crc_len = 0, crc_data_len = 0;
+
+ /* BIP params */
+ int bip_len = 0;
+
+ /* Test data */
+ uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+ /* Stats */
+ uint64_t stats[4] = {0};
+ struct rte_rawdev_xstats_name stats_names[4] = {0};
+ const unsigned int stats_id[4] = {0, 1, 2, 3};
+ int num_stats = 0, num_names = 0;
+
+ uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+ int i, ret = TEST_SUCCESS;
+
+ /* Setup source mbuf */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+ "Failed to allocate source mbuf");
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+ 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->plaintext.len);
+ memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+
+ /* Create session */
+ xform1.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ xform1.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+ xform1.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ xform1.next = &xform2;
+
+ xform2.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+ xform2.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher = &xform2.crypto_sym.cipher;
+ xform_cipher->op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_CTR;
+ xform_cipher->key.data = tdata->key.data;
+ xform_cipher->key.length = tdata->key.len;
+ xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+ xform_cipher->iv.length = tdata->cipher_iv.len;
+ xform2.next = &xform3;
+
+ xform3.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ xform3.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_BIP32;
+ xform3.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ xform3.next = NULL;
+
+ ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+ &xform1,
+ rte_socket_id());
+
+ RTE_TEST_ASSERT((ut_params->sess != NULL &&
+ ut_params->sess->sess_private_data != NULL),
+ "Failed to create multi-function session");
+
+ /* Create operations */
+ nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+ ut_params->ops,
+ 3);
+ RTE_TEST_ASSERT_EQUAL(nb_ops,
+ 3,
+ "Failed to allocate multi-function operations");
+
+ ut_params->ops[0]->next = ut_params->ops[1];
+ ut_params->ops[0]->m_src = ut_params->ibuf;
+ ut_params->ops[0]->m_dst = NULL;
+ ut_params->ops[1]->next = ut_params->ops[2];
+ ut_params->ops[2]->next = NULL;
+
+ /* CRC op config */
+ crc_len = tdata->plaintext.len -
+ tdata->plaintext.crc_offset -
+ tdata->plaintext.padding_len -
+ RTE_ETHER_CRC_LEN;
+ crc_len = crc_len > 0 ? crc_len : 0;
+ crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+ crc_op = &ut_params->ops[0]->err_detect;
+ crc_op->data.offset = tdata->plaintext.crc_offset;
+ crc_op->data.length = crc_len;
+ crc_op->output.data = rte_pktmbuf_mtod_offset(
+ ut_params->ibuf,
+ uint8_t *,
+ ut_params->ibuf->data_len -
+ tdata->plaintext.padding_len -
+ crc_data_len);
+
+ /* Cipher encrypt op config */
+ cipher_len = tdata->plaintext.no_cipher == false ?
+ (tdata->plaintext.len -
+ tdata->plaintext.cipher_offset) :
+ 0;
+ cipher_len = cipher_len > 0 ? cipher_len : 0;
+ cipher_op = &ut_params->ops[1]->crypto_sym;
+ cipher_op->cipher.data.offset = tdata->plaintext.cipher_offset;
+ cipher_op->cipher.data.length = cipher_len;
+ iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+ sizeof(struct rte_multi_fn_op);
+ rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+ /* BIP op config */
+ bip_len = tdata->plaintext.len - tdata->plaintext.bip_offset;
+ bip_len = bip_len > 0 ? bip_len : 0;
+ bip_op = &ut_params->ops[2]->err_detect;
+ bip_op->data.offset = tdata->plaintext.bip_offset;
+ bip_op->data.length = bip_len;
+ bip_op->output.data = (uint8_t *)(ut_params->ops[2]) +
+ sizeof(struct rte_multi_fn_op);
+
+ /* Attach session to op */
+ ut_params->ops[0]->sess = ut_params->sess;
+
+ /* Enqueue to device */
+ nb_enq = rte_rawdev_enqueue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)ut_params->ops,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+
+ RTE_TEST_ASSERT_EQUAL(nb_enq,
+ 1,
+ "Failed to enqueue multi-function operations");
+
+ /* Dequeue from device */
+ do {
+ nb_deq = rte_rawdev_dequeue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)&result,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+ } while (nb_deq < 1);
+
+ /* Check results */
+ ciphertext = plaintext;
+
+ /* Validate ciphertext */
+ ret = memcmp(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Ciphertext not as expected");
+
+ ret = memcmp(bip_op->output.data,
+ tdata->output.data,
+ tdata->output.len);
+ RTE_TEST_ASSERT_SUCCESS(ret, "BIP not as expected");
+
+ RTE_TEST_ASSERT_EQUAL(result->overall_status,
+ RTE_MULTI_FN_OP_STATUS_SUCCESS,
+ "Multi-function op processing failed");
+
+ /* Print stats */
+ num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+ stats_id,
+ stats,
+ 4);
+ num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+ stats_names,
+ 4);
+ RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+ RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+ for (i = 0; i < num_stats; i++)
+ AESNI_MB_RAWDEV_DEBUG("%s: %"PRIu64,
+ stats_names[i].name,
+ stats[i]);
+
+ return 0;
+}
+
+/*
+ * PON decrypt-direction test case.
+ *
+ * Builds a three-stage multi-function session — BIP32 generate ->
+ * AES-CTR decrypt -> CRC32 (Ethernet) verify — enqueues one chained
+ * operation on the raw device, busy-polls for completion, then checks
+ * the recovered plaintext and the computed BIP value against the test
+ * vector and dumps the device xstats.
+ *
+ * @param vtdata  pointer to a struct pon_test_data test vector
+ * @return 0 on success; a RTE_TEST_ASSERT_* macro returns early on
+ *         failure
+ */
+static int
+test_pon_decrypt(void *vtdata)
+{
+ struct pon_test_data *tdata = (struct pon_test_data *)vtdata;
+ struct testsuite_params *ts_params = &testsuite_params;
+ struct unittest_params *ut_params = &unittest_params;
+
+ /* Xforms */
+ struct rte_multi_fn_xform xform1 = {0};
+ struct rte_multi_fn_xform xform2 = {0};
+ struct rte_multi_fn_xform xform3 = {0};
+ struct rte_crypto_cipher_xform *xform_cipher;
+
+ /* Operations */
+ struct rte_multi_fn_op *result;
+ struct rte_crypto_sym_op *cipher_op;
+ struct rte_multi_fn_err_detect_op *crc_op;
+ struct rte_multi_fn_err_detect_op *bip_op;
+
+ /* Cipher params */
+ int cipher_len = 0;
+ uint8_t *iv_ptr;
+
+ /* CRC params */
+ int crc_len = 0, crc_data_len = 0;
+
+ /* BIP params */
+ int bip_len = 0;
+
+ /* Test data */
+ uint8_t *plaintext = NULL, *ciphertext = NULL;
+
+ /* Stats */
+ uint64_t stats[4] = {0};
+ struct rte_rawdev_xstats_name stats_names[4] = {0};
+ const unsigned int stats_id[4] = {0, 1, 2, 3};
+ int num_stats = 0, num_names = 0;
+
+ uint16_t qp_id = 0, nb_enq, nb_deq = 0, nb_ops;
+ int i, ret = TEST_SUCCESS;
+
+ /* Setup source mbuf - zeroed, then loaded with the vector's ciphertext */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ RTE_TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+ "Failed to allocate source mbuf");
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *),
+ 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->ciphertext.len);
+ memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
+
+ /* Create session: BIP32 generate -> AES-CTR decrypt -> CRC32 verify */
+ xform1.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ xform1.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_BIP32;
+ xform1.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ xform1.next = &xform2;
+
+ xform2.type = RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+ xform2.crypto_sym.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher = &xform2.crypto_sym.cipher;
+ xform_cipher->op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ xform_cipher->algo = RTE_CRYPTO_CIPHER_AES_CTR;
+ xform_cipher->key.data = tdata->key.data;
+ xform_cipher->key.length = tdata->key.len;
+ /* IV is expected immediately after the op structure (see iv_ptr below) */
+ xform_cipher->iv.offset = sizeof(struct rte_multi_fn_op);
+ xform_cipher->iv.length = tdata->cipher_iv.len;
+ xform2.next = &xform3;
+
+ xform3.type = RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ xform3.err_detect.algo = RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+ xform3.err_detect.op = RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+ xform3.next = NULL;
+
+ ut_params->sess = rte_multi_fn_session_create(ts_params->dev_id,
+ &xform1,
+ rte_socket_id());
+
+ RTE_TEST_ASSERT((ut_params->sess != NULL &&
+ ut_params->sess->sess_private_data != NULL),
+ "Failed to create multi-function session");
+
+ /* Create operations - one op per xform, chained via 'next' */
+ nb_ops = rte_multi_fn_op_bulk_alloc(ts_params->op_pool,
+ ut_params->ops,
+ 3);
+ RTE_TEST_ASSERT_EQUAL(nb_ops,
+ 3,
+ "Failed to allocate multi-function operations");
+
+ ut_params->ops[0]->next = ut_params->ops[1];
+ ut_params->ops[0]->m_src = ut_params->ibuf;
+ ut_params->ops[0]->m_dst = NULL;
+ ut_params->ops[1]->next = ut_params->ops[2];
+ ut_params->ops[2]->next = NULL;
+
+ /* BIP op config - BIP output is written just after the op structure */
+ bip_len = tdata->ciphertext.len - tdata->ciphertext.bip_offset;
+ bip_len = bip_len > 0 ? bip_len : 0;
+ bip_op = &ut_params->ops[0]->err_detect;
+ bip_op->data.offset = tdata->ciphertext.bip_offset;
+ bip_op->data.length = bip_len;
+ bip_op->output.data = (uint8_t *)(ut_params->ops[0]) +
+ sizeof(struct rte_multi_fn_op);
+
+ /* Cipher decrypt op config */
+ cipher_len = tdata->ciphertext.no_cipher == false ?
+ (tdata->ciphertext.len -
+ tdata->ciphertext.cipher_offset) :
+ 0;
+ cipher_len = cipher_len > 0 ? cipher_len : 0;
+ cipher_op = &ut_params->ops[1]->crypto_sym;
+ cipher_op->cipher.data.offset = tdata->ciphertext.cipher_offset;
+ cipher_op->cipher.data.length = cipher_len;
+ /* Copy IV to the location promised by xform_cipher->iv.offset */
+ iv_ptr = (uint8_t *)(ut_params->ops[1]) +
+ sizeof(struct rte_multi_fn_op);
+ rte_memcpy(iv_ptr, tdata->cipher_iv.data, tdata->cipher_iv.len);
+
+ /* CRC op config - CRC sits before any padding at the buffer's end */
+ crc_len = tdata->ciphertext.len -
+ tdata->ciphertext.crc_offset -
+ tdata->ciphertext.padding_len -
+ RTE_ETHER_CRC_LEN;
+ crc_len = crc_len > 0 ? crc_len : 0;
+ crc_data_len = crc_len == 0 ? 0 : RTE_ETHER_CRC_LEN;
+ crc_op = &ut_params->ops[2]->err_detect;
+ crc_op->data.offset = tdata->ciphertext.crc_offset;
+ crc_op->data.length = crc_len;
+ crc_op->output.data = rte_pktmbuf_mtod_offset(
+ ut_params->ibuf,
+ uint8_t *,
+ ut_params->ibuf->data_len -
+ tdata->ciphertext.padding_len -
+ crc_data_len);
+
+ /* Attach session to op - only the head of the chain carries it */
+ ut_params->ops[0]->sess = ut_params->sess;
+
+ /* Enqueue to device */
+ nb_enq = rte_rawdev_enqueue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)ut_params->ops,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+
+ RTE_TEST_ASSERT_EQUAL(nb_enq,
+ 1,
+ "Failed to enqueue multi-function operations");
+
+ /* Dequeue from device - busy-poll until the single op returns */
+ do {
+ nb_deq = rte_rawdev_dequeue_buffers(
+ ts_params->dev_id,
+ (struct rte_rawdev_buf **)&result,
+ 1,
+ (rte_rawdev_obj_t)&qp_id);
+ } while (nb_deq < 1);
+
+ /* Check results - decryption was done in place in the source mbuf */
+ plaintext = ciphertext;
+
+ /* Validate plaintext */
+ ret = memcmp(plaintext,
+ tdata->plaintext.data,
+ /* Check only as far as CRC - CRC is checked internally */
+ tdata->plaintext.len -
+ tdata->plaintext.padding_len -
+ crc_data_len);
+ RTE_TEST_ASSERT_SUCCESS(ret, "Plaintext not as expected");
+
+ ret = memcmp(bip_op->output.data,
+ tdata->output.data,
+ tdata->output.len);
+ RTE_TEST_ASSERT_SUCCESS(ret, "BIP not as expected");
+
+ RTE_TEST_ASSERT_EQUAL(result->overall_status,
+ RTE_MULTI_FN_OP_STATUS_SUCCESS,
+ "Multi-function op processing failed");
+
+ /* Print stats */
+ num_stats = rte_rawdev_xstats_get(ts_params->dev_id,
+ stats_id,
+ stats,
+ 4);
+ num_names = rte_rawdev_xstats_names_get(ts_params->dev_id,
+ stats_names,
+ 4);
+ RTE_TEST_ASSERT_EQUAL(num_stats, 4, "Failed to get stats");
+ RTE_TEST_ASSERT_EQUAL(num_names, 4, "Failed to get stats names");
+
+ for (i = 0; i < num_stats; i++)
+ AESNI_MB_RAWDEV_DEBUG("%s: %"PRIu64,
+ stats_names[i].name,
+ stats[i]);
+
+ return 0;
+}
+
+/*
+ * Run a single test case.
+ *
+ * Calls the optional setup hook, then the test body, then the optional
+ * teardown hook, updating the file-level passed/failed/unsupported/total
+ * counters.
+ *
+ * Fix: if setup fails, the test is counted as unsupported and is NOT
+ * run. Previously the body still executed after a failed setup, running
+ * the test on un-initialised state and double-counting it as
+ * passed/failed in addition to unsupported. Teardown still runs so any
+ * partial setup is undone, and the test is still counted in 'total'.
+ *
+ * @param setup     optional per-test setup, < 0 means unsupported/failed
+ * @param teardown  optional per-test cleanup, always invoked if non-NULL
+ * @param run       test body, < 0 means the test failed
+ * @param data      opaque test-vector pointer passed to 'run'
+ * @param name      test name used in log messages
+ */
+static void
+test_run(int (*setup)(void),
+ void (*teardown)(void),
+ int (*run)(void *),
+ void *data,
+ const char *name)
+{
+ int ret = 0;
+
+ if (setup != NULL) {
+ ret = setup();
+ if (ret < 0) {
+ AESNI_MB_RAWDEV_INFO("Error setting up test %s", name);
+ unsupported++;
+ /* Do not run a test whose setup failed */
+ goto end;
+ }
+ }
+
+ if (run != NULL) {
+ ret = run(data);
+ if (ret < 0) {
+ failed++;
+ AESNI_MB_RAWDEV_INFO("%s Failed", name);
+ } else {
+ passed++;
+ AESNI_MB_RAWDEV_INFO("%s Passed", name);
+ }
+ }
+
+end:
+ if (teardown != NULL)
+ teardown();
+
+ total++;
+}
+
+/*
+ * Test suite entry point for the AESNI-MB raw device.
+ *
+ * Performs the one-off suite setup, runs every DOCSIS (Crypto-CRC) and
+ * PON (Crypto-CRC-BIP) test vector - each vector in both the encrypt
+ * and the decrypt direction - then tears the suite down, prints a
+ * summary and returns TEST_FAILED if any case failed.
+ *
+ * @param dev_id  raw device to test
+ * @return TEST_SUCCESS or TEST_FAILED
+ */
+int
+aesni_mb_rawdev_test(uint16_t dev_id)
+{
+ if (testsuite_setup(dev_id) != TEST_SUCCESS) {
+ AESNI_MB_RAWDEV_ERR("Setup failed");
+ testsuite_teardown();
+ return TEST_FAILED;
+ }
+
+ /* DOCSIS: Crypto-CRC */
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_1, "1");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_2, "2");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_3, "3");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_4, "4");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_5, "5");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_6, "6");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_7, "7");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_8, "8");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_9, "9");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_10, "10");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_11, "11");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_12, "12");
+ TEST(test_setup, test_teardown, test_docsis_encrypt,
+ &docsis_test_case_13, "13");
+ /* Same vectors, decrypt direction */
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_1, "1");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_2, "2");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_3, "3");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_4, "4");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_5, "5");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_6, "6");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_7, "7");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_8, "8");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_9, "9");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_10, "10");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_11, "11");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_12, "12");
+ TEST(test_setup, test_teardown, test_docsis_decrypt,
+ &docsis_test_case_13, "13");
+
+ /* PON: Crypto-CRC-BIP */
+ TEST(test_setup, test_teardown, test_pon_encrypt,
+ &pon_test_case_1, "1");
+ TEST(test_setup, test_teardown, test_pon_encrypt,
+ &pon_test_case_2, "2");
+ TEST(test_setup, test_teardown, test_pon_encrypt,
+ &pon_test_case_3, "3");
+ TEST(test_setup, test_teardown, test_pon_encrypt,
+ &pon_test_case_4, "4");
+ TEST(test_setup, test_teardown, test_pon_encrypt,
+ &pon_test_case_5, "5");
+ TEST(test_setup, test_teardown, test_pon_encrypt,
+ &pon_test_case_6, "6");
+ TEST(test_setup, test_teardown, test_pon_decrypt,
+ &pon_test_case_1, "1");
+ TEST(test_setup, test_teardown, test_pon_decrypt,
+ &pon_test_case_2, "2");
+ TEST(test_setup, test_teardown, test_pon_decrypt,
+ &pon_test_case_3, "3");
+ TEST(test_setup, test_teardown, test_pon_decrypt,
+ &pon_test_case_4, "4");
+ TEST(test_setup, test_teardown, test_pon_decrypt,
+ &pon_test_case_5, "5");
+ TEST(test_setup, test_teardown, test_pon_decrypt,
+ &pon_test_case_6, "6");
+
+ testsuite_teardown();
+
+ printf("Total tests : %d\n", total);
+ printf("Passed : %d\n", passed);
+ printf("Failed : %d\n", failed);
+ printf("Not supported : %d\n", unsupported);
+
+ if (failed)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
diff --git a/drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h b/drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h
new file mode 100644
index 000000000..46bb220f4
--- /dev/null
+++ b/drivers/raw/aesni_mb/aesni_mb_rawdev_test_vectors.h
@@ -0,0 +1,1183 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _AESNI_MB_RAWDEV_TEST_VECTORS_H_
+#define _AESNI_MB_RAWDEV_TEST_VECTORS_H_
+
+#include <stdbool.h>
+
+/*
+ * DOCSIS test data and cases
+ * - encrypt direction: CRC-Crypto
+ * - decrypt direction: Crypto-CRC
+ */
+/*
+ * One DOCSIS test vector: cipher key, IV, and the plaintext/ciphertext
+ * pair for the same frame, each with its cipher/CRC offsets and flags
+ * selecting whether the cipher and/or CRC stage applies.
+ */
+struct docsis_test_data {
+ struct {
+ uint8_t data[16]; /* cipher key bytes */
+ unsigned int len; /* key length in bytes */
+ } key;
+
+ struct {
+ uint8_t data[16] __rte_aligned(16); /* cipher IV bytes */
+ unsigned int len; /* IV length in bytes */
+ } cipher_iv;
+
+ struct {
+ uint8_t data[1024]; /* frame before encryption (incl. CRC field) */
+ unsigned int len; /* total frame length in bytes */
+ unsigned int cipher_offset; /* byte offset where ciphering starts */
+ unsigned int crc_offset; /* byte offset where CRC coverage starts */
+ bool no_cipher; /* true = skip the cipher stage */
+ bool no_crc; /* true = skip the CRC stage */
+ } plaintext;
+
+ struct {
+ uint8_t data[1024]; /* expected frame after encryption */
+ unsigned int len; /* total frame length in bytes */
+ unsigned int cipher_offset; /* byte offset where ciphering starts */
+ unsigned int crc_offset; /* byte offset where CRC coverage starts */
+ bool no_cipher; /* true = skip the cipher stage */
+ bool no_crc; /* true = skip the CRC stage */
+ } ciphertext;
+};
+
+/* Case 1: 24-byte frame; cipher offset 18, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_1 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x7A, 0xF0,
+ /* CRC */
+ 0x61, 0xF8, 0x63, 0x42
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 2: 25-byte frame; cipher offset 18, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_2 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 25,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x7A, 0xF0, 0xDF,
+ /* CRC */
+ 0xFE, 0x12, 0x99, 0xE5
+ },
+ .len = 25,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 3: 34-byte frame; cipher offset 18, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_3 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 34,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0xD6, 0xE2, 0x70, 0x5C,
+ 0xE6, 0x4D, 0xCC, 0x8C, 0x47, 0xB7, 0x09, 0xD6,
+ /* CRC */
+ 0x54, 0x85, 0xF8, 0x32
+ },
+ .len = 34,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 4: 35-byte frame; cipher offset 18, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_4 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 35,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x92, 0x6A, 0xC2, 0xDC,
+ 0xEE, 0x3B, 0x31, 0xEC, 0x03, 0xDE, 0x95, 0x33,
+ 0x5E,
+ /* CRC */
+ 0xFE, 0x47, 0x3E, 0x22
+ },
+ .len = 35,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 5: 82-byte frame; cipher offset 18, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_5 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 82,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x77, 0x74, 0x56, 0x05,
+ 0xD1, 0x14, 0xA2, 0x8D, 0x2C, 0x9A, 0x11, 0xFC,
+ 0x7D, 0xB0, 0xE7, 0x18, 0xCE, 0x75, 0x7C, 0x89,
+ 0x14, 0x56, 0xE2, 0xF2, 0xB7, 0x47, 0x08, 0x27,
+ 0xF7, 0x08, 0x7A, 0x13, 0x90, 0x81, 0x75, 0xB0,
+ 0xC7, 0x91, 0x04, 0x83, 0xAD, 0x11, 0x46, 0x46,
+ 0xF8, 0x54, 0x87, 0xA0, 0x42, 0xF3, 0x71, 0xA9,
+ 0x8A, 0xCD, 0x59, 0x77, 0x67, 0x11, 0x1A, 0x87,
+ /* CRC */
+ 0xAB, 0xED, 0x2C, 0x26
+ },
+ .len = 82,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 6: 83-byte frame; cipher offset 18, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_6 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 83,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x77, 0x74, 0x56, 0x05,
+ 0xD1, 0x14, 0xA2, 0x8D, 0x2C, 0x9A, 0x11, 0xFC,
+ 0x7D, 0xB0, 0xE7, 0x18, 0xCE, 0x75, 0x7C, 0x89,
+ 0x14, 0x56, 0xE2, 0xF2, 0xB7, 0x47, 0x08, 0x27,
+ 0xF7, 0x08, 0x7A, 0x13, 0x90, 0x81, 0x75, 0xB0,
+ 0xC7, 0x91, 0x04, 0x83, 0xAD, 0x11, 0x46, 0x46,
+ 0xF8, 0x54, 0x87, 0xA0, 0xA4, 0x0C, 0xC2, 0xF0,
+ 0x81, 0x49, 0xA8, 0xA6, 0x6C, 0x48, 0xEB, 0x1F,
+ 0x4B,
+ /* CRC */
+ 0x2F, 0xD4, 0x48, 0x18
+ },
+ .len = 83,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 7: 83-byte frame; cipher offset 40, CRC offset 6; cipher and CRC on */
+struct docsis_test_data docsis_test_case_7 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0x3B, 0x9F, 0x72, 0x4C, 0xB5, 0x72,
+ 0x3E, 0x56, 0x54, 0x49, 0x13, 0x53, 0xC4, 0xAA,
+ 0xCD, 0xEA, 0x6A, 0x88, 0x99, 0x07, 0x86, 0xF4,
+ 0xCF, 0x03, 0x4E, 0xDF, 0x65, 0x61, 0x47, 0x5B,
+ 0x2F, 0x81, 0x09, 0x12, 0x9A, 0xC2, 0x24, 0x8C,
+ 0x09,
+ /* CRC */
+ 0x11, 0xB4, 0x06, 0x33
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = false
+ }
+};
+
+/* Case 8: 24-byte frame; cipher offset 18; cipher on, CRC stage skipped */
+struct docsis_test_data docsis_test_case_8 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = true
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x7A, 0xF0,
+ /* CRC */
+ 0x8A, 0x0F, 0x74, 0xE8
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = true
+ }
+};
+
+/* Case 9: 83-byte frame; cipher offset 40; cipher on, CRC stage skipped */
+struct docsis_test_data docsis_test_case_9 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = true
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0x3B, 0x9F, 0x72, 0x4C, 0xB5, 0x72,
+ 0x3E, 0x56, 0x54, 0x49, 0x13, 0x53, 0xC4, 0xAA,
+ 0xCD, 0xEA, 0x6A, 0x88, 0x99, 0x07, 0x86, 0xF4,
+ 0xCF, 0x03, 0x4E, 0xDF, 0x65, 0x61, 0x47, 0x5B,
+ 0x2F, 0x81, 0x09, 0x12, 0x9A, 0xC2, 0x24, 0x8C,
+ 0x09,
+ /* CRC */
+ 0x5D, 0x2B, 0x12, 0xF4
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = false,
+ .no_crc = true
+ }
+};
+
+/* Case 10: 24-byte frame; CRC offset 6; CRC on, cipher stage skipped */
+struct docsis_test_data docsis_test_case_10 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+ /* CRC */
+ 0x14, 0x08, 0xE8, 0x55
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = false
+ }
+};
+
+/* Case 11: 83-byte frame; CRC offset 6; CRC on, cipher stage skipped */
+struct docsis_test_data docsis_test_case_11 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = false
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xB3, 0x60, 0xEB, 0x38
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = false
+ }
+};
+
+/* Case 12: 24-byte frame; both cipher and CRC stages skipped (pass-through) */
+struct docsis_test_data docsis_test_case_12 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = true
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 24,
+ .cipher_offset = 18,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = true
+ }
+};
+
+/* Case 13: 83-byte frame; both cipher and CRC stages skipped (pass-through) */
+struct docsis_test_data docsis_test_case_13 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0xAA, 0xBB, 0xCC, 0xDD,
+ 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = true
+ },
+ .ciphertext = {
+ .data = {
+ /* DOCSIS header */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, 0x05,
+ 0x04, 0x03, 0x02, 0x01, 0x08, 0x00, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xAA,
+ /* CRC */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ },
+ .len = 83,
+ .cipher_offset = 40,
+ .crc_offset = 6,
+ .no_cipher = true,
+ .no_crc = true
+ }
+};
+
+/*
+ * PON test data and cases
+ * - encrypt direction: CRC-Crypto-BIP
+ * - decrypt direction: BIP-Crypto-CRC
+ */
+struct pon_test_data {
+ struct {
+ uint8_t data[16];
+ unsigned int len;
+ } key;
+
+ struct {
+ uint8_t data[16] __rte_aligned(16);
+ unsigned int len;
+ } cipher_iv;
+
+ struct {
+ uint8_t data[1024];
+ unsigned int len;
+ unsigned int cipher_offset;
+ unsigned int crc_offset;
+ unsigned int bip_offset;
+ unsigned int padding_len;
+ bool no_cipher;
+ } plaintext;
+
+ struct {
+ uint8_t data[1024];
+ unsigned int len;
+ unsigned int cipher_offset;
+ unsigned int crc_offset;
+ unsigned int bip_offset;
+ unsigned int padding_len;
+ bool no_cipher;
+ } ciphertext;
+
+ struct {
+ uint8_t data[8];
+ unsigned int len;
+ } output;
+};
+
+struct pon_test_data pon_test_case_1 = {
+ .key = {
+ .data = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04,
+ /* CRC */
+ 0x05, 0x06, 0x01, 0x01
+ },
+ .len = 16,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = false
+ },
+ .ciphertext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x20, 0x27, 0x11, 0x00, 0x00, 0x21, 0x23,
+ /* Ethernet frame */
+ 0xC7, 0x62, 0x82, 0xCA,
+ /* CRC */
+ 0x3E, 0x92, 0xC8, 0x5A
+ },
+ .len = 16,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = false
+ },
+ .output = {
+ /* Expected BIP */
+ .data = {0xF9, 0xD0, 0x4C, 0xA2},
+ .len = 4
+ }
+};
+
+struct pon_test_data pon_test_case_2 = {
+ .key = {
+ .data = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01,
+ /* CRC */
+ 0x81, 0x00, 0x00, 0x01
+ },
+ .len = 24,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = false
+ },
+ .ciphertext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x40, 0x27, 0x11, 0x00, 0x00, 0x29, 0x3C,
+ /* Ethernet frame */
+ 0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED,
+ 0xB7, 0x90, 0x1E, 0x02,
+ /* CRC */
+ 0xEA, 0x38, 0xA1, 0x78
+ },
+ .len = 24,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = false
+ },
+ .output = {
+ /* Expected BIP */
+ .data = {0x6C, 0xE5, 0xC6, 0x70},
+ .len = 4
+ }
+};
+
+struct pon_test_data pon_test_case_3 = {
+ .key = {
+ .data = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* XGEM header */
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0xB0, 0x7E,
+ 0x00, 0x00, 0x04, 0x06, 0x83, 0xBD, 0xC0, 0xA8,
+ 0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+ 0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+ 0x00, 0x00, 0x30, 0x31,
+ /* CRC */
+ 0x32, 0x33, 0x34, 0x35
+ },
+ .len = 72,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = false
+ },
+ .ciphertext = {
+ .data = {
+ /* XGEM header */
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+ /* Ethernet frame */
+ 0xC7, 0x62, 0x82, 0xCA, 0xF6, 0x6F, 0xF5, 0xED,
+ 0xB7, 0x90, 0x1E, 0x02, 0x6B, 0x2C, 0x08, 0x7D,
+ 0x3C, 0x90, 0xE8, 0x2C, 0x44, 0x30, 0x03, 0x29,
+ 0x5F, 0x88, 0xA9, 0xD6, 0x1E, 0xF9, 0xD1, 0xF1,
+ 0xD6, 0x16, 0x8C, 0x72, 0xA4, 0xCD, 0xD2, 0x8F,
+ 0x63, 0x26, 0xC9, 0x66, 0xB0, 0x65, 0x24, 0x9B,
+ 0x60, 0x5B, 0x18, 0x60, 0xBD, 0xD5, 0x06, 0x13,
+ 0x40, 0xC9, 0x60, 0x64,
+ /* CRC */
+ 0x36, 0x5F, 0x86, 0x8C
+ },
+ .len = 72,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = false
+ },
+ .output = {
+ /* Expected BIP */
+ .data = {0xDF, 0xE0, 0xAD, 0xFB},
+ .len = 4
+ }
+};
+
+struct pon_test_data pon_test_case_4 = {
+ .key = {
+ .data = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x39, 0x03, 0xFD, 0x00, 0x00, 0xB3, 0x6A,
+ /* Ethernet frame */
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x10, 0x11,
+ /* CRC */
+ 0x20, 0x21, 0x22, 0x23,
+ /* Padding */
+ 0x55, 0x55
+ },
+ .len = 24,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 2,
+ .no_cipher = false
+ },
+ .ciphertext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x39, 0x03, 0xFD, 0x00, 0x00, 0xB3, 0x6A,
+ /* Ethernet frame */
+ 0x73, 0xE0, 0x5D, 0x5D, 0x32, 0x9C, 0x3B, 0xFA,
+ 0x6B, 0x66,
+ /* CRC */
+ 0xF6, 0x8E, 0x5B, 0xD5,
+ /* Padding */
+ 0xAB, 0xCD
+ },
+ .len = 24,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 2,
+ .no_cipher = false
+ },
+ .output = {
+ /* Expected BIP */
+ .data = {0x71, 0xF6, 0x8B, 0x73},
+ .len = 4
+ }
+};
+
+struct pon_test_data pon_test_case_5 = {
+ .key = {
+ .data = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x05, 0x03, 0xFD, 0x00, 0x00, 0xB9, 0xB4,
+ /* Ethernet frame */
+ 0x08,
+ /* Padding */
+ 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55
+ },
+ .len = 16,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 7,
+ .no_cipher = false
+ },
+ .ciphertext = {
+ .data = {
+ /* XGEM header */
+ 0x00, 0x05, 0x03, 0xFD, 0x00, 0x00, 0xB9, 0xB4,
+ /* Ethernet frame */
+ 0x73,
+ /* Padding */
+ 0xBC, 0x02, 0x03, 0x6B, 0xC4, 0x60, 0xA0
+ },
+ .len = 16,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 7,
+ .no_cipher = false
+ },
+ .output = {
+ /* Expected BIP */
+ .data = {0x18, 0x7D, 0xD8, 0xEA},
+ .len = 4
+ }
+};
+
+struct pon_test_data pon_test_case_6 = {
+ .key = {
+ .data = {
+ 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
+ 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00
+ },
+ .len = 16
+ },
+ .cipher_iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ /* XGEM header */
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6A, 0xB0, 0x7E,
+ 0x00, 0x00, 0x04, 0x06, 0x83, 0xBD, 0xC0, 0xA8,
+ 0x00, 0x01, 0xC0, 0xA8, 0x01, 0x01, 0x04, 0xD2,
+ 0x16, 0x2E, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xA6, 0x33,
+ 0x00, 0x00, 0x30, 0x31,
+ /* CRC */
+ 0x32, 0x33, 0x34, 0x35
+ },
+ .len = 72,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = true
+ },
+ .ciphertext = {
+ .data = {
+ /* XGEM header */
+ 0x01, 0x00, 0x27, 0x11, 0x00, 0x00, 0x33, 0x0B,
+ /* Ethernet frame */
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x81, 0x00, 0x00, 0x01,
+ 0x08, 0x00, 0x45, 0x00, 0x00, 0x6a, 0xb0, 0x7e,
+ 0x00, 0x00, 0x04, 0x06, 0x83, 0xbd, 0xc0, 0xa8,
+ 0x00, 0x01, 0xc0, 0xa8, 0x01, 0x01, 0x04, 0xd2,
+ 0x16, 0x2e, 0x12, 0x34, 0x56, 0x78, 0x12, 0x34,
+ 0x56, 0x90, 0x50, 0x10, 0x20, 0x00, 0xa6, 0x33,
+ 0x00, 0x00, 0x30, 0x31,
+ /* CRC */
+ 0x53, 0xC1, 0xE6, 0x0C
+ },
+ .len = 72,
+ .cipher_offset = 8,
+ .crc_offset = 8,
+ .bip_offset = 0,
+ .padding_len = 0,
+ .no_cipher = true
+ },
+ .output = {
+ /* Expected BIP */
+ .data = {0x6A, 0xD5, 0xC2, 0xAB},
+ .len = 4
+ }
+};
+#endif /* _AESNI_MB_RAWDEV_TEST_VECTORS_H_ */
diff --git a/drivers/raw/aesni_mb/meson.build b/drivers/raw/aesni_mb/meson.build
new file mode 100644
index 000000000..085f629be
--- /dev/null
+++ b/drivers/raw/aesni_mb/meson.build
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020 Intel Corporation.
+
+IMB_required_ver = '0.53.3-dev'
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+ build = false
+ reason = 'missing dependency, "libIPSec_MB"'
+else
+ ext_deps += lib
+
+ # version comes with quotes, so we split based on " and take the middle
+ imb_ver = cc.get_define('IMB_VERSION_STR',
+ prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
+
+ if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
+ reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
+ IMB_required_ver, imb_ver)
+ build = false
+ endif
+
+endif
+
+sources = files('aesni_mb_rawdev.c', 'aesni_mb_rawdev_test.c')
+allow_experimental_apis = true
+deps += ['bus_vdev', 'net', 'rawdev', 'cryptodev', 'common_multi_fn']
diff --git a/drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map b/drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map
new file mode 100644
index 000000000..fa9e17c29
--- /dev/null
+++ b/drivers/raw/aesni_mb/rte_rawdev_aesni_mb_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
\ No newline at end of file
diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build
index bb5797760..c0bbc84a6 100644
--- a/drivers/raw/meson.build
+++ b/drivers/raw/meson.build
@@ -5,7 +5,8 @@ drivers = ['dpaa2_cmdif', 'dpaa2_qdma',
'ifpga', 'ioat', 'ntb',
'octeontx2_dma',
'octeontx2_ep',
- 'skeleton']
+ 'skeleton',
+ 'aesni_mb']
std_deps = ['rawdev']
config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
driver_name_fmt = 'rte_rawdev_@0@'
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index b836d220d..a6e1e925f 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -347,6 +347,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV) += -lrte_rawdev_ioat
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV) += -lrte_rawdev_ntb
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV) += -lrte_rawdev_octeontx2_dma
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EP_RAWDEV) += -lrte_rawdev_octeontx2_ep
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += -lrte_pmd_aesni_mb_rawdev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB_RAWDEV) += -lIPSec_MB
_LDLIBS-$(CONFIG_RTE_LIBRTE_MULTI_FN_COMMON) += -lrte_multi_fn
endif # CONFIG_RTE_LIBRTE_RAWDEV
--
2.17.1
^ permalink raw reply [flat|nested] 22+ messages in thread
* [dpdk-dev] [PATCH v2 4/4] app/crypto-perf: add support for multi-function processing
2020-04-03 16:36 [dpdk-dev] [PATCH v2 0/4] introduce multi-function processing support David Coyle
` (2 preceding siblings ...)
2020-04-03 16:36 ` [dpdk-dev] [PATCH v2 3/4] test/rawdev: add aesni_mb raw device tests David Coyle
@ 2020-04-03 16:36 ` David Coyle
2020-04-07 18:55 ` De Lara Guarch, Pablo
2020-04-06 14:28 ` [dpdk-dev] [PATCH v2 0/4] introduce multi-function processing support Ferruh Yigit
4 siblings, 1 reply; 22+ messages in thread
From: David Coyle @ 2020-04-03 16:36 UTC (permalink / raw)
To: dev
Cc: declan.doherty, fiona.trahe, pablo.de.lara.guarch, brendan.ryan,
shreyansh.jain, hemant.agrawal, David Coyle, Mairtin o Loingsigh
Support for multi-function operations, via a raw device, has been
added to the test-crypto-perf app.
A new optype has been added: multi-fn
A new parameter has been added for multi-fn mode:
--multi-fn-params <params>
The <params> field specifies what type of multi-function processing
is required and the options associated with it. Currently the
following are supported:
docsis-cipher-crc,<cipher_offset>,<crc_offset>
pon-cipher-crc-bip,<buffer_padding_sz>
Signed-off-by: David Coyle <david.coyle@intel.com>
Signed-off-by: Mairtin o Loingsigh <mairtin.oloingsigh@intel.com>
---
app/test-crypto-perf/Makefile | 5 +
app/test-crypto-perf/cperf_ops.c | 265 ++++++++++++
app/test-crypto-perf/cperf_options.h | 37 +-
app/test-crypto-perf/cperf_options_parsing.c | 396 ++++++++++++++++--
app/test-crypto-perf/cperf_test_common.c | 88 +++-
app/test-crypto-perf/cperf_test_latency.c | 176 ++++++--
.../cperf_test_pmd_cyclecount.c | 96 ++++-
app/test-crypto-perf/cperf_test_throughput.c | 164 ++++++--
.../cperf_test_vector_parsing.c | 35 +-
app/test-crypto-perf/cperf_test_vectors.c | 53 +++
app/test-crypto-perf/cperf_test_vectors.h | 9 +
app/test-crypto-perf/cperf_test_verify.c | 205 +++++++--
app/test-crypto-perf/main.c | 255 +++++++++--
app/test-crypto-perf/meson.build | 6 +
14 files changed, 1584 insertions(+), 206 deletions(-)
diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile
index 78135f38c..baf706e4a 100644
--- a/app/test-crypto-perf/Makefile
+++ b/app/test-crypto-perf/Makefile
@@ -26,4 +26,9 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
LDLIBS += -lrte_pmd_crypto_scheduler
endif
+ifeq ($(CONFIG_RTE_LIBRTE_MULTI_FN_COMMON)$(CONFIG_RTE_LIBRTE_RAWDEV),yy)
+CFLAGS += -DMULTI_FN_SUPPORTED
+LDLIBS += -lrte_multi_fn
+endif
+
include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 97584ceed..29f294ac5 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -3,6 +3,10 @@
*/
#include <rte_cryptodev.h>
+#include <rte_ether.h>
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#include "cperf_ops.h"
#include "cperf_test_vectors.h"
@@ -505,6 +509,168 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
return 0;
}
+#ifdef MULTI_FN_SUPPORTED
+static int
+cperf_set_ops_multi_fn_cipher_crc(struct rte_crypto_op **ops,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset __rte_unused,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ uint16_t iv_offset, uint32_t *imix_idx)
+{
+ uint32_t buffer_sz, offset;
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_multi_fn_op *mf_op =
+ (struct rte_multi_fn_op *)ops[i];
+ struct rte_crypto_sym_op *cipher_op;
+ struct rte_multi_fn_err_detect_op *crc_op;
+ struct rte_multi_fn_op *mf_cipher_op;
+
+ mf_op->sess = (struct rte_multi_fn_session *)sess;
+ mf_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+ mf_op->m_dst = NULL;
+
+ if (options->imix_distribution_count) {
+ buffer_sz =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ buffer_sz = options->test_buffer_size;
+
+ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* CRC -> Cipher */
+ crc_op = &mf_op->err_detect;
+ cipher_op = &mf_op->next->crypto_sym;
+ mf_cipher_op = mf_op->next;
+ } else {
+ /* Cipher -> CRC */
+ cipher_op = &mf_op->crypto_sym;
+ crc_op = &mf_op->next->err_detect;
+ mf_cipher_op = mf_op;
+ }
+
+ crc_op->data.offset = test_vector->multi_fn_data.crc_offset;
+ crc_op->data.length = buffer_sz - crc_op->data.offset -
+ RTE_ETHER_CRC_LEN;
+ offset = crc_op->data.offset + crc_op->data.length;
+ crc_op->output.data = rte_pktmbuf_mtod_offset(mf_op->m_src,
+ uint8_t *,
+ offset);
+ crc_op->output.phys_addr = rte_pktmbuf_iova_offset(mf_op->m_src,
+ offset);
+
+ cipher_op->cipher.data.offset = test_vector->data.cipher_offset;
+ cipher_op->cipher.data.length = buffer_sz -
+ cipher_op->cipher.data.offset;
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ uint8_t *iv_ptr = (uint8_t *)mf_cipher_op + iv_offset;
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+ }
+ }
+
+ return 0;
+}
+
+#define PLI_SHIFT_BITS 2
+
+static int
+cperf_set_ops_multi_fn_cipher_crc_bip(struct rte_crypto_op **ops,
+ uint32_t src_buf_offset, uint32_t dst_buf_offset __rte_unused,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector, uint16_t iv_offset,
+ uint32_t *imix_idx)
+{
+ uint32_t buffer_sz, offset;
+ uint16_t i;
+ int crc_len;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_multi_fn_op *mf_op =
+ (struct rte_multi_fn_op *)ops[i];
+ struct rte_crypto_sym_op *cipher_op;
+ struct rte_multi_fn_err_detect_op *crc_op;
+ struct rte_multi_fn_err_detect_op *bip_op;
+ struct rte_multi_fn_op *mf_cipher_op;
+
+ mf_op->sess = (struct rte_multi_fn_session *)sess;
+ mf_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] +
+ src_buf_offset);
+ mf_op->m_dst = NULL;
+
+ if (options->imix_distribution_count) {
+ buffer_sz =
+ options->imix_buffer_sizes[*imix_idx];
+ *imix_idx = (*imix_idx + 1) % options->pool_sz;
+ } else
+ buffer_sz = options->test_buffer_size;
+
+ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* CRC -> Cipher -> BIP */
+ crc_op = &mf_op->err_detect;
+ cipher_op = &mf_op->next->crypto_sym;
+ bip_op = &mf_op->next->next->err_detect;
+ } else {
+ /* BIP-> Cipher -> CRC */
+ bip_op = &mf_op->err_detect;
+ cipher_op = &mf_op->next->crypto_sym;
+ crc_op = &mf_op->next->next->err_detect;
+ }
+ mf_cipher_op = mf_op->next;
+
+ crc_op->data.offset = test_vector->multi_fn_data.crc_offset;
+ crc_len = buffer_sz - crc_op->data.offset -
+ options->multi_fn_opts.buffer_padding -
+ RTE_ETHER_CRC_LEN;
+ crc_len = crc_len > 0 ? crc_len : 0;
+ crc_op->data.length = crc_len;
+ offset = crc_op->data.offset + crc_op->data.length;
+ crc_op->output.data = rte_pktmbuf_mtod_offset(mf_op->m_src,
+ uint8_t *,
+ offset);
+ crc_op->output.phys_addr = rte_pktmbuf_iova_offset(mf_op->m_src,
+ offset);
+
+ cipher_op->cipher.data.offset = test_vector->data.cipher_offset;
+ cipher_op->cipher.data.length = buffer_sz -
+ cipher_op->cipher.data.offset;
+
+ bip_op->data.offset = test_vector->multi_fn_data.bip_offset;
+ bip_op->data.length = buffer_sz - bip_op->data.offset;
+ offset = options->test_buffer_size;
+ bip_op->output.data = rte_pktmbuf_mtod_offset(mf_op->m_src,
+ uint8_t *,
+ offset);
+ bip_op->output.phys_addr = rte_pktmbuf_iova_offset(mf_op->m_src,
+ offset);
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY) {
+ uint8_t *iv_ptr = (uint8_t *)mf_cipher_op + iv_offset;
+ memcpy(iv_ptr, test_vector->cipher_iv.data,
+ test_vector->cipher_iv.length);
+ }
+
+ /*
+ * This is very protocol specific but IPSec MB uses the PLI
+ * (Payload Length Indication) field of the PON frame header
+ * to get the CRC length. So set the PLI here now
+ */
+ uint16_t *pli_key_idx = rte_pktmbuf_mtod(mf_op->m_src,
+ uint16_t *);
+ uint16_t pli = cipher_op->cipher.data.length -
+ options->multi_fn_opts.buffer_padding;
+ *pli_key_idx = rte_bswap16(pli) << PLI_SHIFT_BITS;
+ }
+
+ return 0;
+}
+#endif /* MULTI_FN_SUPPORTED */
+
static struct rte_cryptodev_sym_session *
cperf_create_session(struct rte_mempool *sess_mp,
struct rte_mempool *priv_mp,
@@ -590,6 +756,90 @@ cperf_create_session(struct rte_mempool *sess_mp,
&sess_conf, sess_mp);
}
#endif
+
+#ifdef MULTI_FN_SUPPORTED
+ /*
+ * multi function
+ */
+ if (options->op_type == CPERF_MULTI_FN) {
+ struct rte_multi_fn_xform mf_cipher_xform;
+ struct rte_multi_fn_xform mf_crc_xform;
+ struct rte_multi_fn_xform mf_bip_xform;
+ struct rte_multi_fn_xform *first_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform;
+
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC ||
+ options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+
+ mf_cipher_xform.type =
+ RTE_MULTI_FN_XFORM_TYPE_CRYPTO_SYM;
+ mf_cipher_xform.crypto_sym.type =
+ RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform = &mf_cipher_xform.crypto_sym.cipher;
+ cipher_xform->algo = options->cipher_algo;
+ cipher_xform->op = options->cipher_op;
+ cipher_xform->iv.offset = iv_offset;
+ cipher_xform->key.data = test_vector->cipher_key.data;
+ cipher_xform->key.length =
+ test_vector->cipher_key.length;
+ cipher_xform->iv.length = test_vector->cipher_iv.length;
+
+ mf_crc_xform.type =
+ RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ mf_crc_xform.err_detect.algo =
+ RTE_MULTI_FN_ERR_DETECT_CRC32_ETH;
+
+ if (cipher_xform->op ==
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ mf_crc_xform.err_detect.op =
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+ } else {
+ mf_crc_xform.err_detect.op =
+ RTE_MULTI_FN_ERR_DETECT_OP_VERIFY;
+ }
+
+ mf_bip_xform.type =
+ RTE_MULTI_FN_XFORM_TYPE_ERR_DETECT;
+ mf_bip_xform.err_detect.algo =
+ RTE_MULTI_FN_ERR_DETECT_BIP32;
+ mf_bip_xform.err_detect.op =
+ RTE_MULTI_FN_ERR_DETECT_OP_GENERATE;
+
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) {
+ if (cipher_xform->op ==
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ first_xform = &mf_crc_xform;
+ mf_crc_xform.next = &mf_cipher_xform;
+ mf_cipher_xform.next = NULL;
+ } else {
+ first_xform = &mf_cipher_xform;
+ mf_cipher_xform.next = &mf_crc_xform;
+ mf_crc_xform.next = NULL;
+ }
+ } else {
+ if (cipher_xform->op ==
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ first_xform = &mf_crc_xform;
+ mf_crc_xform.next = &mf_cipher_xform;
+ mf_cipher_xform.next = &mf_bip_xform;
+ mf_bip_xform.next = NULL;
+ } else {
+ first_xform = &mf_bip_xform;
+ mf_bip_xform.next = &mf_cipher_xform;
+ mf_cipher_xform.next = &mf_crc_xform;
+ mf_crc_xform.next = NULL;
+ }
+ }
+ }
+
+ return (void *)rte_multi_fn_session_create(dev_id,
+ first_xform, rte_socket_id());
+ }
+#endif /* MULTI_FN_SUPPORTED */
+
sess = rte_cryptodev_sym_session_create(sess_mp);
/*
* cipher only
@@ -773,5 +1023,20 @@ cperf_get_op_functions(const struct cperf_options *options,
return 0;
}
#endif
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC)
+ op_fns->populate_ops =
+ cperf_set_ops_multi_fn_cipher_crc;
+ else if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP)
+ op_fns->populate_ops =
+ cperf_set_ops_multi_fn_cipher_crc_bip;
+ else
+ return -1;
+ return 0;
+ }
+#endif /* MULTI_FN_SUPPORTED */
return -1;
}
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 1ed0a77e5..0ca542224 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -10,6 +10,9 @@
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security.h>
#endif
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#define CPERF_PTEST_TYPE ("ptest")
#define CPERF_SILENT ("silent")
@@ -52,6 +55,10 @@
#define CPERF_PDCP_DOMAIN ("pdcp-domain")
#endif
+#ifdef MULTI_FN_SUPPORTED
+#define CPERF_MULTI_FN_PARAMS ("multi-fn-params")
+#endif /* MULTI_FN_SUPPORTED */
+
#define CPERF_CSV ("csv-friendly")
/* benchmark-specific options */
@@ -75,11 +82,34 @@ enum cperf_op_type {
CPERF_CIPHER_THEN_AUTH,
CPERF_AUTH_THEN_CIPHER,
CPERF_AEAD,
- CPERF_PDCP
+ CPERF_PDCP,
+#ifdef MULTI_FN_SUPPORTED
+ CPERF_MULTI_FN
+#endif /* MULTI_FN_SUPPORTED */
};
extern const char *cperf_op_type_strs[];
+#ifdef MULTI_FN_SUPPORTED
+enum cperf_multi_fn_ops {
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC,
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP
+};
+
+extern const char *cperf_multi_fn_ops_strs[];
+
+struct cperf_multi_fn_options {
+ enum cperf_multi_fn_ops ops;
+
+ /* DOCSIS_CIPHER_CRC */
+ uint32_t cipher_offset;
+ uint32_t crc_offset;
+
+ /* PON_CIPHER_CRC_BIP */
+ uint32_t buffer_padding;
+};
+#endif /* MULTI_FN_SUPPORTED */
+
struct cperf_options {
enum cperf_perf_test_type test;
@@ -123,6 +153,11 @@ struct cperf_options {
uint16_t pdcp_sn_sz;
enum rte_security_pdcp_domain pdcp_domain;
#endif
+
+#ifdef MULTI_FN_SUPPORTED
+ struct cperf_multi_fn_options multi_fn_opts;
+#endif /* MULTI_FN_SUPPORTED */
+
char device_type[RTE_CRYPTODEV_NAME_MAX_LEN];
enum cperf_op_type op_type;
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index f43c5bede..25da68105 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -7,6 +7,7 @@
#include <rte_cryptodev.h>
#include <rte_malloc.h>
+#include <rte_ether.h>
#include "cperf_options.h"
@@ -34,7 +35,7 @@ usage(char *progname)
" --desc-nb N: set number of descriptors for each crypto device\n"
" --devtype TYPE: set crypto device type to use\n"
" --optype cipher-only / auth-only / cipher-then-auth /\n"
- " auth-then-cipher / aead : set operation type\n"
+ " auth-then-cipher / aead%s : set operation type\n"
" --sessionless: enable session-less crypto operations\n"
" --out-of-place: enable out-of-place crypto operations\n"
" --test-file NAME: set the test vector file path\n"
@@ -53,11 +54,20 @@ usage(char *progname)
" --aead-iv-sz N: set the AEAD IV size\n"
" --aead-aad-sz N: set the AEAD AAD size\n"
" --digest-sz N: set the digest size\n"
+#ifdef MULTI_FN_SUPPORTED
+ " --multi-fn-params PARAMS: set multi function parameters\n"
+#endif /* MULTI_FN_SUPPORTED */
" --pmd-cyclecount-delay-ms N: set delay between enqueue\n"
" and dequeue in pmd-cyclecount benchmarking mode\n"
" --csv-friendly: enable test result output CSV friendly\n"
" -h: prints this help\n",
- progname);
+ progname,
+#ifdef MULTI_FN_SUPPORTED
+ " / multi-fn"
+#else
+ ""
+#endif /* MULTI_FN_SUPPORTED */
+ );
}
static int
@@ -446,7 +456,13 @@ parse_op_type(struct cperf_options *opts, const char *arg)
{
cperf_op_type_strs[CPERF_PDCP],
CPERF_PDCP
+ },
+#ifdef MULTI_FN_SUPPORTED
+ {
+ cperf_op_type_strs[CPERF_MULTI_FN],
+ CPERF_MULTI_FN
}
+#endif /* MULTI_FN_SUPPORTED */
};
int id = get_str_key_id_mapping(optype_namemap,
@@ -744,6 +760,112 @@ parse_aead_aad_sz(struct cperf_options *opts, const char *arg)
return parse_uint16_t(&opts->aead_aad_sz, arg);
}
+#ifdef MULTI_FN_SUPPORTED
+static int
+parse_multi_fn_ops(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map multi_fn_ops_namemap[] = {
+ {
+ cperf_multi_fn_ops_strs
+ [CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC],
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC
+ },
+ {
+ cperf_multi_fn_ops_strs
+ [CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP],
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP
+ }
+ };
+
+ int id = get_str_key_id_mapping(multi_fn_ops_namemap,
+ RTE_DIM(multi_fn_ops_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid multi function operation specified\n");
+ return -1;
+ }
+
+ opts->multi_fn_opts.ops = (enum cperf_multi_fn_ops)id;
+
+ return 0;
+}
+
+static int
+parse_multi_fn_params(struct cperf_options *opts, const char *arg)
+{
+ char *token;
+ char *copy_arg = strdup(arg);
+
+ if (copy_arg == NULL)
+ return -1;
+
+ errno = 0;
+ token = strtok(copy_arg, ",");
+
+ /* Parse first value */
+ if (token == NULL || parse_multi_fn_ops(opts, token) < 0)
+ goto err_multi_fn_opts;
+
+ if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) {
+ /* Next params is cipher_offset */
+ token = strtok(NULL, ",");
+
+ if (token == NULL ||
+ parse_uint32_t(&opts->multi_fn_opts.cipher_offset,
+ token) < 0) {
+ RTE_LOG(ERR, USER1, "invalid %s multi function cipher "
+ "offset specified\n",
+ cperf_multi_fn_ops_strs[
+ opts->multi_fn_opts.ops]);
+ goto err_multi_fn_opts;
+ }
+
+ /* Next params is crc_offset */
+ token = strtok(NULL, ",");
+
+ if (token == NULL ||
+ parse_uint32_t(&opts->multi_fn_opts.crc_offset,
+ token) < 0) {
+ RTE_LOG(ERR, USER1, "invalid %s multi function crc "
+ "offset specified\n",
+ cperf_multi_fn_ops_strs[
+ opts->multi_fn_opts.ops]);
+ goto err_multi_fn_opts;
+ }
+
+ } else if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ /* Next param is buffer_padding */
+ token = strtok(NULL, ",");
+
+ if (token == NULL ||
+ parse_uint32_t(&opts->multi_fn_opts.buffer_padding,
+ token) < 0) {
+ RTE_LOG(ERR, USER1, "invalid %s multi function buffer "
+ "padding specified\n",
+ cperf_multi_fn_ops_strs[
+ opts->multi_fn_opts.ops]);
+ goto err_multi_fn_opts;
+ }
+ }
+
+ token = strtok(NULL, ",");
+
+ if (token != NULL) {
+ RTE_LOG(ERR, USER1, "unknown %s multi function parameter\n",
+ cperf_multi_fn_ops_strs[opts->multi_fn_opts.ops]);
+ goto err_multi_fn_opts;
+ }
+
+ free(copy_arg);
+ return 0;
+
+err_multi_fn_opts:
+ free(copy_arg);
+ return -1;
+}
+#endif /* MULTI_FN_SUPPORTED */
+
static int
parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused)
{
@@ -821,6 +943,11 @@ static struct option lgopts[] = {
{ CPERF_PDCP_SN_SZ, required_argument, 0, 0 },
{ CPERF_PDCP_DOMAIN, required_argument, 0, 0 },
#endif
+
+#ifdef MULTI_FN_SUPPORTED
+ { CPERF_MULTI_FN_PARAMS, required_argument, 0, 0 },
+#endif /* MULTI_FN_SUPPORTED */
+
{ CPERF_CSV, no_argument, 0, 0},
{ CPERF_PMDCC_DELAY_MS, required_argument, 0, 0 },
@@ -891,47 +1018,57 @@ cperf_options_default(struct cperf_options *opts)
opts->pdcp_sn_sz = 12;
opts->pdcp_domain = RTE_SECURITY_PDCP_MODE_CONTROL;
#endif
+
+#ifdef MULTI_FN_SUPPORTED
+ opts->multi_fn_opts.ops = CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC;
+ opts->multi_fn_opts.cipher_offset = 0;
+ opts->multi_fn_opts.crc_offset = 0;
+ opts->multi_fn_opts.buffer_padding = 0;
+#endif /* MULTI_FN_SUPPORTED */
}
static int
cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{
struct long_opt_parser parsermap[] = {
- { CPERF_PTEST_TYPE, parse_cperf_test_type },
- { CPERF_SILENT, parse_silent },
- { CPERF_POOL_SIZE, parse_pool_sz },
- { CPERF_TOTAL_OPS, parse_total_ops },
- { CPERF_BURST_SIZE, parse_burst_sz },
- { CPERF_BUFFER_SIZE, parse_buffer_sz },
- { CPERF_SEGMENT_SIZE, parse_segment_sz },
- { CPERF_DESC_NB, parse_desc_nb },
- { CPERF_DEVTYPE, parse_device_type },
- { CPERF_OPTYPE, parse_op_type },
- { CPERF_SESSIONLESS, parse_sessionless },
- { CPERF_OUT_OF_PLACE, parse_out_of_place },
- { CPERF_IMIX, parse_imix },
- { CPERF_TEST_FILE, parse_test_file },
- { CPERF_TEST_NAME, parse_test_name },
- { CPERF_CIPHER_ALGO, parse_cipher_algo },
- { CPERF_CIPHER_OP, parse_cipher_op },
- { CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz },
- { CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz },
- { CPERF_AUTH_ALGO, parse_auth_algo },
- { CPERF_AUTH_OP, parse_auth_op },
- { CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
- { CPERF_AUTH_IV_SZ, parse_auth_iv_sz },
- { CPERF_AEAD_ALGO, parse_aead_algo },
- { CPERF_AEAD_OP, parse_aead_op },
- { CPERF_AEAD_KEY_SZ, parse_aead_key_sz },
- { CPERF_AEAD_IV_SZ, parse_aead_iv_sz },
- { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
- { CPERF_DIGEST_SZ, parse_digest_sz },
+ { CPERF_PTEST_TYPE, parse_cperf_test_type },
+ { CPERF_SILENT, parse_silent },
+ { CPERF_POOL_SIZE, parse_pool_sz },
+ { CPERF_TOTAL_OPS, parse_total_ops },
+ { CPERF_BURST_SIZE, parse_burst_sz },
+ { CPERF_BUFFER_SIZE, parse_buffer_sz },
+ { CPERF_SEGMENT_SIZE, parse_segment_sz },
+ { CPERF_DESC_NB, parse_desc_nb },
+ { CPERF_DEVTYPE, parse_device_type },
+ { CPERF_OPTYPE, parse_op_type },
+ { CPERF_SESSIONLESS, parse_sessionless },
+ { CPERF_OUT_OF_PLACE, parse_out_of_place },
+ { CPERF_IMIX, parse_imix },
+ { CPERF_TEST_FILE, parse_test_file },
+ { CPERF_TEST_NAME, parse_test_name },
+ { CPERF_CIPHER_ALGO, parse_cipher_algo },
+ { CPERF_CIPHER_OP, parse_cipher_op },
+ { CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz },
+ { CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz },
+ { CPERF_AUTH_ALGO, parse_auth_algo },
+ { CPERF_AUTH_OP, parse_auth_op },
+ { CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
+ { CPERF_AUTH_IV_SZ, parse_auth_iv_sz },
+ { CPERF_AEAD_ALGO, parse_aead_algo },
+ { CPERF_AEAD_OP, parse_aead_op },
+ { CPERF_AEAD_KEY_SZ, parse_aead_key_sz },
+ { CPERF_AEAD_IV_SZ, parse_aead_iv_sz },
+ { CPERF_AEAD_AAD_SZ, parse_aead_aad_sz },
+ { CPERF_DIGEST_SZ, parse_digest_sz },
#ifdef RTE_LIBRTE_SECURITY
- { CPERF_PDCP_SN_SZ, parse_pdcp_sn_sz },
- { CPERF_PDCP_DOMAIN, parse_pdcp_domain },
+ { CPERF_PDCP_SN_SZ, parse_pdcp_sn_sz },
+ { CPERF_PDCP_DOMAIN, parse_pdcp_domain },
#endif
- { CPERF_CSV, parse_csv_friendly},
- { CPERF_PMDCC_DELAY_MS, parse_pmd_cyclecount_delay_ms},
+#ifdef MULTI_FN_SUPPORTED
+ { CPERF_MULTI_FN_PARAMS, parse_multi_fn_params },
+#endif /* MULTI_FN_SUPPORTED */
+ { CPERF_CSV, parse_csv_friendly },
+ { CPERF_PMDCC_DELAY_MS, parse_pmd_cyclecount_delay_ms },
};
unsigned int i;
@@ -1031,6 +1168,155 @@ check_cipher_buffer_length(struct cperf_options *options)
return 0;
}
+#ifdef MULTI_FN_SUPPORTED
+#define DOCSIS_CIPHER_CRC_OFFSET_DIFF (RTE_ETHER_HDR_LEN - RTE_ETHER_TYPE_LEN)
+#define DOCSIS_MIN_CIPHER_SIZE (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+
+#define PON_FRAME_HDR_SIZE (8U)
+#define PON_FRAME_MULTIPLE_SIZE (4)
+#define PON_FRAME_INVALID_SIZE (12)
+
+static int
+check_multi_fn_options(struct cperf_options *options)
+{
+ uint32_t buffer_size, buffer_size_idx = 0;
+
+ options->digest_sz = 0;
+
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC ||
+ options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+
+ /* Check the device type is a rawdev */
+ if (strcmp(options->device_type, "rawdev_aesni_mb") != 0) {
+ RTE_LOG(ERR, USER1, "Invalid device type %s for "
+ "multi-function\n",
+ options->device_type);
+ return -EINVAL;
+ }
+
+ /* Only single segment supported */
+ if (options->segment_sz < options->max_buffer_size) {
+ RTE_LOG(ERR, USER1, "Segmented buffers not supported "
+ "for multi-function\n");
+ return -EINVAL;
+ }
+
+ /* Out-of-place not supported */
+ if (options->out_of_place) {
+ RTE_LOG(ERR, USER1, "Out-of-place not supported for "
+ "multi-function\n");
+ return -EINVAL;
+ }
+ }
+
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) {
+ /*
+ * Cipher offset must be at least 12 bytes (Ethernet SRC and
+ * DEST MACs) greater than CRC offset
+ */
+ if (options->multi_fn_opts.cipher_offset <
+ options->multi_fn_opts.crc_offset +
+ DOCSIS_CIPHER_CRC_OFFSET_DIFF) {
+ RTE_LOG(ERR, USER1, "Cipher and CRC offsets not valid "
+ "for %s multi-function operation - "
+ "cipher_offset must be greater than or equal to "
+ "crc_offset + %u\n",
+ cperf_multi_fn_ops_strs[
+ options->multi_fn_opts.ops],
+ DOCSIS_CIPHER_CRC_OFFSET_DIFF);
+ return -EINVAL;
+ }
+ }
+
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ options->multi_fn_opts.cipher_offset = PON_FRAME_HDR_SIZE;
+ options->multi_fn_opts.crc_offset = PON_FRAME_HDR_SIZE;
+ }
+
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) {
+ /* Buffer must be large enough to accommodate offsets */
+ if (buffer_size <
+ (options->multi_fn_opts.cipher_offset +
+ DOCSIS_MIN_CIPHER_SIZE) ||
+ buffer_size <
+ (options->multi_fn_opts.crc_offset +
+ RTE_ETHER_CRC_LEN)) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes "
+ "are not valid for %s multi-function "
+ "operation\n",
+ cperf_multi_fn_ops_strs[
+ options->multi_fn_opts.ops]);
+ return -EINVAL;
+ }
+ } else if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ /*
+ * Buffer length must be:
+ * - a multiple of 4
+ * - large enough to accommodate PON frame header and
+ * any padding
+ * - not 12
+ */
+ if (((buffer_size % PON_FRAME_MULTIPLE_SIZE) != 0) ||
+ (buffer_size < (PON_FRAME_HDR_SIZE +
+ options->multi_fn_opts.buffer_padding)) ||
+ (buffer_size == PON_FRAME_INVALID_SIZE)) {
+
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes "
+ "are not suitable for %s "
+ "multi-function operation\n",
+ cperf_multi_fn_ops_strs[
+ options->multi_fn_opts.ops]);
+ return -EINVAL;
+ }
+
+ /*
+ * Padding length must be valid:
+ * - 0, if buffer length == 8
+ * - less than 8, if buffer length >= 16
+ * - less than 4, if buffer length >= 20
+ */
+ if ((buffer_size == 8 &&
+ options->multi_fn_opts.buffer_padding != 0) ||
+ (buffer_size >= 16 &&
+ options->multi_fn_opts.buffer_padding >= 8) ||
+ (buffer_size >= 20 &&
+ options->multi_fn_opts.buffer_padding >= 4)) {
+
+ RTE_LOG(ERR, USER1, "Padding length not valid "
+ "for some of the buffer sizes for %s "
+ "multi-function operation\n",
+ cperf_multi_fn_ops_strs[
+ options->multi_fn_opts.ops]);
+ return -EINVAL;
+ }
+ }
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size =
+ options->buffer_size_list[buffer_size_idx];
+ }
+ }
+
+ return 0;
+}
+#endif /* MULTI_FN_SUPPORTED */
+
int
cperf_options_check(struct cperf_options *options)
{
@@ -1151,6 +1437,13 @@ cperf_options_check(struct cperf_options *options)
return -EINVAL;
}
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ if (check_multi_fn_options(options) < 0)
+ return -EINVAL;
+ }
+#endif /* MULTI_FN_SUPPORTED */
+
return 0;
}
@@ -1236,4 +1529,37 @@ cperf_options_dump(struct cperf_options *opts)
printf("# aead aad size: %u\n", opts->aead_aad_sz);
printf("#\n");
}
+
+#ifdef MULTI_FN_SUPPORTED
+ if (opts->op_type == CPERF_MULTI_FN) {
+ if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC ||
+ opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ printf("# cipher algorithm: %s\n",
+ rte_crypto_cipher_algorithm_strings[
+ opts->cipher_algo]);
+ printf("# cipher operation: %s\n",
+ rte_crypto_cipher_operation_strings[
+ opts->cipher_op]);
+ printf("# cipher key size: %u\n", opts->cipher_key_sz);
+ printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
+ printf("# multi fn operations: %s\n",
+ cperf_multi_fn_ops_strs[
+ opts->multi_fn_opts.ops]);
+ }
+ if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) {
+ printf("# multi fn cipher offset: %u\n",
+ opts->multi_fn_opts.cipher_offset);
+ printf("# multi fn crc offset: %u\n",
+ opts->multi_fn_opts.crc_offset);
+ }
+ if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP)
+ printf("# multi fn buffer padding: %u\n",
+ opts->multi_fn_opts.buffer_padding);
+ printf("#\n");
+ }
+#endif /* MULTI_FN_SUPPORTED */
}
diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
index 85603eed5..6a92c1ae6 100644
--- a/app/test-crypto-perf/cperf_test_common.c
+++ b/app/test-crypto-perf/cperf_test_common.c
@@ -14,6 +14,8 @@ struct obj_params {
uint16_t headroom_sz;
uint16_t data_len;
uint16_t segments_nb;
+ uint16_t ops_per_obj_nb;
+ uint8_t multi_fn;
};
static void
@@ -92,15 +94,39 @@ mempool_obj_init(struct rte_mempool *mp,
struct rte_crypto_op *op = obj;
struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
params->src_buf_offset);
- /* Set crypto operation */
- op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
- op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
- op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
- op->phys_addr = rte_mem_virt2iova(obj);
- op->mempool = mp;
+
+ if (!params->multi_fn) {
+ /* Set crypto operation */
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = rte_mem_virt2iova(obj);
+ op->mempool = mp;
+ op->sym->m_src = m;
+ op->sym->m_dst = NULL;
+ } else {
+#ifdef MULTI_FN_SUPPORTED
+ /* Set multi-function operation(s) */
+ struct rte_multi_fn_op *mf_op, *next_mf_op;
+ uint16_t remaining_ops, op_sz;
+ remaining_ops = params->ops_per_obj_nb;
+ mf_op = obj;
+ op_sz = params->src_buf_offset / params->ops_per_obj_nb;
+ do {
+ mf_op->mempool = mp;
+ mf_op->m_src = m;
+ mf_op->m_dst = NULL;
+ next_mf_op = (struct rte_multi_fn_op *)
+ ((uint8_t *) mf_op + op_sz);
+ mf_op->next = next_mf_op;
+ mf_op = next_mf_op;
+
+ remaining_ops--;
+ } while (remaining_ops > 0);
+#endif /* MULTI_FN_SUPPORTED */
+ }
/* Set source buffer */
- op->sym->m_src = m;
if (params->segments_nb == 1)
fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
params->segment_sz, params->headroom_sz,
@@ -118,9 +144,15 @@ mempool_obj_init(struct rte_mempool *mp,
fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
params->segment_sz, params->headroom_sz,
params->data_len);
- op->sym->m_dst = m;
- } else
- op->sym->m_dst = NULL;
+ if (!params->multi_fn) {
+ op->sym->m_dst = m;
+ } else {
+#ifdef MULTI_FN_SUPPORTED
+ struct rte_multi_fn_op *mf_op = obj;
+ mf_op->m_dst = m;
+#endif /* MULTI_FN_SUPPORTED */
+ }
+ }
}
int
@@ -134,12 +166,38 @@ cperf_alloc_common_memory(const struct cperf_options *options,
{
const char *mp_ops_name;
char pool_name[32] = "";
+ uint8_t multi_fn = 0;
int ret;
/* Calculate the object size */
- uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op);
+ uint16_t crypto_op_size;
uint16_t crypto_op_private_size;
+ uint16_t ops_per_obj_nb;
+
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ crypto_op_size = sizeof(struct rte_multi_fn_op);
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC) {
+ ops_per_obj_nb = 2;
+ } else if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ ops_per_obj_nb = 3;
+ } else {
+ RTE_LOG(ERR, USER1,
+ "Invalid multi-function operations for pool "
+ "creation\n");
+ return -1;
+ }
+ multi_fn = 1;
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ crypto_op_size = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ ops_per_obj_nb = 1;
+ }
+
/*
* If doing AES-CCM, IV field needs to be 16 bytes long,
* and AAD field needs to be long enough to have 18 bytes,
@@ -162,7 +220,7 @@ cperf_alloc_common_memory(const struct cperf_options *options,
uint16_t crypto_op_total_size = crypto_op_size +
crypto_op_private_size;
- uint16_t crypto_op_total_size_padded =
+ uint16_t crypto_op_total_size_padded = ops_per_obj_nb *
RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
uint32_t max_size = options->max_buffer_size + options->digest_sz;
@@ -186,7 +244,9 @@ cperf_alloc_common_memory(const struct cperf_options *options,
options->tailroom_sz,
.segments_nb = segments_nb,
.src_buf_offset = crypto_op_total_size_padded,
- .dst_buf_offset = 0
+ .dst_buf_offset = 0,
+ .ops_per_obj_nb = ops_per_obj_nb,
+ .multi_fn = multi_fn,
};
if (options->out_of_place) {
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 0e4d0e153..92df4fb74 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -6,6 +6,10 @@
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#include "cperf_test_latency.h"
#include "cperf_ops.h"
@@ -43,18 +47,27 @@ struct priv_op_data {
static void
cperf_latency_test_free(struct cperf_latency_ctx *ctx)
{
- if (ctx) {
- if (ctx->sess) {
+ if (!ctx)
+ return;
+
+ if (ctx->sess) {
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN) {
+ rte_multi_fn_session_destroy(ctx->dev_id,
+ (struct rte_multi_fn_session *)ctx->sess);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
+ }
- if (ctx->pool)
- rte_mempool_free(ctx->pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
- rte_free(ctx->res);
- rte_free(ctx);
- }
+ rte_free(ctx->res);
+ rte_free(ctx);
}
void *
@@ -67,6 +80,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
{
struct cperf_latency_ctx *ctx = NULL;
size_t extra_op_priv_size = sizeof(struct priv_op_data);
+ uint16_t iv_offset;
ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
if (ctx == NULL)
@@ -79,10 +93,19 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the crypto operation */
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op) +
- sizeof(struct cperf_op_result *);
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ /* IV goes at the end of the multi-function operation */
+ iv_offset = sizeof(struct rte_multi_fn_op) +
+ sizeof(struct cperf_op_result *);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* IV goes at the end of the crypto operation */
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+ }
ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
test_vector, iv_offset);
@@ -138,34 +161,65 @@ cperf_latency_test_runner(void *arg)
uint32_t lcore = rte_lcore_id();
+ uint16_t iv_offset;
+
+ int multi_fn = 0;
+
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN)
+ multi_fn = 1;
+#else
+ RTE_SET_USED(multi_fn);
+#endif /* MULTI_FN_SUPPORTED */
+
#ifdef CPERF_LINEARIZATION_ENABLE
- struct rte_cryptodev_info dev_info;
int linearize = 0;
/* Check if source mbufs require coalescing */
if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
- rte_cryptodev_info_get(ctx->dev_id, &dev_info);
- if ((dev_info.feature_flags &
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ if (!multi_fn) {
+ struct rte_cryptodev_info dev_info;
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
+ 0)
+ linearize = 1;
+ } else
linearize = 1;
}
#endif /* CPERF_LINEARIZATION_ENABLE */
ctx->lcore_id = lcore;
- /* Warm up the host CPU before starting the test */
- for (i = 0; i < ctx->options->total_ops; i++)
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
-
/* Get first size from range or list */
if (ctx->options->inc_burst_size != 0)
test_burst_size = ctx->options->min_burst_size;
else
test_burst_size = ctx->options->burst_size_list[0];
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op) +
- sizeof(struct cperf_op_result *);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+
+ iv_offset = sizeof(struct rte_multi_fn_op) +
+ sizeof(struct cperf_op_result *);
+
+ multi_fn = 1;
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ NULL, 0);
+
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op) +
+ sizeof(struct cperf_op_result *);
+ }
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_deqd = 0;
@@ -215,13 +269,40 @@ cperf_latency_test_runner(void *arg)
}
#endif /* CPERF_LINEARIZATION_ENABLE */
- /* Enqueue burst of ops on crypto device */
- ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
- ops, burst_size);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /* Enqueue burst of ops on raw device */
+ ops_enqd = rte_rawdev_enqueue_buffers(
+ ctx->dev_id,
+ (struct rte_rawdev_buf **)ops,
+ burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+
+ /*
+ * Dequeue processed burst of ops from raw
+ * device
+ */
+ ops_deqd = rte_rawdev_dequeue_buffers(
+ ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ test_burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(
+ ctx->dev_id, ctx->qp_id, ops,
+ burst_size);
- /* Dequeue processed burst of ops from crypto device */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, test_burst_size);
+ /*
+ * Dequeue processed burst of ops from crypto
+ * device
+ */
+ ops_deqd = rte_cryptodev_dequeue_burst(
+ ctx->dev_id, ctx->qp_id, ops_processed,
+ test_burst_size);
+ }
tsc_end = rte_rdtsc_precise();
@@ -262,14 +343,41 @@ cperf_latency_test_runner(void *arg)
b_idx++;
}
- /* Dequeue any operations still in the crypto device */
+ /* Dequeue any operations still in the device */
while (deqd_tot < ctx->options->total_ops) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /*
+ * Sending 0 length burst to flush sw raw
+ * device
+ */
+ rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0,
+ (rte_rawdev_obj_t)&ctx->qp_id);
- /* dequeue burst */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, test_burst_size);
+ /*
+ * Dequeue processed burst of ops from raw
+ * device
+ */
+ ops_deqd = rte_rawdev_dequeue_buffers(
+ ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ test_burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /*
+ * Sending 0 length burst to flush sw crypto
+ * device
+ */
+ rte_cryptodev_enqueue_burst(ctx->dev_id,
+ ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(
+ ctx->dev_id, ctx->qp_id, ops_processed,
+ test_burst_size);
+ }
tsc_end = rte_rdtsc_precise();
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 74371faa8..f0a7dbf7c 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -6,6 +6,10 @@
#include <rte_crypto.h>
#include <rte_cryptodev.h>
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#include <rte_cycles.h>
#include <rte_malloc.h>
@@ -44,6 +48,7 @@ struct pmd_cyclecount_state {
uint32_t lcore;
uint64_t delay;
int linearize;
+ int multi_fn;
uint32_t ops_enqd;
uint32_t ops_deqd;
uint32_t ops_enq_retries;
@@ -53,29 +58,37 @@ struct pmd_cyclecount_state {
double cycles_per_deq;
};
-static const uint16_t iv_offset =
- sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op);
+static uint16_t iv_offset;
static void
cperf_pmd_cyclecount_test_free(struct cperf_pmd_cyclecount_ctx *ctx)
{
- if (ctx) {
- if (ctx->sess) {
+ if (!ctx)
+ return;
+
+ if (ctx->sess) {
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN) {
+ rte_multi_fn_session_destroy(ctx->dev_id,
+ (struct rte_multi_fn_session *)ctx->sess);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
+ }
- if (ctx->pool)
- rte_mempool_free(ctx->pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
- if (ctx->ops)
- rte_free(ctx->ops);
+ if (ctx->ops)
+ rte_free(ctx->ops);
- if (ctx->ops_processed)
- rte_free(ctx->ops_processed);
+ if (ctx->ops_processed)
+ rte_free(ctx->ops_processed);
- rte_free(ctx);
- }
+ rte_free(ctx);
}
void *
@@ -103,9 +116,17 @@ cperf_pmd_cyclecount_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the crypto operation */
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op);
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ /* IV goes at the end of the multi-function operation */
+ iv_offset = sizeof(struct rte_multi_fn_op);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* IV goes at the end of the crypto operation */
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ }
ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
test_vector, iv_offset);
@@ -237,8 +258,18 @@ pmd_cyclecount_bench_enq(struct pmd_cyclecount_state *state,
struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
uint32_t burst_enqd;
- burst_enqd = rte_cryptodev_enqueue_burst(state->ctx->dev_id,
- state->ctx->qp_id, ops, burst_size);
+#ifdef MULTI_FN_SUPPORTED
+ if (state->multi_fn)
+ burst_enqd = rte_rawdev_enqueue_buffers(
+ state->ctx->dev_id,
+ (struct rte_rawdev_buf **)ops,
+ burst_size,
+ (rte_rawdev_obj_t)&state->ctx->qp_id);
+ else
+#endif /* MULTI_FN_SUPPORTED */
+ burst_enqd = rte_cryptodev_enqueue_burst(
+ state->ctx->dev_id, state->ctx->qp_id,
+ ops, burst_size);
/* if we couldn't enqueue anything, the queue is full */
if (!burst_enqd) {
@@ -268,8 +299,18 @@ pmd_cyclecount_bench_deq(struct pmd_cyclecount_state *state,
&state->ctx->ops[cur_iter_op];
uint32_t burst_deqd;
- burst_deqd = rte_cryptodev_dequeue_burst(state->ctx->dev_id,
- state->ctx->qp_id, ops_processed, burst_size);
+#ifdef MULTI_FN_SUPPORTED
+ if (state->multi_fn)
+ burst_deqd = rte_rawdev_dequeue_buffers(
+ state->ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ burst_size,
+ (rte_rawdev_obj_t)&state->ctx->qp_id);
+ else
+#endif /* MULTI_FN_SUPPORTED */
+ burst_deqd = rte_cryptodev_dequeue_burst(
+ state->ctx->dev_id, state->ctx->qp_id,
+ ops_processed, burst_size);
if (burst_deqd < burst_size)
state->ops_deq_retries++;
@@ -390,6 +431,12 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
state.opts = opts;
state.lcore = rte_lcore_id();
state.linearize = 0;
+ state.multi_fn = 0;
+
+#ifdef MULTI_FN_SUPPORTED
+ if (opts->op_type == CPERF_MULTI_FN)
+ state.multi_fn = 1;
+#endif /* MULTI_FN_SUPPORTED */
static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
static bool warmup = true;
@@ -406,12 +453,15 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
/* Check if source mbufs require coalescing */
if (opts->segments_sz < ctx->options->max_buffer_size) {
- rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
- if ((dev_info.feature_flags &
+ if (!state.multi_fn) {
+ rte_cryptodev_info_get(state.ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
- 0) {
+ 0) {
+ state.linearize = 1;
+ }
+ } else
state.linearize = 1;
- }
}
#endif /* CPERF_LINEARIZATION_ENABLE */
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 35c51026f..e569d820d 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -6,6 +6,10 @@
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#include "cperf_test_throughput.h"
#include "cperf_ops.h"
@@ -44,11 +48,20 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx)
(struct rte_security_session *)ctx->sess);
} else
#endif
- {
- rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
- rte_cryptodev_sym_session_free(ctx->sess);
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN) {
+ rte_multi_fn_session_destroy(ctx->dev_id,
+ (struct rte_multi_fn_session *)
+ ctx->sess);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ rte_cryptodev_sym_session_clear(ctx->dev_id,
+ ctx->sess);
+ rte_cryptodev_sym_session_free(ctx->sess);
+ }
}
- }
+
if (ctx->pool)
rte_mempool_free(ctx->pool);
@@ -64,6 +77,7 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_throughput_ctx *ctx = NULL;
+ uint16_t iv_offset;
ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
if (ctx == NULL)
@@ -76,9 +90,17 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the crypto operation */
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op);
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ /* IV goes at the end of the multi-function operation */
+ iv_offset = sizeof(struct rte_multi_fn_op);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* IV goes at the end of the crypto operation */
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ }
ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
test_vector, iv_offset);
@@ -113,24 +135,55 @@ cperf_throughput_test_runner(void *test_ctx)
uint32_t lcore = rte_lcore_id();
+ uint16_t iv_offset;
+
+ int multi_fn = 0;
+
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN)
+ multi_fn = 1;
+#else
+ RTE_SET_USED(multi_fn);
+#endif /* MULTI_FN_SUPPORTED */
+
#ifdef CPERF_LINEARIZATION_ENABLE
- struct rte_cryptodev_info dev_info;
int linearize = 0;
/* Check if source mbufs require coalescing */
if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
- rte_cryptodev_info_get(ctx->dev_id, &dev_info);
- if ((dev_info.feature_flags &
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ if (!multi_fn) {
+ struct rte_cryptodev_info dev_info;
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
+ 0)
+ linearize = 1;
+ } else
linearize = 1;
}
#endif /* CPERF_LINEARIZATION_ENABLE */
ctx->lcore_id = lcore;
- /* Warm up the host CPU before starting the test */
- for (i = 0; i < ctx->options->total_ops; i++)
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ iv_offset = sizeof(struct rte_multi_fn_op);
+
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ NULL, 0);
+ }
/* Get first size from range or list */
if (ctx->options->inc_burst_size != 0)
@@ -138,9 +191,6 @@ cperf_throughput_test_runner(void *test_ctx)
else
test_burst_size = ctx->options->burst_size_list[0];
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op);
-
while (test_burst_size <= ctx->options->max_burst_size) {
uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
@@ -203,9 +253,41 @@ cperf_throughput_test_runner(void *test_ctx)
}
#endif /* CPERF_LINEARIZATION_ENABLE */
- /* Enqueue burst of ops on crypto device */
- ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
- ops, burst_size);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /* Enqueue burst of ops on raw device */
+ ops_enqd = rte_rawdev_enqueue_buffers(
+ ctx->dev_id,
+ (struct rte_rawdev_buf **)ops,
+ burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+
+ /*
+ * Dequeue processed burst of ops from raw
+ * device
+ */
+ ops_deqd = rte_rawdev_dequeue_buffers(
+ ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ test_burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(
+ ctx->dev_id, ctx->qp_id, ops,
+ burst_size);
+
+ /*
+ * Dequeue processed burst of ops from crypto
+ * device
+ */
+ ops_deqd = rte_cryptodev_dequeue_burst(
+ ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+ }
+
if (ops_enqd < burst_size)
ops_enqd_failed++;
@@ -216,11 +298,6 @@ cperf_throughput_test_runner(void *test_ctx)
ops_unused = burst_size - ops_enqd;
ops_enqd_total += ops_enqd;
-
- /* Dequeue processed burst of ops from crypto device */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, test_burst_size);
-
if (likely(ops_deqd)) {
/* Free crypto ops so they can be reused. */
rte_mempool_put_bulk(ctx->pool,
@@ -238,15 +315,38 @@ cperf_throughput_test_runner(void *test_ctx)
}
- /* Dequeue any operations still in the crypto device */
-
+ /* Dequeue any operations still in the device */
while (ops_deqd_total < ctx->options->total_ops) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /*
+ * Sending 0 length burst to flush sw raw
+ * device
+ */
+ rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0,
+ (rte_rawdev_obj_t)&ctx->qp_id);
- /* dequeue burst */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, test_burst_size);
+ /* dequeue burst */
+ ops_deqd = rte_rawdev_dequeue_buffers(
+ ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ test_burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /*
+ * Sending 0 length burst to flush sw crypto
+ * device
+ */
+ rte_cryptodev_enqueue_burst(
+ ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(
+ ctx->dev_id, ctx->qp_id, ops_processed,
+ test_burst_size);
+ }
if (ops_deqd == 0)
ops_deqd_failed++;
else {
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index 1e9dfcfff..1395d86f2 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -581,11 +581,38 @@ cperf_test_vector_get_from_file(struct cperf_options *opts)
}
/* other values not included in the file */
- test_vector->data.cipher_offset = 0;
- test_vector->data.cipher_length = opts->max_buffer_size;
+#ifdef MULTI_FN_SUPPORTED
+ if (opts->op_type == CPERF_MULTI_FN) {
+ if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC ||
+ opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ test_vector->data.cipher_offset =
+ opts->multi_fn_opts.cipher_offset;
+ test_vector->data.cipher_length =
+ opts->max_buffer_size;
+
+ test_vector->multi_fn_data.crc_offset =
+ opts->multi_fn_opts.crc_offset;
+ test_vector->multi_fn_data.crc_length =
+ opts->max_buffer_size;
+ }
- test_vector->data.auth_offset = 0;
- test_vector->data.auth_length = opts->max_buffer_size;
+ if (opts->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ test_vector->multi_fn_data.bip_offset = 0;
+ test_vector->multi_fn_data.bip_length =
+ opts->max_buffer_size;
+ }
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ test_vector->data.cipher_offset = 0;
+ test_vector->data.cipher_length = opts->max_buffer_size;
+
+ test_vector->data.auth_offset = 0;
+ test_vector->data.auth_length = opts->max_buffer_size;
+ }
return test_vector;
}
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index 41641650c..5b6a048b1 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -587,5 +587,58 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
memcpy(t_vec->aead_iv.data, iv, options->aead_iv_sz);
t_vec->aead_iv.length = options->aead_iv_sz;
}
+
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC ||
+ options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ t_vec->cipher_key.length = 0;
+ t_vec->ciphertext.data = plaintext;
+ t_vec->cipher_key.data = NULL;
+ } else {
+ t_vec->cipher_key.length =
+ options->cipher_key_sz;
+ t_vec->ciphertext.data = ciphertext;
+ t_vec->cipher_key.data = cipher_key;
+ }
+
+ /* Init IV data ptr */
+ t_vec->cipher_iv.data = NULL;
+
+ if (options->cipher_iv_sz != 0) {
+ /* Set IV parameters */
+ t_vec->cipher_iv.data = rte_malloc(NULL,
+ options->cipher_iv_sz, 16);
+ if (t_vec->cipher_iv.data == NULL) {
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->cipher_iv.data, iv,
+ options->cipher_iv_sz);
+ }
+ t_vec->ciphertext.length = options->max_buffer_size;
+ t_vec->cipher_iv.length = options->cipher_iv_sz;
+ t_vec->data.cipher_offset =
+ options->multi_fn_opts.cipher_offset;
+ t_vec->data.cipher_length = options->max_buffer_size;
+
+ t_vec->multi_fn_data.crc_offset =
+ options->multi_fn_opts.crc_offset;
+ t_vec->multi_fn_data.crc_length =
+ options->max_buffer_size;
+ }
+
+ if (options->multi_fn_opts.ops ==
+ CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP) {
+ t_vec->multi_fn_data.bip_offset = 0;
+ t_vec->multi_fn_data.bip_length =
+ options->max_buffer_size;
+ }
+ }
+#endif /* MULTI_FN_SUPPORTED */
+
return t_vec;
}
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
index 6f10823ef..ad9ea89e6 100644
--- a/app/test-crypto-perf/cperf_test_vectors.h
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -68,6 +68,15 @@ struct cperf_test_vector {
uint32_t aead_offset;
uint32_t aead_length;
} data;
+
+#ifdef MULTI_FN_SUPPORTED
+ struct {
+ uint32_t crc_offset;
+ uint32_t crc_length;
+ uint32_t bip_offset;
+ uint32_t bip_length;
+ } multi_fn_data;
+#endif /* MULTI_FN_SUPPORTED */
};
struct cperf_test_vector*
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index 833bc9a55..30ef2f568 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -6,6 +6,10 @@
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#include "cperf_test_verify.h"
#include "cperf_ops.h"
@@ -36,17 +40,26 @@ struct cperf_op_result {
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
- if (ctx) {
- if (ctx->sess) {
+ if (ctx == NULL)
+ return;
+
+ if (ctx->sess) {
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN) {
+ rte_multi_fn_session_destroy(ctx->dev_id,
+ (struct rte_multi_fn_session *)ctx->sess);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
rte_cryptodev_sym_session_free(ctx->sess);
}
+ }
- if (ctx->pool)
- rte_mempool_free(ctx->pool);
+ if (ctx->pool)
+ rte_mempool_free(ctx->pool);
- rte_free(ctx);
- }
+ rte_free(ctx);
}
void *
@@ -58,6 +71,7 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
const struct cperf_op_fns *op_fns)
{
struct cperf_verify_ctx *ctx = NULL;
+ uint16_t iv_offset;
ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
if (ctx == NULL)
@@ -70,9 +84,17 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
ctx->options = options;
ctx->test_vector = test_vector;
- /* IV goes at the end of the crypto operation */
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op);
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN) {
+ /* IV goes at the end of the multi-function operation */
+ iv_offset = sizeof(struct rte_multi_fn_op);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* IV goes at the end of the crypto operation */
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
+ }
ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
test_vector, iv_offset);
@@ -91,6 +113,65 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
return NULL;
}
+#ifdef MULTI_FN_SUPPORTED
+static int
+cperf_verify_mf_op(struct rte_multi_fn_op *op,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *vector)
+{
+ const struct rte_mbuf *m;
+ uint32_t len;
+ uint16_t nb_segs;
+ uint8_t *data;
+ int res = 0;
+
+ if (op->overall_status != RTE_MULTI_FN_OP_STATUS_SUCCESS)
+ return 1;
+
+ if (op->m_dst)
+ m = op->m_dst;
+ else
+ m = op->m_src;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ data = rte_malloc(NULL, len, 0);
+ if (data == NULL)
+ return 1;
+
+ if (op->m_dst)
+ m = op->m_dst;
+ else
+ m = op->m_src;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
+ m->data_len);
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ res += memcmp(data,
+ vector->ciphertext.data,
+ options->test_buffer_size);
+ else
+ res += memcmp(data,
+ vector->plaintext.data,
+ options->test_buffer_size);
+
+ rte_free(data);
+ return !!res;
+}
+#endif /* MULTI_FN_SUPPORTED */
+
static int
cperf_verify_op(struct rte_crypto_op *op,
const struct cperf_options *options,
@@ -104,6 +185,12 @@ cperf_verify_op(struct rte_crypto_op *op,
uint8_t cipher, auth;
int res = 0;
+#ifdef MULTI_FN_SUPPORTED
+ if (options->op_type == CPERF_MULTI_FN)
+ return cperf_verify_mf_op((struct rte_multi_fn_op *)op, options,
+ vector);
+#endif /* MULTI_FN_SUPPORTED */
+
if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
return 1;
@@ -252,15 +339,30 @@ cperf_verify_test_runner(void *test_ctx)
uint32_t lcore = rte_lcore_id();
+ uint16_t iv_offset;
+
+ int multi_fn = 0;
+
+#ifdef MULTI_FN_SUPPORTED
+ if (ctx->options->op_type == CPERF_MULTI_FN)
+ multi_fn = 1;
+#else
+ RTE_SET_USED(multi_fn);
+#endif /* MULTI_FN_SUPPORTED */
+
#ifdef CPERF_LINEARIZATION_ENABLE
- struct rte_cryptodev_info dev_info;
int linearize = 0;
/* Check if source mbufs require coalescing */
if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
- rte_cryptodev_info_get(ctx->dev_id, &dev_info);
- if ((dev_info.feature_flags &
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ if (!multi_fn) {
+ struct rte_cryptodev_info dev_info;
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) ==
+ 0)
+ linearize = 1;
+ } else
linearize = 1;
}
#endif /* CPERF_LINEARIZATION_ENABLE */
@@ -271,12 +373,18 @@ cperf_verify_test_runner(void *test_ctx)
printf("\n# Running verify test on device: %u, lcore: %u\n",
ctx->dev_id, lcore);
- uint16_t iv_offset = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn)
+ iv_offset = sizeof(struct rte_multi_fn_op);
+ else
+#endif /* MULTI_FN_SUPPORTED */
+ iv_offset = sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op);
while (ops_enqd_total < ctx->options->total_ops) {
- uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
+ uint16_t burst_size = ((ops_enqd_total +
+ ctx->options->max_burst_size)
<= ctx->options->total_ops) ?
ctx->options->max_burst_size :
ctx->options->total_ops -
@@ -319,9 +427,32 @@ cperf_verify_test_runner(void *test_ctx)
}
#endif /* CPERF_LINEARIZATION_ENABLE */
- /* Enqueue burst of ops on crypto device */
- ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
- ops, burst_size);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /* Enqueue burst of op on raw device */
+ ops_enqd = rte_rawdev_enqueue_buffers(ctx->dev_id,
+ (struct rte_rawdev_buf **)ops,
+ burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+
+ /* Dequeue processed burst of ops from raw device */
+ ops_deqd = rte_rawdev_dequeue_buffers(ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ ctx->options->max_burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id,
+ ctx->qp_id, ops, burst_size);
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id,
+ ctx->qp_id, ops_processed,
+ ctx->options->max_burst_size);
+ }
+
if (ops_enqd < burst_size)
ops_enqd_failed++;
@@ -332,11 +463,6 @@ cperf_verify_test_runner(void *test_ctx)
ops_unused = burst_size - ops_enqd;
ops_enqd_total += ops_enqd;
-
- /* Dequeue processed burst of ops from crypto device */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->max_burst_size);
-
if (ops_deqd == 0) {
/**
* Count dequeue polls which didn't return any
@@ -358,15 +484,32 @@ cperf_verify_test_runner(void *test_ctx)
ops_deqd_total += ops_deqd;
}
- /* Dequeue any operations still in the crypto device */
-
+ /* Dequeue any operations still in the device */
while (ops_deqd_total < ctx->options->total_ops) {
- /* Sending 0 length burst to flush sw crypto device */
- rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+#ifdef MULTI_FN_SUPPORTED
+ if (multi_fn) {
+ /* Sending 0 length burst to flush sw raw device */
+ rte_rawdev_enqueue_buffers(ctx->dev_id, NULL, 0,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+
+ /* dequeue burst */
+ ops_deqd = rte_rawdev_dequeue_buffers(ctx->dev_id,
+ (struct rte_rawdev_buf **)ops_processed,
+ ctx->options->max_burst_size,
+ (rte_rawdev_obj_t)&ctx->qp_id);
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id,
+ ctx->qp_id, ops_processed,
+ ctx->options->max_burst_size);
+ }
- /* dequeue burst */
- ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
- ops_processed, ctx->options->max_burst_size);
if (ops_deqd == 0) {
ops_deqd_failed++;
continue;
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 52a1860fb..c8334160e 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -12,6 +12,10 @@
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif
+#ifdef MULTI_FN_SUPPORTED
+#include <rte_rawdev.h>
+#include <rte_multi_fn.h>
+#endif /* MULTI_FN_SUPPORTED */
#include "cperf.h"
#include "cperf_options.h"
@@ -39,9 +43,19 @@ const char *cperf_op_type_strs[] = {
[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
[CPERF_AEAD] = "aead",
- [CPERF_PDCP] = "pdcp"
+ [CPERF_PDCP] = "pdcp",
+#ifdef MULTI_FN_SUPPORTED
+ [CPERF_MULTI_FN] = "multi-fn"
+#endif /* MULTI_FN_SUPPORTED */
};
+#ifdef MULTI_FN_SUPPORTED
+const char *cperf_multi_fn_ops_strs[] = {
+ [CPERF_MULTI_FN_OPS_DOCSIS_CIPHER_CRC] = "docsis-cipher-crc",
+ [CPERF_MULTI_FN_OPS_PON_CIPHER_CRC_BIP] = "pon-cipher-crc-bip"
+};
+#endif /* MULTI_FN_SUPPORTED */
+
const struct cperf_test cperf_testmap[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = {
cperf_throughput_test_constructor,
@@ -294,7 +308,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
}
static int
-cperf_verify_devices_capabilities(struct cperf_options *opts,
+cperf_verify_crypto_devices_capabilities(struct cperf_options *opts,
uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
struct rte_cryptodev_sym_capability_idx cap_idx;
@@ -369,8 +383,136 @@ cperf_verify_devices_capabilities(struct cperf_options *opts,
}
}
+#ifdef MULTI_FN_SUPPORTED
+ if (opts->op_type == CPERF_MULTI_FN)
+ return -1;
+#endif /* MULTI_FN_SUPPORTED */
+
+ return 0;
+}
+
+#ifdef MULTI_FN_SUPPORTED
+static uint8_t
+cperf_get_rawdevs(const char *driver_name, uint8_t *devices,
+ uint8_t nb_devices)
+{
+ struct rte_rawdev_info rdev_info;
+ uint8_t i, count = 0;
+
+ for (i = 0; i < RTE_RAWDEV_MAX_DEVS && count < nb_devices; i++) {
+ memset(&rdev_info, 0, sizeof(struct rte_rawdev_info));
+ if (!rte_rawdev_info_get(i, &rdev_info) &&
+ !strncmp(rdev_info.driver_name,
+ driver_name,
+ strlen(driver_name) + 1))
+ devices[count++] = i;
+ }
+
+ return count;
+}
+
+static int
+cperf_initialize_rawdev(struct cperf_options *opts, uint8_t *enabled_rdevs)
+{
+ uint8_t enabled_rdev_count = 0, nb_lcores, rdev_id;
+ unsigned int i, j;
+ int ret;
+
+ enabled_rdev_count = cperf_get_rawdevs(opts->device_type,
+ enabled_rdevs, RTE_RAWDEV_MAX_DEVS);
+ if (enabled_rdev_count == 0) {
+ printf("No raw devices type %s available\n",
+ opts->device_type);
+ return -EINVAL;
+ }
+
+ nb_lcores = rte_lcore_count() - 1;
+
+ if (nb_lcores < 1) {
+ RTE_LOG(ERR, USER1,
+ "Number of enabled cores need to be higher than 1\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate number of needed queue pairs, based on the amount
+ * of available number of logical cores and raw devices.
+ * For instance, if there are 4 cores and 2 raw devices,
+ * 2 queue pairs will be set up per device.
+ */
+ opts->nb_qps = (nb_lcores % enabled_rdev_count) ?
+ (nb_lcores / enabled_rdev_count) + 1 :
+ nb_lcores / enabled_rdev_count;
+
+ for (i = 0; i < enabled_rdev_count &&
+ i < RTE_RAWDEV_MAX_DEVS; i++) {
+ rdev_id = enabled_rdevs[i];
+
+ struct rte_rawdev_info rdev_info = {0};
+ struct rte_multi_fn_dev_info mf_info = {0};
+ struct rte_multi_fn_dev_config mf_dev_conf = {0};
+ struct rte_multi_fn_qp_config qp_conf = {0};
+ uint8_t socket_id = rte_rawdev_socket_id(rdev_id);
+
+ /*
+ * Range check the socket_id - negative values become big
+ * positive ones due to use of unsigned value
+ */
+ if (socket_id >= RTE_MAX_NUMA_NODES)
+ socket_id = 0;
+
+ rdev_info.dev_private = &mf_info;
+ rte_rawdev_info_get(rdev_id, &rdev_info);
+ if (opts->nb_qps > mf_info.max_nb_queues) {
+ printf("Number of needed queue pairs is higher "
+ "than the maximum number of queue pairs "
+ "per device.\n");
+ printf("Lower the number of cores or increase "
+ "the number of raw devices\n");
+ return -EINVAL;
+ }
+
+ mf_dev_conf.nb_queues = opts->nb_qps;
+ rdev_info.dev_private = &mf_dev_conf;
+ qp_conf.nb_descriptors = opts->nb_descriptors;
+
+ ret = rte_rawdev_configure(rdev_id, &rdev_info);
+ if (ret < 0) {
+ printf("Failed to configure rawdev %u", rdev_id);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < opts->nb_qps; j++) {
+ ret = rte_rawdev_queue_setup(rdev_id, j, &qp_conf);
+ if (ret < 0) {
+ printf("Failed to setup queue pair %u on "
+ "rawdev %u", j, rdev_id);
+ return -EINVAL;
+ }
+ }
+
+ ret = rte_rawdev_start(rdev_id);
+ if (ret < 0) {
+ printf("Failed to start raw device %u: error %d\n",
+ rdev_id, ret);
+ return -EPERM;
+ }
+ }
+
+ return enabled_rdev_count;
+}
+
+static int
+cperf_verify_raw_devices_capabilities(struct cperf_options *opts,
+ __rte_unused uint8_t *enabled_rdevs,
+ __rte_unused uint8_t nb_rawdevs)
+{
+ if (opts->op_type != CPERF_MULTI_FN)
+ return -1;
+
return 0;
}
+#endif /* MULTI_FN_SUPPORTED */
static int
cperf_check_test_vector(struct cperf_options *opts,
@@ -499,10 +641,16 @@ main(int argc, char **argv)
struct cperf_test_vector *t_vec = NULL;
struct cperf_op_fns op_fns;
void *ctx[RTE_MAX_LCORE] = { };
- int nb_cryptodevs = 0;
+ int nb_devs = 0;
uint16_t total_nb_qps = 0;
- uint8_t cdev_id, i;
- uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+ uint8_t dev_id, i;
+#ifndef MULTI_FN_SUPPORTED
+ uint8_t enabled_devs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+#else
+ uint8_t max_devs = RTE_MAX(RTE_CRYPTO_MAX_DEVS, RTE_RAWDEV_MAX_DEVS);
+ uint8_t enabled_devs[max_devs];
+ memset(enabled_devs, 0x0, max_devs);
+#endif /* MULTI_FN_SUPPORTED */
uint8_t buffer_size_idx = 0;
@@ -531,24 +679,49 @@ main(int argc, char **argv)
goto err;
}
- nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+#ifdef MULTI_FN_SUPPORTED
+ if (opts.op_type == CPERF_MULTI_FN) {
+ nb_devs = cperf_initialize_rawdev(&opts, enabled_devs);
- if (!opts.silent)
- cperf_options_dump(&opts);
+ if (!opts.silent)
+ cperf_options_dump(&opts);
- if (nb_cryptodevs < 1) {
- RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
- "device type\n");
- nb_cryptodevs = 0;
- goto err;
- }
+ if (nb_devs < 1) {
+ RTE_LOG(ERR, USER1, "Failed to initialise requested "
+ "raw device type\n");
+ nb_devs = 0;
+ goto err;
+ }
- ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
- nb_cryptodevs);
- if (ret) {
- RTE_LOG(ERR, USER1, "Crypto device type does not support "
- "capabilities requested\n");
- goto err;
+ ret = cperf_verify_raw_devices_capabilities(&opts,
+ enabled_devs, nb_devs);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Raw device type does not "
+ "support capabilities requested\n");
+ goto err;
+ }
+ } else
+#endif /* MULTI_FN_SUPPORTED */
+ {
+ nb_devs = cperf_initialize_cryptodev(&opts, enabled_devs);
+
+ if (!opts.silent)
+ cperf_options_dump(&opts);
+
+ if (nb_devs < 1) {
+ RTE_LOG(ERR, USER1, "Failed to initialise requested "
+ "crypto device type\n");
+ nb_devs = 0;
+ goto err;
+ }
+
+ ret = cperf_verify_crypto_devices_capabilities(&opts,
+ enabled_devs, nb_devs);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Crypto device type does not "
+ "support capabilities requested\n");
+ goto err;
+ }
}
if (opts.test_file != NULL) {
@@ -585,23 +758,29 @@ main(int argc, char **argv)
if (!opts.silent)
show_test_vector(t_vec);
- total_nb_qps = nb_cryptodevs * opts.nb_qps;
+ total_nb_qps = nb_devs * opts.nb_qps;
i = 0;
- uint8_t qp_id = 0, cdev_index = 0;
+ uint8_t qp_id = 0, dev_index = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (i == total_nb_qps)
break;
- cdev_id = enabled_cdevs[cdev_index];
+ dev_id = enabled_devs[dev_index];
- uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+ uint8_t socket_id;
+#ifdef MULTI_FN_SUPPORTED
+ if (opts.op_type == CPERF_MULTI_FN)
+ socket_id = rte_rawdev_socket_id(dev_id);
+ else
+#endif /* MULTI_FN_SUPPORTED */
+ socket_id = rte_cryptodev_socket_id(dev_id);
ctx[i] = cperf_testmap[opts.test].constructor(
session_pool_socket[socket_id].sess_mp,
session_pool_socket[socket_id].priv_mp,
- cdev_id, qp_id,
+ dev_id, qp_id,
&opts, t_vec, &op_fns);
if (ctx[i] == NULL) {
RTE_LOG(ERR, USER1, "Test run constructor failed\n");
@@ -609,7 +788,7 @@ main(int argc, char **argv)
}
qp_id = (qp_id + 1) % opts.nb_qps;
if (qp_id == 0)
- cdev_index++;
+ dev_index++;
i++;
}
@@ -726,9 +905,15 @@ main(int argc, char **argv)
i++;
}
- for (i = 0; i < nb_cryptodevs &&
- i < RTE_CRYPTO_MAX_DEVS; i++)
- rte_cryptodev_stop(enabled_cdevs[i]);
+ for (i = 0; i < nb_devs &&
+ i < RTE_DIM(enabled_devs); i++) {
+#ifdef MULTI_FN_SUPPORTED
+ if (opts.op_type == CPERF_MULTI_FN)
+ rte_rawdev_stop(enabled_devs[i]);
+ else
+#endif /* MULTI_FN_SUPPORTED */
+ rte_cryptodev_stop(enabled_devs[i]);
+ }
free_test_vector(t_vec, &opts);
@@ -746,9 +931,15 @@ main(int argc, char **argv)
i++;
}
- for (i = 0; i < nb_cryptodevs &&
- i < RTE_CRYPTO_MAX_DEVS; i++)
- rte_cryptodev_stop(enabled_cdevs[i]);
+ for (i = 0; i < nb_devs &&
+ i < RTE_DIM(enabled_devs); i++) {
+#ifdef MULTI_FN_SUPPORTED
+ if (opts.op_type == CPERF_MULTI_FN)
+ rte_rawdev_stop(enabled_devs[i]);
+ else
+#endif /* MULTI_FN_SUPPORTED */
+ rte_cryptodev_stop(enabled_devs[i]);
+ }
rte_free(opts.imix_buffer_sizes);
free_test_vector(t_vec, &opts);
diff --git a/app/test-crypto-perf/meson.build b/app/test-crypto-perf/meson.build
index 0674396da..28b54611f 100644
--- a/app/test-crypto-perf/meson.build
+++ b/app/test-crypto-perf/meson.build
@@ -13,3 +13,8 @@ sources = files('cperf_ops.c',
'cperf_test_verify.c',
'main.c')
deps += ['cryptodev', 'security']
+
+if dpdk_conf.has('RTE_LIBRTE_MULTI_FN_COMMON') and dpdk_conf.has('RTE_LIBRTE_PMD_AESNI_MB_RAWDEV')
+ deps += ['rawdev', 'common_multi_fn']
+ cflags += ['-DMULTI_FN_SUPPORTED']
+endif
--
2.17.1
^ permalink raw reply [flat|nested] 22+ messages in thread