DPDK patches and discussions
From: Ciara Power <ciara.power@intel.com>
To: dev@dpdk.org
Cc: roy.fan.zhang@intel.com, piotrx.bronowski@intel.com,
	Ray Kinsella <mdr@ashroe.eu>
Subject: [dpdk-dev] [PATCH v1 1/8] drivers/crypto: introduce IPsec-mb framework
Date: Thu, 26 Aug 2021 15:16:12 +0000	[thread overview]
Message-ID: <20210826151619.577237-2-ciara.power@intel.com> (raw)
In-Reply-To: <20210826151619.577237-1-ciara.power@intel.com>

From: Fan Zhang <roy.fan.zhang@intel.com>

This patch introduces a new framework that allows the common code of
the SW crypto PMDs built on top of the intel-ipsec-mb library to be
shared. It also helps to reduce future effort on code maintenance and
feature updates.
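
As a rough sketch only (not part of this patch), a PMD built on the
framework is expected to add its own value to enum ipsec_mb_pmd_types,
fill its slot in the ipsec_mb_pmds[] table, and register a vdev driver
whose probe/remove callbacks call cryptodev_ipsec_mb_create() and
cryptodev_ipsec_mb_remove(). Every name prefixed "foo" below is a
hypothetical placeholder; later patches in this series add the real
entries for the existing SW PMDs (aesni_mb, aesni_gcm, kasumi, snow3g,
zuc):

  /* Hypothetical glue code: assumes IPSEC_MB_PMD_TYPE_FOO has been
   * added to enum ipsec_mb_pmd_types and that foo_caps, foo_pmd_ops,
   * foo_dequeue_burst, foo_session_configure and struct foo_session
   * are provided by the PMD itself.
   */
  static int
  cryptodev_foo_probe(struct rte_vdev_device *vdev)
  {
          return cryptodev_ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_FOO);
  }

  static struct rte_vdev_driver cryptodev_foo_pmd_drv = {
          .probe = cryptodev_foo_probe,
          .remove = cryptodev_ipsec_mb_remove,
  };

  RTE_PMD_REGISTER_VDEV(crypto_foo, cryptodev_foo_pmd_drv);

  /* Fill the per-PMD-type entry once, at constructor time. */
  RTE_INIT(cryptodev_foo_init)
  {
          struct ipsec_mb_pmd_data *d =
                  &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_FOO];

          d->caps = foo_caps;
          d->ops = &foo_pmd_ops;
          d->dequeue_burst = foo_dequeue_burst;
          d->session_configure = foo_session_configure;
          d->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                  RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
          d->session_priv_size = sizeof(struct foo_session);
  }

The common probe path then picks the vector mode, creates the
cryptodev and hooks up the enqueue/dequeue burst functions, so the
per-PMD code only has to supply the table entry above.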

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/ipsec_mb/meson.build           |  27 ++
 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c    | 170 ++++++++++
 .../crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c    | 289 +++++++++++++++++
 .../ipsec_mb/rte_ipsec_mb_pmd_private.h       | 292 ++++++++++++++++++
 drivers/crypto/ipsec_mb/version.map           |   3 +
 drivers/crypto/meson.build                    |   1 +
 6 files changed, 782 insertions(+)
 create mode 100644 drivers/crypto/ipsec_mb/meson.build
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
 create mode 100644 drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
 create mode 100644 drivers/crypto/ipsec_mb/version.map

diff --git a/drivers/crypto/ipsec_mb/meson.build b/drivers/crypto/ipsec_mb/meson.build
new file mode 100644
index 0000000000..3d48da60ed
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 - 2021 Intel Corporation
+
+IMB_required_ver = '1.0.0'
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+	build = false
+	reason = 'missing dependency, "libIPSec_MB"'
+else
+	ext_deps += lib
+
+	# version comes with quotes, so we split based on " and take the middle
+	imb_ver = cc.get_define('IMB_VERSION_STR',
+		prefix : '#include<intel-ipsec-mb.h>').split('"')[1]
+
+	if (imb_ver == '') or (imb_ver.version_compare('<' + IMB_required_ver))
+		reason = 'IPSec_MB version >= @0@ is required, found version @1@'.format(
+				IMB_required_ver, imb_ver)
+		build = false
+	endif
+
+endif
+
+sources = files('rte_ipsec_mb_pmd.c',
+		'rte_ipsec_mb_pmd_ops.c',
+		)
+deps += ['bus_vdev', 'net', 'security']
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
new file mode 100644
index 0000000000..1e9c1dff55
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd.c
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+RTE_DEFINE_PER_LCORE(MB_MGR *, mb_mgr);
+
+struct ipsec_mb_pmd_data ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
+int ipsec_mb_logtype_driver;
+enum ipsec_mb_vector_mode vector_mode;
+
+/**
+ * Generic burst enqueue, place crypto operations on ingress queue for
+ * processing.
+ *
+ * @param __qp         Queue Pair to process
+ * @param ops          Crypto operations for processing
+ * @param nb_ops       Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+ipsec_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct ipsec_mb_qp *qp = __qp;
+
+	unsigned int nb_enqueued;
+
+	nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+			(void **)ops, nb_ops, NULL);
+
+	qp->stats.enqueued_count += nb_enqueued;
+	qp->stats.enqueue_err_count += nb_ops - nb_enqueued;
+
+	return nb_enqueued;
+}
+
+int
+cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
+	enum ipsec_mb_pmd_types pmd_type)
+{
+	struct rte_cryptodev *dev;
+	struct ipsec_mb_private *internals;
+	struct ipsec_mb_pmd_data *pmd_data = &ipsec_mb_pmds[pmd_type];
+	struct rte_cryptodev_pmd_init_params init_params = {};
+	const char *name, *args;
+	int retval;
+
+	if (vector_mode == IPSEC_MB_NOT_SUPPORTED) {
+		/* Check CPU for supported vector instruction set */
+		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+			vector_mode = IPSEC_MB_AVX512;
+		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+			vector_mode = IPSEC_MB_AVX2;
+		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+			vector_mode = IPSEC_MB_AVX;
+		else
+			vector_mode = IPSEC_MB_SSE;
+	}
+
+	init_params.private_data_size = sizeof(struct ipsec_mb_private) +
+		pmd_data->internals_priv_size;
+	init_params.max_nb_queue_pairs =
+		RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS;
+	init_params.socket_id = rte_socket_id();
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	args = rte_vdev_device_args(vdev);
+
+	retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+	if (retval) {
+		IPSEC_MB_LOG(
+		    ERR, "Failed to parse initialisation arguments[%s]", args);
+		return -EINVAL;
+	}
+
+	dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params);
+	if (dev == NULL) {
+		IPSEC_MB_LOG(ERR, "driver %s: create failed",
+			     init_params.name);
+		return -ENODEV;
+	}
+
+	/* Set vector instructions mode supported */
+	internals = dev->data->dev_private;
+	internals->pmd_type = pmd_type;
+	internals->max_nb_queue_pairs = init_params.max_nb_queue_pairs;
+
+	dev->driver_id = ipsec_mb_get_driver_id(pmd_type);
+	if (dev->driver_id == UINT8_MAX) {
+		IPSEC_MB_LOG(ERR, "driver %s: create failed",
+			     init_params.name);
+		return -ENODEV;
+	}
+	dev->dev_ops = ipsec_mb_pmds[pmd_type].ops;
+	dev->enqueue_burst = ipsec_mb_pmd_enqueue_burst;
+	dev->dequeue_burst = ipsec_mb_pmds[pmd_type].dequeue_burst;
+
+	if (pmd_data->dev_config) {
+		retval = (*pmd_data->dev_config)(dev);
+		if (retval < 0) {
+			IPSEC_MB_LOG(ERR,
+				"Failed to configure device %s", name);
+			rte_cryptodev_pmd_destroy(dev);
+			return retval;
+		}
+	}
+
+	dev->feature_flags = pmd_data->feature_flags;
+
+	switch (vector_mode) {
+	case IPSEC_MB_AVX512:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+		break;
+	case IPSEC_MB_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		break;
+	case IPSEC_MB_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		break;
+	case IPSEC_MB_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		break;
+	default:
+		break;
+	}
+
+	IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s",
+		     imb_get_version_str());
+
+	return 0;
+}
+
+int
+cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	if (RTE_PER_LCORE(mb_mgr)) {
+		free_mb_mgr(RTE_PER_LCORE(mb_mgr));
+		RTE_PER_LCORE(mb_mgr) = NULL;
+	}
+
+	if (cryptodev->security_ctx) {
+		rte_free(cryptodev->security_ctx);
+		cryptodev->security_ctx = NULL;
+	}
+
+	return rte_cryptodev_pmd_destroy(cryptodev);
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
new file mode 100644
index 0000000000..f184c70ca6
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_ops.c
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "rte_ipsec_mb_pmd_private.h"
+
+/** Configure device */
+int
+ipsec_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		    __rte_unused struct rte_cryptodev_config *config)
+{
+	return 0;
+}
+
+/** Start device */
+int
+ipsec_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+void
+ipsec_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+int
+ipsec_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Get device statistics */
+void
+ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+void
+ipsec_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
+/** Get device info */
+void
+ipsec_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_info =
+		&ipsec_mb_pmds[internals->pmd_type];
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = pmd_info->caps;
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		/* No limit on the number of sessions */
+		dev_info->sym.max_nb_sessions = 0;
+	}
+}
+
+/** Release queue pair */
+int
+ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct ipsec_mb_qp *qp = dev->data->queue_pairs[qp_id];
+	struct rte_ring *r = NULL;
+
+	if (qp != NULL) {
+		r = rte_ring_lookup(qp->name);
+		if (r)
+			rte_ring_free(r);
+		rte_free(qp);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair */
+int
+ipsec_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+					   struct ipsec_mb_qp *qp)
+{
+	uint32_t n =
+	    snprintf(qp->name, sizeof(qp->name), "ipsec_mb_pmd_%u_qp_%u",
+		     dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+ipsec_mb_pmd_qp_create_processed_ops_ring(
+	struct ipsec_mb_qp *qp, unsigned int ring_size, int socket_id)
+{
+	struct rte_ring *r;
+	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));
+
+	if (n >= sizeof(ring_name))
+		return NULL;
+
+	r = rte_ring_lookup(ring_name);
+	if (r) {
+		if (rte_ring_get_size(r) >= ring_size) {
+			IPSEC_MB_LOG(
+			    INFO, "Reusing existing ring %s for processed ops",
+			    ring_name);
+			return r;
+		}
+		IPSEC_MB_LOG(
+		    ERR, "Unable to reuse existing ring %s for processed ops",
+		    ring_name);
+		return NULL;
+	}
+
+	return rte_ring_create(ring_name, ring_size, socket_id,
+			       RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+int
+ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+				const struct rte_cryptodev_qp_conf *qp_conf,
+				int socket_id)
+{
+	struct ipsec_mb_qp *qp = NULL;
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_data =
+		&ipsec_mb_pmds[internals->pmd_type];
+	uint32_t qp_size;
+	int ret = -1;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		ipsec_mb_pmd_qp_release(dev, qp_id);
+
+	qp_size = sizeof(*qp) + pmd_data->qp_priv_size;
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("IPSEC PMD Queue Pair", qp_size,
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+	if (ipsec_mb_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+
+	qp->pmd_type = internals->pmd_type;
+	qp->sess_mp = qp_conf->mp_session;
+	qp->sess_mp_priv = qp_conf->mp_session_private;
+
+	qp->ingress_queue = ipsec_mb_pmd_qp_create_processed_ops_ring(qp,
+		qp_conf->nb_descriptors, socket_id);
+	if (qp->ingress_queue == NULL) {
+		ret = -1;
+		goto qp_setup_cleanup;
+	}
+
+	qp->mb_mgr = alloc_init_mb_mgr();
+	if (!qp->mb_mgr) {
+		ret = -ENOMEM;
+		goto qp_setup_cleanup;
+	}
+
+	memset(&qp->stats, 0, sizeof(qp->stats));
+
+	if (pmd_data->queue_pair_configure) {
+		ret = pmd_data->queue_pair_configure(qp);
+		if (ret < 0)
+			goto qp_setup_cleanup;
+	}
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp->mb_mgr)
+		free_mb_mgr(qp->mb_mgr);
+	if (qp)
+		rte_free(qp);
+	return ret;
+}
+
+/** Return the size of the specific pmd session structure */
+unsigned
+ipsec_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev)
+{
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_data =
+		&ipsec_mb_pmds[internals->pmd_type];
+
+	return pmd_data->session_priv_size;
+}
+
+/** Configure pmd specific multi-buffer session from a crypto xform chain */
+int
+ipsec_mb_pmd_sym_session_configure(
+	struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform,
+	struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct ipsec_mb_private *internals = dev->data->dev_private;
+	struct ipsec_mb_pmd_data *pmd_data =
+		&ipsec_mb_pmds[internals->pmd_type];
+	MB_MGR *mb_mgr = alloc_init_mb_mgr();
+	int ret = 0;
+
+	if (!mb_mgr)
+		return -ENOMEM;
+
+	if (unlikely(sess == NULL)) {
+		IPSEC_MB_LOG(ERR, "invalid session struct");
+		free_mb_mgr(mb_mgr);
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		IPSEC_MB_LOG(ERR, "Couldn't get object from session mempool");
+		free_mb_mgr(mb_mgr);
+		return -ENOMEM;
+	}
+
+	ret = (*pmd_data->session_configure)(mb_mgr, sess_private_data, xform);
+	if (ret != 0) {
+		IPSEC_MB_LOG(ERR, "failed configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		free_mb_mgr(mb_mgr);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+	return 0;
+}
+
+/** Clear the session memory */
+void
+ipsec_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+			       struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		memset(sess_priv, 0, ipsec_mb_pmd_sym_session_get_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
diff --git a/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
new file mode 100644
index 0000000000..8f8d6b0a74
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/rte_ipsec_mb_pmd_private.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2021 Intel Corporation
+ */
+
+#ifndef _IPSEC_MB_PMD_PRIVATE_H_
+#define _IPSEC_MB_PMD_PRIVATE_H_
+
+#include <intel-ipsec-mb.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+
+#if defined(RTE_LIB_SECURITY)
+#define IPSEC_MB_DOCSIS_SEC_ENABLED 1
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#endif
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+enum ipsec_mb_vector_mode {
+	IPSEC_MB_NOT_SUPPORTED = 0,
+	IPSEC_MB_SSE,
+	IPSEC_MB_AVX,
+	IPSEC_MB_AVX2,
+	IPSEC_MB_AVX512
+};
+
+extern enum ipsec_mb_vector_mode vector_mode;
+
+/** MB_MGR instances, one per thread */
+RTE_DECLARE_PER_LCORE(MB_MGR *, mb_mgr);
+
+/** PMD LOGTYPE DRIVER, common to all PMDs */
+extern int ipsec_mb_logtype_driver;
+#define IPSEC_MB_LOG(level, fmt, ...)                                         \
+	rte_log(RTE_LOG_##level, ipsec_mb_logtype_driver,                     \
+		"%s() line %u: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/** All supported device types */
+enum ipsec_mb_pmd_types {
+	IPSEC_MB_N_PMD_TYPES
+};
+
+/** Crypto operations */
+enum ipsec_mb_operation {
+	IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN = 0,
+	IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT,
+	IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT,
+	IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY,
+	IPSEC_MB_OP_ENCRYPT_ONLY,
+	IPSEC_MB_OP_DECRYPT_ONLY,
+	IPSEC_MB_OP_HASH_GEN_ONLY,
+	IPSEC_MB_OP_HASH_VERIFY_ONLY,
+	IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT,
+	IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT,
+	IPSEC_MB_OP_NOT_SUPPORTED
+};
+
+/** Helper function. Gets driver ID based on PMD type */
+static __rte_always_inline uint8_t
+ipsec_mb_get_driver_id(__rte_unused enum ipsec_mb_pmd_types pmd_type)
+{
+	return UINT8_MAX;
+}
+
+/** Common private data structure for each PMD */
+struct ipsec_mb_private {
+	enum ipsec_mb_pmd_types pmd_type;
+	/**< PMD type */
+	uint32_t max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	__extension__ uint8_t priv[0];
+};
+
+/** IPsec Multi-buffer queue pair data, common to all PMDs */
+struct ipsec_mb_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_ring *ingress_queue;
+	/**< Ring for placing operations ready for processing */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_mempool *sess_mp_priv;
+	/**< Session Private Data Mempool */
+	struct rte_cryptodev_stats stats;
+	/**< Queue pair statistics */
+	enum ipsec_mb_pmd_types pmd_type;
+	/**< pmd type */
+	uint8_t digest_idx;
+	/**< Index of the next
+	 * slot to be used in temp_digests,
+	 * to store the digest for a given operation
+	 */
+	MB_MGR *mb_mgr;
+	/**< Multi-buffer manager */
+	__extension__ uint8_t additional_data[0];
+	/**< Storing PMD specific additional data */
+};
+
+static __rte_always_inline void *
+ipsec_mb_get_qp_private_data(struct ipsec_mb_qp *qp)
+{
+	return (void *)qp->additional_data;
+}
+
+/** Helper function. Allocates job manager */
+static __rte_always_inline MB_MGR *
+alloc_init_mb_mgr(void)
+{
+	MB_MGR *mb_mgr = alloc_mb_mgr(0);
+
+	if (unlikely(mb_mgr == NULL)) {
+		IPSEC_MB_LOG(ERR, "Failed to allocate MB_MGR data\n");
+		return NULL;
+	}
+
+	switch (vector_mode) {
+	case IPSEC_MB_SSE:
+		init_mb_mgr_sse(mb_mgr);
+		break;
+	case IPSEC_MB_AVX:
+		init_mb_mgr_avx(mb_mgr);
+		break;
+	case IPSEC_MB_AVX2:
+		init_mb_mgr_avx2(mb_mgr);
+		break;
+	case IPSEC_MB_AVX512:
+		init_mb_mgr_avx512(mb_mgr);
+		break;
+	default:
+		IPSEC_MB_LOG(ERR, "Unsupported vector mode %u\n", vector_mode);
+		free_mb_mgr(mb_mgr);
+		return NULL;
+	}
+
+	return mb_mgr;
+}
+
+/** Helper function. Gets per thread job manager */
+static __rte_always_inline MB_MGR *
+get_per_thread_mb_mgr(void)
+{
+	if (unlikely(RTE_PER_LCORE(mb_mgr) == NULL))
+		RTE_PER_LCORE(mb_mgr) = alloc_init_mb_mgr();
+
+	return RTE_PER_LCORE(mb_mgr);
+}
+
+/** Device creation function */
+int
+cryptodev_ipsec_mb_create(struct rte_vdev_device *vdev,
+	enum ipsec_mb_pmd_types pmd_type);
+
+/** Device remove function */
+int
+cryptodev_ipsec_mb_remove(struct rte_vdev_device *vdev);
+
+/** Configure queue pair PMD type specific data */
+typedef int (*ipsec_mb_queue_pair_configure_t)(struct ipsec_mb_qp *qp);
+
+/** Configure session PMD type specific data */
+typedef int (*ipsec_mb_session_configure_t)(MB_MGR *mb_mgr,
+		void *session_private,
+		const struct rte_crypto_sym_xform *xform);
+
+/** Configure internals PMD type specific data */
+typedef int (*ipsec_mb_dev_configure_t)(struct rte_cryptodev *dev);
+
+/** Per PMD type operation and data */
+struct ipsec_mb_pmd_data {
+	uint8_t is_configured;
+	dequeue_pkt_burst_t dequeue_burst;
+	ipsec_mb_dev_configure_t dev_config;
+	ipsec_mb_queue_pair_configure_t queue_pair_configure;
+	ipsec_mb_session_configure_t session_configure;
+	const struct rte_cryptodev_capabilities *caps;
+	struct rte_cryptodev_ops *ops;
+	struct rte_security_ops *security_ops;
+	uint64_t feature_flags;
+	uint32_t session_priv_size;
+	uint32_t qp_priv_size;
+	uint32_t internals_priv_size;
+};
+
+/** Global PMD type specific data */
+extern struct ipsec_mb_pmd_data ipsec_mb_pmds[IPSEC_MB_N_PMD_TYPES];
+
+int
+ipsec_mb_pmd_config(struct rte_cryptodev *dev,
+	struct rte_cryptodev_config *config);
+
+int
+ipsec_mb_pmd_start(struct rte_cryptodev *dev);
+
+void
+ipsec_mb_pmd_stop(struct rte_cryptodev *dev);
+
+int
+ipsec_mb_pmd_close(struct rte_cryptodev *dev);
+
+void
+ipsec_mb_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats);
+
+void
+ipsec_mb_pmd_stats_reset(struct rte_cryptodev *dev);
+
+void
+ipsec_mb_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info);
+
+int
+ipsec_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id);
+
+int
+ipsec_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+					   struct ipsec_mb_qp *qp);
+
+int
+ipsec_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+				 const struct rte_cryptodev_qp_conf *qp_conf,
+				 int socket_id);
+
+/** Returns the size of the aesni multi-buffer session structure */
+unsigned
+ipsec_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev);
+
+/** Configure an aesni multi-buffer session from a crypto xform chain */
+int ipsec_mb_pmd_sym_session_configure(
+	struct rte_cryptodev *dev,
+	struct rte_crypto_sym_xform *xform,
+	struct rte_cryptodev_sym_session *sess,
+	struct rte_mempool *mempool);
+
+/** Clear the memory of session so it does not leave key material behind */
+void
+ipsec_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
+				struct rte_cryptodev_sym_session *sess);
+
+/** Get session from op. If sessionless create a session */
+static __rte_always_inline void *
+ipsec_mb_get_session_private(struct ipsec_mb_qp *qp, struct rte_crypto_op *op)
+{
+	void *sess = NULL;
+	uint32_t driver_id = ipsec_mb_get_driver_id(qp->pmd_type);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	enum rte_crypto_op_sess_type sess_type = op->sess_type;
+	void *_sess;
+	void *_sess_private_data = NULL;
+	struct ipsec_mb_pmd_data *pmd_data = &ipsec_mb_pmds[qp->pmd_type];
+
+	switch (sess_type) {
+	case RTE_CRYPTO_OP_WITH_SESSION:
+		if (likely(sym_op->session != NULL))
+			sess = get_sym_session_private_data(sym_op->session,
+							    driver_id);
+	break;
+	case RTE_CRYPTO_OP_SESSIONLESS:
+		if (!qp->sess_mp ||
+		    rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
+
+		if (!qp->sess_mp_priv ||
+		    rte_mempool_get(qp->sess_mp_priv,
+					(void **)&_sess_private_data))
+			return NULL;
+
+		sess = _sess_private_data;
+		if (unlikely(pmd_data->session_configure(qp->mb_mgr,
+				sess, sym_op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
+			sess = NULL;
+		}
+
+		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_sym_session_private_data(sym_op->session, driver_id,
+					     _sess_private_data);
+	break;
+	default:
+		IPSEC_MB_LOG(ERR, "Unrecognized session type %u", sess_type);
+	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+	return sess;
+}
+
+#endif /* _IPSEC_MB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ipsec_mb/version.map b/drivers/crypto/ipsec_mb/version.map
new file mode 100644
index 0000000000..4a76d1d52d
--- /dev/null
+++ b/drivers/crypto/ipsec_mb/version.map
@@ -0,0 +1,3 @@
+DPDK_21 {
+	local: *;
+};
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index ea239f4c56..e40b18b17b 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -6,6 +6,7 @@ if is_windows
 endif
 
 drivers = [
+        'ipsec_mb',
         'aesni_gcm',
         'aesni_mb',
         'armv8',
-- 
2.25.1


Thread overview: 67+ messages
2021-06-18 12:17 [dpdk-dev] [RFC 0/7] crypto/ipsec_mb: introduce ipsec_mb framework pbronowx
2021-06-18 12:17 ` [dpdk-dev] [RFC 1/7] " pbronowx
2021-06-18 12:17 ` [dpdk-dev] [RFC 2/7] crypto/ipsec_mb: move aesni-mb PMD to " pbronowx
2021-06-18 12:17 ` [dpdk-dev] [RFC 3/7] crypto/ipsec_mb: move aesni-gcm " pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 4/7] crypto/ipsec_mb: move kasumi " pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 5/7] crypto/ipsec_mb: move snow3g " pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 6/7] crypto/snow3g: add support for digest appended ops pbronowx
2021-06-18 12:18 ` [dpdk-dev] [RFC 7/7] crypto/ipsec_mb: move zuc PMD to ipsec_mb framework pbronowx
2021-06-18 13:11 ` [dpdk-dev] [RFC 0/7] crypto/ipsec_mb: introduce " David Marchand
2021-06-18 16:05   ` [dpdk-dev] [EXT] " Akhil Goyal
2021-06-21  8:52     ` Zhang, Roy Fan
2021-08-26 15:16 ` [dpdk-dev] [PATCH v1 0/8] drivers/crypto: " Ciara Power
2021-08-26 15:16   ` Ciara Power [this message]
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 2/8] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 3/8] drivers/crypto: move aesni-gcm " Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 4/8] drivers/crypto: move kasumi " Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 5/8] drivers/crypto: move snow3g " Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 6/8] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 7/8] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-08-26 15:16   ` [dpdk-dev] [PATCH v1 8/8] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-09-23 15:28 ` [dpdk-dev] [PATCH v2 0/9] drivers/crypto: introduce ipsec_mb framework Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 1/9] drivers/crypto: introduce IPsec-mb framework Ciara Power
2021-09-23 15:46     ` Thomas Monjalon
2021-09-27 10:02       ` Power, Ciara
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 2/9] drivers/crypto: move aesni-mb PMD to " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 3/9] drivers/crypto: move aesni-gcm " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 4/9] drivers/crypto: move kasumi " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 5/9] drivers/crypto: move snow3g " Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 6/9] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 7/9] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 8/9] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-09-23 15:28   ` [dpdk-dev] [PATCH v2 9/9] doc/rel_notes: added note for SW Crypto PMD change Ciara Power
2021-09-28 16:43   ` [dpdk-dev] [EXT] [PATCH v2 0/9] drivers/crypto: introduce ipsec_mb framework Akhil Goyal
2021-10-15 14:39 ` [dpdk-dev] [PATCH v4 00/14] " Ciara Power
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 01/14] drivers/crypto: introduce IPsec-mb framework Ciara Power
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 02/14] crypto/ipsec_mb: add multiprocess support Ciara Power
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 03/14] drivers/crypto: move aesni-mb PMD to IPsec-mb framework Ciara Power
2021-10-18  7:38     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 04/14] crypto/ipsec_mb: support ZUC-256 for aesni_mb Ciara Power
2021-10-18  7:39     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 05/14] test/crypto: check cipher parameters Ciara Power
2021-10-18  7:40     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 06/14] test/crypto: check auth parameters Ciara Power
2021-10-18  7:41     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 07/14] test/crypto: add ZUC-256 vectors Ciara Power
2021-10-18  7:42     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 08/14] drivers/crypto: move aesni-gcm PMD to IPsec-mb framework Ciara Power
2021-10-18  7:43     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 09/14] drivers/crypto: move kasumi " Ciara Power
2021-10-18  7:42     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 10/14] drivers/crypto: move snow3g " Ciara Power
2021-10-18  7:42     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 11/14] crypto/ipsec_mb: add snow3g digest appended ops support Ciara Power
2021-10-18  7:43     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 12/14] drivers/crypto: move zuc PMD to IPsec-mb framework Ciara Power
2021-10-18  7:44     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 13/14] crypto/ipsec_mb: add chacha20-poly1305 PMD to framework Ciara Power
2021-10-18  7:44     ` Zhang, Roy Fan
2021-10-15 14:39   ` [dpdk-dev] [PATCH v4 14/14] test/crypto: add test for chacha20_poly1305 PMD Ciara Power
2021-10-18  7:44     ` Zhang, Roy Fan
2021-10-18 15:21   ` [dpdk-dev] [EXT] [PATCH v4 00/14] drivers/crypto: introduce ipsec_mb framework Akhil Goyal
2021-10-19 23:09     ` Thomas Monjalon
2021-10-20  4:23       ` Akhil Goyal
2021-10-20  8:31         ` Akhil Goyal
2021-10-20  9:01           ` Thomas Monjalon
2021-10-20  9:00         ` Zhang, Roy Fan
2021-10-20  9:04           ` Akhil Goyal
