DPDK patches and discussions
* [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode
@ 2022-08-04 10:36 Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
                   ` (7 more replies)
  0 siblings, 8 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev; +Cc: jerinj, gakhil, anoobj, Volodymyr Fialko

Add support for lookaside event mode to the ipsec-secgw example
application.

Depends-on: Series-24201

Volodymyr Fialko (6):
  examples/ipsec-secgw: add event crypto adapter init
  examples/ipsec-secgw: add queue for event crypto adapter
  examples/ipsec-secgw: add lookaside event mode
  examples/ipsec-secgw: add stats for event mode
  examples/ipsec-secgw: add event vector support for lookaside
  examples/ipsec-secgw: reduce number of QP for event lookaside

 doc/guides/sample_app_ug/ipsec_secgw.rst |   7 +-
 examples/ipsec-secgw/event_helper.c      | 267 +++++++++++--
 examples/ipsec-secgw/event_helper.h      |   4 +
 examples/ipsec-secgw/ipsec-secgw.c       | 108 +++---
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec.c             |  35 +-
 examples/ipsec-secgw/ipsec.h             |   8 +-
 examples/ipsec-secgw/ipsec_worker.c      | 461 ++++++++++++++++++++---
 examples/ipsec-secgw/sa.c                |  23 +-
 9 files changed, 783 insertions(+), 131 deletions(-)

-- 
2.25.1



* [PATCH 1/6] examples/ipsec-secgw: add event crypto adapter init
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
@ 2022-08-04 10:36 ` Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Create, configure and start an event crypto adapter. This adapter will
be used in lookaside event mode processing.
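
For readers unfamiliar with the adapter API, the patch follows the
standard event crypto adapter bring-up. A minimal sketch, assuming a
single eventdev and the adapter-id == cryptodev-id convention used in
this patch (error handling trimmed):

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
crypto_adapter_bringup(uint8_t evdev_id)
{
	struct rte_event_port_conf port_conf = {0};
	struct rte_event_dev_info info;
	uint8_t cdev_id;

	/* Size the adapter's internal event port from device limits */
	rte_event_dev_info_get(evdev_id, &info);
	port_conf.new_event_threshold = info.max_num_events;
	port_conf.dequeue_depth = info.max_event_port_dequeue_depth;
	port_conf.enqueue_depth = info.max_event_port_enqueue_depth;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		/* OP_FORWARD: workers submit crypto ops through
		 * rte_event_crypto_adapter_enqueue() */
		if (rte_event_crypto_adapter_create(cdev_id, evdev_id,
				&port_conf,
				RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) < 0)
			return -1;
		/* queue_pair_id of -1 binds all configured queue pairs */
		if (rte_event_crypto_adapter_queue_pair_add(cdev_id,
				cdev_id, -1, NULL) < 0)
			return -1;
		if (rte_event_crypto_adapter_start(cdev_id) < 0)
			return -1;
	}
	return 0;
}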

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 144 ++++++++++++++++++++++++++++
 examples/ipsec-secgw/event_helper.h |   2 +
 examples/ipsec-secgw/ipsec-secgw.c  |  44 ++++++---
 3 files changed, 175 insertions(+), 15 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index b36f20a3fd..6b00a21b6a 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -2,8 +2,10 @@
  * Copyright (C) 2020 Marvell International Ltd.
  */
 #include <rte_bitmap.h>
+#include <rte_cryptodev.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_malloc.h>
@@ -742,6 +744,126 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 	return 0;
 }
 
+static int
+eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_port_conf port_conf = {0};
+	struct eventdev_params *eventdev_config;
+	uint8_t eventdev_id, cdev_id, n;
+	uint32_t cap;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	/*
+	 * More than one eventdev is not supported;
+	 * all event crypto adapters will be assigned to the same eventdev.
+	 */
+	RTE_ASSERT(em_conf->nb_eventdev == 1);
+
+	/* Get event device configuration */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_id = eventdev_config->eventdev_id;
+
+	n = rte_cryptodev_count();
+
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		/* Check event's crypto capabilities */
+		ret = rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event device's crypto capabilities %d", ret);
+			return ret;
+		}
+
+		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) {
+			EH_LOG_ERR("Event crypto adapter does not support forward mode!");
+			return -EINVAL;
+		}
+
+		/* Create event crypto adapter */
+
+		/* Get default configuration of event dev */
+		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event dev info %d", ret);
+			return ret;
+		}
+
+		/* Setup port conf */
+		port_conf.new_event_threshold =
+				evdev_default_conf.max_num_events;
+		port_conf.dequeue_depth =
+				evdev_default_conf.max_event_port_dequeue_depth;
+		port_conf.enqueue_depth =
+				evdev_default_conf.max_event_port_enqueue_depth;
+
+		/* Create adapter */
+		ret = rte_event_crypto_adapter_create(cdev_id, eventdev_id,
+				&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to create event crypto adapter %d", ret);
+			return ret;
+		}
+
+		/* Add crypto queue pairs to event crypto adapter */
+		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
+				-1, /* add all the pre-configured queue pairs to the instance */
+				NULL);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_start_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_start(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to start event crypto adapter %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_stop_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_stop(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to stop event crypto adapter %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int
 eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
 				uint8_t ev_dev_id, uint8_t ethdev_id)
@@ -1695,6 +1817,13 @@ eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Setup event crypto adapter */
+	ret = eh_initialize_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to initialize event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Setup Rx adapter */
 	ret = eh_initialize_rx_adapter(em_conf);
 	if (ret < 0) {
@@ -1716,6 +1845,14 @@ eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Start event crypto adapter */
+	ret = eh_start_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to start event crypto adapter %d", ret);
+		return ret;
+	}
+
+
 	/* Start eth devices after setting up adapter */
 	RTE_ETH_FOREACH_DEV(port_id) {
 
@@ -1786,6 +1923,13 @@ eh_devs_uninit(struct eh_conf *conf)
 		}
 	}
 
+	/* Stop event crypto adapter */
+	ret = eh_stop_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to stop event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Stop and release event devices */
 	for (i = 0; i < em_conf->nb_eventdev; i++) {
 
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index f3cbe57cb3..4b26dc8fc2 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -185,6 +185,8 @@ struct eventmode_conf {
 		/**< Max vector timeout in nanoseconds */
 	uint64_t vector_pool_sz;
 		/**< Vector pool size */
+	bool enable_event_crypto_adapter;
+		/**< Enables event crypto adapter related configuration */
 };
 
 /**
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 815b9254ae..4ca5936bdf 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -44,6 +44,7 @@
 #include <rte_cryptodev.h>
 #include <rte_security.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_ip.h>
 #include <rte_ip_frag.h>
 #include <rte_alarm.h>
@@ -2094,7 +2095,7 @@ max_session_size(void)
 }
 
 static void
-session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
+session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz, size_t user_data_sz)
 {
 	char mp_name[RTE_MEMPOOL_NAMESIZE];
 	struct rte_mempool *sess_mp;
@@ -2107,8 +2108,8 @@ session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
 	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
 			CDEV_MP_CACHE_MULTIPLIER);
 	sess_mp = rte_cryptodev_sym_session_pool_create(
-			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
-			socket_id);
+			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
+			user_data_sz, socket_id);
 	ctx->session_pool = sess_mp;
 
 	if (ctx->session_pool == NULL)
@@ -2441,7 +2442,8 @@ signal_handler(int signum)
 }
 
 static void
-ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa,
+		struct eventmode_conf *em_conf)
 {
 	struct rte_ipsec_session *ips;
 	int32_t i;
@@ -2451,9 +2453,11 @@ ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
 
 	for (i = 0; i < nb_sa; i++) {
 		ips = ipsec_get_primary_session(&sa[i]);
-		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
-			rte_exit(EXIT_FAILURE, "Event mode supports only "
-				 "inline protocol sessions\n");
+		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+			em_conf->enable_event_crypto_adapter = true;
+		else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+			rte_exit(EXIT_FAILURE, "Event mode supports inline "
+				 "and lookaside protocol sessions\n");
 	}
 
 }
@@ -2486,13 +2490,12 @@ check_event_mode_params(struct eh_conf *eh_conf)
 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
 
 	/*
-	 * Event mode currently supports only inline protocol sessions.
-	 * If there are other types of sessions configured then exit with
-	 * error.
+	 * Event mode currently supports inline and lookaside protocol
+	 * sessions. If there are other types of sessions configured then exit
+	 * with error.
 	 */
-	ev_mode_sess_verify(sa_in, nb_sa_in);
-	ev_mode_sess_verify(sa_out, nb_sa_out);
-
+	ev_mode_sess_verify(sa_in, nb_sa_in, em_conf);
+	ev_mode_sess_verify(sa_out, nb_sa_out, em_conf);
 
 	/* Option --config does not apply to event mode */
 	if (nb_lcore_params > 0) {
@@ -2925,7 +2928,7 @@ main(int32_t argc, char **argv)
 	uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
 	uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
 	struct eh_conf *eh_conf = NULL;
-	size_t sess_sz;
+	size_t sess_sz, user_data_sz;
 
 	nb_bufs_in_pool = 0;
 
@@ -2991,6 +2994,16 @@ main(int32_t argc, char **argv)
 	else
 		nb_crypto_qp = 0;
 
+	/*
+	 * In event lookaside mode, request memory for crypto metadata. This
+	 * can be removed once the API no longer requires user data, which is
+	 * planned for DPDK 22.11.
+	 */
+	if (((struct eventmode_conf *)(eh_conf->mode_params))->enable_event_crypto_adapter)
+		user_data_sz = sizeof(union rte_event_crypto_metadata);
+	else
+		user_data_sz = 0;
+
 	nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
 
 	if (nb_bufs_in_pool == 0) {
@@ -3032,7 +3045,8 @@ main(int32_t argc, char **argv)
 		if (socket_ctx[socket_id].session_pool)
 			continue;
 
-		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz);
+		session_pool_init(&socket_ctx[socket_id], socket_id, sess_sz,
+				user_data_sz);
 		session_priv_pool_init(&socket_ctx[socket_id], socket_id,
 			sess_sz);
 	}
-- 
2.25.1



* [PATCH 2/6] examples/ipsec-secgw: add queue for event crypto adapter
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
@ 2022-08-04 10:36 ` Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Add a separate event queue for event crypto adapter processing, to
avoid queue contention between new and already processed events.
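
The resulting queue layout can be sketched as below (queue ids are
illustrative; on event devices without the RTE_EVENT_DEV_CAP_QUEUE_QOS
capability the priority field is ignored):

#include <rte_eventdev.h>

static int
setup_event_queues(uint8_t evdev_id, uint8_t nb_rx_queues,
		   uint8_t tx_queue_id, uint8_t crypto_queue_id)
{
	struct rte_event_queue_conf conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};
	uint8_t q;

	/* Rx traffic queues keep normal priority */
	for (q = 0; q < nb_rx_queues; q++)
		if (rte_event_queue_setup(evdev_id, q, &conf) < 0)
			return -1;

	/* Tx stage queue is atomic to preserve packet ordering */
	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	if (rte_event_queue_setup(evdev_id, tx_queue_id, &conf) < 0)
		return -1;

	/* Crypto completions get scheduled ahead of new Rx events */
	conf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	return rte_event_queue_setup(evdev_id, crypto_queue_id, &conf);
}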

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  2 +
 2 files changed, 71 insertions(+), 26 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 6b00a21b6a..9c20a05da8 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -17,6 +17,8 @@
 #define DEFAULT_VECTOR_SIZE  16
 #define DEFAULT_VECTOR_TMO   102400
 
+#define INVALID_EV_QUEUE_ID -1
+
 static volatile bool eth_core_running;
 
 static int
@@ -151,11 +153,10 @@ eh_dev_has_burst_mode(uint8_t dev_id)
 }
 
 static int
-eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+eh_set_nb_eventdev(struct eventmode_conf *em_conf)
 {
-	int lcore_count, nb_eventdev, nb_eth_dev, ret;
 	struct eventdev_params *eventdev_config;
-	struct rte_event_dev_info dev_info;
+	int nb_eventdev;
 
 	/* Get the number of event devices */
 	nb_eventdev = rte_event_dev_count();
@@ -170,6 +171,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Set event dev id */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_config->eventdev_id = 0;
+
+	/* Update the number of event devices */
+	em_conf->nb_eventdev = 1;
+
+	return 0;
+}
+
+static int
+eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+{
+	int lcore_count, nb_eth_dev, ret;
+	struct eventdev_params *eventdev_config;
+	struct rte_event_dev_info dev_info;
+
 	/* Get the number of eth devs */
 	nb_eth_dev = rte_eth_dev_count_avail();
 	if (nb_eth_dev == 0) {
@@ -197,15 +215,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	eventdev_config = &(em_conf->eventdev_config[0]);
 
 	/* Save number of queues & ports available */
-	eventdev_config->eventdev_id = 0;
-	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
+	eventdev_config->nb_eventqueue = nb_eth_dev;
 	eventdev_config->nb_eventport = dev_info.max_event_ports;
 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-	/* Check if there are more queues than required */
-	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
-		/* One queue is reserved for Tx */
-		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+	/* One queue is reserved for Tx */
+	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
+	if (eventdev_config->all_internal_ports) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->tx_queue_id =
+			eventdev_config->nb_eventqueue++;
+	}
+
+	/* One queue is reserved for event crypto adapter */
+	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
+	if (em_conf->enable_event_crypto_adapter) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->ev_cpt_queue_id =
+			eventdev_config->nb_eventqueue++;
 	}
 
 	/* Check if there are more ports than required */
@@ -214,9 +247,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventport = lcore_count;
 	}
 
-	/* Update the number of event devices */
-	em_conf->nb_eventdev++;
-
 	return 0;
 }
 
@@ -245,15 +275,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf)
 
 	/*
 	 * If Rx & Tx internal ports are supported by all event devices then
-	 * eth cores won't be required. Override the eth core mask requested
-	 * and decrement number of event queues by one as it won't be needed
-	 * for Tx.
+	 * eth cores won't be required. Override the eth core mask requested.
 	 */
-	if (all_internal_ports) {
+	if (all_internal_ports)
 		rte_bitmap_reset(em_conf->eth_core_mask);
-		for (i = 0; i < em_conf->nb_eventdev; i++)
-			em_conf->eventdev_config[i].nb_eventqueue--;
-	}
 }
 
 static int
@@ -370,6 +395,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 			eventdev_config->nb_eventqueue :
 			eventdev_config->nb_eventqueue - 1;
 
+	/* Reserve one queue for event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter)
+		nb_eventqueue--;
+
 	/*
 	 * Map all queues of eth device (port) to an event queue. If there
 	 * are more event queues than eth ports then create 1:1 mapping.
@@ -541,14 +570,18 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 	 * and initialize the config with all ports & queues available
 	 */
 	if (em_conf->nb_eventdev == 0) {
+		ret = eh_set_nb_eventdev(em_conf);
+		if (ret != 0)
+			return ret;
+		eh_do_capability_check(em_conf);
 		ret = eh_set_default_conf_eventdev(em_conf);
 		if (ret != 0)
 			return ret;
+	} else {
+		/* Perform capability check for the selected event devices */
+		eh_do_capability_check(em_conf);
 	}
 
-	/* Perform capability check for the selected event devices */
-	eh_do_capability_check(em_conf);
-
 	/*
 	 * Check if links are specified. Else generate a default config for
 	 * the event ports used.
@@ -594,8 +627,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
-	uint8_t i, j;
-	int ret;
+	int ret, j;
+	uint8_t i;
 
 	for (i = 0; i < nb_eventdev; i++) {
 
@@ -657,14 +690,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 			 * stage if event device does not have internal
 			 * ports. This will be an atomic queue.
 			 */
-			if (!eventdev_config->all_internal_ports &&
-			    j == nb_eventqueue-1) {
+			if (j == eventdev_config->tx_queue_id) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
 				eventq_conf.schedule_type =
 					em_conf->ext_params.sched_type;
 			}
+			/*
+			 * Give the event crypto adapter's queue higher priority than Rx queues.
+			 * This allows crypto events to be processed with the highest priority.
+			 */
+			if (j == eventdev_config->ev_cpt_queue_id) {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_HIGHEST;
+			} else {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_NORMAL;
+			}
 
 			/* Set max atomic flows to 1024 */
 			eventq_conf.nb_atomic_flows = 1024;
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4b26dc8fc2..af5cfcf794 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -88,6 +88,8 @@ struct eventdev_params {
 	uint8_t nb_eventport;
 	uint8_t ev_queue_mode;
 	uint8_t all_internal_ports;
+	int tx_queue_id;
+	int ev_cpt_queue_id;
 };
 
 /**
-- 
2.25.1



* [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
@ 2022-08-04 10:36 ` Volodymyr Fialko
  2022-08-05  3:26   ` Suanming Mou
  2022-09-22  5:05   ` Gagandeep Singh
  2022-08-04 10:36 ` [PATCH 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
                   ` (4 subsequent siblings)
  7 siblings, 2 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Add base support for lookaside event mode. Events coming from ethdev
will be enqueued to the event crypto adapter, processed, and then
enqueued back to ethdev for transmission.
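
The per-event dispatch in the worker then looks roughly like the
sketch below. prepare_crypto_event() is a hypothetical stand-in for
the patch's crypto_op_reset()/session-attach logic; routing and error
handling are trimmed:

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_mbuf.h>

/* Hypothetical helper: the patch builds a rte_crypto_op in the mbuf
 * private area (get_priv()/crypto_op_reset()) and attaches the SA's
 * security session; only the event rewrite is shown here. */
static void
prepare_crypto_event(struct rte_event *ev)
{
	struct rte_crypto_op *cop = rte_mbuf_to_priv(ev->mbuf);

	ev->event = 0;
	ev->event_ptr = cop;
}

static void
lookaside_worker_loop(uint8_t evdev_id, uint8_t port_id)
{
	struct rte_event ev;

	while (1) {
		if (rte_event_dequeue_burst(evdev_id, port_id,
				&ev, 1, 0) == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETHDEV:
			/* New packet from Rx: submit to the cryptodev
			 * via the crypto adapter; the completion comes
			 * back as a CRYPTODEV event. */
			prepare_crypto_event(&ev);
			rte_event_crypto_adapter_enqueue(evdev_id,
					port_id, &ev, 1);
			break;
		case RTE_EVENT_TYPE_CRYPTODEV:
			/* Completed crypto op: recover the mbuf and
			 * send it through the Tx adapter. */
			ev.mbuf = ((struct rte_crypto_op *)
					ev.event_ptr)->sym->m_src;
			rte_event_eth_tx_adapter_enqueue(evdev_id,
					port_id, &ev, 1, 0);
			break;
		default:
			break;
		}
	}
}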

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/sample_app_ug/ipsec_secgw.rst |   4 +-
 examples/ipsec-secgw/ipsec-secgw.c       |   3 +-
 examples/ipsec-secgw/ipsec.c             |  35 +++-
 examples/ipsec-secgw/ipsec.h             |   8 +-
 examples/ipsec-secgw/ipsec_worker.c      | 224 +++++++++++++++++++++--
 examples/ipsec-secgw/sa.c                |  23 ++-
 6 files changed, 262 insertions(+), 35 deletions(-)

diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 07686d2285..c7b87889f1 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -83,8 +83,8 @@ The application supports two modes of operation: poll mode and event mode.
   every type of event device without affecting existing paths/use cases. The worker
   to be used will be determined by the operating conditions and the underlying device
   capabilities. **Currently the application provides non-burst, internal port worker
-  threads and supports inline protocol only.** It also provides infrastructure for
-  non-internal port however does not define any worker threads.
+  threads.** It also provides infrastructure for non-internal port; however, it does
+  not define any worker threads.
 
   Event mode also supports event vectorization. The event devices, ethernet device
   pairs which support the capability ``RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR`` can
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 4ca5936bdf..0bd1f15ae5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -3121,7 +3121,8 @@ main(int32_t argc, char **argv)
 		if ((socket_ctx[socket_id].session_pool != NULL) &&
 			(socket_ctx[socket_id].sa_in == NULL) &&
 			(socket_ctx[socket_id].sa_out == NULL)) {
-			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf);
+			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf,
+				eh_conf->mode_params);
 			sp4_init(&socket_ctx[socket_id], socket_id);
 			sp6_init(&socket_ctx[socket_id], socket_id);
 			rt_init(&socket_ctx[socket_id], socket_id);
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 7b7bfff696..030cfe7a82 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -6,6 +6,7 @@
 #include <netinet/ip.h>
 
 #include <rte_branch_prediction.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_log.h>
 #include <rte_crypto.h>
 #include <rte_security.h>
@@ -56,14 +57,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
 
 int
 create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
-	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-	struct rte_ipsec_session *ips)
+	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
 {
 	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
+	enum rte_crypto_op_sess_type sess_type;
 	struct rte_cryptodev_info cdev_info;
+	enum rte_crypto_op_type op_type;
 	unsigned long cdev_id_qp = 0;
-	struct cdev_key key = { 0 };
 	struct ipsec_ctx *ipsec_ctx;
+	struct cdev_key key = { 0 };
+	void *sess = NULL;
 	uint32_t lcore_id;
 	int32_t ret = 0;
 
@@ -159,6 +163,10 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
 				return -1;
 			}
 			ips->security.ctx = ctx;
+
+			sess = ips->security.ses;
+			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
 		} else {
 			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
 			return -1;
@@ -183,6 +191,27 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
 		rte_cryptodev_info_get(cdev_id, &cdev_info);
 	}
 
+	/* Set up metadata required by the event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
+		union rte_event_crypto_metadata m_data = {0};
+		const struct eventdev_params *eventdev_conf;
+
+		eventdev_conf = &(em_conf->eventdev_config[0]);
+
+		/* Fill in response information */
+		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
+		m_data.response_info.op = RTE_EVENT_OP_NEW;
+		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;
+
+		/* Fill in request information */
+		m_data.request_info.cdev_id = cdev_id;
+		m_data.request_info.queue_pair_id = 0;
+
+		/* Attach meta info to session */
+		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
+				sess_type, &m_data, sizeof(m_data));
+	}
+
 	return 0;
 }
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 2005ae8fec..5ef63e8fc4 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -14,6 +14,7 @@
 #include <rte_flow.h>
 #include <rte_ipsec.h>
 
+#include "event_helper.h"
 #include "ipsec-secgw.h"
 
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
@@ -424,7 +425,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound);
 
 void
 sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf);
+	struct lcore_conf *lcore_conf,
+	const struct eventmode_conf *em_conf);
 
 void
 rt_init(struct socket_ctx *ctx, int32_t socket_id);
@@ -441,8 +443,8 @@ enqueue_cop_burst(struct cdev_qp *cqp);
 
 int
 create_lookaside_session(struct ipsec_ctx *ipsec_ctx[],
-	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-	struct rte_ipsec_session *ips);
+	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+	struct ipsec_sa *sa, struct rte_ipsec_session *ips);
 
 int
 create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 803157d8ee..2661f0275f 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2020 Marvell International Ltd.
  */
 #include <rte_acl.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_lpm.h>
 #include <rte_lpm6.h>
@@ -11,6 +12,7 @@
 #include "ipsec.h"
 #include "ipsec-secgw.h"
 #include "ipsec_worker.h"
+#include "sad.h"
 
 #if defined(__ARM_NEON)
 #include "ipsec_lpm_neon.h"
@@ -228,6 +230,43 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
 	ip->num = j;
 }
 
+static inline void
+pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv4_hdr *ipv4;
+	struct rte_ipv6_hdr *ipv6;
+	size_t l3len, ext_len;
+	uint32_t l3_type;
+	int next_proto;
+	uint8_t *p;
+
+	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
+	if (l3_type == RTE_PTYPE_L3_IPV4) {
+		ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
+		pkt->l3_len = ipv4->ihl * 4;
+	} else if (l3_type & RTE_PTYPE_L3_IPV6) {
+		ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
+		l3len = sizeof(struct rte_ipv6_hdr);
+		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+		     l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+			p = rte_pktmbuf_mtod(pkt, uint8_t *);
+			next_proto = ipv6->proto;
+			while (next_proto != IPPROTO_ESP &&
+			       l3len < pkt->data_len &&
+			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
+						next_proto, &ext_len)) >= 0)
+				l3len += ext_len;
+
+			/* Drop pkt when IPv6 header exceeds first seg size */
+			if (unlikely(l3len > pkt->data_len)) {
+				free_pkts(&pkt, 1);
+				return;
+			}
+		}
+		pkt->l3_len = l3len;
+	}
+}
+
 static inline uint16_t
 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 {
@@ -287,9 +326,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
 	return RTE_MAX_ETHPORTS;
 }
 
+static inline void
+crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+		struct rte_crypto_op *cop[], uint16_t num)
+{
+	struct rte_crypto_sym_op *sop;
+	uint32_t i;
+
+	const struct rte_crypto_op unproc_cop = {
+		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
+	};
+
+	for (i = 0; i != num; i++) {
+		cop[i]->raw = unproc_cop.raw;
+		sop = cop[i]->sym;
+		sop->m_src = mb[i];
+		sop->m_dst = NULL;
+		__rte_security_attach_session(sop, ss->security.ses);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct ipsec_mbuf_metadata *priv;
+	struct rte_ipsec_session *sess;
+	struct rte_crypto_op *cop;
+	struct rte_event cev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	/* Get pkt private data */
+	priv = get_priv(pkt);
+	cop = &priv->cop;
+
+	/* Reset crypto operation data */
+	crypto_op_reset(sess, &pkt, &cop, 1);
+
+	/* Update event_ptr with rte_crypto_op */
+	cev.event = 0;
+	cev.event_ptr = cop;
+
+	/* Enqueue event to crypto adapter */
+	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+			ev_link->event_port_id, &cev, 1);
+	if (unlikely(ret <= 0)) {
+		/* pkt will be freed by the caller */
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
+		return rte_errno;
+	}
+
+	return 0;
+}
+
 static inline int
 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+	const struct eh_event_link_info *ev_link, struct rte_event *ev)
 {
 	struct ipsec_sa *sa = NULL;
 	struct rte_mbuf *pkt;
@@ -340,7 +437,22 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		pkt_l3_len_set(pkt);
+
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
 
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -389,7 +501,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 static inline int
 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+		const struct eh_event_link_info *ev_link, struct rte_event *ev)
 {
 	struct rte_ipsec_session *sess;
 	struct sa_ctx *sa_ctx;
@@ -456,11 +568,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* Get IPsec session */
 	sess = ipsec_get_primary_session(sa);
 
-	/* Allow only inline protocol for now */
-	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-		goto drop_pkt_and_exit;
-	}
+	/* Determine protocol type */
+	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+		goto lookaside;
 
 	rte_security_set_pkt_metadata(sess->security.ctx,
 				      sess->security.ses, pkt, NULL);
@@ -482,6 +592,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	ipsec_event_pre_forward(pkt, port_id);
 	return PKT_FORWARDED;
 
+lookaside:
+	/* prepare pkt - advance start to L3 */
+	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+
+	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+		return PKT_POSTED;
+
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
 	rte_pktmbuf_free(pkt);
@@ -737,6 +854,67 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
 }
 
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_ether_hdr *ethhdr;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t port_id;
+	struct ip *ip;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	/* If operation was not successful, drop the packet */
+	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+	/* Prepend Ether layer */
+	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+	/* Route pkt and update required fields */
+	if (ip->ip_v == IPVERSION) {
+		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
+		pkt->l3_len = sizeof(struct ip);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+
+		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
+	} else {
+		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
+		pkt->l3_len = sizeof(struct ip6_hdr);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
+	}
+
+	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	/* Update Ether with port's MAC addresses */
+	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
+	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
+
+	/* Update event */
+	ev->mbuf = pkt;
+
+	return PKT_FORWARDED;
+}
+
 /*
  * Event mode exposes various operating modes depending on the
  * capabilities of the event device and the operating mode
@@ -924,6 +1102,14 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 		"Launching event mode worker (non-burst - Tx internal port - "
 		"app mode) on lcore %d\n", lcore_id);
 
+	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+	if (ret != 0) {
+		RTE_LOG(ERR, IPSEC,
+			"SAD cache init on lcore %u, failed with code: %d\n",
+			lcore_id, ret);
+		return;
+	}
+
 	/* Check if it's single link */
 	if (nb_links != 1) {
 		RTE_LOG(INFO, IPSEC,
@@ -950,6 +1136,20 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			ipsec_ev_vector_process(&lconf, links, &ev);
 			continue;
 		case RTE_EVENT_TYPE_ETHDEV:
+			if (is_unprotected_port(ev.mbuf->port))
+				ret = process_ipsec_ev_inbound(&lconf.inbound,
+								&lconf.rt, links, &ev);
+			else
+				ret = process_ipsec_ev_outbound(&lconf.outbound,
+								&lconf.rt, links, &ev);
+			if (ret != 1)
+				/* The pkt has been dropped or posted */
+				continue;
+			break;
+		case RTE_EVENT_TYPE_CRYPTODEV:
+			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
+			if (unlikely(ret != PKT_FORWARDED))
+				continue;
 			break;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
@@ -957,16 +1157,6 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			continue;
 		}
 
-		if (is_unprotected_port(ev.mbuf->port))
-			ret = process_ipsec_ev_inbound(&lconf.inbound,
-							&lconf.rt, &ev);
-		else
-			ret = process_ipsec_ev_outbound(&lconf.outbound,
-							&lconf.rt, &ev);
-		if (ret != 1)
-			/* The pkt has been dropped */
-			continue;
-
 		/*
 		 * Since tx internal port is available, events can be
 		 * directly enqueued to the adapter and it would be
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 5dca578790..7a0c528f75 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1235,7 +1235,8 @@ static int
 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, uint32_t inbound,
 		struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	struct ipsec_sa *sa;
 	uint32_t i, idx;
@@ -1408,7 +1409,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 			}
 		} else {
-			rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips);
+			rc = create_lookaside_session(ips_ctx, skt_ctx,
+						      em_conf, sa, ips);
 			if (rc != 0) {
 				RTE_LOG(ERR, IPSEC_ESP,
 					"create_lookaside_session() failed\n");
@@ -1431,17 +1433,19 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 static inline int
 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
 }
 
 static inline int
 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
 }
 
 /*
@@ -1673,7 +1677,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
 
 void
 sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf)
+	struct lcore_conf *lcore_conf,
+	const struct eventmode_conf *em_conf)
 {
 	int32_t rc;
 	const char *name;
@@ -1705,7 +1710,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
 			rte_exit(EXIT_FAILURE, "failed to init SAD\n");
 		RTE_LCORE_FOREACH(lcore_id)
 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
-		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
+		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
@@ -1727,7 +1732,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
 
 		RTE_LCORE_FOREACH(lcore_id)
 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
-		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx);
+		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
-- 
2.25.1



* [PATCH 4/6] examples/ipsec-secgw: add stats for event mode
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                   ` (2 preceding siblings ...)
  2022-08-04 10:36 ` [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
@ 2022-08-04 10:36 ` Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Add per-core statistics (Rx/Tx) counters for the event mode worker.
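
A minimal sketch of the counting scheme (the application's real
counters live in the core_statistics array in ipsec-secgw.h; the field
names here are illustrative):

#include <stdint.h>
#include <rte_common.h>
#include <rte_lcore.h>

struct core_stats {
	uint64_t rx;
	uint64_t tx;
} __rte_cache_aligned;

static struct core_stats core_statistics[RTE_MAX_LCORE];

/* Each worker updates only its own lcore's slot, so no locking or
 * atomics are needed on the fast path. */
static inline void
core_stats_update_rx(int n)
{
	core_statistics[rte_lcore_id()].rx += n;
}

static inline void
core_stats_update_tx(int n)
{
	core_statistics[rte_lcore_id()].tx += n;
}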

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/ipsec_worker.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 2661f0275f..f94ab10a5b 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -494,7 +494,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
-	rte_pktmbuf_free(pkt);
+	free_pkts(&pkt, 1);
 	ev->mbuf = NULL;
 	return PKT_DROPPED;
 }
@@ -601,7 +601,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
-	rte_pktmbuf_free(pkt);
+	free_pkts(&pkt, 1);
 	ev->mbuf = NULL;
 	return PKT_DROPPED;
 }
@@ -816,6 +816,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	pkt = vec->mbufs[0];
 
 	ev_vector_attr_init(vec);
+	core_stats_update_rx(vec->nb_elem);
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
 						      &lconf->rt, vec);
@@ -824,6 +825,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 						       &lconf->rt, vec);
 
 	if (likely(ret > 0)) {
+		core_stats_update_tx(vec->nb_elem);
 		vec->nb_elem = ret;
 		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
 						 links[0].event_port_id,
@@ -1136,6 +1138,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			ipsec_ev_vector_process(&lconf, links, &ev);
 			continue;
 		case RTE_EVENT_TYPE_ETHDEV:
+			core_stats_update_rx(1);
 			if (is_unprotected_port(ev.mbuf->port))
 				ret = process_ipsec_ev_inbound(&lconf.inbound,
 								&lconf.rt, links, &ev);
@@ -1157,6 +1160,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			continue;
 		}
 
+		core_stats_update_tx(1);
 		/*
 		 * Since tx internal port is available, events can be
 		 * directly enqueued to the adapter and it would be
-- 
2.25.1



* [PATCH 5/6] examples/ipsec-secgw: add event vector support for lookaside
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                   ` (3 preceding siblings ...)
  2022-08-04 10:36 ` [PATCH 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
@ 2022-08-04 10:36 ` Volodymyr Fialko
  2022-08-04 10:36 ` [PATCH 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Add vector support for the event crypto adapter in lookaside mode.
Once --event-vector is enabled, the event crypto adapter will group
processed crypto operations into an rte_event_vector event with type
RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
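
On the completion side the worker unwraps the vector of crypto ops
back into an mbuf vector before handing it to the Tx adapter. A
minimal sketch (vector attribute updates and mbuf freeing trimmed):

#include <rte_cryptodev.h>
#include <rte_eventdev.h>

static void
unwrap_crypto_vector(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_crypto_op *cop;
	uint16_t i, n = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* The adapter fills ptrs[] with completed crypto ops */
		cop = vec->ptrs[i];
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
			continue; /* the patch frees the mbuf here */
		/* Reuse the same vector storage for the mbufs */
		vec->mbufs[n++] = cop->sym->m_src;
	}
	vec->nb_elem = n;
}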

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/sample_app_ug/ipsec_secgw.rst |   3 +
 examples/ipsec-secgw/event_helper.c      |  34 ++-
 examples/ipsec-secgw/ipsec-secgw.c       |   2 +-
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec_worker.c      | 281 ++++++++++++++++++-----
 5 files changed, 265 insertions(+), 56 deletions(-)

diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index c7b87889f1..2a1aeae7c5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -94,6 +94,9 @@ The application supports two modes of operation: poll mode and event mode.
   (default vector-size is 16) and vector-tmo (default vector-tmo is 102400ns).
   By default event vectorization is disabled and it can be enabled using event-vector
   option.
+  For event device and crypto device pairs which support the capability
+  ``RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR``, vector aggregation can also be enabled
+  using the event-vector option.
 
 Additionally the event mode introduces two submodes of processing packets:
 
diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 9c20a05da8..635e6f24bf 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -790,12 +790,15 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 static int
 eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf;
 	struct rte_event_dev_info evdev_default_conf = {0};
 	struct rte_event_port_conf port_conf = {0};
 	struct eventdev_params *eventdev_config;
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	const uint8_t nb_qp_per_cdev = 1;
 	uint8_t eventdev_id, cdev_id, n;
-	uint32_t cap;
-	int ret;
+	uint32_t cap, nb_elem;
+	int ret, socket_id;
 
 	if (!em_conf->enable_event_crypto_adapter)
 		return 0;
@@ -850,10 +853,35 @@ eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 			return ret;
 		}
 
+		memset(&queue_conf, 0, sizeof(queue_conf));
+		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) &&
+		    (em_conf->ext_params.event_vector)) {
+			queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
+			queue_conf.vector_sz = em_conf->ext_params.vector_size;
+			/*
+			 * Currently all sessions are configured with the same
+			 * response info fields, so packets will be aggregated
+			 * into the same vector. This lets us size the number
+			 * of vectors to just hold all queue pair descriptors.
+			 */
+			nb_elem = (qp_desc_nb / queue_conf.vector_sz) + 1;
+			nb_elem *= nb_qp_per_cdev;
+			socket_id = rte_cryptodev_socket_id(cdev_id);
+			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+					"QP_VEC_%u_%u", socket_id, cdev_id);
+			queue_conf.vector_mp = rte_event_vector_pool_create(
+					mp_name, nb_elem, 0,
+					queue_conf.vector_sz, socket_id);
+			if (queue_conf.vector_mp == NULL) {
+				EH_LOG_ERR("failed to create event vector pool");
+				return -ENOMEM;
+			}
+		}
+
 		/* Add crypto queue pairs to event crypto adapter */
 		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
 				-1, /* adds all the pre configured queue pairs to the instance */
-				NULL);
+				&queue_conf);
 		if (ret < 0) {
 			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
 			return ret;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 0bd1f15ae5..02b1fabaf5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -85,7 +85,7 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 /*
  * Configurable number of descriptors per queue pair
  */
-static uint32_t qp_desc_nb = 2048;
+uint32_t qp_desc_nb = 2048;
 
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index f02736075b..c6d11f3aac 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -145,6 +145,7 @@ extern bool per_port_pool;
 
 extern uint32_t mtu_size;
 extern uint32_t frag_tbl_sz;
+extern uint32_t qp_desc_nb;
 
 #define SS_F		(1U << 0)	/* Single SA mode */
 #define INL_PR_F	(1U << 1)	/* Inline Protocol */
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index f94ab10a5b..466bb03bde 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -267,6 +267,21 @@ pkt_l3_len_set(struct rte_mbuf *pkt)
 	}
 }
 
+static inline void
+ipsec_sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[], void *sa[], uint16_t nb_pkts)
+{
+	uint16_t i;
+
+	if (nb_pkts == 0)
+		return;
+
+	for (i = 0; i < nb_pkts; i++) {
+		rte_pktmbuf_adj(pkts[i], RTE_ETHER_HDR_LEN);
+		pkt_l3_len_set(pkts[i]);
+	}
+	sad_lookup(sad, pkts, sa, nb_pkts);
+}
+
 static inline uint16_t
 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 {
@@ -348,18 +363,11 @@ crypto_op_reset(const struct rte_ipsec_session *ss,
        }
 }
 
-static inline int
-event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
-		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+static inline void
+crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
 {
 	struct ipsec_mbuf_metadata *priv;
-	struct rte_ipsec_session *sess;
 	struct rte_crypto_op *cop;
-	struct rte_event cev;
-	int ret;
-
-	/* Get IPsec session */
-	sess = ipsec_get_primary_session(sa);
 
 	/* Get pkt private data */
 	priv = get_priv(pkt);
@@ -369,13 +377,39 @@ event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
 	crypto_op_reset(sess, &pkt, &cop, 1);
 
 	/* Update event_ptr with rte_crypto_op */
-	cev.event = 0;
-	cev.event_ptr = cop;
+	ev->event = 0;
+	ev->event_ptr = cop;
+}
+
+static inline void
+free_pkts_from_events(struct rte_event events[], uint16_t count)
+{
+	struct rte_crypto_op *cop;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		cop = events[i].event_ptr;
+		free_pkts(&cop->sym->m_src, 1);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct rte_ipsec_session *sess;
+	struct rte_event ev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	crypto_prepare_event(pkt, sess, &ev);
 
 	/* Enqueue event to crypto adapter */
 	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
-			ev_link->event_port_id, &cev, 1);
-	if (unlikely(ret <= 0)) {
+			ev_link->event_port_id, &ev, 1);
+	if (unlikely(ret != 1)) {
 		/* pkt will be freed by the caller */
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
 		return rte_errno;
@@ -449,7 +483,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -596,7 +630,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* prepare pkt - advance start to L3 */
 	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
 
-	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
 		return PKT_POSTED;
 
 drop_pkt_and_exit:
@@ -607,14 +641,12 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 }
 
 static inline int
-ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
-		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
+ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t)
 {
-	struct rte_ipsec_session *sess;
-	uint32_t sa_idx, i, j = 0;
-	uint16_t port_id = 0;
 	struct rte_mbuf *pkt;
-	struct ipsec_sa *sa;
+	uint16_t port_id = 0;
+	uint32_t i, j = 0;
 
 	/* Route IPv4 packets */
 	for (i = 0; i < t->ip4.num; i++) {
@@ -646,34 +678,111 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			free_pkts(&pkt, 1);
 	}
 
+	return j;
+}
+
+static inline int
+ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
+			    struct route_table *rt,
+			    struct ipsec_traffic *t,
+			    const struct eh_event_link_info *ev_link)
+{
+	uint32_t ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
 	/* Route ESP packets */
+	for (i = 0; i < t->ipsec.num; i++) {
+		pkt = t->ipsec.pkts[i];
+		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
+		if (unlikely(sa == NULL)) {
+			free_pkts(&pkt, 1);
+			continue;
+		}
+		sess = ipsec_get_primary_session(sa);
+		crypto_prepare_event(pkt, sess, &events[ev_len]);
+		ev_len++;
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
+					ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
+		}
+	}
+
+	return j;
+}
+
+static inline int
+ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
+		    const struct eh_event_link_info *ev_link)
+{
+	uint32_t sa_idx, ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	uint16_t port_id = 0;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
+	/* Handle IPsec packets.
+	 * For lookaside IPsec packets, submit to cryptodev queue.
+	 * For inline IPsec packets, route the packet.
+	 */
 	for (i = 0; i < t->ipsec.num; i++) {
 		/* Validate sa_idx */
 		sa_idx = t->ipsec.res[i];
 		pkt = t->ipsec.pkts[i];
-		if (unlikely(sa_idx >= sa_ctx->nb_sa))
+		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
 			free_pkts(&pkt, 1);
-		else {
-			/* Else the packet has to be protected */
-			sa = &(sa_ctx->sa[sa_idx]);
-			/* Get IPsec session */
-			sess = ipsec_get_primary_session(sa);
-			/* Allow only inline protocol for now */
-			if (unlikely(sess->type !=
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-				free_pkts(&pkt, 1);
-				continue;
-			}
+			continue;
+		}
+		/* Else the packet has to be protected */
+		sa = &(sa_ctx->sa[sa_idx]);
+		/* Get IPsec session */
+		sess = ipsec_get_primary_session(sa);
+		switch (sess->type) {
+		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+			crypto_prepare_event(pkt, sess, &events[ev_len]);
+			ev_len++;
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 			rte_security_set_pkt_metadata(sess->security.ctx,
 						sess->security.ses, pkt, NULL);
-
 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
 			port_id = sa->portid;
 			update_mac_addrs(pkt, port_id);
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
 			vec->mbufs[j++] = pkt;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+			free_pkts(&pkt, 1);
+			break;
+		}
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
+				   ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
 		}
 	}
 
@@ -698,6 +807,10 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		t->ip6.data[t->ip6.num] = nlp;
 		t->ip6.pkts[(t->ip6.num)++] = pkt;
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+	case PKT_TYPE_IPSEC_IPV6:
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -708,7 +821,8 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 
 static inline int
 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				struct rte_event_vector *vec)
+				struct rte_event_vector *vec,
+				const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -738,12 +852,15 @@ process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
 	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	ipsec_sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);
+
+	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
 }
 
 static inline int
 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				 struct rte_event_vector *vec)
+				 struct rte_event_vector *vec,
+				 const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -766,7 +883,7 @@ process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
 	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
 }
 
 static inline int
@@ -817,12 +934,13 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 
 	ev_vector_attr_init(vec);
 	core_stats_update_rx(vec->nb_elem);
+
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
-						      &lconf->rt, vec);
+						      &lconf->rt, vec, links);
 	else
 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
-						       &lconf->rt, vec);
+						       &lconf->rt, vec, links);
 
 	if (likely(ret > 0)) {
 		core_stats_update_tx(vec->nb_elem);
@@ -857,24 +975,19 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 }
 
 static inline int
-ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
-			   struct rte_event *ev)
+ipsec_ev_cryptodev_process_one_pkt(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
 {
 	struct rte_ether_hdr *ethhdr;
-	struct rte_crypto_op *cop;
-	struct rte_mbuf *pkt;
 	uint16_t port_id;
 	struct ip *ip;
 
-	/* Get pkt data */
-	cop = ev->event_ptr;
-	pkt = cop->sym->m_src;
-
-	/* If operation was not successful, drop the packet */
+	/* If operation was not successful, free the packet */
 	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
 		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
@@ -904,13 +1017,74 @@ ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	/* Update Ether with port's MAC addresses */
 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
 
+	ipsec_event_pre_forward(pkt, port_id);
+
+	return 0;
+}
+
+static inline void
+ipsec_ev_cryptodev_vector_process(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct eh_event_link_info *links,
+		struct rte_event *ev)
+{
+	struct rte_event_vector *vec = ev->vec;
+	const uint16_t nb_events = 1;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t enqueued;
+	int i, n = 0;
+
+	ev_vector_attr_init(vec);
+	/* Transform cop vec into pkt vec */
+	for (i = 0; i < vec->nb_elem; i++) {
+		/* Get pkt data */
+		cop = vec->ptrs[i];
+		pkt = cop->sym->m_src;
+		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+			continue;
+
+		vec->mbufs[n++] = pkt;
+		ev_vector_attr_update(vec, pkt);
+	}
+
+	if (n == 0) {
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+		return;
+	}
+
+	vec->nb_elem = n;
+	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+			links[0].event_port_id, ev, nb_events, 0);
+	if (enqueued != nb_events) {
+		RTE_LOG_DP(INFO, IPSEC, "Failed to enqueue to tx, ret = %u,"
+				" errno = %i\n", enqueued, rte_errno);
+		free_pkts(vec->mbufs, vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+	}
+}
+
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+		return PKT_DROPPED;
+
 	/* Update event */
 	ev->mbuf = pkt;
 
@@ -1154,6 +1328,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			if (unlikely(ret != PKT_FORWARDED))
 				continue;
 			break;
+		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
+			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
+			continue;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
 				ev.event_type);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                   ` (4 preceding siblings ...)
  2022-08-04 10:36 ` [PATCH 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
@ 2022-08-04 10:36 ` Volodymyr Fialko
  2022-09-21 18:28 ` [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Akhil Goyal
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
  7 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-04 10:36 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj, Volodymyr Fialko

Limit the number of queue pairs to one for event lookaside mode, since
all cores use the same queue pair in this mode.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
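
A minimal sketch of the queue pair count rule this patch introduces
(illustrative only; the helper name nb_qp_for_mode() is hypothetical, the
real logic lives in map_cdev_to_cores_from_config() below):

static uint16_t
nb_qp_for_mode(enum eh_pkt_transfer_mode mode, uint16_t nb_lcore_params,
	       uint16_t max_nb_queue_pairs)
{
	/* Event lookaside mode: all worker cores funnel crypto ops
	 * through the event crypto adapter, so one queue pair, bound
	 * once, is sufficient. */
	if (mode == EH_PKT_TRANSFER_MODE_EVENT)
		return 1;

	/* Poll mode: one queue pair per configured core, capped by
	 * what the device supports. */
	return RTE_MIN(nb_lcore_params, max_nb_queue_pairs);
}
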
 examples/ipsec-secgw/ipsec-secgw.c | 67 +++++++++++++++++-------------
 1 file changed, 37 insertions(+), 30 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 02b1fabaf5..d6b5b73811 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1541,7 +1541,7 @@ add_mapping(const char *str, uint16_t cdev_id,
 }
 
 static int32_t
-add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
+add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 		uint16_t qp, struct lcore_params *params)
 {
 	int32_t ret = 0;
@@ -1597,6 +1597,37 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 	return ret;
 }
 
+static uint16_t
+map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id,
+		const struct rte_cryptodev_info *cdev_info,
+		uint16_t *last_used_lcore_id)
+{
+	uint16_t nb_qp = 0, i = 0, max_nb_qps;
+
+	/* For event lookaside mode all sessions are bound to a single qp.
+	 * It's enough to bind one core, since all cores will share the same qp.
+	 * Event inline mode does not use this functionality.
+	 */
+	if (mode == EH_PKT_TRANSFER_MODE_EVENT) {
+		add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]);
+		return 1;
+	}
+
+	/* Check if there are enough queue pairs for all configured cores */
+	max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs);
+
+	while (nb_qp < max_nb_qps && i < nb_lcore_params) {
+		if (add_cdev_mapping(cdev_info, cdev_id, nb_qp,
+					&lcore_params[*last_used_lcore_id]))
+			nb_qp++;
+		(*last_used_lcore_id)++;
+		*last_used_lcore_id %= nb_lcore_params;
+		i++;
+	}
+
+	return nb_qp;
+}
+
 /* Check if the device is enabled by cryptodev_mask */
 static int
 check_cryptodev_mask(uint8_t cdev_id)
@@ -1608,13 +1639,13 @@ check_cryptodev_mask(uint8_t cdev_id)
 }
 
 static uint16_t
-cryptodevs_init(uint16_t req_queue_num)
+cryptodevs_init(enum eh_pkt_transfer_mode mode)
 {
+	struct rte_hash_parameters params = { 0 };
 	struct rte_cryptodev_config dev_conf;
 	struct rte_cryptodev_qp_conf qp_conf;
-	uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
+	uint16_t idx, qp, total_nb_qps;
 	int16_t cdev_id;
-	struct rte_hash_parameters params = { 0 };
 
 	const uint64_t mseg_flag = multi_seg_required() ?
 				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
@@ -1655,23 +1686,8 @@ cryptodevs_init(uint16_t req_queue_num)
 				cdev_id,
 				rte_cryptodev_get_feature_name(mseg_flag));
 
-		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
-			max_nb_qps = cdev_info.max_nb_queue_pairs;
-		else
-			max_nb_qps = nb_lcore_params;
-
-		qp = 0;
-		i = 0;
-		while (qp < max_nb_qps && i < nb_lcore_params) {
-			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
-						&lcore_params[idx]))
-				qp++;
-			idx++;
-			idx = idx % nb_lcore_params;
-			i++;
-		}
 
-		qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
+		qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx);
 		if (qp == 0)
 			continue;
 
@@ -2985,15 +3001,6 @@ main(int32_t argc, char **argv)
 
 	sess_sz = max_session_size();
 
-	/*
-	 * In event mode request minimum number of crypto queues
-	 * to be reserved equal to number of ports.
-	 */
-	if (eh_conf->mode == EH_PKT_TRANSFER_MODE_EVENT)
-		nb_crypto_qp = rte_eth_dev_count_avail();
-	else
-		nb_crypto_qp = 0;
-
 	/*
 	 * In event lookaside mode request memory for crypto metadata. Should
 	 * be removed once API will no longer require usage of user data in
@@ -3004,7 +3011,7 @@ main(int32_t argc, char **argv)
 	else
 		user_data_sz = 0;
 
-	nb_crypto_qp = cryptodevs_init(nb_crypto_qp);
+	nb_crypto_qp = cryptodevs_init(eh_conf->mode);
 
 	if (nb_bufs_in_pool == 0) {
 		RTE_ETH_FOREACH_DEV(portid) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* RE: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-08-04 10:36 ` [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
@ 2022-08-05  3:26   ` Suanming Mou
  2022-08-05 10:06     ` Volodymyr Fialko
  2022-09-22  5:05   ` Gagandeep Singh
  1 sibling, 1 reply; 27+ messages in thread
From: Suanming Mou @ 2022-08-05  3:26 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj

Hi,

> -----Original Message-----
> From: Volodymyr Fialko <vfialko@marvell.com>
> Sent: Thursday, August 4, 2022 6:36 PM
> To: dev@dpdk.org; Radu Nicolau <radu.nicolau@intel.com>; Akhil Goyal
> <gakhil@marvell.com>
> Cc: jerinj@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> <vfialko@marvell.com>
> Subject: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
> 
> Add base support for lookaside event mode. Events that are coming from ethdev
> will be enqueued to the event crypto adapter, processed and enqueued back to
> ethdev for the transmission.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> ---
>  doc/guides/sample_app_ug/ipsec_secgw.rst |   4 +-
>  examples/ipsec-secgw/ipsec-secgw.c       |   3 +-
>  examples/ipsec-secgw/ipsec.c             |  35 +++-
>  examples/ipsec-secgw/ipsec.h             |   8 +-
>  examples/ipsec-secgw/ipsec_worker.c      | 224 +++++++++++++++++++++--
>  examples/ipsec-secgw/sa.c                |  23 ++-
>  6 files changed, 262 insertions(+), 35 deletions(-)
> 

snip

>  create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, diff --git
> a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-
> secgw/ipsec_worker.c
> index 803157d8ee..2661f0275f 100644
> --- a/examples/ipsec-secgw/ipsec_worker.c
> +++ b/examples/ipsec-secgw/ipsec_worker.c
> @@ -3,6 +3,7 @@
>   * Copyright (C) 2020 Marvell International Ltd.
>   */
>  #include <rte_acl.h>
> +#include <rte_event_crypto_adapter.h>
>  #include <rte_event_eth_tx_adapter.h>
>  #include <rte_lpm.h>
>  #include <rte_lpm6.h>
> @@ -11,6 +12,7 @@
>  #include "ipsec.h"
>  #include "ipsec-secgw.h"
>  #include "ipsec_worker.h"
> +#include "sad.h"
> 
>  #if defined(__ARM_NEON)
>  #include "ipsec_lpm_neon.h"
> @@ -228,6 +230,43 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx
> *sa_ctx,
>  	ip->num = j;
>  }
> 
> +static inline void
> +pkt_l3_len_set(struct rte_mbuf *pkt)
> +{
> +	struct rte_ipv4_hdr *ipv4;
> +	struct rte_ipv6_hdr *ipv6;
> +	size_t l3len, ext_len;
> +	uint32_t l3_type;
> +	int next_proto;
> +	uint8_t *p;
> +
> +	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
> +	if (l3_type == RTE_PTYPE_L3_IPV4) {
> +		ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
> +		pkt->l3_len = ipv4->ihl * 4;
> +	} else if (l3_type & RTE_PTYPE_L3_IPV6) {
> +		ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
> +		l3len = sizeof(struct rte_ipv6_hdr);
> +		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
> +		     l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
> +			p = rte_pktmbuf_mtod(pkt, uint8_t *);
> +			next_proto = ipv6->proto;
> +			while (next_proto != IPPROTO_ESP &&
> +			       l3len < pkt->data_len &&
> +			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
> +						next_proto, &ext_len)) >= 0)
> +				l3len += ext_len;
> +
> +			/* Drop pkt when IPv6 header exceeds first seg size */
> +			if (unlikely(l3len > pkt->data_len)) {
> +				free_pkts(&pkt, 1);
> +				return;
> +			}
> +		}
> +		pkt->l3_len = l3len;
> +	}
> +}
> +
>  static inline uint16_t
>  route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
>  {
> @@ -287,9 +326,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
>  	return RTE_MAX_ETHPORTS;
>  }
> 
> +static inline void
> +crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> +		struct rte_crypto_op *cop[], uint16_t num)
> +{
> +	struct rte_crypto_sym_op *sop;
> +	uint32_t i;
> +
> +	const struct rte_crypto_op unproc_cop = {
> +		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
> +		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
> +	};
> +
> +	for (i = 0; i != num; i++) {
> +		cop[i]->raw = unproc_cop.raw;
> +		sop = cop[i]->sym;
> +		sop->m_src = mb[i];
> +		sop->m_dst = NULL;
> +		__rte_security_attach_session(sop, ss->security.ses);
> +	}
> +}
> +
> +static inline int
> +event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
> +		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
> +{
> +	struct ipsec_mbuf_metadata *priv;
> +	struct rte_ipsec_session *sess;
> +	struct rte_crypto_op *cop;
> +	struct rte_event cev;
> +	int ret;
> +
> +	/* Get IPsec session */
> +	sess = ipsec_get_primary_session(sa);
> +
> +	/* Get pkt private data */
> +	priv = get_priv(pkt);
> +	cop = &priv->cop;
> +
> +	/* Reset crypto operation data */
> +	crypto_op_reset(sess, &pkt, &cop, 1);
> +
> +	/* Update event_ptr with rte_crypto_op */
> +	cev.event = 0;
> +	cev.event_ptr = cop;
> +
> +	/* Enqueue event to crypto adapter */
> +	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
> +			ev_link->event_port_id, &cev, 1);
> +	if (unlikely(ret <= 0)) {
> +		/* pkt will be freed by the caller */
> +		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i
> (errno: %i)\n", ret, rte_errno);
> +		return rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
>  static inline int
>  process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> -		struct rte_event *ev)
> +	const struct eh_event_link_info *ev_link, struct rte_event *ev)
>  {
>  	struct ipsec_sa *sa = NULL;
>  	struct rte_mbuf *pkt;
> @@ -340,7 +437,22 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct
> route_table *rt,
>  			goto drop_pkt_and_exit;
>  		}
>  		break;
> +	case PKT_TYPE_IPSEC_IPV4:
> +	case PKT_TYPE_IPSEC_IPV6:
> +		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
> +		pkt_l3_len_set(pkt);

One small question: if free_pkts() happens inside pkt_l3_len_set(pkt), can the pkt still be used in sad_lookup() below?
Should a return value be added to pkt_l3_len_set() so that the caller knows when the pkt has been freed?

> +
> +		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
> +		sa = ipsec_mask_saptr(sa);
> +		if (unlikely(sa == NULL)) {
> +			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
> +			goto drop_pkt_and_exit;
> +		}
> 
snip

^ permalink raw reply	[flat|nested] 27+ messages in thread

* RE: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-08-05  3:26   ` Suanming Mou
@ 2022-08-05 10:06     ` Volodymyr Fialko
  0 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-08-05 10:06 UTC (permalink / raw)
  To: Suanming Mou, dev, Radu Nicolau, Akhil Goyal
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph

snip
> >  static inline int
> >  process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> > -		struct rte_event *ev)
> > +	const struct eh_event_link_info *ev_link, struct rte_event *ev)
> >  {
> >  	struct ipsec_sa *sa = NULL;
> >  	struct rte_mbuf *pkt;
> > @@ -340,7 +437,22 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx,
> > struct route_table *rt,
> >  			goto drop_pkt_and_exit;
> >  		}
> >  		break;
> > +	case PKT_TYPE_IPSEC_IPV4:
> > +	case PKT_TYPE_IPSEC_IPV6:
> > +		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
> > +		pkt_l3_len_set(pkt);
> 
> One small question, in case free_pkts() happens inside pkt_l3_len_set(pkt),
> can the pkt still be used in sad_lookup() below?
> Should return value be added to pkt_l3_len_set() in case pkt free?
> 
Hi, thank you for noticing this.
The return value should be checked; I'll address this issue in v2.

> > +
> > +		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
> > +		sa = ipsec_mask_saptr(sa);
> > +		if (unlikely(sa == NULL)) {
> > +			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
> > +			goto drop_pkt_and_exit;
> > +		}
> >
snip
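
For reference, a minimal sketch of one way the v2 check could look
(illustrative only, not the actual v2 change; free_pkts() and PKT_DROPPED
come from the surrounding application code):

static inline int
pkt_l3_len_set(struct rte_mbuf *pkt)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	size_t l3len, ext_len;
	uint32_t l3_type;
	int next_proto;
	uint8_t *p;

	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
	if (l3_type == RTE_PTYPE_L3_IPV4) {
		ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
		pkt->l3_len = ipv4->ihl * 4;
	} else if (l3_type & RTE_PTYPE_L3_IPV6) {
		ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
		l3len = sizeof(struct rte_ipv6_hdr);
		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
		    l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
			p = rte_pktmbuf_mtod(pkt, uint8_t *);
			next_proto = ipv6->proto;
			while (next_proto != IPPROTO_ESP &&
			       l3len < pkt->data_len &&
			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
						next_proto, &ext_len)) >= 0)
				l3len += ext_len;

			/* Free the pkt and report failure when the IPv6
			 * header exceeds the first segment size. */
			if (unlikely(l3len > pkt->data_len)) {
				free_pkts(&pkt, 1);
				return -1;
			}
		}
		pkt->l3_len = l3len;
	}

	return 0;
}

The caller would then bail out instead of touching the freed mbuf:

	case PKT_TYPE_IPSEC_IPV4:
	case PKT_TYPE_IPSEC_IPV6:
		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
		if (unlikely(pkt_l3_len_set(pkt) != 0))
			return PKT_DROPPED; /* pkt was already freed */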

^ permalink raw reply	[flat|nested] 27+ messages in thread

* RE: [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                   ` (5 preceding siblings ...)
  2022-08-04 10:36 ` [PATCH 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
@ 2022-09-21 18:28 ` Akhil Goyal
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
  7 siblings, 0 replies; 27+ messages in thread
From: Akhil Goyal @ 2022-09-21 18:28 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, hemant.agrawal
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Volodymyr Fialko,
	Gagandeep Singh

Hi Hemant/Gagan,

Can you review this series? Or else it will be merged in RC1.

Regards,
Akhil
> Subject: [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode
> 
> Add support for lookaside event mode for ipsec-secgw example application.
> 
> Depends-on: Series-24201
> 
> Volodymyr Fialko (6):
>   examples/ipsec-secgw: add event crypto adapter init
>   examples/ipsec-secgw: add queue for event crypto adapter
>   examples/ipsec-secgw: add lookaside event mode
>   examples/ipsec-secgw: add stats for event mode
>   examples/ipsec-secgw: add event vector support for lookaside
>   examples/ipsec-secgw: reduce number of QP for event lookaside
> 
>  doc/guides/sample_app_ug/ipsec_secgw.rst |   7 +-
>  examples/ipsec-secgw/event_helper.c      | 267 +++++++++++--
>  examples/ipsec-secgw/event_helper.h      |   4 +
>  examples/ipsec-secgw/ipsec-secgw.c       | 108 +++---
>  examples/ipsec-secgw/ipsec-secgw.h       |   1 +
>  examples/ipsec-secgw/ipsec.c             |  35 +-
>  examples/ipsec-secgw/ipsec.h             |   8 +-
>  examples/ipsec-secgw/ipsec_worker.c      | 461 ++++++++++++++++++++---
>  examples/ipsec-secgw/sa.c                |  23 +-
>  9 files changed, 783 insertions(+), 131 deletions(-)
> 


^ permalink raw reply	[flat|nested] 27+ messages in thread

* RE: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-08-04 10:36 ` [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  2022-08-05  3:26   ` Suanming Mou
@ 2022-09-22  5:05   ` Gagandeep Singh
  2022-09-22 11:07     ` Volodymyr Fialko
  1 sibling, 1 reply; 27+ messages in thread
From: Gagandeep Singh @ 2022-09-22  5:05 UTC (permalink / raw)
  To: Volodymyr Fialko, dev, Radu Nicolau, Akhil Goyal; +Cc: jerinj, anoobj

Hi Volodymyr,

I found that these patches do not support the "RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND" capability in the ipsec-secgw application. Do you also plan to add this support?

> -----Original Message-----
> From: Volodymyr Fialko <vfialko@marvell.com>
> Sent: Thursday, August 4, 2022 4:06 PM
> To: dev@dpdk.org; Radu Nicolau <radu.nicolau@intel.com>; Akhil Goyal
> <gakhil@marvell.com>
> Cc: jerinj@marvell.com; anoobj@marvell.com; Volodymyr Fialko
> <vfialko@marvell.com>
> Subject: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
> 
> Add base support for lookaside event mode. Events that are coming from
> ethdev will be enqueued to the event crypto adapter, processed and
> enqueued back to ethdev for the transmission.
> 
> Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
> ---
>  doc/guides/sample_app_ug/ipsec_secgw.rst |   4 +-
>  examples/ipsec-secgw/ipsec-secgw.c       |   3 +-
>  examples/ipsec-secgw/ipsec.c             |  35 +++-
>  examples/ipsec-secgw/ipsec.h             |   8 +-
>  examples/ipsec-secgw/ipsec_worker.c      | 224 +++++++++++++++++++++--
>  examples/ipsec-secgw/sa.c                |  23 ++-
>  6 files changed, 262 insertions(+), 35 deletions(-)
> 
> diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst
> b/doc/guides/sample_app_ug/ipsec_secgw.rst
> index 07686d2285..c7b87889f1 100644
> --- a/doc/guides/sample_app_ug/ipsec_secgw.rst
> +++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
> @@ -83,8 +83,8 @@ The application supports two modes of operation: poll
> mode and event mode.
>    every type of event device without affecting existing paths/use cases. The
> worker
>    to be used will be determined by the operating conditions and the
> underlying device
>    capabilities. **Currently the application provides non-burst, internal port
> worker
> -  threads and supports inline protocol only.** It also provides infrastructure
> for
> -  non-internal port however does not define any worker threads.
> +  threads.** It also provides infrastructure for non-internal port
> + however does not  define any worker threads.
> 
>    Event mode also supports event vectorization. The event devices, ethernet
> device
>    pairs which support the capability
> ``RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR`` can diff --git
> a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-
> secgw.c
> index 4ca5936bdf..0bd1f15ae5 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -3121,7 +3121,8 @@ main(int32_t argc, char **argv)
>  		if ((socket_ctx[socket_id].session_pool != NULL) &&
>  			(socket_ctx[socket_id].sa_in == NULL) &&
>  			(socket_ctx[socket_id].sa_out == NULL)) {
> -			sa_init(&socket_ctx[socket_id], socket_id,
> lcore_conf);
> +			sa_init(&socket_ctx[socket_id], socket_id,
> lcore_conf,
> +				eh_conf->mode_params);
>  			sp4_init(&socket_ctx[socket_id], socket_id);
>  			sp6_init(&socket_ctx[socket_id], socket_id);
>  			rt_init(&socket_ctx[socket_id], socket_id); diff --git
> a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index
> 7b7bfff696..030cfe7a82 100644
> --- a/examples/ipsec-secgw/ipsec.c
> +++ b/examples/ipsec-secgw/ipsec.c
> @@ -6,6 +6,7 @@
>  #include <netinet/ip.h>
> 
>  #include <rte_branch_prediction.h>
> +#include <rte_event_crypto_adapter.h>
>  #include <rte_log.h>
>  #include <rte_crypto.h>
>  #include <rte_security.h>
> @@ -56,14 +57,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct
> rte_security_ipsec_xform *ipsec)
> 
>  int
>  create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
> -	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
> -	struct rte_ipsec_session *ips)
> +	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
> +	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
>  {
>  	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
> +	enum rte_crypto_op_sess_type sess_type;
>  	struct rte_cryptodev_info cdev_info;
> +	enum rte_crypto_op_type op_type;
>  	unsigned long cdev_id_qp = 0;
> -	struct cdev_key key = { 0 };
>  	struct ipsec_ctx *ipsec_ctx;
> +	struct cdev_key key = { 0 };
> +	void *sess = NULL;
>  	uint32_t lcore_id;
>  	int32_t ret = 0;
> 
> @@ -159,6 +163,10 @@ create_lookaside_session(struct ipsec_ctx
> *ipsec_ctx_lcore[],
>  				return -1;
>  			}
>  			ips->security.ctx = ctx;
> +
> +			sess = ips->security.ses;
> +			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
>  		} else {
>  			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
>  			return -1;
> @@ -183,6 +191,27 @@ create_lookaside_session(struct ipsec_ctx
> *ipsec_ctx_lcore[],
>  		rte_cryptodev_info_get(cdev_id, &cdev_info);
>  	}
> 
> +	/* Setup meta data required by event crypto adapter */
> +	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
> +		union rte_event_crypto_metadata m_data = {0};
> +		const struct eventdev_params *eventdev_conf;
> +
> +		eventdev_conf = &(em_conf->eventdev_config[0]);
> +
> +		/* Fill in response information */
> +		m_data.response_info.sched_type = em_conf-
> >ext_params.sched_type;
> +		m_data.response_info.op = RTE_EVENT_OP_NEW;
> +		m_data.response_info.queue_id = eventdev_conf-
> >ev_cpt_queue_id;
> +
> +		/* Fill in request information */
> +		m_data.request_info.cdev_id = cdev_id;
> +		m_data.request_info.queue_pair_id = 0;
> +
> +		/* Attach meta info to session */
> +		rte_cryptodev_session_event_mdata_set(cdev_id, sess,
> op_type,
> +				sess_type, &m_data, sizeof(m_data));
> +	}
> +
>  	return 0;
>  }
> 
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index 2005ae8fec..5ef63e8fc4 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -14,6 +14,7 @@
>  #include <rte_flow.h>
>  #include <rte_ipsec.h>
> 
> +#include "event_helper.h"
>  #include "ipsec-secgw.h"
> 
>  #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
> @@ -424,7 +425,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi,
> int inbound);
> 
>  void
>  sa_init(struct socket_ctx *ctx, int32_t socket_id,
> -		struct lcore_conf *lcore_conf);
> +	struct lcore_conf *lcore_conf,
> +	const struct eventmode_conf *em_conf);
> 
>  void
>  rt_init(struct socket_ctx *ctx, int32_t socket_id); @@ -441,8 +443,8 @@
> enqueue_cop_burst(struct cdev_qp *cqp);
> 
>  int
>  create_lookaside_session(struct ipsec_ctx *ipsec_ctx[],
> -	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
> -	struct rte_ipsec_session *ips);
> +	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
> +	struct ipsec_sa *sa, struct rte_ipsec_session *ips);
> 
>  int
>  create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa, diff --
> git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-
> secgw/ipsec_worker.c
> index 803157d8ee..2661f0275f 100644
> --- a/examples/ipsec-secgw/ipsec_worker.c
> +++ b/examples/ipsec-secgw/ipsec_worker.c
> @@ -3,6 +3,7 @@
>   * Copyright (C) 2020 Marvell International Ltd.
>   */
>  #include <rte_acl.h>
> +#include <rte_event_crypto_adapter.h>
>  #include <rte_event_eth_tx_adapter.h>
>  #include <rte_lpm.h>
>  #include <rte_lpm6.h>
> @@ -11,6 +12,7 @@
>  #include "ipsec.h"
>  #include "ipsec-secgw.h"
>  #include "ipsec_worker.h"
> +#include "sad.h"
> 
>  #if defined(__ARM_NEON)
>  #include "ipsec_lpm_neon.h"
> @@ -228,6 +230,43 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx
> *sa_ctx,
>  	ip->num = j;
>  }
> 
> +static inline void
> +pkt_l3_len_set(struct rte_mbuf *pkt)
> +{
> +	struct rte_ipv4_hdr *ipv4;
> +	struct rte_ipv6_hdr *ipv6;
> +	size_t l3len, ext_len;
> +	uint32_t l3_type;
> +	int next_proto;
> +	uint8_t *p;
> +
> +	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
> +	if (l3_type == RTE_PTYPE_L3_IPV4) {
> +		ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
> +		pkt->l3_len = ipv4->ihl * 4;
> +	} else if (l3_type & RTE_PTYPE_L3_IPV6) {
> +		ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
> +		l3len = sizeof(struct rte_ipv6_hdr);
> +		if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
> +		     l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
> +			p = rte_pktmbuf_mtod(pkt, uint8_t *);
> +			next_proto = ipv6->proto;
> +			while (next_proto != IPPROTO_ESP &&
> +			       l3len < pkt->data_len &&
> +			       (next_proto = rte_ipv6_get_next_ext(p + l3len,
> +						next_proto, &ext_len)) >= 0)
> +				l3len += ext_len;
> +
> +			/* Drop pkt when IPv6 header exceeds first seg size
> */
> +			if (unlikely(l3len > pkt->data_len)) {
> +				free_pkts(&pkt, 1);
> +				return;
> +			}
> +		}
> +		pkt->l3_len = l3len;
> +	}
> +}
> +
>  static inline uint16_t
>  route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
>  {
> @@ -287,9 +326,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
>  	return RTE_MAX_ETHPORTS;
>  }
> 
> +static inline void
> +crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
> +		struct rte_crypto_op *cop[], uint16_t num)
> +{
> +	struct rte_crypto_sym_op *sop;
> +	uint32_t i;
> +
> +	const struct rte_crypto_op unproc_cop = {
> +		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
> +		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
> +	};
> +
> +	for (i = 0; i != num; i++) {
> +		cop[i]->raw = unproc_cop.raw;
> +		sop = cop[i]->sym;
> +		sop->m_src = mb[i];
> +		sop->m_dst = NULL;
> +		__rte_security_attach_session(sop, ss->security.ses);
> +	}
> +}
> +
> +static inline int
> +event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf
> *pkt,
> +		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
> {
> +	struct ipsec_mbuf_metadata *priv;
> +	struct rte_ipsec_session *sess;
> +	struct rte_crypto_op *cop;
> +	struct rte_event cev;
> +	int ret;
> +
> +	/* Get IPsec session */
> +	sess = ipsec_get_primary_session(sa);
> +
> +	/* Get pkt private data */
> +	priv = get_priv(pkt);
> +	cop = &priv->cop;
> +
> +	/* Reset crypto operation data */
> +	crypto_op_reset(sess, &pkt, &cop, 1);
> +
> +	/* Update event_ptr with rte_crypto_op */
> +	cev.event = 0;
> +	cev.event_ptr = cop;
> +
> +	/* Enqueue event to crypto adapter */
> +	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
> +			ev_link->event_port_id, &cev, 1);
> +	if (unlikely(ret <= 0)) {
> +		/* pkt will be freed by the caller */
> +		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i
> (errno: %i)\n", ret, rte_errno);
> +		return rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
>  static inline int
>  process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> -		struct rte_event *ev)
> +	const struct eh_event_link_info *ev_link, struct rte_event *ev)
>  {
>  	struct ipsec_sa *sa = NULL;
>  	struct rte_mbuf *pkt;
> @@ -340,7 +437,22 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx,
> struct route_table *rt,
>  			goto drop_pkt_and_exit;
>  		}
>  		break;
> +	case PKT_TYPE_IPSEC_IPV4:
> +	case PKT_TYPE_IPSEC_IPV6:
> +		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
> +		pkt_l3_len_set(pkt);
> +
> +		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
> +		sa = ipsec_mask_saptr(sa);
> +		if (unlikely(sa == NULL)) {
> +			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
> +			goto drop_pkt_and_exit;
> +		}
> 
> +		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
> +			goto drop_pkt_and_exit;
> +
> +		return PKT_POSTED;
>  	default:
>  		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type
> = %d\n",
>  			   type);
> @@ -389,7 +501,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx,
> struct route_table *rt,
> 
>  static inline int
>  process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
> -		struct rte_event *ev)
> +		const struct eh_event_link_info *ev_link, struct rte_event
> *ev)
>  {
>  	struct rte_ipsec_session *sess;
>  	struct sa_ctx *sa_ctx;
> @@ -456,11 +568,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx,
> struct route_table *rt,
>  	/* Get IPsec session */
>  	sess = ipsec_get_primary_session(sa);
> 
> -	/* Allow only inline protocol for now */
> -	if (unlikely(sess->type !=
> RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
> -		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
> -		goto drop_pkt_and_exit;
> -	}
> +	/* Determine protocol type */
> +	if (sess->type ==
> RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
> +		goto lookaside;
> 
>  	rte_security_set_pkt_metadata(sess->security.ctx,
>  				      sess->security.ses, pkt, NULL);
> @@ -482,6 +592,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
>  	ipsec_event_pre_forward(pkt, port_id);
>  	return PKT_FORWARDED;
> 
> +lookaside:
> +	/* prepare pkt - advance start to L3 */
> +	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
> +
> +	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
> +		return PKT_POSTED;
> +
>  drop_pkt_and_exit:
>  	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
>  	rte_pktmbuf_free(pkt);
> @@ -737,6 +854,67 @@ ipsec_ev_vector_drv_mode_process(struct
> eh_event_link_info *links,
>  		rte_mempool_put(rte_mempool_from_obj(vec), vec);
>  }
> 
> +static inline int
> +ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr
> *lconf,
> +			   struct rte_event *ev)
> +{
> +	struct rte_ether_hdr *ethhdr;
> +	struct rte_crypto_op *cop;
> +	struct rte_mbuf *pkt;
> +	uint16_t port_id;
> +	struct ip *ip;
> +
> +	/* Get pkt data */
> +	cop = ev->event_ptr;
> +	pkt = cop->sym->m_src;
> +
> +	/* If operation was not successful, drop the packet */
> +	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
> +		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
> +		free_pkts(&pkt, 1);
> +		return PKT_DROPPED;
> +	}
> +
> +	ip = rte_pktmbuf_mtod(pkt, struct ip *);
> +
> +	/* Prepend Ether layer */
> +	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt,
> +RTE_ETHER_HDR_LEN);
> +
> +	/* Route pkt and update required fields */
> +	if (ip->ip_v == IPVERSION) {
> +		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
> +		pkt->l3_len = sizeof(struct ip);
> +		pkt->l2_len = RTE_ETHER_HDR_LEN;
> +
> +		ethhdr->ether_type =
> rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
> +
> +		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
> +	} else {
> +		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
> +		pkt->l3_len = sizeof(struct ip6_hdr);
> +		pkt->l2_len = RTE_ETHER_HDR_LEN;
> +
> +		ethhdr->ether_type =
> rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
> +
> +		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
> +	}
> +
> +	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> +		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed
> packet\n");
> +		free_pkts(&pkt, 1);
> +		return PKT_DROPPED;
> +	}
> +
> +	/* Update Ether with port's MAC addresses */
> +	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct
> rte_ether_addr));
> +	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct
> +rte_ether_addr));
> +
> +	/* Update event */
> +	ev->mbuf = pkt;
> +
> +	return PKT_FORWARDED;
> +}
> +
>  /*
>   * Event mode exposes various operating modes depending on the
>   * capabilities of the event device and the operating mode @@ -924,6
>   * capabilities of the event device and the operating mode
> @@ -924,6 +1102,14 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
> "
>  		"app mode) on lcore %d\n", lcore_id);
> 
> +	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
> +	if (ret != 0) {
> +		RTE_LOG(ERR, IPSEC,
> +			"SAD cache init on lcore %u, failed with code: %d\n",
> +			lcore_id, ret);
> +		return;
> +	}
> +
>  	/* Check if it's single link */
>  	if (nb_links != 1) {
>  		RTE_LOG(INFO, IPSEC,
> @@ -950,6 +1136,20 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct
> eh_event_link_info *links,
>  			ipsec_ev_vector_process(&lconf, links, &ev);
>  			continue;
>  		case RTE_EVENT_TYPE_ETHDEV:
> +			if (is_unprotected_port(ev.mbuf->port))
> +				ret =
> process_ipsec_ev_inbound(&lconf.inbound,
> +								&lconf.rt,
> links, &ev);
> +			else
> +				ret =
> process_ipsec_ev_outbound(&lconf.outbound,
> +								&lconf.rt,
> links, &ev);
> +			if (ret != 1)
> +				/* The pkt has been dropped or posted */
> +				continue;
> +			break;
> +		case RTE_EVENT_TYPE_CRYPTODEV:
> +			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
> +			if (unlikely(ret != PKT_FORWARDED))
> +				continue;
>  			break;
>  		default:
>  			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
> @@ -957,16 +1157,6 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
>  			continue;
>  		}
> 
> -		if (is_unprotected_port(ev.mbuf->port))
> -			ret = process_ipsec_ev_inbound(&lconf.inbound,
> -							&lconf.rt, &ev);
> -		else
> -			ret = process_ipsec_ev_outbound(&lconf.outbound,
> -							&lconf.rt, &ev);
> -		if (ret != 1)
> -			/* The pkt has been dropped */
> -			continue;
> -
>  		/*
>  		 * Since tx internal port is available, events can be
>  		 * directly enqueued to the adapter and it would be diff --git
> a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index
> 5dca578790..7a0c528f75 100644
> --- a/examples/ipsec-secgw/sa.c
> +++ b/examples/ipsec-secgw/sa.c
> @@ -1235,7 +1235,8 @@ static int
>  sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
>  		uint32_t nb_entries, uint32_t inbound,
>  		struct socket_ctx *skt_ctx,
> -		struct ipsec_ctx *ips_ctx[])
> +		struct ipsec_ctx *ips_ctx[],
> +		const struct eventmode_conf *em_conf)
>  {
>  	struct ipsec_sa *sa;
>  	uint32_t i, idx;
> @@ -1408,7 +1409,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct
> ipsec_sa entries[],
>  				return -EINVAL;
>  			}
>  		} else {
> -			rc = create_lookaside_session(ips_ctx, skt_ctx, sa,
> ips);
> +			rc = create_lookaside_session(ips_ctx, skt_ctx,
> +						      em_conf, sa, ips);
>  			if (rc != 0) {
>  				RTE_LOG(ERR, IPSEC_ESP,
>  					"create_lookaside_session()
> failed\n"); @@ -1431,17 +1433,19 @@ sa_add_rules(struct sa_ctx *sa_ctx,
> const struct ipsec_sa entries[],  static inline int  sa_out_add_rules(struct
> sa_ctx *sa_ctx, const struct ipsec_sa entries[],
>  		uint32_t nb_entries, struct socket_ctx *skt_ctx,
> -		struct ipsec_ctx *ips_ctx[])
> +		struct ipsec_ctx *ips_ctx[],
> +		const struct eventmode_conf *em_conf)
>  {
> -	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
> +	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx,
> +em_conf);
>  }
> 
>  static inline int
>  sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
>  		uint32_t nb_entries, struct socket_ctx *skt_ctx,
> -		struct ipsec_ctx *ips_ctx[])
> +		struct ipsec_ctx *ips_ctx[],
> +		const struct eventmode_conf *em_conf)
>  {
> -	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
> +	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx,
> +em_conf);
>  }
> 
>  /*
> @@ -1673,7 +1677,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi,
> int inbound)
> 
>  void
>  sa_init(struct socket_ctx *ctx, int32_t socket_id,
> -		struct lcore_conf *lcore_conf)
> +	struct lcore_conf *lcore_conf,
> +	const struct eventmode_conf *em_conf)
>  {
>  	int32_t rc;
>  	const char *name;
> @@ -1705,7 +1710,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
>  			rte_exit(EXIT_FAILURE, "failed to init SAD\n");
>  		RTE_LCORE_FOREACH(lcore_id)
>  			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
> -		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
> +		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx,
> +em_conf);
> 
>  		if (app_sa_prm.enable != 0) {
>  			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in, @@ -
> 1727,7 +1732,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
> 
>  		RTE_LCORE_FOREACH(lcore_id)
>  			ipsec_ctx[lcore_id] =
> &lcore_conf[lcore_id].outbound;
> -		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx,
> ipsec_ctx);
> +		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx,
> ipsec_ctx,
> +em_conf);
> 
>  		if (app_sa_prm.enable != 0) {
>  			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* RE: [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-09-22  5:05   ` Gagandeep Singh
@ 2022-09-22 11:07     ` Volodymyr Fialko
  0 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-09-22 11:07 UTC (permalink / raw)
  To: Gagandeep Singh, dev, Radu Nicolau, Akhil Goyal
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph


> -----Original Message-----
> From: Gagandeep Singh <G.Singh@nxp.com>
> Sent: Thursday, September 22, 2022 7:05 AM
> To: Volodymyr Fialko <vfialko@marvell.com>; dev@dpdk.org; Radu Nicolau
> <radu.nicolau@intel.com>; Akhil Goyal <gakhil@marvell.com>
> Cc: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Anoob Joseph
> <anoobj@marvell.com>
> Subject: [EXT] RE: [PATCH 3/6] examples/ipsec-secgw: add lookaside event
> mode
> 
> External Email
> 
> ----------------------------------------------------------------------
> Hi Volodymyr,
> 
> I found these patches do not support
> "RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND"
> capability in ipsec-secgw application. Will you also plan to add this support?
> 

Hi Gagandeep,

Yes. Currently we are adding support without QP_EV_BIND. We would like to target that separately after the initial support is merged.
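
For context, a rough sketch of how the two paths could be selected once
that support is added (illustrative only, not code from this series; the
APIs shown exist in DPDK, and sess/op_type/sess_type/m_data are the
variables used in create_lookaside_session()):

	uint32_t cap;

	if (rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap) < 0)
		return -1;

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		/* Response event info can be bound to the queue pair
		 * itself, so per-session metadata would not be needed. */
	} else {
		/* Path taken by this series: attach request/response
		 * info to every session. */
		rte_cryptodev_session_event_mdata_set(cdev_id, sess,
				op_type, sess_type, &m_data, sizeof(m_data));
	}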

^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 0/6] examples/ipsec-secgw: add lookaside event mode
  2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                   ` (6 preceding siblings ...)
  2022-09-21 18:28 ` [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Akhil Goyal
@ 2022-10-10 12:30 ` Volodymyr Fialko
  2022-10-10 12:30   ` [PATCH v2 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
                     ` (6 more replies)
  7 siblings, 7 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:30 UTC (permalink / raw)
  To: dev; +Cc: jerinj, gakhil, anoobj, suanmingm, Volodymyr Fialko

Add support for lookaside event mode for ipsec-secgw example application.

* Changes since v1
- Resolve an issue with an IPv6 packet being freed on error and used afterwards
- Rebase on top of dpdk-next-crypto
- Update release note

Volodymyr Fialko (6):
  examples/ipsec-secgw: add event crypto adapter init
  examples/ipsec-secgw: add queue for event crypto adapter
  examples/ipsec-secgw: add lookaside event mode
  examples/ipsec-secgw: add stats for event mode
  examples/ipsec-secgw: add event vector support for lookaside
  examples/ipsec-secgw: reduce number of QP for event lookaside

 doc/guides/rel_notes/release_22_11.rst   |   5 +
 doc/guides/sample_app_ug/ipsec_secgw.rst |   7 +-
 examples/ipsec-secgw/event_helper.c      | 267 +++++++++++--
 examples/ipsec-secgw/event_helper.h      |   4 +
 examples/ipsec-secgw/ipsec-secgw.c       |  88 +++--
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec.c             |  35 +-
 examples/ipsec-secgw/ipsec.h             |   8 +-
 examples/ipsec-secgw/ipsec_worker.c      | 476 ++++++++++++++++++++---
 examples/ipsec-secgw/sa.c                |  37 +-
 10 files changed, 800 insertions(+), 128 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 1/6] examples/ipsec-secgw: add event crypto adapter init
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
@ 2022-10-10 12:30   ` Volodymyr Fialko
  2022-10-10 12:30   ` [PATCH v2 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:30 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Create, configure and start an event crypto adapter. This adapter will
be used in lookaside event mode processing.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
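
As context for the RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD check
below: in forward mode the workers submit crypto ops through the adapter
instead of rte_cryptodev_enqueue_burst(). A minimal sketch of the submit
side (eventdev_id/event_port_id stand in for the event link fields used by
the later patches in this series):

	struct rte_event cev;

	cev.event = 0;
	cev.event_ptr = cop;	/* rte_crypto_op with event metadata attached */

	/* The adapter forwards the op to the crypto queue pair and, on
	 * completion, injects a RTE_EVENT_TYPE_CRYPTODEV event on the
	 * queue configured in the session's response_info. */
	if (rte_event_crypto_adapter_enqueue(eventdev_id, event_port_id,
			&cev, 1) != 1)
		free_pkts(&pkt, 1);	/* enqueue failed, drop the packet */
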
 examples/ipsec-secgw/event_helper.c | 144 ++++++++++++++++++++++++++++
 examples/ipsec-secgw/event_helper.h |   2 +
 examples/ipsec-secgw/ipsec-secgw.c  |  27 +++---
 3 files changed, 161 insertions(+), 12 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 22b1760949..30a1f253c8 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -4,8 +4,10 @@
 #include <stdlib.h>
 
 #include <rte_bitmap.h>
+#include <rte_cryptodev.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_malloc.h>
@@ -744,6 +746,126 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 	return 0;
 }
 
+static int
+eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_port_conf port_conf = {0};
+	struct eventdev_params *eventdev_config;
+	uint8_t eventdev_id, cdev_id, n;
+	uint32_t cap;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	/*
+	 * More than one eventdev is not supported;
+	 * all event crypto adapters will be assigned to one eventdev.
+	 */
+	RTE_ASSERT(em_conf->nb_eventdev == 1);
+
+	/* Get event device configuration */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_id = eventdev_config->eventdev_id;
+
+	n = rte_cryptodev_count();
+
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		/* Check event's crypto capabilities */
+		ret = rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event device's crypto capabilities %d", ret);
+			return ret;
+		}
+
+		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) {
+			EH_LOG_ERR("Event crypto adapter does not support forward mode!");
+			return -EINVAL;
+		}
+
+		/* Create event crypto adapter */
+
+		/* Get default configuration of event dev */
+		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event dev info %d", ret);
+			return ret;
+		}
+
+		/* Setup port conf */
+		port_conf.new_event_threshold =
+				evdev_default_conf.max_num_events;
+		port_conf.dequeue_depth =
+				evdev_default_conf.max_event_port_dequeue_depth;
+		port_conf.enqueue_depth =
+				evdev_default_conf.max_event_port_enqueue_depth;
+
+		/* Create adapter */
+		ret = rte_event_crypto_adapter_create(cdev_id, eventdev_id,
+				&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to create event crypto adapter %d", ret);
+			return ret;
+		}
+
+		/* Add crypto queue pairs to event crypto adapter */
+		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
+				-1, /* add all the pre-configured queue pairs to the instance */
+				NULL);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_start_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_start(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to start event crypto device %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_stop_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_stop(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to stop event crypto device %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int
 eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
 				uint8_t ev_dev_id, uint8_t ethdev_id)
@@ -1697,6 +1819,13 @@ eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Setup event crypto adapter */
+	ret = eh_initialize_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to initialize event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Setup Rx adapter */
 	ret = eh_initialize_rx_adapter(em_conf);
 	if (ret < 0) {
@@ -1718,6 +1847,14 @@ eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Start event crypto adapter */
+	ret = eh_start_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to start event crypto dev %d", ret);
+		return ret;
+	}
+
+
 	/* Start eth devices after setting up adapter */
 	RTE_ETH_FOREACH_DEV(port_id) {
 
@@ -1788,6 +1925,13 @@ eh_devs_uninit(struct eh_conf *conf)
 		}
 	}
 
+	/* Stop event crypto adapter */
+	ret = eh_stop_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to stop event crypto dev %d", ret);
+		return ret;
+	}
+
 	/* Stop and release event devices */
 	for (i = 0; i < em_conf->nb_eventdev; i++) {
 
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index f3cbe57cb3..4b26dc8fc2 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -185,6 +185,8 @@ struct eventmode_conf {
 		/**< Max vector timeout in nanoseconds */
 	uint64_t vector_pool_sz;
 		/**< Vector pool size */
+	bool enable_event_crypto_adapter;
+		/**< Enables event crypto adapter related configuration */
 };
 
 /**
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bcf2dfa6d8..50b0cf158a 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -44,6 +44,7 @@
 #include <rte_cryptodev.h>
 #include <rte_security.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_ip.h>
 #include <rte_ip_frag.h>
 #include <rte_alarm.h>
@@ -2098,8 +2099,8 @@ session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
 	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
 			CDEV_MP_CACHE_MULTIPLIER);
 	sess_mp = rte_cryptodev_sym_session_pool_create(
-			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
-			socket_id);
+			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
+			0, socket_id);
 	ctx->session_pool = sess_mp;
 
 	if (ctx->session_pool == NULL)
@@ -2378,7 +2379,8 @@ signal_handler(int signum)
 }
 
 static void
-ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa,
+		struct eventmode_conf *em_conf)
 {
 	struct rte_ipsec_session *ips;
 	int32_t i;
@@ -2388,9 +2390,11 @@ ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
 
 	for (i = 0; i < nb_sa; i++) {
 		ips = ipsec_get_primary_session(&sa[i]);
-		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
-			rte_exit(EXIT_FAILURE, "Event mode supports only "
-				 "inline protocol sessions\n");
+		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+			em_conf->enable_event_crypto_adapter = true;
+		else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+			rte_exit(EXIT_FAILURE, "Event mode supports inline "
+				 "and lookaside protocol sessions\n");
 	}
 
 }
@@ -2423,13 +2427,12 @@ check_event_mode_params(struct eh_conf *eh_conf)
 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
 
 	/*
-	 * Event mode currently supports only inline protocol sessions.
-	 * If there are other types of sessions configured then exit with
-	 * error.
+	 * Event mode currently supports inline and lookaside protocol
+	 * sessions. If there are other types of sessions configured then exit
+	 * with error.
 	 */
-	ev_mode_sess_verify(sa_in, nb_sa_in);
-	ev_mode_sess_verify(sa_out, nb_sa_out);
-
+	ev_mode_sess_verify(sa_in, nb_sa_in, em_conf);
+	ev_mode_sess_verify(sa_out, nb_sa_out, em_conf);
 
 	/* Option --config does not apply to event mode */
 	if (nb_lcore_params > 0) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 2/6] examples/ipsec-secgw: add queue for event crypto adapter
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
  2022-10-10 12:30   ` [PATCH v2 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
@ 2022-10-10 12:30   ` Volodymyr Fialko
  2022-10-10 12:30   ` [PATCH v2 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:30 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add a separate event queue for event crypto adapter processing, to
resolve queue contention between new and already processed events.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
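
The resulting default event queue layout, for N eth ports with Rx/Tx
internal ports (an illustration of the code below, not part of the patch):

	/*
	 * queues 0 .. N-1 : Rx adapter queues, one per eth port
	 * queue  N        : Tx queue (RTE_SCHED_TYPE_ATOMIC)
	 * queue  N+1      : event crypto adapter queue, given
	 *                   RTE_EVENT_DEV_PRIORITY_HIGHEST so that already
	 *                   processed crypto events are drained before new
	 *                   Rx events are scheduled
	 */
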
 examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  2 +
 2 files changed, 71 insertions(+), 26 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 30a1f253c8..90c5d716ff 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -19,6 +19,8 @@
 #define DEFAULT_VECTOR_SIZE  16
 #define DEFAULT_VECTOR_TMO   102400
 
+#define INVALID_EV_QUEUE_ID -1
+
 static volatile bool eth_core_running;
 
 static int
@@ -153,11 +155,10 @@ eh_dev_has_burst_mode(uint8_t dev_id)
 }
 
 static int
-eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+eh_set_nb_eventdev(struct eventmode_conf *em_conf)
 {
-	int lcore_count, nb_eventdev, nb_eth_dev, ret;
 	struct eventdev_params *eventdev_config;
-	struct rte_event_dev_info dev_info;
+	int nb_eventdev;
 
 	/* Get the number of event devices */
 	nb_eventdev = rte_event_dev_count();
@@ -172,6 +173,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Set event dev id */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_config->eventdev_id = 0;
+
+	/* Update the number of event devices */
+	em_conf->nb_eventdev = 1;
+
+	return 0;
+}
+
+static int
+eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+{
+	int lcore_count, nb_eth_dev, ret;
+	struct eventdev_params *eventdev_config;
+	struct rte_event_dev_info dev_info;
+
 	/* Get the number of eth devs */
 	nb_eth_dev = rte_eth_dev_count_avail();
 	if (nb_eth_dev == 0) {
@@ -199,15 +217,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	eventdev_config = &(em_conf->eventdev_config[0]);
 
 	/* Save number of queues & ports available */
-	eventdev_config->eventdev_id = 0;
-	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
+	eventdev_config->nb_eventqueue = nb_eth_dev;
 	eventdev_config->nb_eventport = dev_info.max_event_ports;
 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-	/* Check if there are more queues than required */
-	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
-		/* One queue is reserved for Tx */
-		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+	/* One queue is reserved for Tx */
+	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
+	if (eventdev_config->all_internal_ports) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->tx_queue_id =
+			eventdev_config->nb_eventqueue++;
+	}
+
+	/* One queue is reserved for event crypto adapter */
+	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
+	if (em_conf->enable_event_crypto_adapter) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->ev_cpt_queue_id =
+			eventdev_config->nb_eventqueue++;
 	}
 
 	/* Check if there are more ports than required */
@@ -216,9 +249,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventport = lcore_count;
 	}
 
-	/* Update the number of event devices */
-	em_conf->nb_eventdev++;
-
 	return 0;
 }
 
@@ -247,15 +277,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf)
 
 	/*
 	 * If Rx & Tx internal ports are supported by all event devices then
-	 * eth cores won't be required. Override the eth core mask requested
-	 * and decrement number of event queues by one as it won't be needed
-	 * for Tx.
+	 * eth cores won't be required. Override the eth core mask requested.
 	 */
-	if (all_internal_ports) {
+	if (all_internal_ports)
 		rte_bitmap_reset(em_conf->eth_core_mask);
-		for (i = 0; i < em_conf->nb_eventdev; i++)
-			em_conf->eventdev_config[i].nb_eventqueue--;
-	}
 }
 
 static int
@@ -372,6 +397,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 			eventdev_config->nb_eventqueue :
 			eventdev_config->nb_eventqueue - 1;
 
+	/* Reserve one queue for event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter)
+		nb_eventqueue--;
+
 	/*
 	 * Map all queues of eth device (port) to an event queue. If there
 	 * are more event queues than eth ports then create 1:1 mapping.
@@ -543,14 +572,18 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 	 * and initialize the config with all ports & queues available
 	 */
 	if (em_conf->nb_eventdev == 0) {
+		ret = eh_set_nb_eventdev(em_conf);
+		if (ret != 0)
+			return ret;
+		eh_do_capability_check(em_conf);
 		ret = eh_set_default_conf_eventdev(em_conf);
 		if (ret != 0)
 			return ret;
+	} else {
+		/* Perform capability check for the selected event devices */
+		eh_do_capability_check(em_conf);
 	}
 
-	/* Perform capability check for the selected event devices */
-	eh_do_capability_check(em_conf);
-
 	/*
 	 * Check if links are specified. Else generate a default config for
 	 * the event ports used.
@@ -596,8 +629,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
-	uint8_t i, j;
-	int ret;
+	int ret, j;
+	uint8_t i;
 
 	for (i = 0; i < nb_eventdev; i++) {
 
@@ -659,14 +692,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 			 * stage if event device does not have internal
 			 * ports. This will be an atomic queue.
 			 */
-			if (!eventdev_config->all_internal_ports &&
-			    j == nb_eventqueue-1) {
+			if (j == eventdev_config->tx_queue_id) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
 				eventq_conf.schedule_type =
 					em_conf->ext_params.sched_type;
 			}
+			/*
+			 * Give the event crypto device's queue higher priority than Rx queues. This
+			 * will allow crypto events to be processed with the highest priority.
+			 */
+			if (j == eventdev_config->ev_cpt_queue_id) {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_HIGHEST;
+			} else {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_NORMAL;
+			}
 
 			/* Set max atomic flows to 1024 */
 			eventq_conf.nb_atomic_flows = 1024;
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4b26dc8fc2..af5cfcf794 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -88,6 +88,8 @@ struct eventdev_params {
 	uint8_t nb_eventport;
 	uint8_t ev_queue_mode;
 	uint8_t all_internal_ports;
+	int tx_queue_id;
+	int ev_cpt_queue_id;
 };
 
 /**
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
  2022-10-10 12:30   ` [PATCH v2 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
  2022-10-10 12:30   ` [PATCH v2 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
@ 2022-10-10 12:30   ` Volodymyr Fialko
  2022-10-10 12:31   ` [PATCH v2 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:30 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add base support for lookaside event mode. Events coming from ethdev
will be enqueued to the event crypto adapter, processed, and enqueued
back to ethdev for transmission.
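
For illustration only (not part of the patch; the helper name below is
invented), a minimal sketch of how a worker hands a prepared crypto op
to the adapter in this flow:

    #include <rte_crypto.h>
    #include <rte_errno.h>
    #include <rte_event_crypto_adapter.h>
    #include <rte_eventdev.h>

    /* Submit one prepared crypto op to the event crypto adapter
     * (OP_FORWARD mode). Request/response routing is taken from the
     * event metadata attached to the session beforehand with
     * rte_cryptodev_session_event_mdata_set().
     */
    static inline int
    submit_cop_to_adapter(uint8_t evdev_id, uint8_t port_id,
                          struct rte_crypto_op *cop)
    {
        struct rte_event ev = {0};

        ev.event_ptr = cop;
        if (rte_event_crypto_adapter_enqueue(evdev_id, port_id,
                                             &ev, 1) != 1)
            return -rte_errno; /* caller keeps ownership of the mbuf */
        return 0;
    }

Once processed, the adapter injects an RTE_EVENT_TYPE_CRYPTODEV event
back into the worker's queue, where cop->sym->m_src recovers the mbuf.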

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/rel_notes/release_22_11.rst   |   5 +
 doc/guides/sample_app_ug/ipsec_secgw.rst |   4 +-
 examples/ipsec-secgw/ipsec-secgw.c       |   3 +-
 examples/ipsec-secgw/ipsec.c             |  35 +++-
 examples/ipsec-secgw/ipsec.h             |   8 +-
 examples/ipsec-secgw/ipsec_worker.c      | 241 +++++++++++++++++++++--
 examples/ipsec-secgw/sa.c                |  37 ++--
 7 files changed, 292 insertions(+), 41 deletions(-)

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index b0b981578b..f1bfba2af6 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -218,6 +218,11 @@ New Features
   and appears as a promising alternative to traditional approaches
   such as packet sampling.
 
+* **Updated ipsec-secgw sample application.**
+
+  Added support for lookaside sessions in event mode.
+  See the :doc:`../sample_app_ug/ipsec_secgw` for more details.
+
 
 Removed Items
 -------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 07686d2285..c7b87889f1 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -83,8 +83,8 @@ The application supports two modes of operation: poll mode and event mode.
   every type of event device without affecting existing paths/use cases. The worker
   to be used will be determined by the operating conditions and the underlying device
   capabilities. **Currently the application provides non-burst, internal port worker
-  threads and supports inline protocol only.** It also provides infrastructure for
-  non-internal port however does not define any worker threads.
+  threads.** It also provides infrastructure for non-internal port, but does not
+  define any worker threads.
 
   Event mode also supports event vectorization. The event devices, ethernet device
   pairs which support the capability ``RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR`` can
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 50b0cf158a..912b73e5a8 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -3056,7 +3056,8 @@ main(int32_t argc, char **argv)
 		if ((socket_ctx[socket_id].session_pool != NULL) &&
 			(socket_ctx[socket_id].sa_in == NULL) &&
 			(socket_ctx[socket_id].sa_out == NULL)) {
-			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf);
+			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf,
+				eh_conf->mode_params);
 			sp4_init(&socket_ctx[socket_id], socket_id);
 			sp6_init(&socket_ctx[socket_id], socket_id);
 			rt_init(&socket_ctx[socket_id], socket_id);
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index ee1cf871ca..47dca56201 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -6,6 +6,7 @@
 #include <netinet/ip.h>
 
 #include <rte_branch_prediction.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_log.h>
 #include <rte_crypto.h>
 #include <rte_security.h>
@@ -56,14 +57,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
 
 int
 create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
-	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-	struct rte_ipsec_session *ips)
+	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
 {
 	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
+	enum rte_crypto_op_sess_type sess_type;
 	struct rte_cryptodev_info cdev_info;
+	enum rte_crypto_op_type op_type;
 	unsigned long cdev_id_qp = 0;
-	struct cdev_key key = { 0 };
 	struct ipsec_ctx *ipsec_ctx;
+	struct cdev_key key = { 0 };
+	void *sess = NULL;
 	uint32_t lcore_id;
 	int32_t ret = 0;
 
@@ -158,6 +162,10 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
 				return -1;
 			}
 			ips->security.ctx = ctx;
+
+			sess = ips->security.ses;
+			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
 		} else {
 			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
 			return -1;
@@ -179,6 +187,27 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
 		rte_cryptodev_info_get(cdev_id, &cdev_info);
 	}
 
+	/* Set up metadata required by event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
+		union rte_event_crypto_metadata m_data = {0};
+		const struct eventdev_params *eventdev_conf;
+
+		eventdev_conf = &(em_conf->eventdev_config[0]);
+
+		/* Fill in response information */
+		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
+		m_data.response_info.op = RTE_EVENT_OP_NEW;
+		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;
+
+		/* Fill in request information */
+		m_data.request_info.cdev_id = cdev_id;
+		m_data.request_info.queue_pair_id = 0;
+
+		/* Attach meta info to session */
+		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
+				sess_type, &m_data, sizeof(m_data));
+	}
+
 	return 0;
 }
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 538eb17d94..3d373dfd77 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -14,6 +14,7 @@
 #include <rte_flow.h>
 #include <rte_ipsec.h>
 
+#include "event_helper.h"
 #include "ipsec-secgw.h"
 
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
@@ -425,7 +426,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound);
 
 void
 sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf);
+	struct lcore_conf *lcore_conf,
+	const struct eventmode_conf *em_conf);
 
 void
 rt_init(struct socket_ctx *ctx, int32_t socket_id);
@@ -442,8 +444,8 @@ enqueue_cop_burst(struct cdev_qp *cqp);
 
 int
 create_lookaside_session(struct ipsec_ctx *ipsec_ctx[],
-	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-	struct rte_ipsec_session *ips);
+	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+	struct ipsec_sa *sa, struct rte_ipsec_session *ips);
 
 int
 create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 7f46177f8e..074c7a22d6 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2020 Marvell International Ltd.
  */
 #include <rte_acl.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_lpm.h>
 #include <rte_lpm6.h>
@@ -11,6 +12,7 @@
 #include "ipsec.h"
 #include "ipsec-secgw.h"
 #include "ipsec_worker.h"
+#include "sad.h"
 
 #if defined(__ARM_NEON)
 #include "ipsec_lpm_neon.h"
@@ -225,6 +227,47 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
 	ip->num = j;
 }
 
+static inline void
+ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv4_hdr *ipv4;
+
+	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
+	pkt->l3_len = ipv4->ihl * 4;
+}
+
+static inline int
+ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv6_hdr *ipv6;
+	size_t l3_len, ext_len;
+	uint32_t l3_type;
+	int next_proto;
+	uint8_t *p;
+
+	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
+	l3_len = sizeof(struct rte_ipv6_hdr);
+	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
+
+	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+		l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+		p = rte_pktmbuf_mtod(pkt, uint8_t *);
+		next_proto = ipv6->proto;
+		while (next_proto != IPPROTO_ESP &&
+			l3_len < pkt->data_len &&
+			(next_proto = rte_ipv6_get_next_ext(p + l3_len,
+					next_proto, &ext_len)) >= 0)
+			l3_len += ext_len;
+
+		/* Drop pkt when IPv6 header exceeds first seg size */
+		if (unlikely(l3_len > pkt->data_len))
+			return -EINVAL;
+	}
+	pkt->l3_len = l3_len;
+
+	return 0;
+}
+
 static inline uint16_t
 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 {
@@ -284,9 +327,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
 	return RTE_MAX_ETHPORTS;
 }
 
+static inline void
+crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+		struct rte_crypto_op *cop[], uint16_t num)
+{
+	struct rte_crypto_sym_op *sop;
+	uint32_t i;
+
+	const struct rte_crypto_op unproc_cop = {
+		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
+	};
+
+	for (i = 0; i != num; i++) {
+		cop[i]->raw = unproc_cop.raw;
+		sop = cop[i]->sym;
+		sop->m_src = mb[i];
+		sop->m_dst = NULL;
+		__rte_security_attach_session(sop, ss->security.ses);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct ipsec_mbuf_metadata *priv;
+	struct rte_ipsec_session *sess;
+	struct rte_crypto_op *cop;
+	struct rte_event cev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	/* Get pkt private data */
+	priv = get_priv(pkt);
+	cop = &priv->cop;
+
+	/* Reset crypto operation data */
+	crypto_op_reset(sess, &pkt, &cop, 1);
+
+	/* Update event_ptr with rte_crypto_op */
+	cev.event = 0;
+	cev.event_ptr = cop;
+
+	/* Enqueue event to crypto adapter */
+	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+			ev_link->event_port_id, &cev, 1);
+	if (unlikely(ret <= 0)) {
+		/* pkt will be freed by the caller */
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
+		return rte_errno;
+	}
+
+	return 0;
+}
+
 static inline int
 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+	const struct eh_event_link_info *ev_link, struct rte_event *ev)
 {
 	struct ipsec_sa *sa = NULL;
 	struct rte_mbuf *pkt;
@@ -337,7 +438,35 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		ipv4_pkt_l3_len_set(pkt);
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
+
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
+			goto drop_pkt_and_exit;
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
 
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -386,7 +515,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 static inline int
 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+		const struct eh_event_link_info *ev_link, struct rte_event *ev)
 {
 	struct rte_ipsec_session *sess;
 	struct rte_ether_hdr *ethhdr;
@@ -455,11 +584,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* Get IPsec session */
 	sess = ipsec_get_primary_session(sa);
 
-	/* Allow only inline protocol for now */
-	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-		goto drop_pkt_and_exit;
-	}
+	/* Determine protocol type */
+	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+		goto lookaside;
 
 	rte_security_set_pkt_metadata(sess->security.ctx,
 				      sess->security.ses, pkt, NULL);
@@ -484,6 +611,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	ipsec_event_pre_forward(pkt, port_id);
 	return PKT_FORWARDED;
 
+lookaside:
+	/* prepare pkt - advance start to L3 */
+	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+
+	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+		return PKT_POSTED;
+
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
 	rte_pktmbuf_free(pkt);
@@ -762,6 +896,67 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
 }
 
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_ether_hdr *ethhdr;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t port_id;
+	struct ip *ip;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	/* If operation was not successful, drop the packet */
+	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+	/* Prepend Ether layer */
+	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+	/* Route pkt and update required fields */
+	if (ip->ip_v == IPVERSION) {
+		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
+		pkt->l3_len = sizeof(struct ip);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+
+		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
+	} else {
+		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
+		pkt->l3_len = sizeof(struct ip6_hdr);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
+	}
+
+	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	/* Update Ether with port's MAC addresses */
+	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
+	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
+
+	/* Update event */
+	ev->mbuf = pkt;
+
+	return PKT_FORWARDED;
+}
+
 /*
  * Event mode exposes various operating modes depending on the
  * capabilities of the event device and the operating mode
@@ -952,6 +1147,14 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 		"Launching event mode worker (non-burst - Tx internal port - "
 		"app mode) on lcore %d\n", lcore_id);
 
+	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+	if (ret != 0) {
+		RTE_LOG(ERR, IPSEC,
+			"SAD cache init on lcore %u, failed with code: %d\n",
+			lcore_id, ret);
+		return;
+	}
+
 	/* Check if it's single link */
 	if (nb_links != 1) {
 		RTE_LOG(INFO, IPSEC,
@@ -978,6 +1181,20 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			ipsec_ev_vector_process(&lconf, links, &ev);
 			continue;
 		case RTE_EVENT_TYPE_ETHDEV:
+			if (is_unprotected_port(ev.mbuf->port))
+				ret = process_ipsec_ev_inbound(&lconf.inbound,
+								&lconf.rt, links, &ev);
+			else
+				ret = process_ipsec_ev_outbound(&lconf.outbound,
+								&lconf.rt, links, &ev);
+			if (ret != 1)
+				/* The pkt has been dropped or posted */
+				continue;
+			break;
+		case RTE_EVENT_TYPE_CRYPTODEV:
+			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
+			if (unlikely(ret != PKT_FORWARDED))
+				continue;
 			break;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
@@ -985,16 +1202,6 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			continue;
 		}
 
-		if (is_unprotected_port(ev.mbuf->port))
-			ret = process_ipsec_ev_inbound(&lconf.inbound,
-							&lconf.rt, &ev);
-		else
-			ret = process_ipsec_ev_outbound(&lconf.outbound,
-							&lconf.rt, &ev);
-		if (ret != 1)
-			/* The pkt has been dropped */
-			continue;
-
 		/*
 		 * Since tx internal port is available, events can be
 		 * directly enqueued to the adapter and it would be
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 396b9f9694..dc3627aacc 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1236,7 +1236,8 @@ static int
 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, uint32_t inbound,
 		struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	struct ipsec_sa *sa;
 	uint32_t i, idx;
@@ -1409,7 +1410,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 			}
 		} else {
-			rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips);
+			rc = create_lookaside_session(ips_ctx, skt_ctx,
+						      em_conf, sa, ips);
 			if (rc != 0) {
 				RTE_LOG(ERR, IPSEC_ESP,
 					"create_lookaside_session() failed\n");
@@ -1432,17 +1434,19 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 static inline int
 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
 }
 
 static inline int
 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
 }
 
 /*
@@ -1535,7 +1539,8 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
  */
 static int
 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
-		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[])
+		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	int rc;
 	struct rte_ipsec_sa_prm prm;
@@ -1577,7 +1582,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
 	if (lsa->fallback_sessions == 1) {
 		struct rte_ipsec_session *ipfs = ipsec_get_fallback_session(lsa);
 		if (ipfs->security.ses == NULL) {
-			rc = create_lookaside_session(ips_ctx, skt_ctx, lsa, ipfs);
+			rc = create_lookaside_session(ips_ctx, skt_ctx, em_conf, lsa, ipfs);
 			if (rc != 0)
 				return rc;
 		}
@@ -1593,7 +1598,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
  */
 static int
 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
-		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[])
+		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	int32_t rc, sz;
 	uint32_t i, idx;
@@ -1631,7 +1637,7 @@ ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
 		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
 		lsa = ctx->sa + idx;
 
-		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx);
+		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx, em_conf);
 	}
 
 	return rc;
@@ -1674,7 +1680,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
 
 void
 sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf)
+	struct lcore_conf *lcore_conf,
+	const struct eventmode_conf *em_conf)
 {
 	int32_t rc;
 	const char *name;
@@ -1706,11 +1713,11 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
 			rte_exit(EXIT_FAILURE, "failed to init SAD\n");
 		RTE_LCORE_FOREACH(lcore_id)
 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
-		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
+		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
-				socket_id, ctx, ipsec_ctx);
+				socket_id, ctx, ipsec_ctx, em_conf);
 			if (rc != 0)
 				rte_exit(EXIT_FAILURE,
 					"failed to init inbound SAs\n");
@@ -1728,11 +1735,11 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
 
 		RTE_LCORE_FOREACH(lcore_id)
 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
-		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx);
+		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
-				socket_id, ctx, ipsec_ctx);
+				socket_id, ctx, ipsec_ctx, em_conf);
 			if (rc != 0)
 				rte_exit(EXIT_FAILURE,
 					"failed to init outbound SAs\n");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 4/6] examples/ipsec-secgw: add stats for event mode
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
                     ` (2 preceding siblings ...)
  2022-10-10 12:30   ` [PATCH v2 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
@ 2022-10-10 12:31   ` Volodymyr Fialko
  2022-10-10 12:31   ` [PATCH v2 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:31 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add per-core (Rx/Tx) statistics counters for the event mode worker.
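
A sketch of the counter scheme for illustration (the application's
real helpers differ in detail): one slot per lcore, indexed by
rte_lcore_id(), so the fast path needs no locks or atomics.

    #include <stdint.h>
    #include <rte_lcore.h>

    struct core_stats {
        uint64_t rx;
        uint64_t tx;
    };
    static struct core_stats stats[RTE_MAX_LCORE];

    /* Called once per received/transmitted burst on the worker core */
    static inline void
    core_stats_update_rx(uint32_t n)
    {
        stats[rte_lcore_id()].rx += n;
    }

    static inline void
    core_stats_update_tx(uint32_t n)
    {
        stats[rte_lcore_id()].tx += n;
    }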

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/ipsec_worker.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 074c7a22d6..034103bdf6 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -508,7 +508,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
-	rte_pktmbuf_free(pkt);
+	free_pkts(&pkt, 1);
 	ev->mbuf = NULL;
 	return PKT_DROPPED;
 }
@@ -620,7 +620,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
-	rte_pktmbuf_free(pkt);
+	free_pkts(&pkt, 1);
 	ev->mbuf = NULL;
 	return PKT_DROPPED;
 }
@@ -853,6 +853,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	pkt = vec->mbufs[0];
 
 	ev_vector_attr_init(vec);
+	core_stats_update_rx(vec->nb_elem);
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
 						      &lconf->rt, vec);
@@ -861,6 +862,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 						       &lconf->rt, vec);
 
 	if (likely(ret > 0)) {
+		core_stats_update_tx(vec->nb_elem);
 		vec->nb_elem = ret;
 		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
 						       links[0].event_port_id, ev, 1, 0);
@@ -1181,6 +1183,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			ipsec_ev_vector_process(&lconf, links, &ev);
 			continue;
 		case RTE_EVENT_TYPE_ETHDEV:
+			core_stats_update_rx(1);
 			if (is_unprotected_port(ev.mbuf->port))
 				ret = process_ipsec_ev_inbound(&lconf.inbound,
 								&lconf.rt, links, &ev);
@@ -1202,6 +1205,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			continue;
 		}
 
+		core_stats_update_tx(1);
 		/*
 		 * Since tx internal port is available, events can be
 		 * directly enqueued to the adapter and it would be
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 5/6] examples/ipsec-secgw: add event vector support for lookaside
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
                     ` (3 preceding siblings ...)
  2022-10-10 12:31   ` [PATCH v2 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
@ 2022-10-10 12:31   ` Volodymyr Fialko
  2022-10-10 12:31   ` [PATCH v2 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:31 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add vector support for the event crypto adapter in lookaside mode.
Once --event-vector is enabled, the event crypto adapter will group
processed crypto operations into an rte_event_vector event with type
RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
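
For illustration (helper name invented; routing and error handling
elided), a worker unpacks such a vector by recovering each source mbuf
from its crypto op:

    #include <rte_crypto.h>
    #include <rte_eventdev.h>
    #include <rte_mbuf.h>

    /* Turn a vector of processed crypto ops into a vector of mbufs,
     * in place (ptrs[] and mbufs[] share the same storage). */
    static inline void
    cryptodev_vector_to_mbufs(struct rte_event *ev)
    {
        struct rte_event_vector *vec = ev->vec;
        uint32_t i;

        for (i = 0; i < vec->nb_elem; i++) {
            struct rte_crypto_op *cop = vec->ptrs[i];

            vec->mbufs[i] = cop->sym->m_src;
        }
    }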

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/sample_app_ug/ipsec_secgw.rst |   3 +
 examples/ipsec-secgw/event_helper.c      |  34 ++-
 examples/ipsec-secgw/ipsec-secgw.c       |   2 +-
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec_worker.c      | 281 ++++++++++++++++++-----
 5 files changed, 264 insertions(+), 57 deletions(-)

diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index c7b87889f1..2a1aeae7c5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -94,6 +94,9 @@ The application supports two modes of operation: poll mode and event mode.
   (default vector-size is 16) and vector-tmo (default vector-tmo is 102400ns).
   By default event vectorization is disabled and it can be enabled using event-vector
   option.
+  For event device and crypto device pairs which support the capability
+  ``RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR``, vector aggregation can also be
+  enabled using the event-vector option.
 
 Additionally the event mode introduces two submodes of processing packets:
 
diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 90c5d716ff..89fb7e62a5 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -792,12 +792,15 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 static int
 eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf;
 	struct rte_event_dev_info evdev_default_conf = {0};
 	struct rte_event_port_conf port_conf = {0};
 	struct eventdev_params *eventdev_config;
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	const uint8_t nb_qp_per_cdev = 1;
 	uint8_t eventdev_id, cdev_id, n;
-	uint32_t cap;
-	int ret;
+	uint32_t cap, nb_elem;
+	int ret, socket_id;
 
 	if (!em_conf->enable_event_crypto_adapter)
 		return 0;
@@ -852,10 +855,35 @@ eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 			return ret;
 		}
 
+		memset(&queue_conf, 0, sizeof(queue_conf));
+		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) &&
+		    (em_conf->ext_params.event_vector)) {
+			queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
+			queue_conf.vector_sz = em_conf->ext_params.vector_size;
+			/*
+			 * Currently all sessions are configured with the same
+			 * response info fields, so packets will be aggregated
+			 * into the same vector. This lets us size the number of
+			 * vectors to just hold all queue pair descriptors.
+			 */
+			nb_elem = (qp_desc_nb / queue_conf.vector_sz) + 1;
+			nb_elem *= nb_qp_per_cdev;
+			socket_id = rte_cryptodev_socket_id(cdev_id);
+			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+					"QP_VEC_%u_%u", socket_id, cdev_id);
+			queue_conf.vector_mp = rte_event_vector_pool_create(
+					mp_name, nb_elem, 0,
+					queue_conf.vector_sz, socket_id);
+			if (queue_conf.vector_mp == NULL) {
+				EH_LOG_ERR("failed to create event vector pool");
+				return -ENOMEM;
+			}
+		}
+
 		/* Add crypto queue pairs to event crypto adapter */
 		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
 				-1, /* adds all the pre configured queue pairs to the instance */
-				NULL);
+				&queue_conf);
 		if (ret < 0) {
 			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
 			return ret;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 912b73e5a8..1d74aa60e5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -85,7 +85,7 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 /*
  * Configurable number of descriptors per queue pair
  */
-static uint32_t qp_desc_nb = 2048;
+uint32_t qp_desc_nb = 2048;
 
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index f02736075b..c6d11f3aac 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -145,6 +145,7 @@ extern bool per_port_pool;
 
 extern uint32_t mtu_size;
 extern uint32_t frag_tbl_sz;
+extern uint32_t qp_desc_nb;
 
 #define SS_F		(1U << 0)	/* Single SA mode */
 #define INL_PR_F	(1U << 1)	/* Inline Protocol */
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 034103bdf6..3b7f7f4928 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -349,18 +349,11 @@ crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	}
 }
 
-static inline int
-event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
-		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+static inline void
+crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
 {
 	struct ipsec_mbuf_metadata *priv;
-	struct rte_ipsec_session *sess;
 	struct rte_crypto_op *cop;
-	struct rte_event cev;
-	int ret;
-
-	/* Get IPsec session */
-	sess = ipsec_get_primary_session(sa);
 
 	/* Get pkt private data */
 	priv = get_priv(pkt);
@@ -370,13 +363,39 @@ event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
 	crypto_op_reset(sess, &pkt, &cop, 1);
 
 	/* Update event_ptr with rte_crypto_op */
-	cev.event = 0;
-	cev.event_ptr = cop;
+	ev->event = 0;
+	ev->event_ptr = cop;
+}
+
+static inline void
+free_pkts_from_events(struct rte_event events[], uint16_t count)
+{
+	struct rte_crypto_op *cop;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		cop = events[i].event_ptr;
+		free_pkts(&cop->sym->m_src, 1);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct rte_ipsec_session *sess;
+	struct rte_event ev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	crypto_prepare_event(pkt, sess, &ev);
 
 	/* Enqueue event to crypto adapter */
 	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
-			ev_link->event_port_id, &cev, 1);
-	if (unlikely(ret <= 0)) {
+			ev_link->event_port_id, &ev, 1);
+	if (unlikely(ret != 1)) {
 		/* pkt will be freed by the caller */
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
 		return rte_errno;
@@ -448,7 +467,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -463,7 +482,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -615,7 +634,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* prepare pkt - advance start to L3 */
 	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
 
-	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
 		return PKT_POSTED;
 
 drop_pkt_and_exit:
@@ -626,15 +645,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 }
 
 static inline int
-ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
-		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
+ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t)
 {
-	struct rte_ipsec_session *sess;
 	struct rte_ether_hdr *ethhdr;
-	uint32_t sa_idx, i, j = 0;
-	uint16_t port_id = 0;
 	struct rte_mbuf *pkt;
-	struct ipsec_sa *sa;
+	uint16_t port_id = 0;
+	uint32_t i, j = 0;
 
 	/* Route IPv4 packets */
 	for (i = 0; i < t->ip4.num; i++) {
@@ -668,28 +685,90 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			free_pkts(&pkt, 1);
 	}
 
+	return j;
+}
+
+static inline int
+ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
+			    struct route_table *rt,
+			    struct ipsec_traffic *t,
+			    const struct eh_event_link_info *ev_link)
+{
+	uint32_t ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
 	/* Route ESP packets */
+	for (i = 0; i < t->ipsec.num; i++) {
+		pkt = t->ipsec.pkts[i];
+		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
+		if (unlikely(sa == NULL)) {
+			free_pkts(&pkt, 1);
+			continue;
+		}
+		sess = ipsec_get_primary_session(sa);
+		crypto_prepare_event(pkt, sess, &events[ev_len]);
+		ev_len++;
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
+					ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
+		}
+	}
+
+	return j;
+}
+
+static inline int
+ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
+		    const struct eh_event_link_info *ev_link)
+{
+	uint32_t sa_idx, ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_ether_hdr *ethhdr;
+	uint16_t port_id = 0;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
+	/* Handle IPsec packets.
+	 * For lookaside IPsec packets, submit to cryptodev queue.
+	 * For inline IPsec packets, route the packet.
+	 */
 	for (i = 0; i < t->ipsec.num; i++) {
 		/* Validate sa_idx */
 		sa_idx = t->ipsec.res[i];
 		pkt = t->ipsec.pkts[i];
-		if (unlikely(sa_idx >= sa_ctx->nb_sa))
+		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
 			free_pkts(&pkt, 1);
-		else {
-			/* Else the packet has to be protected */
-			sa = &(sa_ctx->sa[sa_idx]);
-			/* Get IPsec session */
-			sess = ipsec_get_primary_session(sa);
-			/* Allow only inline protocol for now */
-			if (unlikely(sess->type !=
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-				free_pkts(&pkt, 1);
-				continue;
-			}
+			continue;
+		}
+		/* Else the packet has to be protected */
+		sa = &(sa_ctx->sa[sa_idx]);
+		/* Get IPsec session */
+		sess = ipsec_get_primary_session(sa);
+		switch (sess->type) {
+		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+			crypto_prepare_event(pkt, sess, &events[ev_len]);
+			ev_len++;
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 			rte_security_set_pkt_metadata(sess->security.ctx,
 						sess->security.ses, pkt, NULL);
-
 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
 			port_id = sa->portid;
 
@@ -703,6 +782,22 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
 			vec->mbufs[j++] = pkt;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+			free_pkts(&pkt, 1);
+			break;
+		}
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
+				   ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
 		}
 	}
 
@@ -727,6 +822,19 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		t->ip6.data[t->ip6.num] = nlp;
 		t->ip6.pkts[(t->ip6.num)++] = pkt;
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		ipv4_pkt_l3_len_set(pkt);
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		if (ipv6_pkt_l3_len_set(pkt) != 0) {
+			free_pkts(&pkt, 1);
+			return;
+		}
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -737,7 +845,8 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 
 static inline int
 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				struct rte_event_vector *vec)
+				struct rte_event_vector *vec,
+				const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -767,12 +876,16 @@ process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
 	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	if (t.ipsec.num != 0)
+		sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);
+
+	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
 }
 
 static inline int
 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				 struct rte_event_vector *vec)
+				 struct rte_event_vector *vec,
+				 const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -795,7 +908,7 @@ process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
 	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
 }
 
 static inline int
@@ -854,12 +967,13 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 
 	ev_vector_attr_init(vec);
 	core_stats_update_rx(vec->nb_elem);
+
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
-						      &lconf->rt, vec);
+						      &lconf->rt, vec, links);
 	else
 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
-						       &lconf->rt, vec);
+						       &lconf->rt, vec, links);
 
 	if (likely(ret > 0)) {
 		core_stats_update_tx(vec->nb_elem);
@@ -899,24 +1013,19 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 }
 
 static inline int
-ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
-			   struct rte_event *ev)
+ipsec_ev_cryptodev_process_one_pkt(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
 {
 	struct rte_ether_hdr *ethhdr;
-	struct rte_crypto_op *cop;
-	struct rte_mbuf *pkt;
 	uint16_t port_id;
 	struct ip *ip;
 
-	/* Get pkt data */
-	cop = ev->event_ptr;
-	pkt = cop->sym->m_src;
-
-	/* If operation was not successful, drop the packet */
+	/* If operation was not successful, free the packet */
 	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
 		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
@@ -946,13 +1055,76 @@ ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	/* Update Ether with port's MAC addresses */
 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
 
+	ipsec_event_pre_forward(pkt, port_id);
+
+	return 0;
+}
+
+static inline void
+ipsec_ev_cryptodev_vector_process(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct eh_event_link_info *links,
+		struct rte_event *ev)
+{
+	struct rte_event_vector *vec = ev->vec;
+	const uint16_t nb_events = 1;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t enqueued;
+	int i, n = 0;
+
+	ev_vector_attr_init(vec);
+	/* Transform cop vec into pkt vec */
+	for (i = 0; i < vec->nb_elem; i++) {
+		/* Get pkt data */
+		cop = vec->ptrs[i];
+		pkt = cop->sym->m_src;
+		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+			continue;
+
+		vec->mbufs[n++] = pkt;
+		ev_vector_attr_update(vec, pkt);
+	}
+
+	if (n == 0) {
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+		return;
+	}
+
+	vec->nb_elem = n;
+	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+			links[0].event_port_id, ev, nb_events, 0);
+	if (enqueued != nb_events) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Failed to enqueue to tx, ret = %u,"
+				" errno = %i\n", enqueued, rte_errno);
+		free_pkts(vec->mbufs, vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+	} else {
+		core_stats_update_tx(n);
+	}
+}
+
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+		return PKT_DROPPED;
+
 	/* Update event */
 	ev->mbuf = pkt;
 
@@ -1199,6 +1371,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			if (unlikely(ret != PKT_FORWARDED))
 				continue;
 			break;
+		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
+			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
+			continue;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
 				ev.event_type);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v2 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
                     ` (4 preceding siblings ...)
  2022-10-10 12:31   ` [PATCH v2 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
@ 2022-10-10 12:31   ` Volodymyr Fialko
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 12:31 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Limit the number of queue pairs to one for event lookaside mode, since
all cores share the same queue pair in this mode.
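
For illustration (function name invented, error handling elided), the
resulting cryptodev configuration in this mode boils down to:

    #include <rte_cryptodev.h>

    /* One queue pair per cryptodev is enough in event lookaside mode:
     * every worker core submits through the event crypto adapter. */
    static int
    configure_single_qp(uint8_t cdev_id)
    {
        struct rte_cryptodev_config conf = {
            .socket_id = rte_cryptodev_socket_id(cdev_id),
            .nb_queue_pairs = 1,
        };

        return rte_cryptodev_configure(cdev_id, &conf);
    }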

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/ipsec-secgw.c | 56 +++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 20 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1d74aa60e5..e20bf50752 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1540,7 +1540,7 @@ add_mapping(const char *str, uint16_t cdev_id,
 }
 
 static int32_t
-add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
+add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 		uint16_t qp, struct lcore_params *params)
 {
 	int32_t ret = 0;
@@ -1596,6 +1596,37 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 	return ret;
 }
 
+static uint16_t
+map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id,
+		const struct rte_cryptodev_info *cdev_info,
+		uint16_t *last_used_lcore_id)
+{
+	uint16_t nb_qp = 0, i = 0, max_nb_qps;
+
+	/* For event lookaside mode all sessions are bound to a single qp.
+	 * It's enough to bind one core, since all cores will share the same qp.
+	 * Event inline mode does not use this functionality.
+	 */
+	if (mode == EH_PKT_TRANSFER_MODE_EVENT) {
+		add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]);
+		return 1;
+	}
+
+	/* Check if there are enough queue pairs for all configured cores */
+	max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs);
+
+	while (nb_qp < max_nb_qps && i < nb_lcore_params) {
+		if (add_cdev_mapping(cdev_info, cdev_id, nb_qp,
+					&lcore_params[*last_used_lcore_id]))
+			nb_qp++;
+		(*last_used_lcore_id)++;
+		*last_used_lcore_id %= nb_lcore_params;
+		i++;
+	}
+
+	return nb_qp;
+}
+
 /* Check if the device is enabled by cryptodev_mask */
 static int
 check_cryptodev_mask(uint8_t cdev_id)
@@ -1607,13 +1638,13 @@ check_cryptodev_mask(uint8_t cdev_id)
 }
 
 static uint16_t
-cryptodevs_init(uint16_t req_queue_num)
+cryptodevs_init(enum eh_pkt_transfer_mode mode)
 {
+	struct rte_hash_parameters params = { 0 };
 	struct rte_cryptodev_config dev_conf;
 	struct rte_cryptodev_qp_conf qp_conf;
-	uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
+	uint16_t idx, qp, total_nb_qps;
 	int16_t cdev_id;
-	struct rte_hash_parameters params = { 0 };
 
 	const uint64_t mseg_flag = multi_seg_required() ?
 				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
@@ -1654,23 +1685,8 @@ cryptodevs_init(uint16_t req_queue_num)
 				cdev_id,
 				rte_cryptodev_get_feature_name(mseg_flag));
 
-		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
-			max_nb_qps = cdev_info.max_nb_queue_pairs;
-		else
-			max_nb_qps = nb_lcore_params;
-
-		qp = 0;
-		i = 0;
-		while (qp < max_nb_qps && i < nb_lcore_params) {
-			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
-						&lcore_params[idx]))
-				qp++;
-			idx++;
-			idx = idx % nb_lcore_params;
-			i++;
-		}
 
-		qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
+		qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx);
 		if (qp == 0)
 			continue;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode
  2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
                     ` (5 preceding siblings ...)
  2022-10-10 12:31   ` [PATCH v2 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
@ 2022-10-10 16:56   ` Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
                       ` (6 more replies)
  6 siblings, 7 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev; +Cc: jerinj, gakhil, anoobj, suanmingm, Volodymyr Fialko

Add support for lookaside event mode for ipsec-secgw example application.

* Changes since v1
- Resolve issue with freeing IPv6 packets in case of error
- Rebase on top of dpdk-next-crypto
- Update release note

* Changes since v2
- Fix compilation with old gcc

Volodymyr Fialko (6):
  examples/ipsec-secgw: add event crypto adapter init
  examples/ipsec-secgw: add queue for event crypto adapter
  examples/ipsec-secgw: add lookaside event mode
  examples/ipsec-secgw: add stats for event mode
  examples/ipsec-secgw: add event vector support for lookaside
  examples/ipsec-secgw: reduce number of QP for event lookaside

 doc/guides/rel_notes/release_22_11.rst   |   5 +
 doc/guides/sample_app_ug/ipsec_secgw.rst |   7 +-
 examples/ipsec-secgw/event_helper.c      | 267 +++++++++++--
 examples/ipsec-secgw/event_helper.h      |   4 +
 examples/ipsec-secgw/ipsec-secgw.c       |  88 +++--
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec.c             |  36 +-
 examples/ipsec-secgw/ipsec.h             |   8 +-
 examples/ipsec-secgw/ipsec_worker.c      | 476 ++++++++++++++++++++---
 examples/ipsec-secgw/sa.c                |  37 +-
 10 files changed, 801 insertions(+), 128 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 1/6] examples/ipsec-secgw: add event crypto adapter init
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
@ 2022-10-10 16:56     ` Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
                       ` (5 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Create, configure and start an event crypto adapter. This adapter will
be used in lookaside event mode processing.
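
At a glance, the adapter lifecycle wired in by this patch follows the
standard API sequence (sketch only; the adapter id and port_conf below
are placeholders):

    #include <rte_event_crypto_adapter.h>

    /* Create an adapter in OP_FORWARD mode, bind all preconfigured
     * queue pairs of the cryptodev to it, then start it. */
    static int
    crypto_adapter_bringup(uint8_t adapter_id, uint8_t evdev_id,
                           uint8_t cdev_id,
                           struct rte_event_port_conf *port_conf)
    {
        int ret;

        ret = rte_event_crypto_adapter_create(adapter_id, evdev_id,
                port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
        if (ret < 0)
            return ret;

        /* -1 adds every preconfigured queue pair of the cryptodev */
        ret = rte_event_crypto_adapter_queue_pair_add(adapter_id,
                cdev_id, -1, NULL);
        if (ret < 0)
            return ret;

        return rte_event_crypto_adapter_start(adapter_id);
    }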

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 144 ++++++++++++++++++++++++++++
 examples/ipsec-secgw/event_helper.h |   2 +
 examples/ipsec-secgw/ipsec-secgw.c  |  27 +++---
 3 files changed, 161 insertions(+), 12 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 22b1760949..30a1f253c8 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -4,8 +4,10 @@
 #include <stdlib.h>
 
 #include <rte_bitmap.h>
+#include <rte_cryptodev.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_malloc.h>
@@ -744,6 +746,126 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 	return 0;
 }
 
+static int
+eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	struct rte_event_dev_info evdev_default_conf = {0};
+	struct rte_event_port_conf port_conf = {0};
+	struct eventdev_params *eventdev_config;
+	uint8_t eventdev_id, cdev_id, n;
+	uint32_t cap;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	/*
+	 * More than one eventdev is not supported;
+	 * all event crypto adapters will be assigned to one eventdev.
+	 */
+	RTE_ASSERT(em_conf->nb_eventdev == 1);
+
+	/* Get event device configuration */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_id = eventdev_config->eventdev_id;
+
+	n = rte_cryptodev_count();
+
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		/* Check event's crypto capabilities */
+		ret = rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &cap);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event device's crypto capabilities %d", ret);
+			return ret;
+		}
+
+		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) {
+			EH_LOG_ERR("Event crypto adapter does not support forward mode!");
+			return -EINVAL;
+		}
+
+		/* Create event crypto adapter */
+
+		/* Get default configuration of event dev */
+		ret = rte_event_dev_info_get(eventdev_id, &evdev_default_conf);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to get event dev info %d", ret);
+			return ret;
+		}
+
+		/* Setup port conf */
+		port_conf.new_event_threshold =
+				evdev_default_conf.max_num_events;
+		port_conf.dequeue_depth =
+				evdev_default_conf.max_event_port_dequeue_depth;
+		port_conf.enqueue_depth =
+				evdev_default_conf.max_event_port_enqueue_depth;
+
+		/* Create adapter */
+		ret = rte_event_crypto_adapter_create(cdev_id, eventdev_id,
+				&port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to create event crypto adapter %d", ret);
+			return ret;
+		}
+
+		/* Add crypto queue pairs to event crypto adapter */
+		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
+				-1, /* adds all the pre configured queue pairs to the instance */
+				NULL);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_start_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_start(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to start event crypto device %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+eh_stop_crypto_adapter(struct eventmode_conf *em_conf)
+{
+	uint8_t cdev_id, n;
+	int ret;
+
+	if (!em_conf->enable_event_crypto_adapter)
+		return 0;
+
+	n = rte_cryptodev_count();
+	for (cdev_id = 0; cdev_id != n; cdev_id++) {
+		ret = rte_event_crypto_adapter_stop(cdev_id);
+		if (ret < 0) {
+			EH_LOG_ERR("Failed to stop event crypto device %d (%d)",
+					cdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int
 eh_event_vector_limits_validate(struct eventmode_conf *em_conf,
 				uint8_t ev_dev_id, uint8_t ethdev_id)
@@ -1697,6 +1819,13 @@ eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Setup event crypto adapter */
+	ret = eh_initialize_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to initialize event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Setup Rx adapter */
 	ret = eh_initialize_rx_adapter(em_conf);
 	if (ret < 0) {
@@ -1718,6 +1847,14 @@ eh_devs_init(struct eh_conf *conf)
 		return ret;
 	}
 
+	/* Start event crypto adapter */
+	ret = eh_start_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to start event crypto dev %d", ret);
+		return ret;
+	}
+
+
 	/* Start eth devices after setting up adapter */
 	RTE_ETH_FOREACH_DEV(port_id) {
 
@@ -1788,6 +1925,13 @@ eh_devs_uninit(struct eh_conf *conf)
 		}
 	}
 
+	/* Stop event crypto adapter */
+	ret = eh_stop_crypto_adapter(em_conf);
+	if (ret < 0) {
+		EH_LOG_ERR("Failed to stop event crypto adapter %d", ret);
+		return ret;
+	}
+
 	/* Stop and release event devices */
 	for (i = 0; i < em_conf->nb_eventdev; i++) {
 
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index f3cbe57cb3..4b26dc8fc2 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -185,6 +185,8 @@ struct eventmode_conf {
 		/**< Max vector timeout in nanoseconds */
 	uint64_t vector_pool_sz;
 		/**< Vector pool size */
+	bool enable_event_crypto_adapter;
+		/**< Enables event crypto adapter related configuration */
 };
 
 /**
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bcf2dfa6d8..50b0cf158a 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -44,6 +44,7 @@
 #include <rte_cryptodev.h>
 #include <rte_security.h>
 #include <rte_eventdev.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_ip.h>
 #include <rte_ip_frag.h>
 #include <rte_alarm.h>
@@ -2098,8 +2099,8 @@ session_pool_init(struct socket_ctx *ctx, int32_t socket_id, size_t sess_sz)
 	nb_sess = RTE_MAX(nb_sess, CDEV_MP_CACHE_SZ *
 			CDEV_MP_CACHE_MULTIPLIER);
 	sess_mp = rte_cryptodev_sym_session_pool_create(
-			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ, 0,
-			socket_id);
+			mp_name, nb_sess, sess_sz, CDEV_MP_CACHE_SZ,
+			0, socket_id);
 	ctx->session_pool = sess_mp;
 
 	if (ctx->session_pool == NULL)
@@ -2378,7 +2379,8 @@ signal_handler(int signum)
 }
 
 static void
-ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
+ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa,
+		struct eventmode_conf *em_conf)
 {
 	struct rte_ipsec_session *ips;
 	int32_t i;
@@ -2388,9 +2390,11 @@ ev_mode_sess_verify(struct ipsec_sa *sa, int nb_sa)
 
 	for (i = 0; i < nb_sa; i++) {
 		ips = ipsec_get_primary_session(&sa[i]);
-		if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
-			rte_exit(EXIT_FAILURE, "Event mode supports only "
-				 "inline protocol sessions\n");
+		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+			em_conf->enable_event_crypto_adapter = true;
+		else if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+			rte_exit(EXIT_FAILURE, "Event mode supports inline "
+				 "and lookaside protocol sessions\n");
 	}
 
 }
@@ -2423,13 +2427,12 @@ check_event_mode_params(struct eh_conf *eh_conf)
 		em_conf->ext_params.sched_type = RTE_SCHED_TYPE_ORDERED;
 
 	/*
-	 * Event mode currently supports only inline protocol sessions.
-	 * If there are other types of sessions configured then exit with
-	 * error.
+	 * Event mode currently supports inline and lookaside protocol
+	 * sessions. If there are other types of sessions configured then exit
+	 * with error.
 	 */
-	ev_mode_sess_verify(sa_in, nb_sa_in);
-	ev_mode_sess_verify(sa_out, nb_sa_out);
-
+	ev_mode_sess_verify(sa_in, nb_sa_in, em_conf);
+	ev_mode_sess_verify(sa_out, nb_sa_out, em_conf);
 
 	/* Option --config does not apply to event mode */
 	if (nb_lcore_params > 0) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 2/6] examples/ipsec-secgw: add queue for event crypto adapter
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
@ 2022-10-10 16:56     ` Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                       ` (4 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add a separate event queue for event crypto adapter processing, to resolve
queue contention between new and already-processed events.
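
As a minimal sketch (not part of this patch; device and queue ids are
illustrative), the dedicated completion queue boils down to a regular
event queue configured with the highest priority:

    #include <rte_eventdev.h>

    /* Illustrative helper: dedicate one event queue to crypto
     * completions, scheduled ahead of the Rx traffic queues.
     */
    static int
    setup_crypto_completion_queue(uint8_t evdev_id, uint8_t queue_id,
                                  uint8_t sched_type)
    {
        struct rte_event_queue_conf conf = {
            .nb_atomic_flows = 1024,
            .nb_atomic_order_sequences = 1024,
            .schedule_type = sched_type,
            .priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
        };

        return rte_event_queue_setup(evdev_id, queue_id, &conf);
    }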

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/event_helper.c | 95 +++++++++++++++++++++--------
 examples/ipsec-secgw/event_helper.h |  2 +
 2 files changed, 71 insertions(+), 26 deletions(-)

diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 30a1f253c8..90c5d716ff 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -19,6 +19,8 @@
 #define DEFAULT_VECTOR_SIZE  16
 #define DEFAULT_VECTOR_TMO   102400
 
+#define INVALID_EV_QUEUE_ID -1
+
 static volatile bool eth_core_running;
 
 static int
@@ -153,11 +155,10 @@ eh_dev_has_burst_mode(uint8_t dev_id)
 }
 
 static int
-eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+eh_set_nb_eventdev(struct eventmode_conf *em_conf)
 {
-	int lcore_count, nb_eventdev, nb_eth_dev, ret;
 	struct eventdev_params *eventdev_config;
-	struct rte_event_dev_info dev_info;
+	int nb_eventdev;
 
 	/* Get the number of event devices */
 	nb_eventdev = rte_event_dev_count();
@@ -172,6 +173,23 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		return -EINVAL;
 	}
 
+	/* Set event dev id */
+	eventdev_config = &(em_conf->eventdev_config[0]);
+	eventdev_config->eventdev_id = 0;
+
+	/* Update the number of event devices */
+	em_conf->nb_eventdev = 1;
+
+	return 0;
+}
+
+static int
+eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
+{
+	int lcore_count, nb_eth_dev, ret;
+	struct eventdev_params *eventdev_config;
+	struct rte_event_dev_info dev_info;
+
 	/* Get the number of eth devs */
 	nb_eth_dev = rte_eth_dev_count_avail();
 	if (nb_eth_dev == 0) {
@@ -199,15 +217,30 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 	eventdev_config = &(em_conf->eventdev_config[0]);
 
 	/* Save number of queues & ports available */
-	eventdev_config->eventdev_id = 0;
-	eventdev_config->nb_eventqueue = dev_info.max_event_queues;
+	eventdev_config->nb_eventqueue = nb_eth_dev;
 	eventdev_config->nb_eventport = dev_info.max_event_ports;
 	eventdev_config->ev_queue_mode = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
 
-	/* Check if there are more queues than required */
-	if (eventdev_config->nb_eventqueue > nb_eth_dev + 1) {
-		/* One queue is reserved for Tx */
-		eventdev_config->nb_eventqueue = nb_eth_dev + 1;
+	/* One queue is reserved for Tx */
+	eventdev_config->tx_queue_id = INVALID_EV_QUEUE_ID;
+	if (!eventdev_config->all_internal_ports) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->tx_queue_id =
+			eventdev_config->nb_eventqueue++;
+	}
+
+	/* One queue is reserved for event crypto adapter */
+	eventdev_config->ev_cpt_queue_id = INVALID_EV_QUEUE_ID;
+	if (em_conf->enable_event_crypto_adapter) {
+		if (eventdev_config->nb_eventqueue >= dev_info.max_event_queues) {
+			EH_LOG_ERR("Not enough event queues available");
+			return -EINVAL;
+		}
+		eventdev_config->ev_cpt_queue_id =
+			eventdev_config->nb_eventqueue++;
 	}
 
 	/* Check if there are more ports than required */
@@ -216,9 +249,6 @@ eh_set_default_conf_eventdev(struct eventmode_conf *em_conf)
 		eventdev_config->nb_eventport = lcore_count;
 	}
 
-	/* Update the number of event devices */
-	em_conf->nb_eventdev++;
-
 	return 0;
 }
 
@@ -247,15 +277,10 @@ eh_do_capability_check(struct eventmode_conf *em_conf)
 
 	/*
 	 * If Rx & Tx internal ports are supported by all event devices then
-	 * eth cores won't be required. Override the eth core mask requested
-	 * and decrement number of event queues by one as it won't be needed
-	 * for Tx.
+	 * eth cores won't be required. Override the eth core mask requested.
 	 */
-	if (all_internal_ports) {
+	if (all_internal_ports)
 		rte_bitmap_reset(em_conf->eth_core_mask);
-		for (i = 0; i < em_conf->nb_eventdev; i++)
-			em_conf->eventdev_config[i].nb_eventqueue--;
-	}
 }
 
 static int
@@ -372,6 +397,10 @@ eh_set_default_conf_rx_adapter(struct eventmode_conf *em_conf)
 			eventdev_config->nb_eventqueue :
 			eventdev_config->nb_eventqueue - 1;
 
+	/* Reserve one queue for event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter)
+		nb_eventqueue--;
+
 	/*
 	 * Map all queues of eth device (port) to an event queue. If there
 	 * are more event queues than eth ports then create 1:1 mapping.
@@ -543,14 +572,18 @@ eh_validate_conf(struct eventmode_conf *em_conf)
 	 * and initialize the config with all ports & queues available
 	 */
 	if (em_conf->nb_eventdev == 0) {
+		ret = eh_set_nb_eventdev(em_conf);
+		if (ret != 0)
+			return ret;
+		eh_do_capability_check(em_conf);
 		ret = eh_set_default_conf_eventdev(em_conf);
 		if (ret != 0)
 			return ret;
+	} else {
+		/* Perform capability check for the selected event devices */
+		eh_do_capability_check(em_conf);
 	}
 
-	/* Perform capability check for the selected event devices */
-	eh_do_capability_check(em_conf);
-
 	/*
 	 * Check if links are specified. Else generate a default config for
 	 * the event ports used.
@@ -596,8 +629,8 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 	uint8_t *queue = NULL;
 	uint8_t eventdev_id;
 	int nb_eventqueue;
-	uint8_t i, j;
-	int ret;
+	int ret, j;
+	uint8_t i;
 
 	for (i = 0; i < nb_eventdev; i++) {
 
@@ -659,14 +692,24 @@ eh_initialize_eventdev(struct eventmode_conf *em_conf)
 			 * stage if event device does not have internal
 			 * ports. This will be an atomic queue.
 			 */
-			if (!eventdev_config->all_internal_ports &&
-			    j == nb_eventqueue-1) {
+			if (j == eventdev_config->tx_queue_id) {
 				eventq_conf.schedule_type =
 					RTE_SCHED_TYPE_ATOMIC;
 			} else {
 				eventq_conf.schedule_type =
 					em_conf->ext_params.sched_type;
 			}
+			/*
+			 * Give the event crypto device's queue higher priority than the Rx
+			 * queues. This allows crypto events to be processed with the highest
+			 * priority.
+			 */
+			if (j == eventdev_config->ev_cpt_queue_id) {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_HIGHEST;
+			} else {
+				eventq_conf.priority =
+					RTE_EVENT_DEV_PRIORITY_NORMAL;
+			}
 
 			/* Set max atomic flows to 1024 */
 			eventq_conf.nb_atomic_flows = 1024;
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index 4b26dc8fc2..af5cfcf794 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -88,6 +88,8 @@ struct eventdev_params {
 	uint8_t nb_eventport;
 	uint8_t ev_queue_mode;
 	uint8_t all_internal_ports;
+	int tx_queue_id;
+	int ev_cpt_queue_id;
 };
 
 /**
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 3/6] examples/ipsec-secgw: add lookaside event mode
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
@ 2022-10-10 16:56     ` Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add base support for lookaside event mode. Events coming from ethdev
will be enqueued to the event crypto adapter, processed, and enqueued
back to ethdev for transmission.
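
A minimal sketch of the submission side (simplified from the patch
below; the helper name is illustrative): the crypto op is carried in
the event payload and handed to the adapter, which returns it on the
dedicated completion queue once the cryptodev is done.

    #include <rte_crypto.h>
    #include <rte_errno.h>
    #include <rte_event_crypto_adapter.h>

    /* Illustrative helper: submit one prepared crypto op to the
     * event crypto adapter through the given event port.
     */
    static int
    submit_crypto_op(uint8_t evdev_id, uint8_t event_port_id,
                     struct rte_crypto_op *cop)
    {
        struct rte_event ev;

        ev.event = 0;
        ev.event_ptr = cop;

        if (rte_event_crypto_adapter_enqueue(evdev_id, event_port_id,
                        &ev, 1) != 1)
            return -rte_errno;

        return 0;
    }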

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/rel_notes/release_22_11.rst   |   5 +
 doc/guides/sample_app_ug/ipsec_secgw.rst |   4 +-
 examples/ipsec-secgw/ipsec-secgw.c       |   3 +-
 examples/ipsec-secgw/ipsec.c             |  36 +++-
 examples/ipsec-secgw/ipsec.h             |   8 +-
 examples/ipsec-secgw/ipsec_worker.c      | 241 +++++++++++++++++++++--
 examples/ipsec-secgw/sa.c                |  37 ++--
 7 files changed, 293 insertions(+), 41 deletions(-)

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index b0b981578b..f1bfba2af6 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -218,6 +218,11 @@ New Features
   and appears as a promising alternative to traditional approaches
   such as packet sampling.
 
+* **Updated ipsec-secgw sample application.**
+
+  Added support for lookaside sessions in event mode.
+  See the :doc:`../sample_app_ug/ipsec_secgw` for more details.
+
 
 Removed Items
 -------------
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index 07686d2285..c7b87889f1 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -83,8 +83,8 @@ The application supports two modes of operation: poll mode and event mode.
   every type of event device without affecting existing paths/use cases. The worker
   to be used will be determined by the operating conditions and the underlying device
   capabilities. **Currently the application provides non-burst, internal port worker
-  threads and supports inline protocol only.** It also provides infrastructure for
-  non-internal port however does not define any worker threads.
+  threads.** It also provides infrastructure for non-internal port, however it does
+  not define any worker threads.
 
   Event mode also supports event vectorization. The event devices, ethernet device
   pairs which support the capability ``RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR`` can
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 50b0cf158a..912b73e5a8 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -3056,7 +3056,8 @@ main(int32_t argc, char **argv)
 		if ((socket_ctx[socket_id].session_pool != NULL) &&
 			(socket_ctx[socket_id].sa_in == NULL) &&
 			(socket_ctx[socket_id].sa_out == NULL)) {
-			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf);
+			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf,
+				eh_conf->mode_params);
 			sp4_init(&socket_ctx[socket_id], socket_id);
 			sp6_init(&socket_ctx[socket_id], socket_id);
 			rt_init(&socket_ctx[socket_id], socket_id);
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index ee1cf871ca..d90b59774d 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -6,6 +6,7 @@
 #include <netinet/ip.h>
 
 #include <rte_branch_prediction.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_log.h>
 #include <rte_crypto.h>
 #include <rte_security.h>
@@ -56,14 +57,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
 
 int
 create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
-	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-	struct rte_ipsec_session *ips)
+	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
 {
 	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
+	enum rte_crypto_op_sess_type sess_type;
 	struct rte_cryptodev_info cdev_info;
+	enum rte_crypto_op_type op_type;
 	unsigned long cdev_id_qp = 0;
-	struct cdev_key key = { 0 };
 	struct ipsec_ctx *ipsec_ctx;
+	struct cdev_key key = { 0 };
+	void *sess = NULL;
 	uint32_t lcore_id;
 	int32_t ret = 0;
 
@@ -158,6 +162,10 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
 				return -1;
 			}
 			ips->security.ctx = ctx;
+
+			sess = ips->security.ses;
+			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
 		} else {
 			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
 			return -1;
@@ -179,6 +187,28 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
 		rte_cryptodev_info_get(cdev_id, &cdev_info);
 	}
 
+	/* Setup meta data required by event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
+		union rte_event_crypto_metadata m_data;
+		const struct eventdev_params *eventdev_conf;
+
+		eventdev_conf = &(em_conf->eventdev_config[0]);
+		memset(&m_data, 0, sizeof(m_data));
+
+		/* Fill in response information */
+		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
+		m_data.response_info.op = RTE_EVENT_OP_NEW;
+		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;
+
+		/* Fill in request information */
+		m_data.request_info.cdev_id = cdev_id;
+		m_data.request_info.queue_pair_id = 0;
+
+		/* Attach meta info to session */
+		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
+				sess_type, &m_data, sizeof(m_data));
+	}
+
 	return 0;
 }
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 538eb17d94..3d373dfd77 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -14,6 +14,7 @@
 #include <rte_flow.h>
 #include <rte_ipsec.h>
 
+#include "event_helper.h"
 #include "ipsec-secgw.h"
 
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
@@ -425,7 +426,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound);
 
 void
 sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf);
+	struct lcore_conf *lcore_conf,
+	const struct eventmode_conf *em_conf);
 
 void
 rt_init(struct socket_ctx *ctx, int32_t socket_id);
@@ -442,8 +444,8 @@ enqueue_cop_burst(struct cdev_qp *cqp);
 
 int
 create_lookaside_session(struct ipsec_ctx *ipsec_ctx[],
-	struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-	struct rte_ipsec_session *ips);
+	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+	struct ipsec_sa *sa, struct rte_ipsec_session *ips);
 
 int
 create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 7f46177f8e..6c1ffbfb24 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2020 Marvell International Ltd.
  */
 #include <rte_acl.h>
+#include <rte_event_crypto_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
 #include <rte_lpm.h>
 #include <rte_lpm6.h>
@@ -11,6 +12,7 @@
 #include "ipsec.h"
 #include "ipsec-secgw.h"
 #include "ipsec_worker.h"
+#include "sad.h"
 
 #if defined(__ARM_NEON)
 #include "ipsec_lpm_neon.h"
@@ -225,6 +227,47 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
 	ip->num = j;
 }
 
+static inline void
+ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv4_hdr *ipv4;
+
+	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
+	pkt->l3_len = ipv4->ihl * 4;
+}
+
+static inline int
+ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv6_hdr *ipv6;
+	size_t l3_len, ext_len;
+	uint32_t l3_type;
+	int next_proto;
+	uint8_t *p;
+
+	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
+	l3_len = sizeof(struct rte_ipv6_hdr);
+	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
+
+	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+		l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+		p = rte_pktmbuf_mtod(pkt, uint8_t *);
+		next_proto = ipv6->proto;
+		while (next_proto != IPPROTO_ESP &&
+			l3_len < pkt->data_len &&
+			(next_proto = rte_ipv6_get_next_ext(p + l3_len,
+					next_proto, &ext_len)) >= 0)
+			l3_len += ext_len;
+
+		/* Drop pkt when IPv6 header exceeds first seg size */
+		if (unlikely(l3_len > pkt->data_len))
+			return -EINVAL;
+	}
+	pkt->l3_len = l3_len;
+
+	return 0;
+}
+
 static inline uint16_t
 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
 {
@@ -284,9 +327,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
 	return RTE_MAX_ETHPORTS;
 }
 
+static inline void
+crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+		struct rte_crypto_op *cop[], uint16_t num)
+{
+	struct rte_crypto_sym_op *sop;
+	uint32_t i;
+
+	const struct rte_crypto_op unproc_cop = {
+		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
+	};
+
+	for (i = 0; i != num; i++) {
+		cop[i]->raw = unproc_cop.raw;
+		sop = cop[i]->sym;
+		sop->m_src = mb[i];
+		sop->m_dst = NULL;
+		__rte_security_attach_session(sop, ss->security.ses);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct ipsec_mbuf_metadata *priv;
+	struct rte_ipsec_session *sess;
+	struct rte_crypto_op *cop;
+	struct rte_event cev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	/* Get pkt private data */
+	priv = get_priv(pkt);
+	cop = &priv->cop;
+
+	/* Reset crypto operation data */
+	crypto_op_reset(sess, &pkt, &cop, 1);
+
+	/* Update event_ptr with rte_crypto_op */
+	cev.event = 0;
+	cev.event_ptr = cop;
+
+	/* Enqueue event to crypto adapter */
+	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+			ev_link->event_port_id, &cev, 1);
+	if (unlikely(ret <= 0)) {
+		/* pkt will be freed by the caller */
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
+		return rte_errno;
+	}
+
+	return 0;
+}
+
 static inline int
 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+	const struct eh_event_link_info *ev_link, struct rte_event *ev)
 {
 	struct ipsec_sa *sa = NULL;
 	struct rte_mbuf *pkt;
@@ -337,7 +438,35 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		ipv4_pkt_l3_len_set(pkt);
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
+
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
+			goto drop_pkt_and_exit;
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
 
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -386,7 +515,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 static inline int
 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+		const struct eh_event_link_info *ev_link, struct rte_event *ev)
 {
 	struct rte_ipsec_session *sess;
 	struct rte_ether_hdr *ethhdr;
@@ -455,11 +584,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* Get IPsec session */
 	sess = ipsec_get_primary_session(sa);
 
-	/* Allow only inline protocol for now */
-	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-		goto drop_pkt_and_exit;
-	}
+	/* Determine protocol type */
+	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+		goto lookaside;
 
 	rte_security_set_pkt_metadata(sess->security.ctx,
 				      sess->security.ses, pkt, NULL);
@@ -484,6 +611,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	ipsec_event_pre_forward(pkt, port_id);
 	return PKT_FORWARDED;
 
+lookaside:
+	/* prepare pkt - advance start to L3 */
+	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+
+	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+		return PKT_POSTED;
+
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
 	rte_pktmbuf_free(pkt);
@@ -762,6 +896,67 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 		rte_mempool_put(rte_mempool_from_obj(vec), vec);
 }
 
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_ether_hdr *ethhdr;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t port_id;
+	struct ip *ip;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	/* If operation was not successful, drop the packet */
+	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+	/* Prepend Ether layer */
+	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
+
+	/* Route pkt and update required fields */
+	if (ip->ip_v == IPVERSION) {
+		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
+		pkt->l3_len = sizeof(struct ip);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+
+		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
+	} else {
+		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
+		pkt->l3_len = sizeof(struct ip6_hdr);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
+	}
+
+	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	/* Update Ether with port's MAC addresses */
+	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
+	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
+
+	/* Update event */
+	ev->mbuf = pkt;
+
+	return PKT_FORWARDED;
+}
+
 /*
  * Event mode exposes various operating modes depending on the
  * capabilities of the event device and the operating mode
@@ -952,6 +1147,14 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 		"Launching event mode worker (non-burst - Tx internal port - "
 		"app mode) on lcore %d\n", lcore_id);
 
+	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+	if (ret != 0) {
+		RTE_LOG(ERR, IPSEC,
+			"SAD cache init on lcore %u, failed with code: %d\n",
+			lcore_id, ret);
+		return;
+	}
+
 	/* Check if it's single link */
 	if (nb_links != 1) {
 		RTE_LOG(INFO, IPSEC,
@@ -978,6 +1181,20 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			ipsec_ev_vector_process(&lconf, links, &ev);
 			continue;
 		case RTE_EVENT_TYPE_ETHDEV:
+			if (is_unprotected_port(ev.mbuf->port))
+				ret = process_ipsec_ev_inbound(&lconf.inbound,
+								&lconf.rt, links, &ev);
+			else
+				ret = process_ipsec_ev_outbound(&lconf.outbound,
+								&lconf.rt, links, &ev);
+			if (ret != 1)
+				/* The pkt has been dropped or posted */
+				continue;
+			break;
+		case RTE_EVENT_TYPE_CRYPTODEV:
+			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
+			if (unlikely(ret != PKT_FORWARDED))
+				continue;
 			break;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
@@ -985,16 +1202,6 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			continue;
 		}
 
-		if (is_unprotected_port(ev.mbuf->port))
-			ret = process_ipsec_ev_inbound(&lconf.inbound,
-							&lconf.rt, &ev);
-		else
-			ret = process_ipsec_ev_outbound(&lconf.outbound,
-							&lconf.rt, &ev);
-		if (ret != 1)
-			/* The pkt has been dropped */
-			continue;
-
 		/*
 		 * Since tx internal port is available, events can be
 		 * directly enqueued to the adapter and it would be
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 396b9f9694..dc3627aacc 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1236,7 +1236,8 @@ static int
 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, uint32_t inbound,
 		struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	struct ipsec_sa *sa;
 	uint32_t i, idx;
@@ -1409,7 +1410,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 			}
 		} else {
-			rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips);
+			rc = create_lookaside_session(ips_ctx, skt_ctx,
+						      em_conf, sa, ips);
 			if (rc != 0) {
 				RTE_LOG(ERR, IPSEC_ESP,
 					"create_lookaside_session() failed\n");
@@ -1432,17 +1434,19 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 static inline int
 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
 }
 
 static inline int
 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
-	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
 }
 
 /*
@@ -1535,7 +1539,8 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
  */
 static int
 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
-		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[])
+		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	int rc;
 	struct rte_ipsec_sa_prm prm;
@@ -1577,7 +1582,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
 	if (lsa->fallback_sessions == 1) {
 		struct rte_ipsec_session *ipfs = ipsec_get_fallback_session(lsa);
 		if (ipfs->security.ses == NULL) {
-			rc = create_lookaside_session(ips_ctx, skt_ctx, lsa, ipfs);
+			rc = create_lookaside_session(ips_ctx, skt_ctx, em_conf, lsa, ipfs);
 			if (rc != 0)
 				return rc;
 		}
@@ -1593,7 +1598,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
  */
 static int
 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
-		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[])
+		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
 {
 	int32_t rc, sz;
 	uint32_t i, idx;
@@ -1631,7 +1637,7 @@ ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
 		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
 		lsa = ctx->sa + idx;
 
-		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx);
+		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx, em_conf);
 	}
 
 	return rc;
@@ -1674,7 +1680,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
 
 void
 sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf)
+	struct lcore_conf *lcore_conf,
+	const struct eventmode_conf *em_conf)
 {
 	int32_t rc;
 	const char *name;
@@ -1706,11 +1713,11 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
 			rte_exit(EXIT_FAILURE, "failed to init SAD\n");
 		RTE_LCORE_FOREACH(lcore_id)
 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
-		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
+		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
-				socket_id, ctx, ipsec_ctx);
+				socket_id, ctx, ipsec_ctx, em_conf);
 			if (rc != 0)
 				rte_exit(EXIT_FAILURE,
 					"failed to init inbound SAs\n");
@@ -1728,11 +1735,11 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
 
 		RTE_LCORE_FOREACH(lcore_id)
 			ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
-		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx);
+		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);
 
 		if (app_sa_prm.enable != 0) {
 			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
-				socket_id, ctx, ipsec_ctx);
+				socket_id, ctx, ipsec_ctx, em_conf);
 			if (rc != 0)
 				rte_exit(EXIT_FAILURE,
 					"failed to init outbound SAs\n");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 4/6] examples/ipsec-secgw: add stats for event mode
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                       ` (2 preceding siblings ...)
  2022-10-10 16:56     ` [PATCH v3 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
@ 2022-10-10 16:56     ` Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add per-core statistics (Rx/Tx) counters for the event mode worker.
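
A hypothetical illustration of the per-lcore counter pattern these
helpers rely on (the actual core_stats_update_rx()/tx() definitions
live in the application headers, not in this patch):

    #include <rte_common.h>
    #include <rte_lcore.h>

    /* One cache-aligned counter pair per lcore avoids false sharing
     * and keeps the updates lock-free.
     */
    struct worker_stats {
        uint64_t rx;
        uint64_t tx;
    } __rte_cache_aligned;

    static struct worker_stats wrkr_stats[RTE_MAX_LCORE];

    static inline void
    wrkr_stats_update_rx(uint32_t n)
    {
        wrkr_stats[rte_lcore_id()].rx += n;
    }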

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/ipsec_worker.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 6c1ffbfb24..799f73e124 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -508,7 +508,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
-	rte_pktmbuf_free(pkt);
+	free_pkts(&pkt, 1);
 	ev->mbuf = NULL;
 	return PKT_DROPPED;
 }
@@ -620,7 +620,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 
 drop_pkt_and_exit:
 	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
-	rte_pktmbuf_free(pkt);
+	free_pkts(&pkt, 1);
 	ev->mbuf = NULL;
 	return PKT_DROPPED;
 }
@@ -853,6 +853,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	pkt = vec->mbufs[0];
 
 	ev_vector_attr_init(vec);
+	core_stats_update_rx(vec->nb_elem);
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
 						      &lconf->rt, vec);
@@ -861,6 +862,7 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 						       &lconf->rt, vec);
 
 	if (likely(ret > 0)) {
+		core_stats_update_tx(vec->nb_elem);
 		vec->nb_elem = ret;
 		ret = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
 						       links[0].event_port_id, ev, 1, 0);
@@ -1181,6 +1183,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			ipsec_ev_vector_process(&lconf, links, &ev);
 			continue;
 		case RTE_EVENT_TYPE_ETHDEV:
+			core_stats_update_rx(1);
 			if (is_unprotected_port(ev.mbuf->port))
 				ret = process_ipsec_ev_inbound(&lconf.inbound,
 								&lconf.rt, links, &ev);
@@ -1202,6 +1205,7 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			continue;
 		}
 
+		core_stats_update_tx(1);
 		/*
 		 * Since tx internal port is available, events can be
 		 * directly enqueued to the adapter and it would be
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 5/6] examples/ipsec-secgw: add event vector support for lookaside
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                       ` (3 preceding siblings ...)
  2022-10-10 16:56     ` [PATCH v3 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
@ 2022-10-10 16:56     ` Volodymyr Fialko
  2022-10-10 16:56     ` [PATCH v3 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
  2022-10-10 19:02     ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Akhil Goyal
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Add vector support for the event crypto adapter in lookaside mode.
Once --event-vector is enabled, the event crypto adapter will group
processed crypto operations into an rte_event_vector event with type
RTE_EVENT_TYPE_CRYPTODEV_VECTOR.
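
As a worked example of the vector pool sizing introduced below: with
the default qp_desc_nb of 2048 descriptors and the default vector size
of 16, the adapter needs nb_elem = (2048 / 16) + 1 = 129 vectors per
queue pair, and with one queue pair per cryptodev the vector pool is
created with 129 elements.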

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 doc/guides/sample_app_ug/ipsec_secgw.rst |   3 +
 examples/ipsec-secgw/event_helper.c      |  34 ++-
 examples/ipsec-secgw/ipsec-secgw.c       |   2 +-
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec_worker.c      | 281 ++++++++++++++++++-----
 5 files changed, 264 insertions(+), 57 deletions(-)

diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index c7b87889f1..2a1aeae7c5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -94,6 +94,9 @@ The application supports two modes of operation: poll mode and event mode.
   (default vector-size is 16) and vector-tmo (default vector-tmo is 102400ns).
   By default event vectorization is disabled and it can be enabled using event-vector
   option.
+  For event device and crypto device pairs which support the capability
+  ``RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR``, vector aggregation can also be
+  enabled using the event-vector option.
 
 Additionally the event mode introduces two submodes of processing packets:
 
diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 90c5d716ff..89fb7e62a5 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -792,12 +792,15 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 static int
 eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf;
 	struct rte_event_dev_info evdev_default_conf = {0};
 	struct rte_event_port_conf port_conf = {0};
 	struct eventdev_params *eventdev_config;
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	const uint8_t nb_qp_per_cdev = 1;
 	uint8_t eventdev_id, cdev_id, n;
-	uint32_t cap;
-	int ret;
+	uint32_t cap, nb_elem;
+	int ret, socket_id;
 
 	if (!em_conf->enable_event_crypto_adapter)
 		return 0;
@@ -852,10 +855,35 @@ eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 			return ret;
 		}
 
+		memset(&queue_conf, 0, sizeof(queue_conf));
+		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) &&
+		    (em_conf->ext_params.event_vector)) {
+			queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
+			queue_conf.vector_sz = em_conf->ext_params.vector_size;
+			/*
+			 * Currently all sessions are configured with the same
+			 * response info fields, so packets will be aggregated
+			 * into the same vector. This allows sizing the vector
+			 * pool just large enough to hold all queue pair
+			 * descriptors.
+			 */
+			nb_elem = (qp_desc_nb / queue_conf.vector_sz) + 1;
+			nb_elem *= nb_qp_per_cdev;
+			socket_id = rte_cryptodev_socket_id(cdev_id);
+			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+					"QP_VEC_%u_%u", socket_id, cdev_id);
+			queue_conf.vector_mp = rte_event_vector_pool_create(
+					mp_name, nb_elem, 0,
+					queue_conf.vector_sz, socket_id);
+			if (queue_conf.vector_mp == NULL) {
+				EH_LOG_ERR("failed to create event vector pool");
+				return -ENOMEM;
+			}
+		}
+
 		/* Add crypto queue pairs to event crypto adapter */
 		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
 				-1, /* adds all the pre configured queue pairs to the instance */
-				NULL);
+				&queue_conf);
 		if (ret < 0) {
 			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
 			return ret;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 912b73e5a8..1d74aa60e5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -85,7 +85,7 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 /*
  * Configurable number of descriptors per queue pair
  */
-static uint32_t qp_desc_nb = 2048;
+uint32_t qp_desc_nb = 2048;
 
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index f02736075b..c6d11f3aac 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -145,6 +145,7 @@ extern bool per_port_pool;
 
 extern uint32_t mtu_size;
 extern uint32_t frag_tbl_sz;
+extern uint32_t qp_desc_nb;
 
 #define SS_F		(1U << 0)	/* Single SA mode */
 #define INL_PR_F	(1U << 1)	/* Inline Protocol */
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 799f73e124..2a4f039991 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -349,18 +349,11 @@ crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	}
 }
 
-static inline int
-event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
-		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+static inline void
+crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
 {
 	struct ipsec_mbuf_metadata *priv;
-	struct rte_ipsec_session *sess;
 	struct rte_crypto_op *cop;
-	struct rte_event cev;
-	int ret;
-
-	/* Get IPsec session */
-	sess = ipsec_get_primary_session(sa);
 
 	/* Get pkt private data */
 	priv = get_priv(pkt);
@@ -370,13 +363,39 @@ event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
 	crypto_op_reset(sess, &pkt, &cop, 1);
 
 	/* Update event_ptr with rte_crypto_op */
-	cev.event = 0;
-	cev.event_ptr = cop;
+	ev->event = 0;
+	ev->event_ptr = cop;
+}
+
+static inline void
+free_pkts_from_events(struct rte_event events[], uint16_t count)
+{
+	struct rte_crypto_op *cop;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		cop = events[i].event_ptr;
+		free_pkts(&cop->sym->m_src, 1);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct rte_ipsec_session *sess;
+	struct rte_event ev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	crypto_prepare_event(pkt, sess, &ev);
 
 	/* Enqueue event to crypto adapter */
 	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
-			ev_link->event_port_id, &cev, 1);
-	if (unlikely(ret <= 0)) {
+			ev_link->event_port_id, &ev, 1);
+	if (unlikely(ret != 1)) {
 		/* pkt will be freed by the caller */
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
 		return rte_errno;
@@ -448,7 +467,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -463,7 +482,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -615,7 +634,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* prepare pkt - advance start to L3 */
 	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
 
-	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
 		return PKT_POSTED;
 
 drop_pkt_and_exit:
@@ -626,15 +645,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 }
 
 static inline int
-ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
-		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
+ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t)
 {
-	struct rte_ipsec_session *sess;
 	struct rte_ether_hdr *ethhdr;
-	uint32_t sa_idx, i, j = 0;
-	uint16_t port_id = 0;
 	struct rte_mbuf *pkt;
-	struct ipsec_sa *sa;
+	uint16_t port_id = 0;
+	uint32_t i, j = 0;
 
 	/* Route IPv4 packets */
 	for (i = 0; i < t->ip4.num; i++) {
@@ -668,28 +685,90 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			free_pkts(&pkt, 1);
 	}
 
+	return j;
+}
+
+static inline int
+ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
+			    struct route_table *rt,
+			    struct ipsec_traffic *t,
+			    const struct eh_event_link_info *ev_link)
+{
+	uint32_t ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
 	/* Route ESP packets */
+	for (i = 0; i < t->ipsec.num; i++) {
+		pkt = t->ipsec.pkts[i];
+		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
+		if (unlikely(sa == NULL)) {
+			free_pkts(&pkt, 1);
+			continue;
+		}
+		sess = ipsec_get_primary_session(sa);
+		crypto_prepare_event(pkt, sess, &events[ev_len]);
+		ev_len++;
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
+					ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
+		}
+	}
+
+	return j;
+}
+
+static inline int
+ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
+		    const struct eh_event_link_info *ev_link)
+{
+	uint32_t sa_idx, ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_ether_hdr *ethhdr;
+	uint16_t port_id = 0;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
+	/* Handle IPsec packets.
+	 * For lookaside IPsec packets, submit to cryptodev queue.
+	 * For inline IPsec packets, route the packet.
+	 */
 	for (i = 0; i < t->ipsec.num; i++) {
 		/* Validate sa_idx */
 		sa_idx = t->ipsec.res[i];
 		pkt = t->ipsec.pkts[i];
-		if (unlikely(sa_idx >= sa_ctx->nb_sa))
+		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
 			free_pkts(&pkt, 1);
-		else {
-			/* Else the packet has to be protected */
-			sa = &(sa_ctx->sa[sa_idx]);
-			/* Get IPsec session */
-			sess = ipsec_get_primary_session(sa);
-			/* Allow only inline protocol for now */
-			if (unlikely(sess->type !=
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-				free_pkts(&pkt, 1);
-				continue;
-			}
+			continue;
+		}
+		/* Else the packet has to be protected */
+		sa = &(sa_ctx->sa[sa_idx]);
+		/* Get IPsec session */
+		sess = ipsec_get_primary_session(sa);
+		switch (sess->type) {
+		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+			crypto_prepare_event(pkt, sess, &events[ev_len]);
+			ev_len++;
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 			rte_security_set_pkt_metadata(sess->security.ctx,
 						sess->security.ses, pkt, NULL);
-
 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
 			port_id = sa->portid;
 
@@ -703,6 +782,22 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
 			vec->mbufs[j++] = pkt;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+			free_pkts(&pkt, 1);
+			break;
+		}
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %i (errno: %i)\n",
+				   ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
 		}
 	}
 
@@ -727,6 +822,19 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		t->ip6.data[t->ip6.num] = nlp;
 		t->ip6.pkts[(t->ip6.num)++] = pkt;
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		ipv4_pkt_l3_len_set(pkt);
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		if (ipv6_pkt_l3_len_set(pkt) != 0) {
+			free_pkts(&pkt, 1);
+			return;
+		}
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -737,7 +845,8 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 
 static inline int
 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				struct rte_event_vector *vec)
+				struct rte_event_vector *vec,
+				const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -767,12 +876,16 @@ process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
 	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	if (t.ipsec.num != 0)
+		sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);
+
+	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
 }
 
 static inline int
 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				 struct rte_event_vector *vec)
+				 struct rte_event_vector *vec,
+				 const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -795,7 +908,7 @@ process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
 	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
 }
 
 static inline int
@@ -854,12 +967,13 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 
 	ev_vector_attr_init(vec);
 	core_stats_update_rx(vec->nb_elem);
+
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
-						      &lconf->rt, vec);
+						      &lconf->rt, vec, links);
 	else
 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
-						       &lconf->rt, vec);
+						       &lconf->rt, vec, links);
 
 	if (likely(ret > 0)) {
 		core_stats_update_tx(vec->nb_elem);
@@ -899,24 +1013,19 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 }
 
 static inline int
-ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
-			   struct rte_event *ev)
+ipsec_ev_cryptodev_process_one_pkt(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
 {
 	struct rte_ether_hdr *ethhdr;
-	struct rte_crypto_op *cop;
-	struct rte_mbuf *pkt;
 	uint16_t port_id;
 	struct ip *ip;
 
-	/* Get pkt data */
-	cop = ev->event_ptr;
-	pkt = cop->sym->m_src;
-
-	/* If operation was not successful, drop the packet */
+	/* If operation was not successful, free the packet */
 	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
 		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
@@ -946,13 +1055,76 @@ ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	/* Update Ether with port's MAC addresses */
 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
 
+	ipsec_event_pre_forward(pkt, port_id);
+
+	return 0;
+}
+
+static inline void
+ipsec_ev_cryptodev_vector_process(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct eh_event_link_info *links,
+		struct rte_event *ev)
+{
+	struct rte_event_vector *vec = ev->vec;
+	const uint16_t nb_events = 1;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t enqueued;
+	int i, n = 0;
+
+	ev_vector_attr_init(vec);
+	/* Transform cop vec into pkt vec */
+	for (i = 0; i < vec->nb_elem; i++) {
+		/* Get pkt data */
+		cop = vec->ptrs[i];
+		pkt = cop->sym->m_src;
+		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+			continue;
+
+		vec->mbufs[n++] = pkt;
+		ev_vector_attr_update(vec, pkt);
+	}
+
+	if (n == 0) {
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+		return;
+	}
+
+	vec->nb_elem = n;
+	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+			links[0].event_port_id, ev, nb_events, 0);
+	if (enqueued != nb_events) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Failed to enqueue to tx, ret = %u,"
+				" errno = %i\n", enqueued, rte_errno);
+		free_pkts(vec->mbufs, vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+	} else {
+		core_stats_update_tx(n);
+	}
+}
+
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+		return PKT_DROPPED;
+
 	/* Update event */
 	ev->mbuf = pkt;
 
@@ -1199,6 +1371,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			if (unlikely(ret != PKT_FORWARDED))
 				continue;
 			break;
+		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
+			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
+			continue;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
 				ev.event_type);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH v3 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                       ` (4 preceding siblings ...)
  2022-10-10 16:56     ` [PATCH v3 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
@ 2022-10-10 16:56     ` Volodymyr Fialko
  2022-10-10 19:02     ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Akhil Goyal
  6 siblings, 0 replies; 27+ messages in thread
From: Volodymyr Fialko @ 2022-10-10 16:56 UTC (permalink / raw)
  To: dev, Radu Nicolau, Akhil Goyal
  Cc: jerinj, anoobj, suanmingm, Volodymyr Fialko

Limit the number of queue pairs to one for event lookaside mode, since
all cores use the same queue pair in this mode.
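
A minimal sketch of the resulting device setup (illustrative; the
actual configuration is performed by the existing cryptodevs_init()
path):

    #include <rte_cryptodev.h>

    /* In event lookaside mode one shared queue pair per crypto
     * device is enough, since every worker core enqueues through
     * the same qp via the adapter.
     */
    static int
    configure_single_qp(uint8_t cdev_id, int socket_id)
    {
        struct rte_cryptodev_config conf = {
            .socket_id = socket_id,
            .nb_queue_pairs = 1,
        };

        return rte_cryptodev_configure(cdev_id, &conf);
    }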

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
 examples/ipsec-secgw/ipsec-secgw.c | 56 +++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 20 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1d74aa60e5..e20bf50752 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1540,7 +1540,7 @@ add_mapping(const char *str, uint16_t cdev_id,
 }
 
 static int32_t
-add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
+add_cdev_mapping(const struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 		uint16_t qp, struct lcore_params *params)
 {
 	int32_t ret = 0;
@@ -1596,6 +1596,37 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 	return ret;
 }
 
+static uint16_t
+map_cdev_to_cores_from_config(enum eh_pkt_transfer_mode mode, int16_t cdev_id,
+		const struct rte_cryptodev_info *cdev_info,
+		uint16_t *last_used_lcore_id)
+{
+	uint16_t nb_qp = 0, i = 0, max_nb_qps;
+
+	/* For event lookaside mode all sessions are bound to a single qp.
+	 * It's enough to bind one core, since all cores will share the same qp.
+	 * Event inline mode does not use this functionality.
+	 */
+	if (mode == EH_PKT_TRANSFER_MODE_EVENT) {
+		add_cdev_mapping(cdev_info, cdev_id, nb_qp, &lcore_params[0]);
+		return 1;
+	}
+
+	/* Check if there are enough queue pairs for all configured cores */
+	max_nb_qps = RTE_MIN(nb_lcore_params, cdev_info->max_nb_queue_pairs);
+
+	while (nb_qp < max_nb_qps && i < nb_lcore_params) {
+		if (add_cdev_mapping(cdev_info, cdev_id, nb_qp,
+					&lcore_params[*last_used_lcore_id]))
+			nb_qp++;
+		(*last_used_lcore_id)++;
+		*last_used_lcore_id %= nb_lcore_params;
+		i++;
+	}
+
+	return nb_qp;
+}
+
 /* Check if the device is enabled by cryptodev_mask */
 static int
 check_cryptodev_mask(uint8_t cdev_id)
@@ -1607,13 +1638,13 @@ check_cryptodev_mask(uint8_t cdev_id)
 }
 
 static uint16_t
-cryptodevs_init(uint16_t req_queue_num)
+cryptodevs_init(enum eh_pkt_transfer_mode mode)
 {
+	struct rte_hash_parameters params = { 0 };
 	struct rte_cryptodev_config dev_conf;
 	struct rte_cryptodev_qp_conf qp_conf;
-	uint16_t idx, max_nb_qps, qp, total_nb_qps, i;
+	uint16_t idx, qp, total_nb_qps;
 	int16_t cdev_id;
-	struct rte_hash_parameters params = { 0 };
 
 	const uint64_t mseg_flag = multi_seg_required() ?
 				RTE_CRYPTODEV_FF_IN_PLACE_SGL : 0;
@@ -1654,23 +1685,8 @@ cryptodevs_init(uint16_t req_queue_num)
 				cdev_id,
 				rte_cryptodev_get_feature_name(mseg_flag));
 
-		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
-			max_nb_qps = cdev_info.max_nb_queue_pairs;
-		else
-			max_nb_qps = nb_lcore_params;
-
-		qp = 0;
-		i = 0;
-		while (qp < max_nb_qps && i < nb_lcore_params) {
-			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
-						&lcore_params[idx]))
-				qp++;
-			idx++;
-			idx = idx % nb_lcore_params;
-			i++;
-		}
 
-		qp = RTE_MIN(max_nb_qps, RTE_MAX(req_queue_num, qp));
+		qp = map_cdev_to_cores_from_config(mode, cdev_id, &cdev_info, &idx);
 		if (qp == 0)
 			continue;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 27+ messages in thread

* RE: [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode
  2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
                       ` (5 preceding siblings ...)
  2022-10-10 16:56     ` [PATCH v3 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
@ 2022-10-10 19:02     ` Akhil Goyal
  6 siblings, 0 replies; 27+ messages in thread
From: Akhil Goyal @ 2022-10-10 19:02 UTC (permalink / raw)
  To: Volodymyr Fialko, dev
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, suanmingm, Volodymyr Fialko

> Subject: [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode
> 
> Add support for lookaside event mode for ipsec-secgw example application.
> 
> * Changes since v1
> - Resolve issue with freeing IPv6 packets in case of error
> - Rebase on top of dpdk-next-crypto
> - Update release note
> 
> * Changes since v2
> - Fix compilation with old gcc
> 
Series Acked-by: Akhil Goyal <gakhil@marvell.com>

Applied to dpdk-next-crypto

Nice work!!

^ permalink raw reply	[flat|nested] 27+ messages in thread

end of thread, other threads:[~2022-10-10 19:02 UTC | newest]

Thread overview: 27+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-04 10:36 [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
2022-08-04 10:36 ` [PATCH 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
2022-08-04 10:36 ` [PATCH 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
2022-08-04 10:36 ` [PATCH 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
2022-08-05  3:26   ` Suanming Mou
2022-08-05 10:06     ` Volodymyr Fialko
2022-09-22  5:05   ` Gagandeep Singh
2022-09-22 11:07     ` Volodymyr Fialko
2022-08-04 10:36 ` [PATCH 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
2022-08-04 10:36 ` [PATCH 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
2022-08-04 10:36 ` [PATCH 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
2022-09-21 18:28 ` [PATCH 0/6] examples/ipsec-secgw: add lookaside event mode Akhil Goyal
2022-10-10 12:30 ` [PATCH v2 " Volodymyr Fialko
2022-10-10 12:30   ` [PATCH v2 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
2022-10-10 12:30   ` [PATCH v2 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
2022-10-10 12:30   ` [PATCH v2 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
2022-10-10 12:31   ` [PATCH v2 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
2022-10-10 12:31   ` [PATCH v2 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
2022-10-10 12:31   ` [PATCH v2 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
2022-10-10 16:56   ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
2022-10-10 16:56     ` [PATCH v3 1/6] examples/ipsec-secgw: add event crypto adapter init Volodymyr Fialko
2022-10-10 16:56     ` [PATCH v3 2/6] examples/ipsec-secgw: add queue for event crypto adapter Volodymyr Fialko
2022-10-10 16:56     ` [PATCH v3 3/6] examples/ipsec-secgw: add lookaside event mode Volodymyr Fialko
2022-10-10 16:56     ` [PATCH v3 4/6] examples/ipsec-secgw: add stats for " Volodymyr Fialko
2022-10-10 16:56     ` [PATCH v3 5/6] examples/ipsec-secgw: add event vector support for lookaside Volodymyr Fialko
2022-10-10 16:56     ` [PATCH v3 6/6] examples/ipsec-secgw: reduce number of QP for event lookaside Volodymyr Fialko
2022-10-10 19:02     ` [PATCH v3 0/6] examples/ipsec-secgw: add lookaside event mode Akhil Goyal
