From: Volodymyr Fialko <vfialko@marvell.com>
To: <dev@dpdk.org>, Radu Nicolau <radu.nicolau@intel.com>,
	Akhil Goyal <gakhil@marvell.com>
Cc: <jerinj@marvell.com>, <anoobj@marvell.com>,
	<suanmingm@nvidia.com>, Volodymyr Fialko <vfialko@marvell.com>
Subject: [PATCH v2 5/6] examples/ipsec-secgw: add event vector support for lookaside
Date: Mon, 10 Oct 2022 14:31:01 +0200
Message-ID: <20221010123102.3962719-6-vfialko@marvell.com>
In-Reply-To: <20221010123102.3962719-1-vfialko@marvell.com>

Add vector support for the event crypto adapter in lookaside mode.
Once --event-vector is enabled, the event crypto adapter groups processed
crypto operations into rte_event_vector events with type
RTE_EVENT_TYPE_CRYPTODEV_VECTOR.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
---
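Note for reviewers (not part of the commit message): below is a minimal
sketch of how a worker can unpack the RTE_EVENT_TYPE_CRYPTODEV_VECTOR
events produced in this mode. The rte_* calls are the public DPDK API;
handle_crypto_vector_event() and the forwarding step are illustrative
placeholders, not code from this patch.

#include <rte_crypto.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static void
handle_crypto_vector_event(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	uint16_t i;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Each vector element is a completed crypto op; the
		 * source mbuf is reachable through the sym op. */
		struct rte_crypto_op *cop = vec->ptrs[i];
		struct rte_mbuf *pkt = cop->sym->m_src;

		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			rte_pktmbuf_free(pkt);
			continue;
		}
		/* ... post-process and forward pkt ... */
	}
	/* Return the vector to the pool it was allocated from. */
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}
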
 doc/guides/sample_app_ug/ipsec_secgw.rst |   3 +
 examples/ipsec-secgw/event_helper.c      |  34 ++-
 examples/ipsec-secgw/ipsec-secgw.c       |   2 +-
 examples/ipsec-secgw/ipsec-secgw.h       |   1 +
 examples/ipsec-secgw/ipsec_worker.c      | 281 ++++++++++++++++++-----
 5 files changed, 264 insertions(+), 57 deletions(-)
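
A hypothetical invocation for exercising this mode (core list, device
BDFs and the config file path are placeholders; the remaining options
are the existing ipsec-secgw event-mode options):

  dpdk-ipsec-secgw -l 0-3 -a <eth_dev_bdf> -a <crypto_dev_bdf> -- \
        -p 0x3 -P -f /path/to/config.cfg \
        --transfer-mode event --event-schedule-type parallel \
        --event-vector --vector-size 16

If the event device and crypto device pair reports
RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR, completed operations reach
the workers as RTE_EVENT_TYPE_CRYPTODEV_VECTOR events; otherwise they
continue to arrive one event per crypto operation.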

diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index c7b87889f1..2a1aeae7c5 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -94,6 +94,9 @@ The application supports two modes of operation: poll mode and event mode.
   (default vector-size is 16) and vector-tmo (default vector-tmo is 102400ns).
   By default event vectorization is disabled and it can be enabled using event-vector
   option.
+  For event device and crypto device pairs which support the capability
+  ``RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR``, vector aggregation can also
+  be enabled using the event-vector option.
 
 Additionally the event mode introduces two submodes of processing packets:
 
diff --git a/examples/ipsec-secgw/event_helper.c b/examples/ipsec-secgw/event_helper.c
index 90c5d716ff..89fb7e62a5 100644
--- a/examples/ipsec-secgw/event_helper.c
+++ b/examples/ipsec-secgw/event_helper.c
@@ -792,12 +792,15 @@ eh_start_eventdev(struct eventmode_conf *em_conf)
 static int
 eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 {
+	struct rte_event_crypto_adapter_queue_conf queue_conf;
 	struct rte_event_dev_info evdev_default_conf = {0};
 	struct rte_event_port_conf port_conf = {0};
 	struct eventdev_params *eventdev_config;
+	char mp_name[RTE_MEMPOOL_NAMESIZE];
+	const uint8_t nb_qp_per_cdev = 1;
 	uint8_t eventdev_id, cdev_id, n;
-	uint32_t cap;
-	int ret;
+	uint32_t cap, nb_elem;
+	int ret, socket_id;
 
 	if (!em_conf->enable_event_crypto_adapter)
 		return 0;
@@ -852,10 +855,35 @@ eh_initialize_crypto_adapter(struct eventmode_conf *em_conf)
 			return ret;
 		}
 
+		memset(&queue_conf, 0, sizeof(queue_conf));
+		if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR) &&
+		    (em_conf->ext_params.event_vector)) {
+			queue_conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
+			queue_conf.vector_sz = em_conf->ext_params.vector_size;
+			/*
+			 * Currently all sessions are configured with the same
+			 * response info fields, so packets will be aggregated
+			 * into the same vector. This allows sizing the vector
+			 * pool to just hold all queue pair descriptors.
+			 */
+			nb_elem = (qp_desc_nb / queue_conf.vector_sz) + 1;
+			nb_elem *= nb_qp_per_cdev;
+			socket_id = rte_cryptodev_socket_id(cdev_id);
+			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+					"QP_VEC_%u_%u", socket_id, cdev_id);
+			queue_conf.vector_mp = rte_event_vector_pool_create(
+					mp_name, nb_elem, 0,
+					queue_conf.vector_sz, socket_id);
+			if (queue_conf.vector_mp == NULL) {
+				EH_LOG_ERR("failed to create event vector pool");
+				return -ENOMEM;
+			}
+		}
+
 		/* Add crypto queue pairs to event crypto adapter */
 		ret = rte_event_crypto_adapter_queue_pair_add(cdev_id, eventdev_id,
 				-1, /* adds all the pre configured queue pairs to the instance */
-				NULL);
+				&queue_conf);
 		if (ret < 0) {
 			EH_LOG_ERR("Failed to add queue pairs to event crypto adapter %d", ret);
 			return ret;
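
Sizing note: with the application defaults (qp_desc_nb = 2048
descriptors per queue pair, vector-size = 16), the computation above
gives nb_elem = 2048 / 16 + 1 = 129 vectors, and with nb_qp_per_cdev = 1
the pool holds 129 vectors per crypto device, enough to cover all
in-flight queue pair descriptors even when every vector is filled
completely.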
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 912b73e5a8..1d74aa60e5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -85,7 +85,7 @@ static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
 /*
  * Configurable number of descriptors per queue pair
  */
-static uint32_t qp_desc_nb = 2048;
+uint32_t qp_desc_nb = 2048;
 
 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
 		(addr)->addr_bytes[0], (addr)->addr_bytes[1], \
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index f02736075b..c6d11f3aac 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -145,6 +145,7 @@ extern bool per_port_pool;
 
 extern uint32_t mtu_size;
 extern uint32_t frag_tbl_sz;
+extern uint32_t qp_desc_nb;
 
 #define SS_F		(1U << 0)	/* Single SA mode */
 #define INL_PR_F	(1U << 1)	/* Inline Protocol */
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 034103bdf6..3b7f7f4928 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -349,18 +349,11 @@ crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	}
 }
 
-static inline int
-event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
-		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+static inline void
+crypto_prepare_event(struct rte_mbuf *pkt, struct rte_ipsec_session *sess, struct rte_event *ev)
 {
 	struct ipsec_mbuf_metadata *priv;
-	struct rte_ipsec_session *sess;
 	struct rte_crypto_op *cop;
-	struct rte_event cev;
-	int ret;
-
-	/* Get IPsec session */
-	sess = ipsec_get_primary_session(sa);
 
 	/* Get pkt private data */
 	priv = get_priv(pkt);
@@ -370,13 +363,39 @@ event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
 	crypto_op_reset(sess, &pkt, &cop, 1);
 
 	/* Update event_ptr with rte_crypto_op */
-	cev.event = 0;
-	cev.event_ptr = cop;
+	ev->event = 0;
+	ev->event_ptr = cop;
+}
+
+static inline void
+free_pkts_from_events(struct rte_event events[], uint16_t count)
+{
+	struct rte_crypto_op *cop;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		cop = events[i].event_ptr;
+		free_pkts(&cop->sym->m_src, 1);
+	}
+}
+
+static inline int
+event_crypto_enqueue(struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct rte_ipsec_session *sess;
+	struct rte_event ev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	crypto_prepare_event(pkt, sess, &ev);
 
 	/* Enqueue event to crypto adapter */
 	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
-			ev_link->event_port_id, &cev, 1);
-	if (unlikely(ret <= 0)) {
+			ev_link->event_port_id, &ev, 1);
+	if (unlikely(ret != 1)) {
 		/* pkt will be freed by the caller */
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
 		return rte_errno;
@@ -448,7 +467,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -463,7 +482,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
 			goto drop_pkt_and_exit;
 		}
 
-		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+		if (unlikely(event_crypto_enqueue(pkt, sa, ev_link)))
 			goto drop_pkt_and_exit;
 
 		return PKT_POSTED;
@@ -615,7 +634,7 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 	/* prepare pkt - advance start to L3 */
 	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
 
-	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+	if (likely(event_crypto_enqueue(pkt, sa, ev_link) == 0))
 		return PKT_POSTED;
 
 drop_pkt_and_exit:
@@ -626,15 +645,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
 }
 
 static inline int
-ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
-		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
+ipsec_ev_route_ip_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t)
 {
-	struct rte_ipsec_session *sess;
 	struct rte_ether_hdr *ethhdr;
-	uint32_t sa_idx, i, j = 0;
-	uint16_t port_id = 0;
 	struct rte_mbuf *pkt;
-	struct ipsec_sa *sa;
+	uint16_t port_id = 0;
+	uint32_t i, j = 0;
 
 	/* Route IPv4 packets */
 	for (i = 0; i < t->ip4.num; i++) {
@@ -668,28 +685,90 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			free_pkts(&pkt, 1);
 	}
 
+	return j;
+}
+
+static inline int
+ipsec_ev_inbound_route_pkts(struct rte_event_vector *vec,
+			    struct route_table *rt,
+			    struct ipsec_traffic *t,
+			    const struct eh_event_link_info *ev_link)
+{
+	uint32_t ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
 	/* Route ESP packets */
+	for (i = 0; i < t->ipsec.num; i++) {
+		pkt = t->ipsec.pkts[i];
+		sa = ipsec_mask_saptr(t->ipsec.saptr[i]);
+		if (unlikely(sa == NULL)) {
+			free_pkts(&pkt, 1);
+			continue;
+		}
+		sess = ipsec_get_primary_session(sa);
+		crypto_prepare_event(pkt, sess, &events[ev_len]);
+		ev_len++;
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %u (errno: %i)\n",
+					ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
+		}
+	}
+
+	return j;
+}
+
+static inline int
+ipsec_ev_outbound_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
+		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx,
+		    const struct eh_event_link_info *ev_link)
+{
+	uint32_t sa_idx, ret, i, j, ev_len = 0;
+	struct rte_event events[MAX_PKTS];
+	struct rte_ipsec_session *sess;
+	struct rte_ether_hdr *ethhdr;
+	uint16_t port_id = 0;
+	struct rte_mbuf *pkt;
+	struct ipsec_sa *sa;
+
+	j = ipsec_ev_route_ip_pkts(vec, rt, t);
+
+	/* Handle IPsec packets.
+	 * For lookaside IPsec packets, submit to cryptodev queue.
+	 * For inline IPsec packets, route the packet.
+	 */
 	for (i = 0; i < t->ipsec.num; i++) {
 		/* Validate sa_idx */
 		sa_idx = t->ipsec.res[i];
 		pkt = t->ipsec.pkts[i];
-		if (unlikely(sa_idx >= sa_ctx->nb_sa))
+		if (unlikely(sa_idx >= sa_ctx->nb_sa)) {
 			free_pkts(&pkt, 1);
-		else {
-			/* Else the packet has to be protected */
-			sa = &(sa_ctx->sa[sa_idx]);
-			/* Get IPsec session */
-			sess = ipsec_get_primary_session(sa);
-			/* Allow only inline protocol for now */
-			if (unlikely(sess->type !=
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-				free_pkts(&pkt, 1);
-				continue;
-			}
+			continue;
+		}
+		/* Else the packet has to be protected */
+		sa = &(sa_ctx->sa[sa_idx]);
+		/* Get IPsec session */
+		sess = ipsec_get_primary_session(sa);
+		switch (sess->type) {
+		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+			rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+			crypto_prepare_event(pkt, sess, &events[ev_len]);
+			ev_len++;
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
 			rte_security_set_pkt_metadata(sess->security.ctx,
 						sess->security.ses, pkt, NULL);
-
 			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
 			port_id = sa->portid;
 
@@ -703,6 +782,22 @@ ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
 			ipsec_event_pre_forward(pkt, port_id);
 			ev_vector_attr_update(vec, pkt);
 			vec->mbufs[j++] = pkt;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC, "SA type not supported\n");
+			free_pkts(&pkt, 1);
+			break;
+		}
+	}
+
+	if (ev_len) {
+		ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+				ev_link->event_port_id, events, ev_len);
+		if (ret < ev_len) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue events: %u (errno: %i)\n",
+				   ev_len, rte_errno);
+			free_pkts_from_events(&events[ret], ev_len - ret);
+			return -rte_errno;
 		}
 	}
 
@@ -727,6 +822,19 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 		t->ip6.data[t->ip6.num] = nlp;
 		t->ip6.pkts[(t->ip6.num)++] = pkt;
 		break;
+	case PKT_TYPE_IPSEC_IPV4:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		ipv4_pkt_l3_len_set(pkt);
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		if (ipv6_pkt_l3_len_set(pkt) != 0) {
+			free_pkts(&pkt, 1);
+			return;
+		}
+		t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+		break;
 	default:
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
 			   type);
@@ -737,7 +845,8 @@ classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
 
 static inline int
 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				struct rte_event_vector *vec)
+				struct rte_event_vector *vec,
+				const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -767,12 +876,16 @@ process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
 	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	if (t.ipsec.num != 0)
+		sad_lookup(&ctx->sa_ctx->sad, t.ipsec.pkts, t.ipsec.saptr, t.ipsec.num);
+
+	return ipsec_ev_inbound_route_pkts(vec, rt, &t, ev_link);
 }
 
 static inline int
 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
-				 struct rte_event_vector *vec)
+				 struct rte_event_vector *vec,
+				 const struct eh_event_link_info *ev_link)
 {
 	struct ipsec_traffic t;
 	struct rte_mbuf *pkt;
@@ -795,7 +908,7 @@ process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
 	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
 	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
 
-	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
+	return ipsec_ev_outbound_route_pkts(vec, rt, &t, ctx->sa_ctx, ev_link);
 }
 
 static inline int
@@ -854,12 +967,13 @@ ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 
 	ev_vector_attr_init(vec);
 	core_stats_update_rx(vec->nb_elem);
+
 	if (is_unprotected_port(pkt->port))
 		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
-						      &lconf->rt, vec);
+						      &lconf->rt, vec, links);
 	else
 		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
-						       &lconf->rt, vec);
+						       &lconf->rt, vec, links);
 
 	if (likely(ret > 0)) {
 		core_stats_update_tx(vec->nb_elem);
@@ -899,24 +1013,19 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
 }
 
 static inline int
-ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
-			   struct rte_event *ev)
+ipsec_ev_cryptodev_process_one_pkt(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct rte_crypto_op *cop, struct rte_mbuf *pkt)
 {
 	struct rte_ether_hdr *ethhdr;
-	struct rte_crypto_op *cop;
-	struct rte_mbuf *pkt;
 	uint16_t port_id;
 	struct ip *ip;
 
-	/* Get pkt data */
-	cop = ev->event_ptr;
-	pkt = cop->sym->m_src;
-
-	/* If operation was not successful, drop the packet */
+	/* If operation was not successful, free the packet */
 	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
 		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
@@ -946,13 +1055,76 @@ ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
 	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
 		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
 		free_pkts(&pkt, 1);
-		return PKT_DROPPED;
+		return -1;
 	}
 
 	/* Update Ether with port's MAC addresses */
 	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
 	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
 
+	ipsec_event_pre_forward(pkt, port_id);
+
+	return 0;
+}
+
+static inline void
+ipsec_ev_cryptodev_vector_process(
+		const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+		const struct eh_event_link_info *links,
+		struct rte_event *ev)
+{
+	struct rte_event_vector *vec = ev->vec;
+	const uint16_t nb_events = 1;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t enqueued;
+	int i, n = 0;
+
+	ev_vector_attr_init(vec);
+	/* Transform cop vec into pkt vec */
+	for (i = 0; i < vec->nb_elem; i++) {
+		/* Get pkt data */
+		cop = vec->ptrs[i];
+		pkt = cop->sym->m_src;
+		if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+			continue;
+
+		vec->mbufs[n++] = pkt;
+		ev_vector_attr_update(vec, pkt);
+	}
+
+	if (n == 0) {
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+		return;
+	}
+
+	vec->nb_elem = n;
+	enqueued = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
+			links[0].event_port_id, ev, nb_events, 0);
+	if (enqueued != nb_events) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Failed to enqueue to tx, ret = %u,"
+				" errno = %i\n", enqueued, rte_errno);
+		free_pkts(vec->mbufs, vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(vec), vec);
+	} else {
+		core_stats_update_tx(n);
+	}
+}
+
+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	if (ipsec_ev_cryptodev_process_one_pkt(lconf, cop, pkt))
+		return PKT_DROPPED;
+
 	/* Update event */
 	ev->mbuf = pkt;
 
@@ -1199,6 +1371,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 			if (unlikely(ret != PKT_FORWARDED))
 				continue;
 			break;
+		case RTE_EVENT_TYPE_CRYPTODEV_VECTOR:
+			ipsec_ev_cryptodev_vector_process(&lconf, links, &ev);
+			continue;
 		default:
 			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
 				ev.event_type);
-- 
2.25.1


