DPDK patches and discussions
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, <konstantin.ananyev@intel.com>,
	Radu Nicolau <radu.nicolau@intel.com>,
	Akhil Goyal <gakhil@marvell.com>
Cc: <dev@dpdk.org>, <anoobj@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>
Subject: [PATCH v3 7/7] examples/ipsec-secgw: add poll mode worker for inline proto
Date: Thu, 28 Apr 2022 20:34:59 +0530	[thread overview]
Message-ID: <20220428150459.23950-7-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20220428150459.23950-1-ndabilpuram@marvell.com>

Add a separate worker thread for the case when all SAs are of type
inline protocol offload and librte_ipsec is enabled, in order to make
that case more optimal. The current default worker supports every kind
of SA, and therefore does many per-packet checks and branches on the
SA type, of which there are five.

Also make provision for choosing different poll mode workers for
different combinations of SA types, with the default being the
existing poll mode worker that supports all kinds of SAs.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 examples/ipsec-secgw/ipsec-secgw.c  |   6 +-
 examples/ipsec-secgw/ipsec-secgw.h  |  10 +
 examples/ipsec-secgw/ipsec_worker.c | 365 +++++++++++++++++++++++++++++++++++-
 examples/ipsec-secgw/ipsec_worker.h |   4 +
 examples/ipsec-secgw/sa.c           |   9 +
 5 files changed, 390 insertions(+), 4 deletions(-)
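
As a side note, BURST_TX_DRAIN_US moves from ipsec-secgw.c to
ipsec_worker.h because both new workers derive the TX drain interval
from it. A tiny sketch of that cycle arithmetic, with rte_get_tsc_hz()
stubbed to an assumed 2 GHz TSC for illustration:

#include <stdio.h>
#include <stdint.h>

#define US_PER_S 1000000UL
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Stand-in for rte_get_tsc_hz(); assume a 2 GHz TSC */
static uint64_t get_tsc_hz(void) { return 2000000000UL; }

int main(void)
{
	/* Round the per-microsecond cycle count up, then scale to ~100us */
	uint64_t drain_tsc = (get_tsc_hz() + US_PER_S - 1)
			/ US_PER_S * BURST_TX_DRAIN_US;

	printf("drain every %lu cycles (~%d us)\n",
	       (unsigned long)drain_tsc, BURST_TX_DRAIN_US);
	return 0;
}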

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 24ee6c0..4251952 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -68,8 +68,6 @@ volatile bool force_quit;
 #define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */
 #define MAX_QUEUE_PAIRS 1
 
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
 #define MAX_LCORE_PARAMS 1024
 
 /*
@@ -173,7 +171,7 @@ static uint64_t enabled_cryptodev_mask = UINT64_MAX;
 static int32_t promiscuous_on = 1;
 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
 static uint32_t nb_lcores;
-static uint32_t single_sa;
+uint32_t single_sa;
 uint32_t nb_bufs_in_pool;
 
 /*
@@ -238,6 +236,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS];
 
 bool per_port_pool;
 
+uint16_t wrkr_flags;
 /*
  * Determine is multi-segment support required:
  *  - either frame buffer size is smaller then mtu
@@ -1233,6 +1232,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf)
 			single_sa = 1;
 			single_sa_idx = ret;
 			eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
+			wrkr_flags |= SS_F;
 			printf("Configured with single SA index %u\n",
 					single_sa_idx);
 			break;
diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h
index 2edf631..f027360 100644
--- a/examples/ipsec-secgw/ipsec-secgw.h
+++ b/examples/ipsec-secgw/ipsec-secgw.h
@@ -135,6 +135,7 @@ extern uint32_t unprotected_port_mask;
 
 /* Index of SA in single mode */
 extern uint32_t single_sa_idx;
+extern uint32_t single_sa;
 
 extern volatile bool force_quit;
 
@@ -145,6 +146,15 @@ extern bool per_port_pool;
 extern uint32_t mtu_size;
 extern uint32_t frag_tbl_sz;
 
+#define SS_F		(1U << 0)	/* Single SA mode */
+#define INL_PR_F	(1U << 1)	/* Inline Protocol */
+#define INL_CR_F	(1U << 2)	/* Inline Crypto */
+#define LA_PR_F		(1U << 3)	/* Lookaside Protocol */
+#define LA_ANY_F	(1U << 4)	/* Lookaside Any */
+#define MAX_F		(LA_ANY_F << 1)
+
+extern uint16_t wrkr_flags;
+
 static inline uint8_t
 is_unprotected_port(uint16_t port_id)
 {
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8639426..65dcb51 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -17,6 +17,8 @@ struct port_drv_mode_data {
 	struct rte_security_ctx *ctx;
 };
 
+typedef void (*ipsec_worker_fn_t)(void);
+
 static inline enum pkt_type
 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
 {
@@ -1004,6 +1006,367 @@ ipsec_eventmode_worker(struct eh_conf *conf)
 	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
 }
 
+static __rte_always_inline void
+outb_inl_pro_spd_process(struct sp_ctx *sp,
+			 struct sa_ctx *sa_ctx,
+			 struct traffic_type *ip,
+			 struct traffic_type *match,
+			 struct traffic_type *mismatch,
+			 bool match_flag,
+			 struct ipsec_spd_stats *stats)
+{
+	uint32_t prev_sa_idx = UINT32_MAX;
+	struct rte_mbuf *ipsec[MAX_PKT_BURST];
+	struct rte_ipsec_session *ips;
+	uint32_t i, j, j_mis, sa_idx;
+	struct ipsec_sa *sa = NULL;
+	uint32_t ipsec_num = 0;
+	struct rte_mbuf *m;
+	uint64_t satp;
+
+	if (ip->num == 0 || sp == NULL)
+		return;
+
+	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+			ip->num, DEFAULT_MAX_CATEGORIES);
+
+	j = match->num;
+	j_mis = mismatch->num;
+
+	for (i = 0; i < ip->num; i++) {
+		m = ip->pkts[i];
+		sa_idx = ip->res[i] - 1;
+
+		if (unlikely(ip->res[i] == DISCARD)) {
+			free_pkts(&m, 1);
+
+			stats->discard++;
+		} else if (unlikely(ip->res[i] == BYPASS)) {
+			match->pkts[j++] = m;
+
+			stats->bypass++;
+		} else {
+			if (prev_sa_idx == UINT32_MAX) {
+				prev_sa_idx = sa_idx;
+				sa = &sa_ctx->sa[sa_idx];
+				ips = ipsec_get_primary_session(sa);
+				satp = rte_ipsec_sa_type(ips->sa);
+			}
+
+			if (sa_idx != prev_sa_idx) {
+				prep_process_group(sa, ipsec, ipsec_num);
+
+				/* Prepare packets for outbound */
+				rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+				/* Copy to current tr or a different tr */
+				if (SATP_OUT_IPV4(satp) == match_flag) {
+					memcpy(&match->pkts[j], ipsec,
+					       ipsec_num * sizeof(void *));
+					j += ipsec_num;
+				} else {
+					memcpy(&mismatch->pkts[j_mis], ipsec,
+					       ipsec_num * sizeof(void *));
+					j_mis += ipsec_num;
+				}
+
+				/* Update to new SA */
+				sa = &sa_ctx->sa[sa_idx];
+				ips = ipsec_get_primary_session(sa);
+				satp = rte_ipsec_sa_type(ips->sa);
+				ipsec_num = 0;
+			}
+
+			ipsec[ipsec_num++] = m;
+			stats->protect++;
+		}
+	}
+
+	if (ipsec_num) {
+		prep_process_group(sa, ipsec, ipsec_num);
+
+		/* Prepare packets for outbound */
+		rte_ipsec_pkt_process(ips, ipsec, ipsec_num);
+
+		/* Copy to current tr or a different tr */
+		if (SATP_OUT_IPV4(satp) == match_flag) {
+			memcpy(&match->pkts[j], ipsec,
+			       ipsec_num * sizeof(void *));
+			j += ipsec_num;
+		} else {
+			memcpy(&mismatch->pkts[j_mis], ipsec,
+			       ipsec_num * sizeof(void *));
+			j_mis += ipsec_num;
+		}
+	}
+	match->num = j;
+	mismatch->num = j_mis;
+}
+
+/* Poll mode worker when all SAs are of type inline protocol */
+void
+ipsec_poll_mode_wrkr_inl_pr(void)
+{
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+			/ US_PER_S * BURST_TX_DRAIN_US;
+	struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out;
+	struct rte_mbuf *pkts[MAX_PKT_BURST];
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
+	struct ipsec_core_statistics *stats;
+	struct rt_ctx *rt4_ctx, *rt6_ctx;
+	struct sa_ctx *sa_in, *sa_out;
+	struct traffic_type ip4, ip6;
+	struct lcore_rx_queue *rxql;
+	struct rte_mbuf **v4, **v6;
+	struct ipsec_traffic trf;
+	struct lcore_conf *qconf;
+	uint16_t v4_num, v6_num;
+	int32_t socket_id;
+	uint32_t lcore_id;
+	int32_t i, nb_rx;
+	uint16_t portid;
+	uint8_t queueid;
+
+	prev_tsc = 0;
+	lcore_id = rte_lcore_id();
+	qconf = &lcore_conf[lcore_id];
+	rxql = qconf->rx_queue_list;
+	socket_id = rte_lcore_to_socket_id(lcore_id);
+	stats = &core_statistics[lcore_id];
+
+	rt4_ctx = socket_ctx[socket_id].rt_ip4;
+	rt6_ctx = socket_ctx[socket_id].rt_ip6;
+
+	sp4_in = socket_ctx[socket_id].sp_ip4_in;
+	sp6_in = socket_ctx[socket_id].sp_ip6_in;
+	sa_in = socket_ctx[socket_id].sa_in;
+
+	sp4_out = socket_ctx[socket_id].sp_ip4_out;
+	sp6_out = socket_ctx[socket_id].sp_ip6_out;
+	sa_out = socket_ctx[socket_id].sa_out;
+
+	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+	if (qconf->nb_rx_queue == 0) {
+		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+			lcore_id);
+		return;
+	}
+
+	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+	for (i = 0; i < qconf->nb_rx_queue; i++) {
+		portid = rxql[i].port_id;
+		queueid = rxql[i].queue_id;
+		RTE_LOG(INFO, IPSEC,
+			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+			lcore_id, portid, queueid);
+	}
+
+	while (!force_quit) {
+		cur_tsc = rte_rdtsc();
+
+		/* TX queue buffer drain */
+		diff_tsc = cur_tsc - prev_tsc;
+
+		if (unlikely(diff_tsc > drain_tsc)) {
+			drain_tx_buffers(qconf);
+			prev_tsc = cur_tsc;
+		}
+
+		for (i = 0; i < qconf->nb_rx_queue; ++i) {
+			/* Read packets from RX queues */
+			portid = rxql[i].port_id;
+			queueid = rxql[i].queue_id;
+			nb_rx = rte_eth_rx_burst(portid, queueid,
+					pkts, MAX_PKT_BURST);
+
+			if (nb_rx <= 0)
+				continue;
+
+			core_stats_update_rx(nb_rx);
+
+			prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx);
+
+			/* Drop any IPsec traffic */
+			free_pkts(trf.ipsec.pkts, trf.ipsec.num);
+
+			if (is_unprotected_port(portid)) {
+				inbound_sp_sa(sp4_in, sa_in, &trf.ip4,
+					      trf.ip4.num,
+					      &stats->inbound.spd4);
+
+				inbound_sp_sa(sp6_in, sa_in, &trf.ip6,
+					      trf.ip6.num,
+					      &stats->inbound.spd6);
+
+				v4 = trf.ip4.pkts;
+				v4_num = trf.ip4.num;
+				v6 = trf.ip6.pkts;
+				v6_num = trf.ip6.num;
+			} else {
+				ip4.num = 0;
+				ip6.num = 0;
+
+				outb_inl_pro_spd_process(sp4_out, sa_out,
+							 &trf.ip4, &ip4, &ip6,
+							 true,
+							 &stats->outbound.spd4);
+
+				outb_inl_pro_spd_process(sp6_out, sa_out,
+							 &trf.ip6, &ip6, &ip4,
+							 false,
+							 &stats->outbound.spd6);
+				v4 = ip4.pkts;
+				v4_num = ip4.num;
+				v6 = ip6.pkts;
+				v6_num = ip6.num;
+			}
+
+			route4_pkts(rt4_ctx, v4, v4_num, 0, false);
+			route6_pkts(rt6_ctx, v6, v6_num);
+		}
+	}
+}
+
+/* Poll mode worker when all SAs are of type inline protocol
+ * and single SA mode is enabled.
+ */
+void
+ipsec_poll_mode_wrkr_inl_pr_ss(void)
+{
+	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+			/ US_PER_S * BURST_TX_DRAIN_US;
+	struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt;
+	uint64_t prev_tsc, diff_tsc, cur_tsc;
+	uint16_t sa_out_portid, sa_out_proto;
+	struct rte_ipsec_session *ips;
+	struct lcore_rx_queue *rxql;
+	struct lcore_conf *qconf;
+	struct sa_ctx *sa_out;
+	uint32_t i, nb_rx, j;
+	struct ipsec_sa *sa;
+	int32_t socket_id;
+	uint32_t lcore_id;
+	uint16_t portid;
+	uint8_t queueid;
+
+	prev_tsc = 0;
+	lcore_id = rte_lcore_id();
+	qconf = &lcore_conf[lcore_id];
+	rxql = qconf->rx_queue_list;
+	socket_id = rte_lcore_to_socket_id(lcore_id);
+
+	/* Get SA info */
+	sa_out = socket_ctx[socket_id].sa_out;
+	sa = &sa_out->sa[single_sa_idx];
+	ips = ipsec_get_primary_session(sa);
+	sa_out_portid = sa->portid;
+	if (sa->flags & IP6_TUNNEL)
+		sa_out_proto = IPPROTO_IPV6;
+	else
+		sa_out_proto = IPPROTO_IP;
+
+
+	qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
+
+	if (qconf->nb_rx_queue == 0) {
+		RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n",
+			lcore_id);
+		return;
+	}
+
+	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
+
+	for (i = 0; i < qconf->nb_rx_queue; i++) {
+		portid = rxql[i].port_id;
+		queueid = rxql[i].queue_id;
+		RTE_LOG(INFO, IPSEC,
+			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+			lcore_id, portid, queueid);
+	}
+
+	while (!force_quit) {
+		cur_tsc = rte_rdtsc();
+
+		/* TX queue buffer drain */
+		diff_tsc = cur_tsc - prev_tsc;
+
+		if (unlikely(diff_tsc > drain_tsc)) {
+			drain_tx_buffers(qconf);
+			prev_tsc = cur_tsc;
+		}
+
+		for (i = 0; i < qconf->nb_rx_queue; ++i) {
+			/* Read packets from RX queues */
+			portid = rxql[i].port_id;
+			queueid = rxql[i].queue_id;
+			nb_rx = rte_eth_rx_burst(portid, queueid,
+						 pkts, MAX_PKT_BURST);
+
+			if (nb_rx <= 0)
+				continue;
+
+			core_stats_update_rx(nb_rx);
+
+			if (is_unprotected_port(portid)) {
+				/* Nothing much to do for inbound inline
+				 * decrypted traffic.
+				 */
+				for (j = 0; j < nb_rx; j++) {
+					uint32_t ptype, proto;
+
+					pkt = pkts[j];
+					ptype = pkt->packet_type &
+						RTE_PTYPE_L3_MASK;
+					if (ptype == RTE_PTYPE_L3_IPV4)
+						proto = IPPROTO_IP;
+					else
+						proto = IPPROTO_IPV6;
+
+					send_single_packet(pkt, portid, proto);
+				}
+
+				continue;
+			}
+
+			rte_ipsec_pkt_process(ips, pkts, nb_rx);
+
+			/* Send pkts out */
+			for (j = 0; j < nb_rx; j++) {
+				pkt = pkts[j];
+
+				pkt->l2_len = RTE_ETHER_HDR_LEN;
+				send_single_packet(pkt, sa_out_portid,
+						   sa_out_proto);
+			}
+		}
+	}
+}
+
+static void
+ipsec_poll_mode_wrkr_launch(void)
+{
+	static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = {
+		[INL_PR_F]        = ipsec_poll_mode_wrkr_inl_pr,
+		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
+	};
+	ipsec_worker_fn_t fn;
+
+	if (!app_sa_prm.enable) {
+		fn = ipsec_poll_mode_worker;
+	} else {
+		fn = poll_mode_wrkrs[wrkr_flags];
+
+		/* Fall back to the default worker supporting all SA types */
+		if (!fn)
+			fn = ipsec_poll_mode_worker;
+	}
+
+	/* Launch worker */
+	(*fn)();
+}
+
 int ipsec_launch_one_lcore(void *args)
 {
 	struct eh_conf *conf;
@@ -1012,7 +1375,7 @@ int ipsec_launch_one_lcore(void *args)
 
 	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
 		/* Run in poll mode */
-		ipsec_poll_mode_worker();
+		ipsec_poll_mode_wrkr_launch();
 	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
 		/* Run in event mode */
 		ipsec_eventmode_worker(conf);
diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
index 7f21440..315f3d6 100644
--- a/examples/ipsec-secgw/ipsec_worker.h
+++ b/examples/ipsec-secgw/ipsec_worker.h
@@ -13,6 +13,8 @@
 
 /* Configure how many packets ahead to prefetch, when reading packets */
 #define PREFETCH_OFFSET	3
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
 enum pkt_type {
 	PKT_TYPE_PLAIN_IPV4 = 1,
 	PKT_TYPE_IPSEC_IPV4,
@@ -42,6 +44,8 @@ struct lcore_conf_ev_tx_int_port_wrkr {
 } __rte_cache_aligned;
 
 void ipsec_poll_mode_worker(void);
+void ipsec_poll_mode_wrkr_inl_pr(void);
+void ipsec_poll_mode_wrkr_inl_pr_ss(void);
 
 int ipsec_launch_one_lcore(void *args);
 
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index e8f2598..13b9113 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -936,6 +936,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
 	}
 
+	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+		wrkr_flags |= INL_CR_F;
+	else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+		wrkr_flags |= INL_PR_F;
+	else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+		wrkr_flags |= LA_PR_F;
+	else
+		wrkr_flags |= LA_ANY_F;
+
 	nb_crypto_sessions++;
 	*ri = *ri + 1;
 }
-- 
2.8.4


Thread overview: 37+ messages
2022-03-22 17:58 [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
2022-04-14 15:43   ` Ananyev, Konstantin
2022-03-22 17:58 ` [PATCH 3/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 4/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
2022-03-22 17:58 ` [PATCH 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
2022-04-13  6:13 ` [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Kumar Dabilpuram
2022-04-14 14:07 ` Ananyev, Konstantin
2022-04-19 13:56   ` Nithin Kumar Dabilpuram
2022-04-20 10:42     ` Ananyev, Konstantin
2022-04-21 13:31 ` [PATCH v2 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-21 13:31   ` [PATCH v2 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-04-21 13:31   ` [PATCH v2 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
2022-04-21 13:31   ` [PATCH v2 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
2022-04-21 13:31   ` [PATCH v2 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
2022-04-21 13:31   ` [PATCH v2 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
2022-04-21 13:31   ` [PATCH v2 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
2022-04-28 15:04 ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Dabilpuram
2022-04-28 15:04   ` [PATCH v3 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-04-28 15:04   ` [PATCH v3 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
2022-04-28 15:04   ` [PATCH v3 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
2022-04-28 15:04   ` [PATCH v3 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
2022-04-28 15:04   ` [PATCH v3 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
2022-04-28 15:04   ` Nithin Dabilpuram [this message]
2022-04-29 10:23   ` [PATCH v3 1/7] examples/ipsec-secgw: move fast path helper functions Nithin Kumar Dabilpuram
2022-04-29 10:29   ` Akhil Goyal
2022-04-29 20:44 ` [PATCH v4 " Nithin Dabilpuram
2022-04-29 20:44   ` [PATCH v4 2/7] examples/ipsec-secgw: disable Tx chksum offload for inline Nithin Dabilpuram
2022-05-01 17:10     ` Konstantin Ananyev
2022-04-29 20:44   ` [PATCH v4 3/7] examples/ipsec-secgw: use HW parsed packet type in poll mode Nithin Dabilpuram
2022-04-29 20:44   ` [PATCH v4 4/7] examples/ipsec-secgw: allow larger burst size for vectors Nithin Dabilpuram
2022-04-29 20:44   ` [PATCH v4 5/7] examples/ipsec-secgw: get security context from lcore conf Nithin Dabilpuram
2022-04-29 20:44   ` [PATCH v4 6/7] examples/ipsec-secgw: update eth header during route lookup Nithin Dabilpuram
2022-04-29 20:44   ` [PATCH v4 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Nithin Dabilpuram
2022-05-11 19:34   ` [PATCH v4 1/7] examples/ipsec-secgw: move fast path helper functions Akhil Goyal
